update to work with latest Marvel Unlimited URL schemes, and add -a flag to download all issues after the given issue in a run.
parent ae08fb893a
commit 3f5ee7116d
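In practice this means a run can now start from a single issue and walk the series forward: `python mur.py -a <issue URL>` downloads the given issue and then every issue published after it. (The exact URL patterns accepted are defined by check_url in mur.py, outside the lines shown below.)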
api/api.py (16 lines changed)
@@ -14,29 +14,37 @@ class Client:
         self.session.headers.update({
             'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
         })
-        self.base = 'https://read-api.marvel.com/'
+        self.base = 'https://bifrost.marvel.com/'
 
     def set_cookies(self, cookies):
         self.session.cookies.update(cookies)
 
     def get_id(self, url):
         r = self.session.get(url)
-        regex = r'digital_comic_id : "(([0-9]+))"'
+        regex = r'"digitalComicID":(([0-9]+))'
+        print(f"SEARCH")
+        print(re.search(regex, r.text).groups())
         return re.search(regex, r.text).group(1)
 
     def make_call(self, epoint, params=None):
         r = self.session.get(self.base+epoint, params=params)
         r.raise_for_status()
         return r
 
+    def get_next_comic(self, id):
+        self.session.headers.update({'Referer': 'https://read.marvel.com/'})
+        r = self.make_call(f'v1/catalog/digital-comics/metadata/{id}?')
+        return r.json()['data']['results'][0].get('prev_next_issue',{}).get('next_issue_meta',{}).get('id')
+
     def get_comic_meta(self, id):
         self.session.headers.update({'Referer': 'https://read.marvel.com/'})
-        r = self.make_call('issue/v1/digitalcomics/'+id+'?')
+        r = self.make_call(f'v1/catalog/digital-comics/metadata/{id}?')
         return r.json()['data']['results'][0]['issue_meta']
 
     def get_comic(self, id):
         params={'rand': randint(10000, 99999)}
-        r = self.make_call('asset/v1/digitalcomics/'+id+'?', params=params)
+        r = self.make_call(f'v1/catalog/digital-comics/web/assets/{id}?', params=params)
         j = r.json()['data']['results'][0]
         if not j['auth_state']['subscriber']:
             raise IneligibleError('Marvel Unlimited subscription required.')
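The chained .get() lookups in the new get_next_comic() are what let an --all run stop cleanly at the end of a series: when the metadata payload carries no next issue, the lookup falls through to None instead of raising KeyError. A minimal standalone sketch of that behaviour; the sample payloads are hypothetical stand-ins shaped only by the field names visible in the diff, not captured API output:

    # Hypothetical stand-ins for the bifrost metadata payload; only the field
    # names visible in the diff above are real, the values are invented.
    last_issue = {'issue_meta': {'title': 'Example #5'}}   # no prev_next_issue
    mid_issue = {
        'issue_meta': {'title': 'Example #4'},
        'prev_next_issue': {'next_issue_meta': {'id': 12345}},
    }

    for result in (last_issue, mid_issue):
        # Same chained lookup as get_next_comic(): each .get() defaults to {},
        # so a missing level yields None rather than a KeyError.
        next_id = (result.get('prev_next_issue', {})
                         .get('next_issue_meta', {})
                         .get('id'))
        print(next_id)  # None for the last issue, then 12345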
mur.py (41 lines changed)
@@ -64,6 +64,12 @@ def parse_args():
         help="Write comic's metadata to JSON file.",
         action='store_true'
     )
+    parser.add_argument(
+        '-a', '--all',
+        help="Download all issues in series",
+        required=False,
+        action='store_true'
+    )
     return parser.parse_args()
 
 def parse_cookies(cd, out_cookies={}):

@@ -94,6 +100,7 @@ def check_url(url):
     for regex in regexes:
         match = re.match(regex, url)
         if match:
+            print(match.groups())
             return match.group(1), match.group(2)
 
 def download(urls, tmp_dir, cur=0):

@@ -131,6 +138,8 @@ def err(e, cur, tot):
     print(e)
     if cur == tot:
         sys.exit(1)
 
+# def download_and_save(id):
+
 def main():
     if hasattr(sys, 'frozen'):

@@ -144,23 +153,35 @@ def main():
     args = parse_args()
     tot = len(args.url)
     cur = 0
-    for url in args.url:
+    urls = [] + args.url
+
+    for url in urls:
         cur += 1
         try:
             print("Comic {} of {}:".format(cur, tot))
-            try:
-                type, id = check_url(url)
-            except TypeError:
-                err('Invalid URL: '+str(url), cur, tot)
-                continue
-            if type == "www":
-                id = client.get_id(url)
+            if isinstance(url, str):
+                try:
+                    type, id = check_url(url)
+                except TypeError:
+                    err('Invalid URL: '+str(url), cur, tot)
+                    continue
+                if type == "www":
+                    id = client.get_id(url)
+            else:
+                id = url
             fmt = args.format
             meta = client.get_comic_meta(id)
+            if args.all:
+                next_id = client.get_next_comic(id)
+                if next_id:
+                    urls.append(next_id)
             title = meta['title']
             title_s = sanitize(title)
             print(str(title) + "\n")
-            abs = os.path.join(dl_dir, '{}.{}'.format(title_s, fmt))
+            book_dir = f"{dl_dir}/{title_s.split(' #')[0]}"
+            if not os.path.isdir(book_dir):
+                os.makedirs(book_dir)
+            abs = os.path.join(book_dir, '{}.{}'.format(title_s, fmt))
             if exist_check(abs):
                 err('Comic already exists locally.', cur, tot)
                 continue

@@ -189,7 +210,7 @@ def main():
             make_cbz(abs, images)
         if args.meta:
             print("Writing metadata to JSON file...")
-            meta_abs = os.path.join(dl_dir, '{}_meta.json'.format(title_s))
+            meta_abs = os.path.join(book_dir, '{}_meta.json'.format(title_s))
             write_meta(meta_abs, meta)
         for i in images:
             os.remove(i)
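One Python detail carries the new -a traversal: a list iterator picks up items appended while the loop is running, so urls keeps feeding itself next-issue IDs until get_next_comic() returns None, and the isinstance(url, str) check separates the original URLs from the numeric IDs appended later. A self-contained sketch of that pattern, with an invented chain mapping standing in for client.get_next_comic():

    # The 'chain' mapping is invented for illustration; in mur.py the next ID
    # comes from client.get_next_comic().
    chain = {'https://example.com/issue-1': 1002, 1002: 1003}

    queue = ['https://example.com/issue-1']
    for item in queue:
        if isinstance(item, str):
            print('original URL:', item)   # would go through check_url()
        else:
            print('appended ID:', item)    # already a comic ID, used directly
        next_id = chain.get(item)
        if next_id is not None:
            # Appending mid-iteration is safe: the for loop will reach it.
            queue.append(next_id)

    # Output walks the whole chain: issue-1, then 1002, then 1003.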