Update to work with the latest Marvel Unlimited URL schemes, and add an -a flag to download all issues after the given issue in one run.

This commit is contained in:
David Gillespie 2024-09-29 01:15:59 -06:00
parent ae08fb893a
commit 3f5ee7116d
2 changed files with 43 additions and 14 deletions

View File

@@ -14,14 +14,17 @@ class Client:
self.session.headers.update({
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
})
self.base = 'https://read-api.marvel.com/'
self.base = 'https://bifrost.marvel.com/'
def set_cookies(self, cookies):
self.session.cookies.update(cookies)
def get_id(self, url):
r = self.session.get(url)
regex = r'digital_comic_id : "(([0-9]+))"'
regex = r'"digitalComicID":(([0-9]+))'
print(f"SEARCH")
print(re.search(regex, r.text).groups())
return re.search(regex, r.text).group(1)
def make_call(self, epoint, params=None):
@@ -29,14 +32,19 @@ class Client:
r.raise_for_status()
return r
def get_next_comic(self, id):
self.session.headers.update({'Referer': 'https://read.marvel.com/'})
r = self.make_call(f'v1/catalog/digital-comics/metadata/{id}?')
return r.json()['data']['results'][0].get('prev_next_issue',{}).get('next_issue_meta',{}).get('id')
def get_comic_meta(self, id):
self.session.headers.update({'Referer': 'https://read.marvel.com/'})
r = self.make_call('issue/v1/digitalcomics/'+id+'?')
r = self.make_call(f'v1/catalog/digital-comics/metadata/{id}?')
return r.json()['data']['results'][0]['issue_meta']
def get_comic(self, id):
params={'rand': randint(10000, 99999)}
r = self.make_call('asset/v1/digitalcomics/'+id+'?', params=params)
r = self.make_call(f'v1/catalog/digital-comics/web/assets/{id}?', params=params)
j = r.json()['data']['results'][0]
if not j['auth_state']['subscriber']:
raise IneligibleError('Marvel Unlimited subscription required.')

41
mur.py
View File

@@ -64,6 +64,12 @@ def parse_args():
help="Write comic's metadata to JSON file.",
action='store_true'
)
parser.add_argument(
'-a', '--all',
help="Download all issues in series",
required=False,
action='store_true'
)
return parser.parse_args()
def parse_cookies(cd, out_cookies={}):
@@ -94,6 +100,7 @@ def check_url(url):
for regex in regexes:
match = re.match(regex, url)
if match:
print(match.groups())
return match.group(1), match.group(2)
def download(urls, tmp_dir, cur=0):
@@ -132,6 +139,8 @@ def err(e, cur, tot):
if cur == tot:
sys.exit(1)
# def download_and_save(id):
def main():
if hasattr(sys, 'frozen'):
cd = os.path.dirname(sys.executable)
@@ -144,23 +153,35 @@ def main():
args = parse_args()
tot = len(args.url)
cur = 0
for url in args.url:
urls = [] + args.url
for url in urls:
cur += 1
try:
print("Comic {} of {}:".format(cur, tot))
try:
type, id = check_url(url)
except TypeError:
err('Invalid URL: '+str(url), cur, tot)
continue
if type == "www":
id = client.get_id(url)
if isinstance(url, str):
try:
type, id = check_url(url)
except TypeError:
err('Invalid URL: '+str(url), cur, tot)
continue
if type == "www":
id = client.get_id(url)
else:
id = url
fmt = args.format
meta = client.get_comic_meta(id)
if args.all:
next_id = client.get_next_comic(id)
if next_id:
urls.append(next_id)
title = meta['title']
title_s = sanitize(title)
print(str(title) + "\n")
abs = os.path.join(dl_dir, '{}.{}'.format(title_s, fmt))
book_dir = f"{dl_dir}/{title_s.split(' #')[0]}"
if not os.path.isdir(book_dir):
os.makedirs(book_dir)
abs = os.path.join(book_dir, '{}.{}'.format(title_s, fmt))
if exist_check(abs):
err('Comic already exists locally.', cur, tot)
continue
@@ -189,7 +210,7 @@ def main():
make_cbz(abs, images)
if args.meta:
print("Writing metadata to JSON file...")
meta_abs = os.path.join(dl_dir, '{}_meta.json'.format(title_s))
meta_abs = os.path.join(book_dir, '{}_meta.json'.format(title_s))
write_meta(meta_abs, meta)
for i in images:
os.remove(i)