Added zero-padding to .jpg files

This commit is contained in:
christopher-owen 2020-02-20 05:48:30 +00:00 committed by GitHub
parent ee4df84a9c
commit ae08fb893a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

296
mur.py
View File

@ -19,7 +19,7 @@ from api.exceptions import IneligibleError
# Shared API client instance used by every download/metadata call below.
client = api.Client()
def print_title(): def print_title():
print(""" print("""
_____ _____ _____ _____ _____ _____
| | | | __ | | | | | __ |
| | | | | | -| | | | | | | -|
@ -27,166 +27,178 @@ def print_title():
""") """)
def get_os():
    """Return True when running on Windows, False otherwise.

    Replaces the `if cond: return True / return False` anti-pattern with a
    direct boolean expression.
    """
    return platform.system() == 'Windows'
def set_con_title():
    """Set the console window title (Windows `title` cmd vs. xterm escape)."""
    if get_os():
        os.system('title MUR R1 (by Sorrow446)')
        return
    sys.stdout.write('\x1b]2;MUR R1 (by Sorrow446)\x07')
def sanitize(fn):
    """Return *fn* with filesystem-unsafe characters replaced by '_'.

    Windows forbids \\/:*?"<>|; other platforms only '/'.  The one-line
    platform check is inlined here instead of calling the get_os() helper.
    """
    if platform.system() == 'Windows':
        return re.sub(r'[\/:*?"><|]', '_', fn)
    return re.sub('/', '_', fn)
def parse_args():
    """Parse and return the command-line arguments (url list, format, meta flag)."""
    parser = argparse.ArgumentParser(description='Sorrow446.')
    parser.add_argument(
        '-u', '--url',
        nargs='*',
        required=True,
        help="URL - marvel.com/comics/issue/ or read.marvel.com/#/book/.",
    )
    parser.add_argument(
        '-f', '--format',
        choices=['cbz', 'pdf'],
        required=True,
        help="Export format.",
    )
    parser.add_argument(
        '-m', '--meta',
        action='store_true',
        help="Write comic's metadata to JSON file.",
    )
    return parser.parse_args()
def parse_cookies(cd, out_cookies=None):
    """Load a Netscape-format cookies.txt from directory *cd* and hand the
    cookies to the API client.

    cd: directory containing cookies.txt.
    out_cookies: optional dict to populate; a fresh dict is created per call.
        (The original used a mutable default `{}`, which silently accumulated
        cookies across successive calls.)
    """
    if out_cookies is None:
        out_cookies = {}
    with open(os.path.join(cd, 'cookies.txt')) as f:
        # Netscape format: 7 tab-separated fields; index 5 = name, 6 = value.
        for line in f:
            if not line.startswith('#'):
                field = line.strip().split('\t')
                out_cookies[field[5]] = field[6]
    client.set_cookies(out_cookies)
def exist_check(f):
    """Return True if path *f* is an existing regular file.

    Direct boolean return instead of the `if: return True / return False`
    anti-pattern.
    """
    return os.path.isfile(f)
def dir_setup(tmp_dir, dl_dir):
    """Start each run with an empty tmp_dir; create dl_dir on first use."""
    if os.path.isdir(tmp_dir):
        shutil.rmtree(tmp_dir)
    os.makedirs(tmp_dir)
    os.makedirs(dl_dir, exist_ok=True)
def check_url(url):
    """Match a supported Marvel URL and return (host_tag, id) or None.

    host_tag is 'read' (read.marvel.com book viewer) or 'www' (comic issue
    page); id is the numeric book/issue id string.

    Bug fix: the original patterns used `http[s]://` — a character class
    containing only 's' — so plain `http://` URLs could never match.
    `https?://` accepts both schemes.  Dots are also escaped so '.' no
    longer matches any character.
    """
    regexes = [
        r'https?://(read)\.marvel\.com/#/book/([0-9]+$)',
        r'https?://(www)\.marvel\.com/comics/issue/([0-9]+)/.+'
    ]
    for regex in regexes:
        match = re.match(regex, url)
        if match:
            return match.group(1), match.group(2)
    # Implicitly returns None for unrecognized URLs (caller handles TypeError).
def download(urls, tmp_dir, cur=0):
    """Download each image URL into tmp_dir as '<index>.jpg' with a tqdm bar.

    urls: iterable of page-image URLs (fetched via the shared client session).
    tmp_dir: destination directory for the numbered images.
    cur: starting index offset (kept for backward compatibility).

    Raises requests.HTTPError on a failed download (via raise_for_status).
    """
    total = len(urls)
    for url in urls:
        cur += 1
        print('Downloading image {} of {}...'.format(cur, total))
        r = client.session.get(url, stream=True)
        r.raise_for_status()
        size = int(r.headers.get('content-length', 0))
        # Renamed from 'abs' to avoid shadowing the builtin abs().
        dest = os.path.join(tmp_dir, str(cur) + '.jpg')
        with open(dest, 'wb') as f:
            with tqdm(total=size, unit='B',
                      unit_scale=True, unit_divisor=1024,
                      initial=0, miniters=1) as bar:
                # Stream in 32 KiB chunks to keep memory flat on large pages.
                for chunk in r.iter_content(32 * 1024):
                    if chunk:
                        f.write(chunk)
                        bar.update(len(chunk))
def make_pdf(abs, images, title):
    """Convert the image files into a single PDF written at path *abs*."""
    pdf_bytes = img2pdf.convert(images, title=title)
    with open(abs, 'wb') as out:
        out.write(pdf_bytes)
def make_cbz(abs, images):
    """Bundle the image files into an uncompressed CBZ (zip) archive at *abs*."""
    with zipfile.ZipFile(abs, 'w', zipfile.ZIP_STORED) as archive:
        for image in images:
            archive.write(image)
def write_meta(meta_abs, meta):
    """Dump the comic metadata dict to *meta_abs* as pretty-printed JSON."""
    with open(meta_abs, 'w') as out:
        json.dump(meta, out, indent=4)
def err(e, cur, tot):
    """Report an error; abort with exit code 1 if this was the final item."""
    print(e)
    is_last = cur == tot
    if is_last:
        sys.exit(1)
def main():
    """Download each requested comic, zero-pad its page images, and export.

    Fixes to the zero-padding block introduced by this commit:
    - removed the unused `sourcedir = "tmp_dir"` string (it named the variable,
      not the path, and was never read);
    - `os.path.splitext` instead of `split(".")`, which broke on filenames
      containing more than one dot;
    - `str.zfill` + `os.path.join` instead of manual zero-string arithmetic
      and '/'-concatenation;
    - the image list is now sorted — os.listdir order is arbitrary, so without
      sorting the padding did not actually fix page order in the output;
    - locals renamed to stop shadowing the builtins type/id/abs.
    """
    # Resolve the script/executable directory (PyInstaller sets sys.frozen).
    if hasattr(sys, 'frozen'):
        cd = os.path.dirname(sys.executable)
    else:
        cd = os.path.dirname(__file__)
    tmp_dir = os.path.join(cd, 'mur_tmp')
    dl_dir = os.path.join(cd, 'MUR downloads')
    dir_setup(tmp_dir, dl_dir)
    parse_cookies(cd)
    args = parse_args()
    tot = len(args.url)
    cur = 0
    for url in args.url:
        cur += 1
        try:
            print("Comic {} of {}:".format(cur, tot))
            try:
                url_type, comic_id = check_url(url)
            except TypeError:
                # check_url returned None -> unpacking raised TypeError.
                err('Invalid URL: ' + str(url), cur, tot)
                continue
            if url_type == "www":
                # Issue pages need a lookup to resolve the digital book id.
                comic_id = client.get_id(url)
            fmt = args.format
            meta = client.get_comic_meta(comic_id)
            title = meta['title']
            title_s = sanitize(title)
            print(str(title) + "\n")
            out_path = os.path.join(dl_dir, '{}.{}'.format(title_s, fmt))
            if exist_check(out_path):
                err('Comic already exists locally.', cur, tot)
                continue
            try:
                download(client.get_comic(comic_id), tmp_dir)
            except IneligibleError as e:
                print(e)
                sys.exit(1)
            # Zero-pad image names so lexicographic order matches numeric
            # page order (e.g. 2.jpg -> 00002.jpg).
            pad_width = 5
            for item in os.listdir(tmp_dir):
                if item.endswith(('.jpg', '.jpeg')):
                    stem, ext = os.path.splitext(item)
                    os.rename(os.path.join(tmp_dir, item),
                              os.path.join(tmp_dir, stem.zfill(pad_width) + ext))
            # Sort so pages are assembled in reading order.
            images = [os.path.join(tmp_dir, i) for i in sorted(os.listdir(tmp_dir))]
            print('Converting to {}...'.format(fmt.upper()))
            if fmt == 'pdf':
                make_pdf(out_path, images, title)
            else:
                make_cbz(out_path, images)
            if args.meta:
                print("Writing metadata to JSON file...")
                meta_abs = os.path.join(dl_dir, '{}_meta.json'.format(title_s))
                write_meta(meta_abs, meta)
            # Clean the temp images so the next comic starts fresh.
            for i in images:
                os.remove(i)
        except HTTPError as e:
            err(e, cur, tot)
        except Exception as e:
            # Broad catch keeps a batch run going; err() exits on the last item.
            err(e, cur, tot)
if __name__ == '__main__':
    # Entry point: show the banner, set the console title, then run the
    # download/convert loop.
    print_title()
    set_con_title()
    main()