import urllib.error

import haoman8
import save_util
import json

# Index chapter/image URLs for one comic into ./task.json.
#
# The task file maps comic name -> chapter name -> {image filename: image url}.
# Chapters already present in the file are skipped, so the script can be
# re-run to resume an interrupted indexing pass.

base_url = 'https://www.haoman8.com'
comic_path = '/comic/4488'

comic_name = '我独自升级'

TASK_FILE = './task.json'


def _load_task():
    """Load the persisted download task; start fresh if the file is absent."""
    try:
        with open(TASK_FILE, 'r', encoding='UTF8') as f:
            return dict(json.load(f))
    except FileNotFoundError:
        # First run: no task file yet.
        return {}


def _save_task(task):
    """Persist the download task. ensure_ascii=False keeps CJK names readable."""
    with open(TASK_FILE, 'w', encoding='UTF8') as f:
        f.write(json.dumps(task, ensure_ascii=False))


chapter_infos = haoman8.find_chapter_infos(base_url + comic_path)

download_task = _load_task()
if download_task.get(comic_name) is None:
    download_task[comic_name] = {}

total = len(chapter_infos)
# chapter_info is (chapter_path, chapter_name); count tracks overall progress
# (the original only counted newly indexed chapters, so the "Skip" message
# reported a stale number).
for count, (chapter_path, chapter_name) in enumerate(chapter_infos, start=1):
    if download_task[comic_name].get(chapter_name) is None:
        img_urls = haoman8.get_img_urls_from_page(base_url + chapter_path)
        if img_urls:
            # Each img_url is (url, filename) — presumably; stored as filename -> url
            # exactly as the original did. TODO confirm against haoman8.
            download_task[comic_name][chapter_name] = {
                name: url for url, name in img_urls
            }
        # Save after every chapter so an interrupted run loses no progress.
        _save_task(download_task)
        print(f'Finished index chapter:\t{chapter_name}\tcurrent/total chapter: {count}/{total}')
    else:
        print(f'Skip index chapter:\t{chapter_name}\tcurrent/total chapter: {count}/{total}')

# Final save covers the case where every chapter was skipped (nothing was
# written inside the loop) and guarantees the file reflects the final state.
_save_task(download_task)








