from test import comic_id
import re
import requests
import time
import random


# Request headers: spoof a desktop Chrome browser so the API serves us normally.
headers = {
    "user-agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36"
    ),
}
# Running total of image URLs processed across all comics.
count = 0
print(comic_id)
for index in comic_id:
    # Query the comic-info API for this comic id.
    to_url = "https://www.manhuatai.com/api/getComicInfoBody"
    data = "?product_id=2&productname=mht&platformname=pc&comic_id=" + str(index)
    response = requests.get(to_url + data, headers=headers)
    response_data = response.json()
    cartoon_name = response_data["data"]["comic_name"]       # comic title
    comic_chapter = response_data["data"]["comic_chapter"]   # chapter list from the AJAX payload
    strt_url = "https://mhpic."    # image URL prefix
    end_url = "-mht.middle.webp"   # image URL suffix
    chapter_url = []               # all image URLs built for this comic
    for car in comic_chapter:
        try:
            chapter_name = car["chapter_name"]       # chapter title
            rule = car["rule"]                       # URL template; '$$' is the page-number placeholder
            end_num = car["end_num"]                 # number of images in this chapter
            chapter_domain = car["chapter_domain"]   # per-comic image host segment
        except KeyError as exc:
            # Skip malformed chapter entries, but say so instead of silently
            # swallowing everything (the original bare `except: pass` hid real bugs).
            print("skipping chapter, missing key:", exc)
            continue
        for i in range(1, end_num + 1):
            # Substitute the page number into the template and build the full URL.
            # str.replace is simpler than re.sub for a literal '$$' and is equivalent here.
            data_url = rule.replace('$$', str(i))
            str_chapter_utl = strt_url + chapter_domain + data_url + end_url
            chapter_url.append(str_chapter_utl)
            try:
                # BUG FIX: fetch the single URL just built, not the whole
                # accumulated list `chapter_url` (requests.get requires a str URL).
                cartoon_data = requests.get(str_chapter_utl, headers=headers)
            except requests.RequestException as exc:
                # Best-effort: log the failed download and keep going.
                print("download failed:", str_chapter_utl, exc)
                continue
            print(str_chapter_utl)
            count += 1
            time.sleep(random.random())   # polite throttle between image requests
            # TODO: persist the downloaded image, e.g.:
            # with open(r'C:\Users\Administrator\Pictures\漫画\\'+cartoon_name+chapter_name + ".jpg", 'wb') as f:
            #     f.write(cartoon_data.content)
        time.sleep(random.randint(1, 3))  # longer pause between chapters

print(count)   # total number of image URLs fetched
