import requests
import json
import execjs
import time
import os

# Request headers for the wallpaper search API (api.zzzmh.cn), copied
# from a real Chrome 118 browser session. The Origin/Referer values
# point at the official frontend (bz.zzzmh.cn) — presumably the API
# rejects calls without them; verify before trimming any of these.
headers = {
    'authority': 'api.zzzmh.cn',
    'accept': 'application/json, text/plain, */*',
    'accept-language': 'zh-CN,zh;q=0.9',
    'content-type': 'application/json;charset=UTF-8',
    'origin': 'https://bz.zzzmh.cn',
    'referer': 'https://bz.zzzmh.cn/',
    'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-site',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}

# --- page-range input -------------------------------------------------
# Ask the user which result pages (1-based) to scrape. To keep the load
# on the site low, pages beyond 5 are refused.
print("为不给该网站增加太大的负担，每次抓取的数据页不能大于 5页 ！")
star_number = int(input("起始页："))
end_number = int(input("结束页："))
# Bug fix: the original only *printed* these warnings and then fell
# through to the scraping loop with the invalid range anyway. An invalid
# range now aborts the script (SystemExit needs no extra import and
# prints the message to stderr).
if star_number < 1:
    raise SystemExit("起始页不能小于1")
if end_number > 5:
    raise SystemExit("结束页不能大于5")
# Robustness: a reversed range would silently scrape nothing.
if end_number < star_number:
    raise SystemExit("结束页不能小于起始页")

# --- scrape loop ------------------------------------------------------
# The payload-decryption routine lives in 解析.js next to this script.
# Fixes vs. the original: the JS file is opened with a context manager
# (no leaked file handle), the path is built with os.path.join instead
# of a hard-coded "\" separator (which only worked on Windows), and all
# loop-invariant work (path resolution, JS compilation, header dicts) is
# hoisted out of the page loop.
script_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(script_dir, "解析.js"), "r", encoding="utf-8") as f:
    js_ctx = execjs.compile(f.read())

# Headers for the image downloads; the Referer presumably satisfies the
# host's hotlink protection — TODO confirm against the site.
img_headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; ) Gecko/20100101 Firefox/61.0',
    'Referer': 'https://bz.zzzmh.cn',
}

for page in range(star_number, end_number + 1):
    # One page of the wallpaper index: 24 items, all filters 0 ("any").
    json_data = {
        'size': 24,
        'current': page,
        'sort': 0,
        'category': 0,
        'resolution': 0,
        'color': 0,
        'categoryId': 0,
        'ratio': 0,
    }
    response = requests.post('https://api.zzzmh.cn/bz/v3/getData',
                             headers=headers, json=json_data).json()
    # The API returns an opaque payload in 'result'; the JS parse()
    # function turns it into a JSON string with a 'list' of items.
    parse_data = json.loads(js_ctx.call("parse", response['result']))

    for item in parse_data['list']:
        # Bug fix: the original fetched every image URL TWICE — once
        # with browser-navigation headers whose response was discarded
        # unused, and once for the actual download — doubling the load
        # on the site that the script itself asks users to spare. Only
        # the real download request is kept. The trailing "20" is an
        # opaque suffix carried over from the original URL format.
        url = f'https://api.zzzmh.cn/bz/v3/getUrl/{item["i"]}20'
        img_resp = requests.get(url, headers=img_headers)

        img_name = item["i"] + ".jpg"
        # Heuristic carried over from the original: a real image body is
        # longer than 200 characters; anything shorter is treated as an
        # error/denial response rather than image data.
        if len(img_resp.text) > 200:
            with open(img_name, "wb") as f:
                f.write(img_resp.content)
            print(f"图片：{img_name} 下载成功！")
            time.sleep(1)  # be polite: pause 1 s between downloads
        else:
            print(f"图片：{img_name} 解析失败！")
