# import os
# import  time
# import requests
# from bs4 import BeautifulSoup
# from urllib import parse
#
#
# # 定义请求头
# HEADERS = {
#     "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36"
# }
#
# # 定义 API URL
# API_URL = "https://api.zzzmh.cn/v2/bz/v3/getData"
# IMAGE_URL_TEMPLATE = "https://api.zzzmh.cn/v2/bz/v3/getUrl/{item_id}29"
#
# # 定义保存图片的文件夹
# IMAGE_FOLDER = '极简imgs'
#
# # 确保保存图片的文件夹存在
# if not os.path.exists(IMAGE_FOLDER):
#     os.makedirs(IMAGE_FOLDER)
#
#
# def download_image(item_id, img_count):
#     """
#     下载单张图片
#     :param item_id: 图片 ID
#     :param img_count: 当前图片序号
#     """
#     time.sleep(1)
#     try:
#         img_url = IMAGE_URL_TEMPLATE.format(item_id=item_id)
#         response = requests.get(img_url)
#         response.raise_for_status()
#         file_path = os.path.join(IMAGE_FOLDER, f'image{img_count + 1}.jpg')
#         with open(file_path, 'wb') as file:
#             print(f"正在下载第 {img_count + 1} 张图片...")
#             file.write(response.content)
#         return True
#     except requests.RequestException as e:
#         print(f"下载图片时出错: {e}")
#         return False
#
#
# def get_wallpaper_data(page):
#     """
#     获取壁纸数据
#     :param page: 当前页码
#     :return: 壁纸数据列表
#     """
#     payload = {
#         'size': 24,
#         'current': page,
#         'sort': 0,
#         'category': 0,
#         'resolution': 0,
#         'color': 0,
#         'categoryId': 0,
#         'ratio': 0,
#     }
#     try:
#         response = requests.post(API_URL, headers=HEADERS, json=payload)
#         response.raise_for_status()
#         return response.json().get('data', {}).get('list', [])
#     except requests.RequestException as e:
#         print(f"请求数据时出错: {e}")
#         return []
#
#
# def main():
#     img_count = 0
#     current_page = 1
#     target_count = 100
#
#     while img_count < target_count:
#         data_list = get_wallpaper_data(current_page)
#         for item in data_list:
#             if img_count >= target_count:
#                 break
#             item_id = item.get('i')
#             if item_id and download_image(item_id, img_count):
#                 img_count += 1
#         current_page += 1
#
#     print(f"已成功下载 {img_count} 张图片。")
#
#
# if __name__ == "__main__":
#     main()


import requests
import os
import time

# Output folders: one root plus a subfolder per wallpaper size variant.
# os.makedirs(..., exist_ok=True) is idempotent and avoids the
# check-then-create race of `if not exists: mkdir`, so re-running the
# script is always safe.
root = "./images"
thumb_root = os.path.join(root, "thumb_images")
preview_root = os.path.join(root, "preview_images")
big_root = os.path.join(root, "big_images")
for _folder in (root, thumb_root, preview_root, big_root):
    os.makedirs(_folder, exist_ok=True)



# Crawl wallpaper listings from the zzzmh API.  Each list item carries an
# id `i` and a type code `t`; the download URL is the id followed by a
# numeric suffix derived from t:
#   t*10     -> thumbnail
#   t*10 + 1 -> preview
#   t*10 + 9 -> full-resolution original
for page in range(1, 1734):
    print(f"正在爬取第{page}页")
    url = "https://api.zzzmh.cn/v2/bz/v3/getData"
    json_data = {
        "size": 24,
        "current": page,
        "sort": 0,
        "category": 0,
        "resolution": 0,
        "color": 0,
        "categoryId": 0,
        "ratio": 0
    }
    res = requests.post(url, json=json_data)
    if res.status_code == 200:
        for item in res.json()['data']['list']:
            i = item['i']
            t = item['t']
            print(f"正在爬取图片{i}")
            # (download url, destination folder, filename prefix) for each
            # size variant.  BUG FIX: the full-resolution file was
            # previously saved as "preview_{i}.jpg" inside big_images;
            # it is now correctly named "big_{i}.jpg".
            variants = [
                (f'https://api.zzzmh.cn/v2/bz/v3/getUrl/{i}{t * 10}', thumb_root, "thumb"),
                (f'https://api.zzzmh.cn/v2/bz/v3/getUrl/{i}{t * 10 + 1}', preview_root, "preview"),
                (f'https://api.zzzmh.cn/v2/bz/v3/getUrl/{i}{t * 10 + 9}', big_root, "big"),
            ]
            for img_url, folder, prefix in variants:
                # One failed image download should not abort the whole
                # crawl, so handle request errors per image.
                try:
                    img_res = requests.get(img_url)
                    img_res.raise_for_status()
                except requests.RequestException as e:
                    print(f"下载图片时出错: {e}")
                    continue
                with open(os.path.join(folder, f"{prefix}_{i}.jpg"), "wb") as f:
                    f.write(img_res.content)

            time.sleep(1)  # throttle: be polite to the API
    else:
        print(f"获取页面数据异常")

    # NOTE(review): this break stops the crawl after the first page —
    # presumably a debug/test limiter.  Remove it to crawl all 1733 pages.
    break
