import json
import os
import time

import requests
from lxml import etree
from random import choice
from concurrent.futures import ThreadPoolExecutor

# Pool of desktop-browser User-Agent strings; get_img() picks one at random
# per page request to make the traffic look less like a single bot.
header = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 QuarkPC/1.3.0.30",
]
# print(page_id_2)
# url = "https://car.autohome.com.cn/pic/series-t/"
# Base of the picture-gallery pages; full page URLs are built by appending
# "<series-id>-<category>-p<page>.html" (see get_url / get_url2).
url = "https://car.autohome.com.cn/pic/series/"


def get_url(url, id, page):
    """Build the gallery page URL for picture category 1 of a car series.

    :param url: base URL ending in "/" (e.g. the module-level ``url``)
    :param id: numeric series id
    :param page: 1-based page number
    :return: full URL string "<url><id>-1-p<page>.html"
    """
    return f"{url}{id}-1-p{page}.html"


def get_url2(url, id, page):
    """Build the gallery page URL for picture category 10 of a car series.

    Same scheme as :func:`get_url` but with the "-10-" category segment;
    used as a fallback source when category 1 yields fewer than 100 images.
    """
    return f"{url}{id}-10-p{page}.html"


def count_images_in_folder(new_folder_path):
    """Count the image files directly inside a folder.

    :param new_folder_path: path of the folder to scan (not recursive)
    :return: number of entries whose extension (case-insensitive) is one of
        .jpg/.jpeg/.png/.gif/.bmp
    """
    # Recognised image extensions; extend as needed.
    allowed = {'.jpg', '.jpeg', '.png', '.gif', '.bmp'}
    return sum(
        1
        for entry in os.listdir(new_folder_path)
        if os.path.splitext(entry)[1].lower() in allowed
    )


index = 0


def get_img(real_url, new_folder_path):
    """Download every image listed on one gallery page into *new_folder_path*.

    Files are named with the module-level ``index`` counter as a five-digit
    sequence number (e.g. ``00001.jpg``); files that already exist are kept
    and the download for them is skipped.

    :param real_url: full gallery page URL (from get_url / get_url2)
    :param new_folder_path: existing destination directory for the images
    """
    global index
    headers = {
        # Rotate through the UA pool so requests look less uniform.
        "User-Agent": choice(header)
    }
    try:
        # Timeout so a stalled connection cannot hang the whole crawl.
        response_1 = requests.get(real_url, headers=headers, timeout=15)
    except requests.RequestException:
        print(f"{real_url}下载失败")
        return
    if response_1.status_code != 200:
        print(f"{real_url}下载失败")
        return
    tree = etree.HTML(response_1.text)
    try:
        # NOTE(review): absolute XPath is brittle against site layout changes.
        list_img = tree.xpath('/html/body/div[2]/div/div[2]/div[7]/div/div[2]/div[2]/ul/li')
        for li in list_img:
            index += 1
            # IndexError here (no @src) is caught below, matching the old
            # behavior of abandoning the rest of the page.
            img_url = li.xpath('./a/img/@src')[0]
            img_name = f"{new_folder_path}/{index:05d}.jpg"
            # Check existence BEFORE downloading — the original fetched the
            # image first and then threw it away, wasting a request.
            if os.path.exists(img_name):
                continue
            img_response = requests.get("https:" + img_url, timeout=15)
            if img_response.status_code != 200:
                continue
            with open(img_name, 'wb') as f:
                f.write(img_response.content)
    except (AttributeError, IndexError, requests.RequestException):
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; AttributeError covers etree.HTML returning None.
        print(real_url + 'null')


def get_init():
    """Crawl images for every brand/sub-brand listed in ./car_url_500.json.

    The JSON maps brand -> {sub_brand -> series_id}. For each sub-brand a
    ``brand/sub_brand`` folder is created and up to ~100 images are fetched
    from category 1 pages (get_url), falling back to category 10 pages
    (get_url2) when the first two pages yield fewer than 100 images.
    Folders that already hold 100+ images are skipped.
    """
    global index
    # Load once with a context manager — the original opened the file twice
    # and leaked the second handle.
    with open('./car_url_500.json', 'r', encoding='utf-8') as file:
        page_id_2 = json.load(file)

    for brand, sub_brands in page_id_2.items():
        for sun_brand in sub_brands:
            # Destination folder: <brand>/<sub_brand> under the CWD.
            new_folder_path = os.path.join(brand, sun_brand)
            try:
                # exist_ok makes the prior os.path.exists() check redundant.
                os.makedirs(new_folder_path, exist_ok=True)
                value = page_id_2[brand][sun_brand]
                print(value)
                if count_images_in_folder(new_folder_path) >= 100:
                    print(new_folder_path + "图片数量大于100")
                    continue
                for page in range(1, 4):
                    get_img(get_url(url, value, page), new_folder_path)
                    time.sleep(1)  # be polite to the server
                    if index >= 100:
                        break
                    if page >= 2 and index < 100:
                        # Still short after two pages: try category-10 pages.
                        print(new_folder_path + "不足一百张----------------------------------------------------")
                        for page2 in range(1, 3):
                            get_img(get_url2(url, value, page2), new_folder_path)
                            if index >= 100:
                                break
                            time.sleep(1)
                print(f"{sun_brand}" + str(index) + "下载完成")
                index = 0  # reset the counter for the next sub-brand
            except OSError:
                # Folder name from JSON may be invalid on this filesystem.
                print(f"{brand}+{sun_brand} 失败------------------------------------------------")
                continue


# Script entry point: run the full crawl sequentially.
if __name__ == '__main__':
    get_init()
    # NOTE(review): leftover experiment — a threaded variant that would need
    # get_init to accept the parsed JSON as arguments; kept for reference.
    # with ThreadPoolExecutor(max_workers=10) as t:
    #     for i in range(1, 100):
    #         t.submit(get_init,page_id,page_id_2)
    # path_brand = f"{sun_brand}"
    # if not os.path.exists(f"{sun_brand}"):
    #     os.makedirs(sun_brand)
    #     print(sun_brand)
# print(page_id[0])


#
# response = requests.get(url).text
# etree = etree.HTML(response)
# list = etree.xpath('/html/body/div[2]/div/div[2]/div[7]/div/div[2]/div[2]/ul/li')
# index = 0
# for li in list:
#     index += 1
#     img_url = li.xpath('./a/img/@src')[0]
#     img_name = li.xpath('./a/@title')[0]
#     img_response = requests.get("https:"+img_url)
#     # print(img_url,img_name)
#     img_name = img_name + str(index) + ".jpg"
#     with open(f'{img_name}', 'wb') as f:
#         f.write(img_response.content)
# #
# #
# # print(list)
# 'https://car.autohome.com.cn/pic/series/7344-1.html'

print(111111)