import itertools
import json
import os
import time
from concurrent.futures import ThreadPoolExecutor

import bs4
import requests
from lxml import etree


# print(page_id_2)
url = "https://car.autohome.com.cn/pic/series-t/"


def get_url(url, id, page):
    """Build the gallery-page URL for one car series.

    Args:
        url: base series URL, expected to end with a slash.
        id: series identifier appended to the path (name shadows the
            ``id`` builtin but is kept for caller compatibility).
        page: 1-based page number.

    Returns:
        The full page URL, e.g. ``<url><id>-1-p<page>.html``.
    """
    return f"{url}{id}-1-p{page}.html"


# Global counter giving every downloaded image a unique suffix.
# ``itertools.count`` yields atomically in CPython, unlike the previous
# ``index += 1`` on a plain global, which raced when get_img() ran
# concurrently in the thread pool.
_image_counter = itertools.count(1)


def get_img(real_url, new_folder_path):
    """Download every image listed on one gallery page.

    Args:
        real_url: full gallery-page URL as produced by ``get_url``.
        new_folder_path: existing directory the ``.jpg`` files are saved to.
    """
    response_1 = requests.get(real_url, timeout=30)  # timeout: never hang a worker forever
    if response_1.status_code != 200:
        print(f"{real_url}下载失败")
        return

    tree = etree.HTML(response_1.text)
    list_img = tree.xpath('/html/body/div[2]/div/div[2]/div[7]/div/div[2]/div[2]/ul/li')
    for li in list_img:
        srcs = li.xpath('./a/img/@src')
        titles = li.xpath('./a/@title')
        if not srcs or not titles:
            # Skip <li> entries that are not image items instead of
            # crashing with IndexError on [0].
            continue
        # Page markup uses protocol-relative URLs ("//..."), so prefix the scheme.
        img_response = requests.get("https:" + srcs[0], timeout=30)
        if img_response.status_code != 200:
            continue
        img_name = titles[0] + '-' + str(next(_image_counter)) + ".jpg"
        with open(os.path.join(new_folder_path, img_name), 'wb') as f:
            f.write(img_response.content)

def get_init():
    """Drive the whole download: load the series map, then fetch images.

    Reads ``./car_url_500.json`` (brand -> sub-brand -> series id), creates
    a ``brand/sub_brand`` directory tree, and downloads the first three
    gallery pages of each series via a small thread pool, waiting for each
    sub-brand's pages to finish before moving to the next.
    """
    with open('./car_url_500.json', 'r', encoding='utf-8') as file:
        page_id_2 = json.load(file)

    max_workers = 5  # tune the pool size to what the site tolerates
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        for brand, sun_brands in page_id_2.items():
            # exist_ok avoids the check-then-create race of the previous
            # os.path.exists() + os.makedirs() pair.
            os.makedirs(brand, exist_ok=True)

            for sun_brand, series_id in sun_brands.items():
                new_folder_path = os.path.join(brand, sun_brand)
                os.makedirs(new_folder_path, exist_ok=True)

                # Fan the three gallery pages of this series out to the pool.
                futures = [
                    executor.submit(get_img, get_url(url, series_id, page), new_folder_path)
                    for page in range(1, 4)
                ]

                # Block until this sub-brand's pages are all downloaded.
                for future in futures:
                    future.result()


if __name__ == '__main__':
    # Script entry point: run the full crawl. (Dead commented-out
    # experiments that previously lived here have been removed.)
    get_init()


#
# response = requests.get(url).text
# etree = etree.HTML(response)
# list = etree.xpath('/html/body/div[2]/div/div[2]/div[7]/div/div[2]/div[2]/ul/li')
# index = 0
# for li in list:
#     index += 1
#     img_url = li.xpath('./a/img/@src')[0]
#     img_name = li.xpath('./a/@title')[0]
#     img_response = requests.get("https:"+img_url)
#     # print(img_url,img_name)
#     img_name = img_name + str(index) + ".jpg"
#     with open(f'{img_name}', 'wb') as f:
#         f.write(img_response.content)
# #
# #
# # print(list)
