# Scrape the names of all car brands, and the sub-model names under each brand, from Autohome (https://car.autohome.com.cn) using the requests library, and save the results to a CSV file.
import requests
import csv
from bs4 import BeautifulSoup
import time

# Base URL for per-series configuration pages.
# NOTE(review): currently unused — the fetch code below builds its own URLs.
base_url = 'https://car.autohome.com.cn/config/series/'
# Browser-like User-Agent so the site does not reject the requests as a bot.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}

# Accumulators intended to hold scraped brand and sub-model data.
# NOTE(review): never populated — the CSV step below uses the hard-coded
# example_brands / example_models lists instead; wire these up or remove them.
brands = []
models = []

# Fetch the city landing page that lists car brands, then follow one brand's
# detail page.  NOTE(review): the actual parsing of `soup` into brand/model
# names is still a stub, and the brand URL below is a literal placeholder.
brand_list_url = 'https://car.autohome.com.cn/beijing/'
# timeout added: without it requests.get can block forever on a stalled server.
response = requests.get(brand_list_url, headers=headers, timeout=10)
if response.status_code == 200:
    soup = BeautifulSoup(response.content, 'lxml')
    example_brand_url = 'https://car.autohome.com.cn/config/series/brand_id.html'  # replace with an actual brand page URL
    response = requests.get(example_brand_url, headers=headers, timeout=10)
    if response.status_code == 200:
        soup = BeautifulSoup(response.content, 'lxml')

        print("...")
    else:
        # Previously this failure path was silent; report it like the outer one.
        print(f"Failed to retrieve brand page with status code {response.status_code}")
else:
    print(f"Failed to retrieve brand list page with status code {response.status_code}")

# Placeholder data: each entry pairs a brand name with its list of sub-models.
_example_pairs = [
    ('Brand1', ['Model1-1', 'Model1-2']),
    ('Brand2', ['Model2-1']),
    ('Brand3', ['Model3-1', 'Model3-2', 'Model3-3']),
]
example_brands = [brand for brand, _ in _example_pairs]
example_models = [model_list for _, model_list in _example_pairs]

# Write one (brand, model) row per sub-model to the output CSV.
# newline='' prevents the csv module from emitting blank lines on Windows.
with open('car_brands_and_models.csv', 'w', newline='', encoding='utf-8') as csvfile:
    out = csv.writer(csvfile)
    out.writerow(['Brand', 'Model'])
    out.writerows(
        (brand, model)
        for brand, model_list in zip(example_brands, example_models)
        for model in model_list
    )