import os
import time

import pandas as pd
import requests
from bs4 import BeautifulSoup
def fetch_page(url):
    """
    Download a page and return its HTML body.

    Parameters:
    url (str): the URL to fetch.

    Returns:
    str: the decoded response body.

    Raises:
    requests.HTTPError: if the server answers with a 4xx/5xx status.
    requests.Timeout: if the request exceeds the timeout.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }
    # timeout= keeps the crawler from hanging forever on a stalled
    # connection; raise_for_status() surfaces HTTP errors instead of
    # silently handing an error page to the parser.
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()
    return response.text


def parse_page(html, date):
    """
    Extract brand names and sales counts from one ranking page.

    Parameters:
    html (str): HTML of a sales-ranking page.
    date (str): date string (YYYY-MM-DD) attached to every record.

    Returns:
    list: dicts with 'brand', 'sales' (still a string, with the
        "辆" unit suffix removed) and 'date' keys.
    """
    soup = BeautifulSoup(html, 'html.parser')
    brands = []
    for item in soup.select('.rk-item'):
        name_node = item.select_one('.rk-car-name')
        sales_node = item.select_one('.rk-car-num')
        # Skip malformed entries instead of crashing with AttributeError
        # when a selector matches nothing.
        if name_node is None or sales_node is None:
            continue
        brand = name_node.text
        sales = sales_node.text.replace("辆", "")
        brands.append({'brand': brand, 'sales': sales, 'date': date})
    return brands


def clean_data(brands):
    """
    Normalize the scraped sales figures in place.

    Parameters:
    brands (list): dicts with 'brand', 'sales' and 'date' keys, where
        'sales' is a string such as "1,234".

    Returns:
    list: the same list, with each 'sales' value converted to int
        (thousands separators and surrounding whitespace removed).
    """
    for record in brands:
        raw = record['sales']
        record['sales'] = int(raw.strip().replace(',', ''))
    return brands


def save_to_csv(brands, filename):
    """
    Append brand/sales records to a CSV file.

    Parameters:
    brands (list): dicts with brand and sales information; each dict
        becomes one CSV row.
    filename (str): path of the CSV file; created (with a header row)
        if it does not exist, appended to (without header) otherwise.
    """
    df = pd.DataFrame(brands)
    # os.path.exists replaces pd.io.common.file_exists, a private pandas
    # helper that is not part of the stable public API.
    df.to_csv(filename, mode='a', index=False, header=not os.path.exists(filename))


if __name__ == '__main__':
    all_brands = []
    for i in range(7, 8):
        # Zero-pad the month so the date stays valid ('2024-10-01', not
        # '2024-010-01') if the range is ever widened past September.
        date = f'2024-{i:02d}-01'
        print("正在爬取:" + str(i) + '月的数据')
        for page in range(1, 13):
            print("正在爬取第" + str(page) + '页')
            url = ('https://car.yiche.com/salesrank/brandsales/?date='
                   + date + '&page=' + str(page))
            page_content = fetch_page(url)
            brands = parse_page(page_content, date)
            cleaned_brands = clean_data(brands)
            all_brands.extend(cleaned_brands)
            # Throttle requests to be polite to the server (`time` was
            # imported at the top of the file but never used).
            time.sleep(1)

    save_to_csv(all_brands, '七月brands_sales.csv')
    print("数据爬取完毕，已保存到csv文件。")
