# 注意：直接请求网页是爬不到数据的。需要先用浏览器开发者工具（F12）定位与 car-list 相关的代码块，
# 再去爬取数据

# 生成图表的问题
import os

import requests
from bs4 import BeautifulSoup
import pandas as pd

from data_utils import process_data
from vis_utils import generate_plots

# Request headers that mimic a real desktop browser so the site does not
# reject the scraper as an obvious bot.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
}

def fetch_brands(url):
    """
    Fetch the list of car brands for the Qingdao region.

    Parameters
    ----------
    url : str
        URL of the brand-list page.

    Returns
    -------
    list[dict]
        One dict per brand with keys '品牌' (brand name) and
        '链接' (absolute brand-page URL).

    Raises
    ------
    requests.HTTPError
        If the server responds with a 4xx/5xx status.
    requests.Timeout
        If the server does not respond within the timeout.
    """
    # A timeout prevents the script from hanging forever on a dead connection.
    response = requests.get(url, headers=headers, timeout=10)
    # Fail fast on HTTP errors instead of silently parsing an error page.
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'lxml')

    brands = []  # collected brand records
    # Each anchor under .brand-list carries the brand name and a relative href.
    for brand_item in soup.select('.brand-list a.item'):
        brand_name = brand_item.text.strip()  # strip surrounding whitespace
        brand_url = brand_item['href']  # relative path from the href attribute
        brands.append({
            '品牌': brand_name,
            '链接': f"https://www.ttpai.cn{brand_url}"  # build the absolute URL
        })

    return brands

def fetch_car_data(brand_url):
    """
    Scrape the car listings from a single brand page.

    Parameters
    ----------
    brand_url : str
        Absolute URL of the brand's listing page.

    Returns
    -------
    list[dict]
        One dict per listing with keys '链接' (listing href) and
        '文本内容' (stripped anchor text). Items that cannot be
        parsed are skipped with a warning.

    Raises
    ------
    requests.HTTPError
        If the server responds with a 4xx/5xx status.
    requests.Timeout
        If the server does not respond within the timeout.
    """
    # A timeout prevents the script from hanging forever on a dead connection.
    response = requests.get(brand_url, headers=headers, timeout=10)
    # Fail fast on HTTP errors instead of silently parsing an error page.
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'lxml')

    cars = []
    for item in soup.select('.car-list .item'):
        anchor = item.select_one('a')  # look up the anchor once, not twice
        try:
            cars.append({
                '链接': anchor['href'],
                '文本内容': anchor.text.strip(),
            })
        except (TypeError, KeyError) as e:
            # TypeError: no <a> was found (anchor is None);
            # KeyError: the <a> has no href attribute.
            print(f"解析某条数据失败: {str(e)}")

    return cars

def main():
    """
    Entry point: scrape all Qingdao used-car listings by brand,
    save them to CSV, then run processing and visualization.
    """
    # Brand-list page for the Qingdao region.
    base_url = "https://www.ttpai.cn/qingdao/list"

    # 获取品牌列表
    brands = fetch_brands(base_url)
    print(f"成功获取 {len(brands)} 个品牌：")
    for brand in brands:
        print(f"{brand['品牌']} - {brand['链接']}")

    all_cars = []
    # Visit every brand page and collect its listings; a failure on one
    # brand must not abort the whole crawl.
    for brand in brands:
        try:
            print(f"正在爬取品牌 {brand['品牌']} 的数据...")
            cars = fetch_car_data(brand['链接'])
            all_cars.extend(cars)
            print(f"成功爬取 {len(cars)} 条数据")
        except Exception as e:
            print(f"爬取品牌 {brand['品牌']} 数据失败: {str(e)}")

    # 转换为 DataFrame 并保存（列名即字典的键）
    df = pd.DataFrame(all_cars)
    if df.empty:
        print("未获取到有效数据！请检查页面结构或分页逻辑。")
        return

    # to_csv does not create missing directories — ensure ./data exists
    # so the save does not raise FileNotFoundError on a fresh checkout.
    os.makedirs("./data", exist_ok=True)
    df.to_csv("./data/青岛二手车数据.csv", index=False)

    processed_df = process_data(df)
    generate_plots(processed_df)
    print("数据爬取、处理以及可视化完成！")

# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
