# 由于 che168.com 网站的反爬虫机制过于复杂, 导致爬取效率极低, 故放弃爬取该网站的数据
# Standard library
import random
import time
from urllib.parse import urljoin

# Third-party
import pymongo
import requests
from bs4 import BeautifulSoup
from lxml import etree

# Pool of desktop-Chrome User-Agent headers. Each outgoing request picks one
# at random (random.choice(headers_list) below) so consecutive requests do
# not all carry the identical UA string.
headers_list = [
    {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'},
    {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'},
    {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
]

def fetch_page_content(page_number):
    """Download one che168.com listing page and return its HTML.

    :param page_number: 1-based listing page index inserted into the URL.
    :return: page HTML as a string, or None on any HTTP/network failure.
    """
    url = f"https://www.che168.com/china/a0_0msdgscncgpi1lto8csp{page_number}exx0/?pvareaid=102179#currengpostion"
    headers = random.choice(headers_list)
    try:
        # BUGFIX: the original call had no timeout, so a stalled connection
        # would hang the crawler indefinitely; network errors also crashed it.
        response = requests.get(url, headers=headers, timeout=10)
    except requests.RequestException:
        print(f"Failed to retrieve page {page_number}")
        return None
    if response.status_code == 200:
        return response.text
    else:
        print(f"Failed to retrieve page {page_number}")
        return None

def parse_page_content(html_content):
    """Extract the per-car detail-page URLs from a listing page.

    :param html_content: raw HTML of a listing page (may be None/empty).
    :return: list of absolute detail URLs; empty list if nothing parsed.
    """
    # Guard: etree.HTML(None) raises and etree.HTML("") yields None.
    if not html_content:
        return []
    tree = etree.HTML(html_content)
    if tree is None:
        return []
    links = []
    # Positional XPath over the listing grid; range(1, 57) covers li[1]..li[56].
    # NOTE(review): the original comment claimed 57 cars per page, but the
    # range only visits 56 list items — confirm the real page size.
    for i in range(1, 57):
        element = tree.xpath(f'/html/body/div[12]/div[1]/ul/li[{i}]/a/@href')
        if element:
            # BUGFIX: plain concatenation produced "https://www.che168.com//x"
            # for hrefs that start with "/" and garbage for absolute hrefs;
            # urljoin handles relative, root-relative and absolute hrefs.
            links.append(urljoin("https://www.che168.com/", element[0]))
    return links

def get_photo(photo_url):
    """Download a car photo and return its raw bytes.

    :param photo_url: absolute URL of the image.
    :return: image bytes, or None on any HTTP/network failure.
    """
    headers = random.choice(headers_list)
    try:
        # BUGFIX: added a timeout and exception handling — the original call
        # could hang forever or crash the crawler on a connection error.
        response = requests.get(photo_url, headers=headers, timeout=10)
    except requests.RequestException:
        print(f"Failed to retrieve photo {photo_url}")
        return None
    if response.status_code == 200:
        return response.content
    else:
        print(f"Failed to retrieve photo {photo_url}")
        return None

def parse_car_detail(links):
    """Fetch each car-detail page, extract the brand name and (eventually)
    store a record in MongoDB.

    Only the brand extraction is currently active; the remaining field
    extraction and the insert are kept commented out below until the
    selectors for che168.com's markup are confirmed.

    :param links: iterable of absolute detail-page URLs.
    """
    client = pymongo.MongoClient("mongodb://120.53.220.118:27000/")
    db = client["car_database"]
    collection = db["che168_car_collection"]

    try:
        for link in links:  # process every link
            headers = random.choice(headers_list)
            response = requests.get(link, headers=headers, timeout=10)
            # Up to 3 retries for non-200 responses.
            retry_count = 0
            while response.status_code != 200 and retry_count < 3:
                print(f"Retrying to fetch link: {link}")
                time.sleep(1)
                headers = random.choice(headers_list)
                response = requests.get(link, headers=headers, timeout=10)
                retry_count += 1
            if response.status_code == 200:
                html_content = response.text
                soup = BeautifulSoup(html_content, 'html.parser')
                # Extract the brand; retry with exponential backoff when the
                # selector is missing (presumably an anti-bot interstitial —
                # TODO confirm against a captured response).
                brand_element = soup.select_one('h3.car-brand-name')
                retry_delay = 1
                brand_retries = 0
                # BUGFIX: the original loop had no retry cap, so a page that
                # never contains the selector spun forever. Cap at 5 attempts.
                while not brand_element and brand_retries < 5:
                    print(f"Retrying to fetch brand for link: {link}, waiting for {min(retry_delay, 10)} seconds")
                    time.sleep(min(retry_delay, 10))  # back off, capped at 10 s
                    headers = random.choice(headers_list)
                    response = requests.get(link, headers=headers, timeout=10)
                    if response.status_code == 200:
                        html_content = response.text
                        soup = BeautifulSoup(html_content, 'html.parser')
                        brand_element = soup.select_one('h3.car-brand-name')
                    retry_delay *= 2  # double the wait each retry
                    brand_retries += 1
                if brand_element is None:
                    # Give up on this car rather than looping forever.
                    print(f"Skipping link (brand not found): {link}")
                    time.sleep(2)
                    continue
                brand_bs = brand_element.text.strip()
                print(f"Brand (BeautifulSoup): {brand_bs}")
                # year = tree.xpath('//span[@class="car-year"]/text()')[0].strip()
                # mileage = tree.xpath('//span[@class="car-mileage"]/text()')[0].strip()
                # color = tree.xpath('//span[@class="car-color"]/text()')[0].strip()
                # price = tree.xpath('//span[@class="car-price"]/text()')[0].strip() + " 万"
                # configuration = tree.xpath('//span[@class="car-configuration"]/text()')[0].strip()
                # condition_description = tree.xpath('//span[@class="car-condition-description"]/text()')[0].strip()
                # photo_url = tree.xpath('//div[@class="car-photo"]/img/@src')[0]
                # photo = get_photo(photo_url)

                # car_data = {
                #     "Brand": brand_bs,
                #     "Year": year,
                #     "Mileage": mileage,
                #     "Color": color,
                #     "Price": price,
                #     "Configuration": configuration,
                #     "ConditionDescription": condition_description,
                #     "Photo": photo
                # }

                # collection.insert_one(car_data)
                # print(f"Inserted car data for Brand: {brand_bs}, Year: {year}")
            time.sleep(2)  # pause 2 seconds between cars (comment previously said 1)
    finally:
        # BUGFIX: the Mongo client was never closed.
        client.close()

def main():
    """Crawl listing pages until at least 57 cars have been processed.

    Pages that fail to parse are retried a bounded number of times and then
    skipped, instead of being retried forever.
    """
    total_cars = 0
    page_number = 1
    empty_retries = 0  # consecutive failures on the current page
    while total_cars < 57:
        html_content = fetch_page_content(page_number)
        if html_content:
            # Collect the detail links on the current listing page.
            links = parse_page_content(html_content)
            if not links:
                # BUGFIX: the original `continue` retried the same page forever
                # when parsing yielded nothing (e.g. layout change or an
                # anti-bot page). Retry at most twice, then move on.
                empty_retries += 1
                if empty_retries < 3:
                    time.sleep(2)
                    continue
            else:
                empty_retries = 0
                # Process each car's detail page.
                parse_car_detail(links)
                total_cars += len(links)
        page_number += 1

if __name__ == "__main__":
    main()
