import requests
import pandas as pd
import time
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed

# Configure logging: timestamped INFO-level messages for scrape progress/errors.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class CarScraper:
    """Scrapes paginated car listings from a POST search API and exports them to Excel.

    Pages are fetched concurrently with a thread pool; 502 responses are retried
    with a fixed delay. Results accumulate in ``self.all_car_data``.
    """

    def __init__(self, url, headers, cookies):
        self.url = url            # search API endpoint (POST)
        self.headers = headers    # HTTP headers sent with every request
        self.cookies = cookies    # session cookies (auth token etc.)
        self.all_car_data = []    # accumulated rows across all pages

    def fetch_car_data(self, page_index, retries=3, delay=5):
        """Fetch one page of car records.

        Args:
            page_index: 1-based page number to request.
            retries: remaining retry attempts for HTTP 502 responses.
            delay: seconds to sleep before each 502 retry.

        Returns:
            A list of dicts with keys ``name``, ``phone_num``, ``combined_info``;
            empty list on any error.
        """
        data = {
            "q": "",
            "page_index": str(page_index),
            "page_size": "25"
        }

        try:
            logging.info(f"正在爬取第 {page_index} 页...")  # log current page
            # verify=False: the target host has an invalid/self-signed cert.
            # NOTE(review): this disables TLS verification — acceptable only for
            # this known internal host.
            response = requests.post(self.url, headers=self.headers, cookies=self.cookies, data=data, verify=False)

            # Retry on 502 (gateway hiccup) with a fixed backoff.
            if response.status_code == 502 and retries > 0:
                logging.warning(f"502 Error on page {page_index}. Retrying {retries} more times...")
                time.sleep(delay)
                return self.fetch_car_data(page_index, retries=retries - 1, delay=delay)
            elif response.status_code == 200:
                response_data = response.json()
                car_list = response_data.get('car_list', [])
                page_car_data = []

                for car in car_list:
                    name = car.get('name', '')
                    phone_num = car.get('phone_num', '')
                    # Combine plate number and brand into one display field.
                    combined_info = f"{car.get('plate_num', '')}-{car.get('brand', '')}"

                    page_car_data.append({
                        'name': name,
                        'phone_num': phone_num,
                        'combined_info': combined_info
                    })
                return page_car_data
            else:
                logging.error(f"Error on page {page_index}, status code: {response.status_code}")
                return []

        except Exception as e:
            # Network failures / bad JSON: log and treat the page as empty so
            # the overall scrape keeps going.
            logging.error(f"Exception occurred on page {page_index}: {e}")
            return []

    def fetch_all_car_data(self, total_pages):
        """Fetch pages 1..total_pages concurrently and collect all rows."""
        with ThreadPoolExecutor(max_workers=30) as executor:  # 30 worker threads
            futures = [executor.submit(self.fetch_car_data, page_index) for page_index in range(1, total_pages + 1)]

            # Results are appended in completion order, not page order.
            for future in as_completed(futures):
                self.all_car_data.extend(future.result())

    def save_to_excel(self, filename="car_info.xlsx"):
        """Write accumulated rows to an Excel file.

        Note: ``to_excel`` takes no ``encoding`` argument in pandas >= 2.0
        (it was removed); Excel output is always handled by the engine.
        """
        df = pd.DataFrame(self.all_car_data)
        df.to_excel(filename, index=False)
        logging.info(f"数据已成功导出到 {filename}")

    def scrape(self, total_pages=4410, filename="car_info.xlsx"):
        """Run the full pipeline: fetch all pages, then export to Excel."""
        logging.info(f"开始爬取 {total_pages} 页数据...")
        self.fetch_all_car_data(total_pages)
        self.save_to_excel(filename)
        logging.info("爬取和保存过程完成！")


# Request headers and cookies for the scrape session.
headers = {
    "Accept": "*/*",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Connection": "keep-alive",
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "Origin": "http://xbqc.aichedian.com",
    "Referer": "http://xbqc.aichedian.com/vip/car-list/",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
    "X-Requested-With": "XMLHttpRequest"
}
cookies = {
    "login_phone": "",
    "username": "17798537872",
    "timestamp": "1742522564",
    "token": "5a1af6e0c210d0d78fa23691dcde37f8",
    "chain_id": "11",
    "tt": "9",
    "tc_url": "",
    "is_single": "0",
    "sn": "\"\\\\u5357\\\\u4e0a\\\\u534e\\\\u4e3a\\\\u96e8\\\\u82b1\\\\u8679\\\\u60a6\\\\u57ce\\\\u5e97\"",
    "last_tenant_id": "1",
    "boss": "0",
    "un": "\"\\\\u8679\\\\u60a6\\\\u57ce\"",
    "tc": "\"\\\\u4f7f\\\\u7528\\\\u6280\\\\u5de7\\\\uff1a\\\\u4e00\\\\u4e2a\\\\u4f1a\\\\u5458\\\\u53ef\\\\u4ee5\\\\u7ed1\\\\u5b9a\\\\u591a\\\\u4e2a\\\\u8f66\\\\u724c\\\\uff0c\\\\u9002\\\\u7528\\\\u4e8e\\\\u4f01\\\\u4e1a\\\\u548c\\\\u4e00\\\\u4e2a\\\\u5bb6\\\\u5ead\\\\u6709\\\\u591a\\\\u8f86\\\\u8f66\\\\u7684\\\\u60c5\\\\u51b5\""
}
url = "http://xbqc.aichedian.com/qsearch/api/v1/car_search/"


def main():
    """Instantiate the scraper and run the full 4410-page scrape/export."""
    scraper = CarScraper(url, headers, cookies)
    scraper.scrape(total_pages=4410, filename="car_info.xlsx")


# Guarded entry point: importing this module no longer triggers the scrape
# (the original ran 4410 network requests as a module-level side effect).
if __name__ == "__main__":
    main()
