import re
import time
import random
import csv
import logging
import os
import json
from typing import Dict, Optional, List
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

# -------------------------- Initialization: config & output directory --------------------------
RECORD_DIR = "records"  # directory holding all CSV batches, logs and checkpoints
if not os.path.exists(RECORD_DIR):
    os.makedirs(RECORD_DIR)
    print(f"📂 已创建数据保存目录：{os.path.abspath(RECORD_DIR)}")

# Logging configuration: errors only, appended to a file inside RECORD_DIR
logging.basicConfig(
    filename=os.path.join(RECORD_DIR, 'crawl_errors.log'),
    level=logging.ERROR,
    format='%(asctime)s - %(levelname)s - %(message)s',
    encoding='utf-8'
)
MISSING_DATA_LOG = os.path.join(RECORD_DIR, 'missing_core_data.log')  # cars missing name/price
USED_IDS_FILE = os.path.join(RECORD_DIR, 'used_ids.txt')  # dedup pool for random row IDs

# -------------------------- Global settings --------------------------
TARGET_COUNT = 10000  # total number of rows to collect
BATCH_SIZE = 1000  # flush one CSV file per 1000 rows
RETRY_LIMIT = 3  # network request retry attempts
MIN_DELAY = 1  # minimum inter-request delay (seconds)
MAX_DELAY = 3  # maximum inter-request delay (seconds)
ID_RETRY_LIMIT = 10  # retries when a freshly drawn random ID collides

# Dongchedi API endpoint configuration
BASE_URL = "https://www.dongchedi.com/motor/pc/car/brand/select_series_v2"
PARAMS = {'aid': 1839, 'app_name': 'auto_web_pc'}
CITY_NAME = "全国"
PAGE_LIMIT = 30

# Checkpoint / progress record files (enable resume after interruption)
CHECKPOINT_FILE = os.path.join(RECORD_DIR, "crawl_checkpoint.txt")
BATCH_RECORD_FILE = os.path.join(RECORD_DIR, "last_batch.txt")

# CSV column headers (order defines the output column order)
CSV_HEADERS = [
    "id",  # 8-digit random unique row ID
    "品牌ID", "品牌名称", "系列名称",
    "车型ID", "车型名称", "车型价格",
    "厂商", "级别", "能源类型", "上市时间",
    "车辆配置"  # JSON-encoded configuration column
]

# Config fields packed into the JSON "车辆配置" column
JSON_FIELDS = [
    "发动机型号", "排量(L)", "最大马力(Ps)", "最大功率(kW)", "最大扭矩(N·m)",
    "变速箱类型", "挡位个数", "车身结构", "长(mm)", "宽(mm)", "高(mm)",
    "轴距(mm)", "驱动方式", "前悬架类型", "后悬架类型", "助力类型", "车体结构"
]

# Fallback CSS class names for core fields (guards against site-side class renames)
CAR_NAME_CLASSES = ['cell_car__28WzZ', 'car-name', 'auto-title']
CAR_PRICE_CLASSES = ['cell_price__1jlTy', 'price-box', 'car-price']


# -------------------------- 错误处理函数 --------------------------
def log_error(error_type: str, message: str, suggestion: str, scope: str):
    """Emit a structured error report to stdout and the error log file.

    Args:
        error_type: Short category label for the failure.
        message: Concrete description of what went wrong.
        suggestion: Recommended remediation steps for the operator.
        scope: Which part of the crawl is affected.
    """
    report = "\n".join([
        "",
        f"【错误类型】{error_type}",
        f"【具体信息】{message}",
        f"【影响范围】{scope}",
        f"【解决建议】{suggestion}",
        "",
    ])
    print(report)
    logging.error(report)


def log_missing_core_data(car_id: int, missing_fields: list):
    """Append a record of the core fields absent for one car model and echo it."""
    record_line = f"车型ID={car_id} 缺失核心字段：{','.join(missing_fields)}\n"
    with open(MISSING_DATA_LOG, 'a', encoding='utf-8') as missing_log:
        missing_log.write(record_line)
    print(f"⚠️  已记录缺失数据：车型ID={car_id} 缺失{missing_fields}")


# -------------------------- 随机ID生成与去重 --------------------------
def load_used_ids() -> set:
    """Read the historical ID dedup pool from disk.

    Returns an empty set when the file does not exist or cannot be read
    (the failure is logged so the operator can intervene).
    """
    if not os.path.exists(USED_IDS_FILE):
        return set()
    try:
        with open(USED_IDS_FILE, 'r', encoding='utf-8') as pool_file:
            tokens = (line.strip() for line in pool_file)
            return {int(token) for token in tokens if token.isdigit()}
    except Exception as e:
        log_error(
            error_type="ID记录错误",
            message=f"加载历史ID失败：{str(e)}",
            scope="随机ID唯一性校验",
            suggestion="删除used_ids.txt后重新运行（可能少量重复）"
        )
        return set()


def save_used_ids(used_ids: set):
    """Append the given IDs (one per line) to the on-disk dedup pool."""
    try:
        lines = [f"{id_num}\n" for id_num in used_ids]
        with open(USED_IDS_FILE, 'a', encoding='utf-8') as pool_file:
            pool_file.writelines(lines)
    except Exception as e:
        log_error(
            error_type="ID记录错误",
            message=f"保存新ID失败：{str(e)}",
            scope="随机ID唯一性",
            suggestion="检查records目录写入权限"
        )


def generate_unique_id(used_ids: set) -> Optional[int]:
    """Draw a random 8-digit ID not present in *used_ids* and register it.

    Mutates *used_ids* by adding the new ID. Returns None (and logs) after
    ID_RETRY_LIMIT consecutive collisions.
    """
    attempts = 0
    while attempts < ID_RETRY_LIMIT:
        candidate = random.randint(10000000, 99999999)
        if candidate not in used_ids:
            used_ids.add(candidate)
            return candidate
        attempts += 1
    log_error(
        error_type="ID生成失败",
        message=f"连续{ID_RETRY_LIMIT}次ID冲突",
        scope="当前车型",
        suggestion="稍候重试或删除used_ids.txt"
    )
    return None


# -------------------------- 工具函数 --------------------------
def get_random_headers():
    """Build request headers with a randomized User-Agent (anti-bot evasion).

    Falls back to a fixed Chrome UA (and logs the problem) when
    fake_useragent cannot produce one, so requests never go out headerless.
    """
    try:
        generator = UserAgent()
        headers = {
            "User-Agent": generator.random,
            "Referer": "https://www.dongchedi.com/auto",
            "Accept-Language": "zh-CN,zh;q=0.9"
        }
        return headers
    except Exception as e:
        log_error(
            error_type="依赖错误",
            message=f"UserAgent生成失败：{str(e)}",
            scope="请求伪装",
            suggestion="1. 升级fake_useragent：pip install --upgrade fake_useragent\n2. 改用固定UA"
        )
        fallback = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
            "Referer": "https://www.dongchedi.com/auto"
        }
        return fallback


def fetch_car_series(page: int) -> Optional[Dict]:
    """Fetch one page of the brand/series listing from the Dongchedi API.

    POSTs to BASE_URL with up to RETRY_LIMIT attempts. Returns the parsed
    JSON payload on success; returns None when blocked (HTTP 403/429),
    when all retries time out, or when the final attempt raises any other
    request error (all failure modes are logged).

    Args:
        page: 1-based page index of the listing endpoint.
    """
    for retry in range(RETRY_LIMIT):
        try:
            # Random inter-request delay to lower the rate-limit risk.
            time.sleep(random.uniform(MIN_DELAY, MAX_DELAY))
            response = requests.post(
                url=BASE_URL,
                params=PARAMS,
                data={'sort_new': 'hot_desc', 'city_name': CITY_NAME, 'limit': PAGE_LIMIT, 'page': page},
                headers=get_random_headers(),
                timeout=20
            )
            # 403/429 are treated as anti-crawler blocks: abort immediately,
            # since retrying would likely prolong the block.
            if response.status_code in [403, 429]:
                log_error(
                    error_type="反爬拦截",
                    message=f"列表请求被拦（状态码：{response.status_code}）",
                    scope=f"第{page}页数据",
                    suggestion="1. 暂停10-30分钟\n2. 增大延迟\n3. 换网络"
                )
                return None
            response.raise_for_status()
            return response.json()
        except requests.Timeout:
            # Earlier timeouts retry silently; only the last one is logged.
            if retry == RETRY_LIMIT - 1:
                log_error(
                    error_type="网络超时",
                    message=f"列表请求超时（{RETRY_LIMIT}次重试）",
                    scope=f"第{page}页",
                    suggestion="1. 检查网络\n2. 延长超时至30秒"
                )
                return None
        except Exception as e:
            # Same retry-then-log policy for every other request failure.
            if retry == RETRY_LIMIT - 1:
                log_error(
                    error_type="请求错误",
                    message=f"列表请求异常：{str(e)}",
                    scope=f"第{page}页",
                    suggestion="查看错误日志"
                )
                return None


def extract_config_info(soup: BeautifulSoup) -> Dict:
    """Extract a car detail page's spec table into a flat dict.

    Walks every ``config-body-*`` block and collects label/value cell pairs.
    The combined "长*宽*高(mm)" dimension cell is split into three separate
    keys ("长(mm)"/"宽(mm)"/"高(mm)") when it contains at least three
    '×'-separated parts; a malformed cell keeps its raw value instead.

    Returns {} on unexpected parsing failure (logged), never raises.
    """
    flat_config = {}
    try:
        config_blocks = soup.find_all('div', attrs={'name': re.compile(r'config-body-\d+')})
        for block in config_blocks:
            # Skip blocks without a title header (likely non-spec content).
            if not block.find('h3', class_='cell_title__1COfA'):
                continue
            for row in block.find_all('div', class_='table_row__yVX1h'):
                label_elem = row.find('label', class_='cell_label__ZtXlw')
                value_elem = row.find('div', class_='cell_normal__37nRi')
                if not (label_elem and value_elem):
                    continue
                label = label_elem.text.strip()
                value = value_elem.text.strip()
                if label == "长*宽*高(mm)" and '×' in value:
                    parts = value.split('×')
                    # Bug fix: the old code unpacked split(...)[:3] into three
                    # names, so a cell with fewer than 3 parts raised
                    # ValueError and the broad except discarded the ENTIRE
                    # config. Guard the arity and fall back to the raw value.
                    if len(parts) >= 3:
                        flat_config["长(mm)"] = parts[0].strip()
                        flat_config["宽(mm)"] = parts[1].strip()
                        flat_config["高(mm)"] = parts[2].strip()
                    else:
                        flat_config[label] = value
                else:
                    flat_config[label] = value
        return flat_config
    except Exception as e:
        log_error(
            error_type="配置解析错误",
            message=f"提取配置失败：{str(e)}",
            scope="当前车型配置",
            suggestion="检查页面结构"
        )
        return {}


def find_element_with_backup(soup: BeautifulSoup, tag: str, class_list: list) -> Optional[BeautifulSoup]:
    """Locate *tag* by trying each candidate class name in order.

    Fallback lookup so a site-side CSS class rename does not break parsing;
    returns the first element found, or None when no candidate matches.
    """
    candidates = (soup.find(tag, class_=name) for name in class_list)
    return next((element for element in candidates if element), None)


def fetch_car_detail(car_id: int) -> Optional[Dict]:
    """Fetch and parse the spec page of one car model (with retries).

    Returns a dict containing 车型ID / 车型名称 / 车型价格 plus all
    flattened config fields, or None when the request is blocked, core
    fields are missing from the page (recorded separately), or the last
    of RETRY_LIMIT attempts fails.

    Args:
        car_id: Dongchedi's numeric car model identifier.
    """
    detail_url = f"https://www.dongchedi.com/auto/params-carIds-{car_id}"
    for retry in range(RETRY_LIMIT):
        try:
            # Random inter-request delay to lower the rate-limit risk.
            time.sleep(random.uniform(MIN_DELAY, MAX_DELAY))
            response = requests.get(
                url=detail_url,
                headers=get_random_headers(),
                timeout=20
            )
            # Treat 403/429 as a hard anti-crawler block: no retry.
            if response.status_code in [403, 429]:
                log_error(
                    error_type="反爬拦截",
                    message=f"详情请求被拦（ID：{car_id}）",
                    scope=f"车型ID={car_id}",
                    suggestion="1. 暂停爬取\n2. 换网络"
                )
                return None
            response.raise_for_status()
            soup = BeautifulSoup(response.text, 'html.parser')

            # Core fields, each tried against several fallback class names
            # in case the site's hashed CSS class names changed.
            car_name_elem = find_element_with_backup(soup, 'a', CAR_NAME_CLASSES)
            car_price_elem = find_element_with_backup(soup, 'span', CAR_PRICE_CLASSES)
            missing_fields = []
            if not car_name_elem:
                missing_fields.append("车型名称")
            if not car_price_elem:
                missing_fields.append("车型价格")
            # A row without name or price is useless: record and skip it.
            if missing_fields:
                log_missing_core_data(car_id, missing_fields)
                return None

            # Merge the flattened config table into the result dict.
            flat_config = extract_config_info(soup)
            return {
                "车型ID": car_id,
                "车型名称": car_name_elem.text.strip(),
                "车型价格": car_price_elem.text.strip(), **flat_config
            }
        except Exception as e:
            # Earlier failures retry silently; only the last one is logged.
            if retry == RETRY_LIMIT - 1:
                log_error(
                    error_type="详情请求错误",
                    message=f"详情获取失败（ID：{car_id}）：{str(e)}",
                    scope=f"车型ID={car_id}",
                    suggestion="手动访问URL确认"
                )
                return None


# -------------------------- 断点与文件操作 --------------------------
def get_last_batch_number() -> int:
    """Return the last completed batch number, or 0 when none is recorded."""
    try:
        with open(BATCH_RECORD_FILE, 'r', encoding='utf-8') as record:
            content = record.read()
    except FileNotFoundError:
        return 0
    try:
        return int(content.strip())
    except ValueError:
        # Corrupt/empty record file: start numbering from scratch.
        return 0


def save_last_batch_number(batch_num: int):
    """Persist the current batch number so a restart continues numbering."""
    try:
        with open(BATCH_RECORD_FILE, 'w', encoding='utf-8') as record:
            record.write(str(batch_num))
    except Exception as e:
        log_error(
            error_type="批次记录错误",
            message=f"保存批次号失败：{str(e)}",
            scope="断点续爬",
            suggestion="检查records目录权限"
        )


def save_checkpoint(page: int, current_count: int):
    """Write 'page,count' so an interrupted crawl can resume where it stopped."""
    checkpoint_text = f"{page},{current_count}"
    try:
        with open(CHECKPOINT_FILE, 'w', encoding='utf-8') as checkpoint:
            checkpoint.write(checkpoint_text)
    except Exception as e:
        log_error(
            error_type="断点保存错误",
            message=f"保存断点失败：{str(e)}",
            scope="断点续爬",
            suggestion="检查records目录权限"
        )


def load_checkpoint() -> tuple:
    """Load the saved crawl position.

    Returns (start_page, total_count); defaults to (1, 0) when no valid
    checkpoint file exists.
    """
    try:
        with open(CHECKPOINT_FILE, 'r', encoding='utf-8') as checkpoint:
            raw = checkpoint.read().strip()
        page_str, count_str = raw.split(',')
        return int(page_str), int(count_str)
    except (FileNotFoundError, ValueError):
        return 1, 0


def write_batch_to_csv(data_batch: List[Dict], batch_num: int, new_ids: set):
    """Flush one batch of rows to batch_NNN.csv and persist its new IDs.

    Writes with a UTF-8 BOM (utf-8-sig) so Excel opens the Chinese headers
    correctly. Returns True on success, False after a logged write failure.
    """
    filepath = os.path.join(RECORD_DIR, f"batch_{batch_num:03d}.csv")
    try:
        with open(filepath, 'w', newline='', encoding='utf-8-sig') as csv_file:
            writer = csv.DictWriter(csv_file, fieldnames=CSV_HEADERS)
            writer.writeheader()
            writer.writerows(data_batch)
        # Only after a successful write do the batch's IDs join the pool.
        save_used_ids(new_ids)
        print(f"\n✅ 批次{batch_num}保存完成：{filepath}（{len(data_batch)}条数据，{len(new_ids)}个新ID）")
        return True
    except PermissionError:
        log_error(
            error_type="权限错误",
            message=f"无权限写入：{filepath}",
            scope=f"批次{batch_num}",
            suggestion="1. 关闭已打开的CSV\n2. 检查目录权限"
        )
        return False
    except Exception as e:
        log_error(
            error_type="CSV写入错误",
            message=f"写入批次{batch_num}失败：{str(e)}",
            scope=f"批次{batch_num}",
            suggestion="检查数据特殊字符"
        )
        return False


# -------------------------- 主函数（核心流程） --------------------------
def main():
    """Crawl Dongchedi car data until TARGET_COUNT rows are collected.

    Flow: resume from checkpoint -> page through the brand listing ->
    fetch each car's detail page -> assign an 8-digit random unique ID ->
    buffer rows and flush one CSV per BATCH_SIZE rows. Page number, total
    count, batch number and the ID pool are all persisted so the crawl
    survives manual interruption (Ctrl-C) and crashes.
    """
    print(f"🚀 懂车帝车型爬虫启动（每{BATCH_SIZE}条保存1个CSV，实时显示ID）")
    print(f"🎯 目标：{TARGET_COUNT}条数据，ID格式：8位随机唯一整数")

    # Load checkpoint, batch number and the historical ID pool
    start_page, current_total = load_checkpoint()
    last_batch = get_last_batch_number()
    current_batch_num = last_batch + 1
    used_ids = load_used_ids()  # global ID dedup pool
    current_batch_data = []  # rows buffered for the current batch
    current_batch_ids = set()  # IDs generated during the current batch

    # Resume notice when continuing a previous run
    if current_total > 0:
        print(
            f"🔄 断点续爬：第{start_page}页 | 累计{current_total}条 | 批次{current_batch_num} | 历史ID{len(used_ids)}个")

    page = start_page
    try:
        while current_total < TARGET_COUNT:
            print(f"\n📄 正在爬取第{page}页品牌列表...")
            series_data = fetch_car_series(page)

            # Validate the listing payload before using it.
            # NOTE(review): if series_data["data"] is None, the membership
            # test below raises TypeError, which lands in the outer crash
            # handler instead of this branch — confirm whether the API can
            # return a null "data" field.
            if not series_data or "data" not in series_data or "series" not in series_data["data"]:
                log_error(
                    error_type="数据无效",
                    message=f"第{page}页无有效品牌数据",
                    scope=f"第{page}页",
                    suggestion="检查接口或稍候重试"
                )
                page += 1
                save_checkpoint(page, current_total)
                continue

            # Iterate the brands on this listing page
            for brand in series_data["data"]["series"]:
                if current_total >= TARGET_COUNT:
                    break

                brand_id = brand.get("id", "N/A")
                brand_name = brand.get("brand_name", "未知品牌")
                outter_name = brand.get("outter_name", "未知系列")
                car_ids = brand.get("car_ids", [])

                if not car_ids:
                    print(f"⚠️  品牌「{brand_name}」无车型数据，跳过")
                    continue

                # Iterate each car model of the brand
                for car_id in car_ids:
                    if current_total >= TARGET_COUNT:
                        break

                    # Generate the row's 8-digit random unique ID
                    current_id = generate_unique_id(used_ids)
                    if not current_id:
                        print(f"   ❌ 车型ID[{car_id}]：ID生成失败，跳过")
                        continue
                    current_id_str = str(current_id)
                    # NOTE(review): this ID is registered even when the
                    # detail fetch below fails, so failed fetches consume
                    # IDs without producing a row — confirm that's intended.
                    current_batch_ids.add(current_id)

                    # Fetch the car's detail/spec page
                    print(
                        f"   🚗 正在爬取：车型ID[{car_id}]（总进度：{current_total}/{TARGET_COUNT} | 批次进度：{len(current_batch_data)}/{BATCH_SIZE}）")
                    car_detail = fetch_car_detail(car_id)

                    if car_detail:
                        # Build the JSON-encoded configuration column
                        json_config = {}
                        for field in JSON_FIELDS:
                            json_config[field] = car_detail.get(field, "N/A")

                        # Assemble the full CSV row
                        full_data = {
                            "id": current_id_str,
                            "品牌ID": brand_id,
                            "品牌名称": brand_name,
                            "系列名称": outter_name,
                            "车型ID": car_detail["车型ID"],
                            "车型名称": car_detail["车型名称"],
                            "车型价格": car_detail["车型价格"],
                            "厂商": car_detail.get("厂商", "N/A"),
                            "级别": car_detail.get("级别", "N/A"),
                            "能源类型": car_detail.get("能源类型", "N/A"),
                            "上市时间": car_detail.get("上市时间", "N/A"),
                            "车辆配置": json.dumps(json_config, ensure_ascii=False)
                        }

                        # Buffer the row and advance the overall counter
                        current_batch_data.append(full_data)
                        current_total += 1

                        # Echo the assigned ID in real time
                        print(
                            f"   ✅ 爬取成功：ID={current_id_str} | 车型={car_detail['车型名称']} | 价格={car_detail['车型价格']}")

                        # Flush once the batch reaches BATCH_SIZE rows
                        if len(current_batch_data) >= BATCH_SIZE:
                            write_batch_to_csv(current_batch_data, current_batch_num, current_batch_ids)
                            save_last_batch_number(current_batch_num)
                            # Reset the batch buffers for the next batch
                            current_batch_data = []
                            current_batch_ids = set()
                            current_batch_num += 1
                            save_checkpoint(page, current_total)

            # Advance the page and persist the checkpoint
            page += 1
            save_checkpoint(page, current_total)

        # Flush the final, possibly partial, batch
        if current_batch_data:
            write_batch_to_csv(current_batch_data, current_batch_num, current_batch_ids)
            save_last_batch_number(current_batch_num)

        # Completion summary
        print(f"\n🎉 爬取任务完成！")
        print(
            f"📊 最终统计：共{current_total}条数据 | {current_batch_num}个CSV文件 | 保存路径：{os.path.abspath(RECORD_DIR)}")
        print(f"🔑 ID唯一性：累计生成{len(used_ids)}个8位随机唯一ID")

    # Manual interruption (Ctrl-C): flush what we have, then report progress
    except KeyboardInterrupt:
        if current_batch_data:
            write_batch_to_csv(current_batch_data, current_batch_num, current_batch_ids)
            save_last_batch_number(current_batch_num)
        save_checkpoint(page, current_total)
        print(f"\n⏸️  手动中断爬取，已保存当前进度：")
        print(f"   - 累计数据：{current_total}条")
        print(f"   - 最后批次：{current_batch_num}（未完成）")
        print(f"   - 最后ID：{list(current_batch_ids)[-1] if current_batch_ids else '无'}")

    # Any other crash: log it with enough context to resume later
    except Exception as e:
        log_error(
            error_type="爬虫崩溃",
            message=f"主程序意外终止：{str(e)}",
            scope="全局",
            suggestion="查看crawl_errors.log获取详细堆栈"
        )


# -------------------------- Entry point --------------------------
if __name__ == "__main__":
    # Auto-install missing third-party dependencies before starting.
    try:
        from fake_useragent import UserAgent
    except ImportError:
        print("⚠️  检测到缺失依赖，正在自动安装...")
        import subprocess
        import sys

        # Bug fix: invoke pip via the current interpreter
        # ([sys.executable, "-m", "pip", ...]) so the packages land in the
        # environment this script actually runs under — a bare "pip" on PATH
        # may belong to a different Python installation.
        result = subprocess.run(
            [sys.executable, "-m", "pip", "install", "fake_useragent", "requests", "beautifulsoup4"],
            capture_output=True, text=True
        )
        if result.returncode != 0:
            log_error(
                error_type="依赖安装失败",
                message=f"安装命令输出：{result.stderr}",
                scope="全局",
                suggestion="手动执行：pip install fake_useragent requests beautifulsoup4"
            )
            # sys.exit over the site-injected exit(): always available.
            sys.exit(1)
        from fake_useragent import UserAgent

    # Launch the crawler
    main()