import os
import json
from typing import List

import pandas as pd
from axiom_boot.api.controller import BaseController, post
from axiom_boot.api.models import ApiResponse, success_response
from axiom_boot.di import controller, autowired
from axiom_boot.logging.setup import get_logger
from axiom_boot.scraper import ScraperEngine, Target
from axiom_boot.core.exceptions import BusinessException
from axiom_boot.task.task_manager import TaskManager
from src.scraper.models.async_task_status_vo import AsyncTaskStatusVO
from src.scraper.actions.made_in_china_actions import perform_search, close_popups_if_present
from src.scraper.extractors.made_in_china_extractor.about_us_extractor import AboutUsExtractor
from src.scraper.extractors.made_in_china_extractor.contact_extractor import ContactExtractor
from src.scraper.extractors.made_in_china_extractor.homepage_extractor import HomepageExtractor
from src.scraper.extractors.made_in_china_extractor.list_extractor import ListExtractor
from src.scraper.models.SupplierItem import SupplierItem
from src.tasks.scraper_jobs import discover_suppliers_task, scrape_single_supplier_task

# Module-level logger, shared by every route handler in this controller.
logger = get_logger(__name__)


@controller("/made-in-china", tags=["中国制造网爬虫"])
class MadeInChinaController(BaseController):
    """Scraping endpoints for made-in-china.com suppliers.

    Exposes three routes:

    * ``/batch-start`` -- asynchronously enqueue a *producer* task that
      discovers suppliers and fans out one independent *consumer* task
      per supplier.
    * ``/batch-aggregate`` -- merge all per-supplier JSON results into a
      single Excel workbook, then delete the temporary JSON directory.
    * ``/start`` -- run the full three-stage scrape synchronously for a
      single supplier (debugging aid).
    """

    def __init__(
        self,
        engine: ScraperEngine = autowired(),
        task_manager: TaskManager = autowired()
    ):
        # Both collaborators are injected by the DI container via autowired().
        self._engine = engine
        self._task_manager = task_manager

    @post(
        "/batch-start",
        summary="【异步】启动生产者-消费者批量爬取任务",
        response_model=ApiResponse[AsyncTaskStatusVO]
    )
    async def batch_start_task(self) -> ApiResponse:
        """Submit the producer side of the producer/consumer batch pipeline.

        The producer task discovers every supplier and dispatches an
        independent consumer task for each of them; this endpoint returns
        immediately with the producer's task id.

        Returns:
            ApiResponse[AsyncTaskStatusVO]: descriptor of the queued task.

        Raises:
            BusinessException: if the task manager did not return a task id.
        """
        logger.info("接收到生产者-消费者批量爬取任务请求...")

        # The task manager is keyed by the task function's name; the display
        # name is only what the client sees in the status VO.
        task_function_name = discover_suppliers_task.__name__
        task_display_name = "scraper.discover_suppliers"

        task_id = await self._task_manager.submit_task(task_function_name)

        if not task_id:
            raise BusinessException("生产者任务提交失败，未能获取到任务ID。")

        logger.info(f"生产者任务已成功入队，任务ID: {task_id}")

        task_vo = AsyncTaskStatusVO(
            task_id=task_id,
            task_name=task_display_name,
            status="queued"
        )

        return success_response(data=task_vo)

    @post(
        "/batch-aggregate",
        summary="【聚合】将所有抓取的JSON结果合并为单个Excel文件",
        response_model=ApiResponse[str]
    )
    def batch_aggregate_results(self) -> ApiResponse[str]:
        """Merge every scraped per-supplier JSON result into one Excel file.

        Intended to be called after all consumer tasks have finished. Files
        that cannot be read, parsed, or validated are logged and skipped;
        on success the temporary JSON directory is removed.

        Returns:
            ApiResponse[str]: human-readable summary of the aggregation.

        Raises:
            BusinessException: if the result directory does not exist or
                contains no valid JSON files.
        """
        json_dir = os.path.join("storage", "scraper_results", "json")
        if not os.path.isdir(json_dir):
            raise BusinessException(f"结果目录 {json_dir} 不存在。")

        all_suppliers: List[SupplierItem] = []
        for filename in os.listdir(json_dir):
            if not filename.endswith(".json"):
                continue
            file_path = os.path.join(json_dir, filename)
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                all_suppliers.append(SupplierItem.model_validate(data))
            # ValueError also covers pydantic's ValidationError raised by
            # model_validate, so one malformed record no longer aborts the
            # whole aggregation run. OSError is the modern name for IOError.
            except (json.JSONDecodeError, OSError, ValueError) as e:
                logger.error(f"读取或解析JSON文件 {file_path} 时出错: {e}")

        if not all_suppliers:
            raise BusinessException("在结果目录中没有找到任何有效的JSON文件。")

        output_dir = os.path.join("storage", "scraper_results")
        excel_path = os.path.join(output_dir, "shandong_suppliers_aggregated.xlsx")

        self._save_to_excel(all_suppliers, excel_path)

        # Clean up the temporary JSON files and their directory.
        for filename in os.listdir(json_dir):
            os.remove(os.path.join(json_dir, filename))
        os.rmdir(json_dir)

        return success_response(data=f"成功聚合 {len(all_suppliers)} 个供应商数据到 {excel_path}")

    @post(
        "/start",
        summary="【同步】启动中国制造网三阶段爬取任务（用于调试）",
        response_model=ApiResponse[str]
    )
    async def start_task(self) -> ApiResponse:
        """Run the strict three-stage scraping flow synchronously (debug).

        1. Stage one: obtain a supplier homepage link from the list page.
        2. Stage two: obtain detail-page links from the supplier homepage.
        3. Stage three: scrape the full information from the detail pages.

        Returns:
            ApiResponse[str]: message containing the Excel output path.

        Raises:
            BusinessException: if the cookie file cannot be loaded, or any
                stage fails to produce the data the next stage needs.
        """
        logger.info("启动三阶段爬取任务...")

        # --- Shared configuration: cookies used by every Playwright target ---
        cookie_path = os.path.join("src", "scraper", "extractors", "made_in_china_extractor", "cookie.json")
        try:
            with open(cookie_path, 'r', encoding='utf-8') as f:
                cookies = json.load(f)
        except (FileNotFoundError, json.JSONDecodeError) as e:
            # Chain the original error so the root cause stays visible.
            raise BusinessException(f"无法加载 Cookie: {e}") from e

        # === Stage one: search and extract the first supplier's homepage link ===
        logger.info("【阶段一】开始: 从列表页提取供应商主页链接。")
        list_target = Target(
            url="https://www.made-in-china.com/",
            downloader="playwright",
            extractor=ListExtractor(),
            metadata={
                "debug": True, "cookies": cookies, "search_keyword": "shandong", "search_type": "supplier",
                "post_load_actions": [close_popups_if_present, perform_search]
            }
        )

        list_items = await self._engine.scrape(list_target)
        if not list_items:
            raise BusinessException("【阶段一】失败: 未能从列表页提取到任何供应商。")

        base_item = list_items[0]
        if not isinstance(base_item, SupplierItem) or not base_item.company_url:
            raise BusinessException("【阶段一】失败: 列表提取器返回了无效的数据。")

        logger.info(f"【阶段一】成功: 提取到供应商 '{base_item.company_name}' 的主页链接: {base_item.company_url}")

        # === Stage two: extract the real "About Us" and "Contact Us" links
        # from the supplier homepage (the extractor mutates base_item). ===
        logger.info(f"【阶段二】开始: 从主页 {base_item.company_url} 提取详情页链接。")
        homepage_target = Target(
            url=base_item.company_url,
            downloader="playwright",
            extractor=HomepageExtractor(),
            metadata={"debug": False, "cookies": cookies, "item": base_item}
        )

        await self._engine.scrape(homepage_target)

        # NOTE(review): company_url is guaranteed non-empty after stage one,
        # so the first half of this check only fires if HomepageExtractor
        # clears it. The log line below suggests the extractor overwrites
        # company_url with the "About Us" link -- confirm against
        # HomepageExtractor before changing this condition.
        if not base_item.company_url or not base_item.contact_url:
            raise BusinessException(f"【阶段二】失败: 未能在主页 {base_item.company_url} 上找到关于或联系我们页面的链接。")

        logger.info(f"【阶段二】成功: 'About Us' -> {base_item.company_url}, 'Contact Us' -> {base_item.contact_url}")

        # === Stage three: scrape both detail pages sequentially; each
        # extractor enriches final_item in place. ===
        logger.info("【阶段三】开始: 抓取两个详情页的完整信息。")

        final_item = base_item

        if final_item.company_url:
            logger.info(f"正在抓取“关于我们”页面: {final_item.company_url}")
            about_us_target = Target(
                url=final_item.company_url,
                downloader="playwright",
                extractor=AboutUsExtractor(),
                metadata={"debug": False, "cookies": cookies, "item": final_item}
            )
            await self._engine.scrape(about_us_target)

        if final_item.contact_url:
            logger.info(f"正在抓取“联系我们”页面: {final_item.contact_url}")
            contact_target = Target(
                url=final_item.contact_url,
                downloader="playwright",
                extractor=ContactExtractor(),
                metadata={"debug": False, "cookies": cookies, "item": final_item}
            )
            await self._engine.scrape(contact_target)

        logger.info(f"【阶段三】成功: 已完成对 {final_item.company_name} 的全量信息抓取。")

        output_dir = os.path.join("storage", "scraper_results")
        # Keep only characters that are safe in a file name; fall back to a
        # generic name if company_name is missing or nothing survives the
        # filter, instead of crashing / producing ".xlsx".
        safe_company_name = "".join(
            c for c in (final_item.company_name or "") if c.isalnum() or c in (' ', '_')
        ).rstrip() or "supplier"
        file_name = f"{safe_company_name}.xlsx"
        file_path = os.path.join(output_dir, file_name)

        self._save_to_excel([final_item], file_path)

        return success_response(data=f"数据已成功保存至: {file_path}")

    def _save_to_excel(self, suppliers: List[SupplierItem], file_path: str) -> None:
        """Save a list of supplier records to an Excel workbook.

        Columns are selected, ordered, and renamed according to
        ``column_mapping``; fields absent from the data are skipped.
        No-op when the list is empty.

        Args:
            suppliers: List of SupplierItem objects to export.
            file_path: Destination path of the Excel file; parent
                directories are created as needed.
        """
        if not suppliers:
            return

        # Maps SupplierItem field names to the Chinese column headers used in
        # the exported workbook; dict insertion order defines column order.
        column_mapping = {
            # Basic information
            "company_name": "公司名称",
            "is_audited_supplier": "认证供应商",
            "business_type": "业务类型",
            "main_products": "主要产品",
            "year_of_establishment": "成立年份",
            "number_of_employees": "员工人数",
            "registered_capital": "注册资本",
            "plant_area": "厂房面积",

            # Contact information
            "contact_person_name": "联系人",
            "contact_person_title": "职位",
            "telephone": "电话",
            "mobile_phone": "手机",
            "address": "地址",
            "company_url": "公司主页",
            "contact_url": "联系页面",

            # Trade capability
            "main_markets": "主要市场",
            "number_of_foreign_trading_staff": "外贸员工人数",
            "export_year": "出口年份",
            "nearest_port": "最近港口",
            "incoterms": "贸易术语",
            "terms_of_payment": "支付条款",
            "average_lead_time": "平均交货期",
            "repeat_buyers_choice": "回头客占比",

            # Production capability
            "production_lines": "生产线数量",
            "r_and_d_engineers": "研发工程师人数",
            "oem_service_available": "提供OEM服务",
            "odm_service_available": "提供ODM服务",
            "production_machines": "生产设备",

            # Certification and other
            "management_system_certification": "管理体系认证",
            "cooperated_with_fortune_500": "与500强合作",
        }

        data = [s.model_dump() for s in suppliers]
        df = pd.DataFrame(data)

        # Keep only mapped columns that actually exist, in mapping order.
        ordered_columns = [key for key in column_mapping if key in df.columns]
        df = df[ordered_columns]
        df.rename(columns=column_mapping, inplace=True)

        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        df.to_excel(file_path, index=False, engine='openpyxl')
        logger.info(f"成功将 {len(suppliers)} 个供应商数据保存到 Excel 文件: {file_path}")