import hashlib
import json
import os
from typing import List

import pandas as pd

from axiom_boot.task import task
from axiom_boot.scraper import ScraperEngine, Target
from axiom_boot.logging.setup import get_logger
from axiom_boot.core.exceptions import BusinessException
from axiom_boot.task.task_manager import TaskManager

from src.scraper.actions.made_in_china_actions import close_popups_if_present
from src.scraper.extractors.made_in_china_extractor.about_us_extractor import AboutUsExtractor
from src.scraper.extractors.made_in_china_extractor.contact_extractor import ContactExtractor
from src.scraper.extractors.made_in_china_extractor.homepage_extractor import HomepageExtractor
from src.scraper.extractors.made_in_china_extractor.list_extractor import ListExtractor
from src.scraper.models.SupplierItem import SupplierItem


logger = get_logger(__name__)


@task(name="scraper.scrape_single_supplier", timeout=600)  # 10-minute timeout for a single supplier
async def scrape_single_supplier_task(
    item_dict: dict,
    # Dependency injected automatically by the @task decorator.
    engine: ScraperEngine,
):
    """
    [Consumer] task: scrape the full details of one supplier and save the
    result to its own JSON file.

    Args:
        item_dict: A ``SupplierItem`` serialized via ``model_dump()``, as
            produced by the producer task.
        engine: Scraper engine, injected by the @task decorator.

    Side effects:
        Writes ``storage/scraper_results/json/<sanitized name>.json``.
        All failures are logged and swallowed (the task returns ``None``)
        so one bad supplier cannot poison the task queue.
    """
    base_item = SupplierItem.model_validate(item_dict)
    logger.info(f"【消费者】开始处理: {base_item.company_name}")

    cookie_path = os.path.join("src", "scraper", "extractors", "made_in_china_extractor", "cookie.json")
    try:
        with open(cookie_path, 'r', encoding='utf-8') as f:
            cookies = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError) as e:
        # Without valid cookies the site blocks the scrape; abort this task only.
        logger.error(f"无法为供应商 {base_item.company_name} 加载 Cookie，任务中止: {e}")
        return

    try:
        # --- Stage 2: extract detail-page links from the supplier homepage ---
        # The extractor mutates ``base_item`` (handed over via metadata["item"]).
        homepage_target = Target(
            url=base_item.company_url, downloader="playwright", extractor=HomepageExtractor(),
            metadata={"debug": False, "cookies": cookies, "item": base_item}
        )
        await engine.scrape(homepage_target)

        # NOTE(review): this guard re-checks ``company_url`` — the *input* URL
        # the homepage scrape was just run against — and Stage 3 below feeds
        # that same ``company_url`` to AboutUsExtractor. If SupplierItem has a
        # dedicated "about us" URL field, both places should probably use it
        # instead; confirm against HomepageExtractor's output fields.
        if not base_item.company_url or not base_item.contact_url:
            logger.warning(f"【阶段二】失败: 未能在主页 {base_item.company_url} 上找到关于或联系我们页面的链接。")
            return

        # --- Stage 3: scrape the detail pages for the complete record ---
        final_item = base_item
        if final_item.company_url:
            await engine.scrape(Target(
                url=final_item.company_url, downloader="playwright", extractor=AboutUsExtractor(),
                metadata={"debug": False, "cookies": cookies, "item": final_item}
            ))
        if final_item.contact_url:
            await engine.scrape(Target(
                url=final_item.contact_url, downloader="playwright", extractor=ContactExtractor(),
                metadata={"debug": False, "cookies": cookies, "item": final_item}
            ))

        logger.info(f"【阶段三】成功完成对 {final_item.company_name} 的抓取。")

        # --- Persist the result as one JSON file per supplier ---
        output_dir = os.path.join("storage", "scraper_results", "json")
        os.makedirs(output_dir, exist_ok=True)
        # Keep only alphanumeric characters for a filesystem-safe name.
        # (The original also called .rstrip(), a no-op after this filter.)
        safe_company_name = "".join(c for c in final_item.company_name if c.isalnum())
        if not safe_company_name:
            # Fallback: names that sanitize to nothing (e.g. pure punctuation)
            # would otherwise all collide on the single file ".json".
            safe_company_name = hashlib.md5(final_item.company_name.encode("utf-8")).hexdigest()[:12]
        file_path = os.path.join(output_dir, f"{safe_company_name}.json")
        with open(file_path, 'w', encoding='utf-8') as f:
            json.dump(final_item.model_dump(), f, ensure_ascii=False, indent=4)
        logger.info(f"【消费者】成功: {final_item.company_name} 的数据已保存至 {file_path}")

    except Exception as e:
        # Broad catch is deliberate: this is a top-level task boundary; log
        # the traceback and end the task instead of crashing the worker.
        logger.error(f"处理供应商 {base_item.company_name} 时发生意外错误: {e}", exc_info=True)


@task(name="scraper.discover_suppliers", timeout=3600)  # one hour is ample for discovery
async def discover_suppliers_task(
    # Dependencies are injected automatically by the @task decorator.
    engine: ScraperEngine,
    task_manager: TaskManager
):
    """
    [Producer] task: walk every search-result list page, collect all suppliers
    found there, and dispatch one independent [consumer] task per supplier.
    """
    logger.info("【生产者】启动供应商发现任务...")

    cookie_path = os.path.join(
        "src", "scraper", "extractors", "made_in_china_extractor", "cookie.json"
    )
    try:
        with open(cookie_path, 'r', encoding='utf-8') as fh:
            cookies = json.load(fh)
    except (FileNotFoundError, json.JSONDecodeError) as e:
        # Without cookies the producer cannot scrape anything — fail loudly.
        logger.error(f"无法加载 Cookie，生产者任务中止: {e}")
        raise

    SEARCH_KEYWORD = "shandong"
    TOTAL_PAGES = 40
    total_enqueued = 0

    for page_num in range(1, TOTAL_PAGES + 1):
        try:
            logger.info(f"【生产者】正在扫描列表页: {page_num}/{TOTAL_PAGES}")
            page_url = (
                f"https://www.made-in-china.com/company-search/"
                f"{SEARCH_KEYWORD}/C1/{page_num}.html"
            )
            suppliers = await engine.scrape(Target(
                url=page_url, downloader="playwright", extractor=ListExtractor(),
                metadata={"debug": False, "cookies": cookies}
            ))

            if not suppliers:
                # An empty page almost certainly means we ran past the last one.
                logger.warning(f"列表页 {page_num} 未发现任何供应商，可能已达末页。")
                break

            logger.info(f"列表页 {page_num} 发现 {len(suppliers)} 个供应商，正在分发【消费者】任务...")
            for item in suppliers:
                if not (isinstance(item, SupplierItem) and item.company_url):
                    logger.warning(f"发现无效的供应商条目，已跳过: {item}")
                    continue
                # NOTE(review): the string below is the *function* name, while
                # @task registers the name "scraper.scrape_single_supplier" —
                # confirm which identifier TaskManager.submit_task resolves.
                await task_manager.submit_task(
                    "scrape_single_supplier_task",
                    item_dict=item.model_dump()
                )
                total_enqueued += 1
        except Exception as e:
            # One broken page must not abort the whole discovery run.
            logger.error(f"扫描列表页 {page_num} 时发生错误: {e}，跳过此页。", exc_info=True)
            continue

    logger.info(f"【生产者】任务完成！总共为 {total_enqueued} 个供应商创建了独立的抓取任务。")