from typing import Any, Dict, Optional

from langgraph.graph import StateGraph, END

from states import CrawlerState, create_initial_state
from utils.logger import logger
from nodes import (
    start_node,
    fetch_html_node,
    preprocess_html_node,
    extract_business_info_node,
    find_related_files_node,
    save_data_node,
    finalize_result_node,
    get_next_file_index,
    should_continue_processing,
    should_skip_agent_processing,
    html_fetch_success
)


def create_crawler_workflow():
    """Build and compile the crawler workflow graph.

    Wires the fetch → preprocess → extract → save pipeline together with
    the conditional branches that decide whether to skip agent processing
    and whether to loop back for the next HTML file.

    Returns:
        The compiled LangGraph workflow, ready for ``invoke``.
    """
    graph = StateGraph(CrawlerState)

    # Register every processing node with the graph.
    graph.add_node("start", start_node)
    graph.add_node("fetch_html", fetch_html_node)
    graph.add_node("preprocess_html", preprocess_html_node)
    graph.add_node("extract_business_info", extract_business_info_node)
    graph.add_node("find_files", find_related_files_node)
    graph.add_node("save_data", save_data_node)
    graph.add_node("get_next_file", get_next_file_index)
    graph.add_node("finalize", finalize_result_node)

    # Execution always begins at "start".
    graph.set_entry_point("start")

    graph.add_edge("start", "fetch_html")

    # Branch on whether the HTML fetch succeeded; failures jump straight
    # to finalization.
    graph.add_conditional_edges(
        "fetch_html",
        html_fetch_success,
        {"success": "preprocess_html", "error": "finalize"},
    )

    # After preprocessing, decide whether agent-based extraction is needed
    # or the data can be saved as-is.
    graph.add_conditional_edges(
        "preprocess_html",
        should_skip_agent_processing,
        {
            "continue_agent": "extract_business_info",
            "skip_to_save": "save_data",
        },
    )

    # Agent path: extract business info, then locate related files,
    # then persist.
    graph.add_edge("extract_business_info", "find_files")
    graph.add_edge("find_files", "save_data")

    # After saving, either loop to the next HTML file or finish up.
    graph.add_conditional_edges(
        "save_data",
        should_continue_processing,
        {"continue": "get_next_file", "finish": "finalize"},
    )

    # Looping back: a fresh file index feeds the fetch node again.
    graph.add_edge("get_next_file", "fetch_html")

    graph.add_edge("finalize", END)

    return graph.compile()


def run_crawler(html_index: int, url: Optional[str] = None, max_files: int = 5) -> Dict[str, Any]:
    """Run the crawler workflow for a given HTML file index.

    Builds the workflow, seeds it with an initial state, invokes it, and
    returns the final result dictionary. On failure, returns a dictionary
    carrying the error message and zeroed-out summary counters instead of
    raising.

    Args:
        html_index: Index of the HTML file to process.
        url: Optional URL associated with the file; the workflow may
            replace it with the URL fetched from the database.
        max_files: Maximum number of files to process in one run.

    Returns:
        The workflow's ``final_result`` dict on success, or an error
        summary dict on failure.
    """
    logger.info(f"\n=== 开始运行爬虫工作流 ===")
    logger.info(f"HTML索引: {html_index}")
    logger.info(f"={'=' * 10}【{html_index}】{'=' * 10}")

    # Build the compiled workflow graph.
    workflow = create_crawler_workflow()

    # Seed the run with its initial state.
    initial_state = create_initial_state(html_index, url, max_files)

    try:
        # Execute the workflow to completion.
        result = workflow.invoke(initial_state)

        # Prefer the URL resolved from the database over the caller's.
        actual_url = result.get('url')
        if actual_url:
            logger.info(f"数据库URL: {actual_url}")
        else:
            logger.info(f"传入URL: {url or 'None'}")

        # Log where the result was persisted, but only on clean runs.
        final_result = result.get("final_result", {})
        if final_result and "error" not in final_result:
            logger.info(f"💾 结果已保存到: data\\results\\single_result_{html_index}.json")

        logger.info("\n=== 工作流执行完成 ===")

        # Return the final result (empty dict if the workflow produced none).
        return result.get("final_result", {})

    except Exception as e:
        # Swallow the exception deliberately: callers receive an error
        # summary dict rather than a raised exception.
        error_msg = f"工作流执行失败: {str(e)}"
        logger.error(f"\n=== 工作流执行失败 ===")
        logger.error(error_msg)

        return {
            "error": error_msg,
            "processed_files": 0,
            "total_emails": 0,
            "total_phones": 0,
            "social_media_platforms": 0,
            "has_company_info": False
        }
