import asyncio
import os
from typing import Dict, Any
import logging

# Configure module-level logging
logger = logging.getLogger(__name__)

# Dependencies are injected as parameters to reduce circular imports

def generate_search_query(state: Dict[str, Any], llm=None, search_prompt=None):
    """Node 1: produce a web-search query from the user's question.

    Args:
        state: Graph state; must contain 'question'.
        llm: Optional injected chat model; resolved lazily when omitted.
        search_prompt: Optional injected prompt template; resolved lazily when omitted.

    Returns:
        Dict with a single 'search_query' key (stripped string).
    """
    # Resolve dependencies lazily to avoid circular imports at module load time.
    if llm is None or search_prompt is None:
        from src.research_core.model import get_llm
        from src.research_core.prompts import SEARCH_PROMPT
        if llm is None:
            llm = get_llm()
        if search_prompt is None:
            search_prompt = SEARCH_PROMPT

    prompt_messages = search_prompt.format_messages(question=state['question'])

    # Shield the LLM call so transient network failures don't break the graph run.
    try:
        reply = llm.invoke(prompt_messages)
        if hasattr(reply, 'content'):
            query = reply.content
        else:
            query = str(reply)
    except Exception as exc:
        logger.error(f"LLM调用失败: {exc}")
        # Degrade gracefully: reuse the raw question so the pipeline can continue.
        query = state['question']

    if not isinstance(query, str):
        query = str(query)
    return {"search_query": query.strip()}

def should_continue(state):
    """Conditional edge: return "end" when research is complete, else "continue".

    "end" routes the graph to its terminal node; "continue" loops back to
    regenerate the search query.
    """
    return "end" if state.get('research_complete', False) else "continue"

async def execute_web_search(state: Dict[str, Any], search_tool=None, kg_tool=None):
    """Node 2: run the web search (MCP tool) and the knowledge-graph query in parallel.

    Args:
        state: Graph state; must contain 'search_query'.
        search_tool: Optional injected search tool with an `.invoke(query)` method.
        kg_tool: Optional injected knowledge-graph tool with an `.invoke(dict)` method.

    Returns:
        Dict with 'search_results' (combined text), 'images', and 'tables'.
        Never raises: any failure is folded into the 'search_results' text so
        the graph can keep running.
    """
    # Resolve dependencies lazily to avoid circular imports at module load time.
    if search_tool is None or kg_tool is None:
        from src.tools.search_tool import MultimodalSearchTool
        from src.tools.knowledge_graph_tool import query_knowledge_graph
        search_tool = search_tool or MultimodalSearchTool()
        kg_tool = kg_tool or query_knowledge_graph

    try:
        # asyncio.get_event_loop() is deprecated inside a coroutine (Python 3.10+);
        # get_running_loop() is the correct call here. Both blocking tool calls are
        # pushed to the default executor so they run concurrently.
        loop = asyncio.get_running_loop()
        search_task = loop.run_in_executor(
            None, search_tool.invoke, state['search_query']
        )
        kg_task = loop.run_in_executor(
            None, kg_tool.invoke, {"query": state['search_query']}
        )

        # return_exceptions=True: a failure in one tool must not discard the other's result.
        search_data, kg_results = await asyncio.gather(search_task, kg_task, return_exceptions=True)

        # Unpack the web-search result; tolerate non-dict payloads.
        if isinstance(search_data, Exception):
            search_results = f"搜索失败: {str(search_data)}"
            images = []
            tables = []
        elif isinstance(search_data, dict):
            search_results = search_data.get("text_results", "")
            images = search_data.get("images", [])
            tables = search_data.get("tables", [])
        else:
            search_results = str(search_data) if search_data else ""
            images = []
            tables = []

        # Normalize the knowledge-graph result to a string.
        if isinstance(kg_results, Exception):
            kg_results = f"知识图谱查询失败: {str(kg_results)}"
        elif not isinstance(kg_results, str):
            kg_results = str(kg_results) if kg_results else ""

        # Merge both sources into one text block for downstream evaluation.
        combined_results = f"网络搜索结果:\n{search_results}\n\n知识图谱信息:\n{kg_results}"

        return {
            "search_results": combined_results,
            "images": images,
            "tables": tables
        }
    except Exception as e:
        # Last-resort guard so the node never crashes the graph.
        return {
            "search_results": f"搜索失败: {str(e)}",
            "images": [],
            "tables": []
        }

def evaluate_results(state: Dict[str, Any], llm=None, reflection_prompt=None):
    """Node 3: judge whether the gathered results answer the question.

    Args:
        state: Graph state; must contain 'question' and 'search_results'.
        llm: Optional injected chat model; resolved lazily when omitted.
        reflection_prompt: Optional injected prompt template; resolved lazily when omitted.

    Returns:
        {"research_complete": True, "final_answer": ...} when the evidence is judged
        sufficient, otherwise {"research_complete": False}.

    BUG FIX: the old logic tested `'sufficient' in eval_str` as a fallback, but
    "insufficient" contains "sufficient" as a substring (likewise '不充分' contains
    '充分'), so an answer explicitly saying the results were INSUFFICIENT set both
    flags, fell through to the fallback, and was wrongly marked complete. The
    insufficiency check now takes priority and the buggy fallback is removed.
    """
    # Resolve dependencies lazily to avoid circular imports at module load time.
    if llm is None or reflection_prompt is None:
        from src.research_core.model import get_llm
        from src.research_core.prompts import REFLECTION_PROMPT
        llm = llm or get_llm()
        reflection_prompt = reflection_prompt or REFLECTION_PROMPT

    messages = reflection_prompt.format_messages(
        question=state['question'],
        search_results=state['search_results']
    )

    # Shield the LLM call so transient network failures don't break the graph run.
    try:
        response = llm.invoke(messages)
        evaluation = response.content if hasattr(response, 'content') else str(response)
    except Exception as e:
        logger.error(f"评估阶段LLM调用失败: {e}")
        # Default to "keep searching" so the flow can continue.
        return {
            "research_complete": False
        }

    eval_str = evaluation.lower() if isinstance(evaluation, str) else str(evaluation).lower()

    # Keyword heuristics for the model's verdict (English + Chinese).
    sufficient_indicators = ['sufficient', '充足', '充分', '足够', 'complete', 'comprehensive']
    insufficient_indicators = ['insufficient', '不足', '缺乏', '不充分', '不够', 'incomplete', 'inadequate']

    # Insufficiency wins: several "insufficient" markers contain a "sufficient"
    # marker as a substring, so this check must come first.
    if any(indicator in eval_str for indicator in insufficient_indicators):
        return {
            "research_complete": False
        }

    if any(indicator in eval_str for indicator in sufficient_indicators):
        return {
            "research_complete": True,
            "final_answer": state['search_results']
        }

    # No clear verdict either way: keep searching.
    return {
        "research_complete": False
    }

def generate_final_answer(state: Dict[str, Any], llm=None, final_answer_prompt=None):
    """Node 4: compose the final answer from the accumulated search results.

    Args:
        state: Graph state; must contain 'question' and 'search_results'.
        llm: Optional injected chat model; resolved lazily when omitted.
        final_answer_prompt: Optional injected prompt template; resolved lazily when omitted.

    Returns:
        Dict with a single 'final_answer' key (stripped string).
    """
    # Resolve dependencies lazily to avoid circular imports at module load time.
    if llm is None or final_answer_prompt is None:
        from src.research_core.model import get_llm
        from src.research_core.prompts import FINAL_ANSWER_PROMPT
        if llm is None:
            llm = get_llm()
        if final_answer_prompt is None:
            final_answer_prompt = FINAL_ANSWER_PROMPT

    prompt_messages = final_answer_prompt.format_messages(
        question=state['question'],
        search_results=state['search_results']
    )

    # Shield the LLM call so transient network failures don't break the graph run.
    try:
        reply = llm.invoke(prompt_messages)
        if hasattr(reply, 'content'):
            answer = reply.content
        else:
            answer = str(reply)
    except Exception as exc:
        logger.error(f"生成最终答案阶段LLM调用失败: {exc}")
        # Degrade gracefully: surface the raw results so the flow can finish.
        answer = f"基于以下搜索结果无法生成完整答案:\n\n{state['search_results']}"

    if not isinstance(answer, str):
        answer = str(answer)
    return {"final_answer": answer.strip()}