"""
增强版多代理工作流模块
结合标准版、优化版和流式处理工作流的优点，提供高性能、高质量的研究助理功能
"""

from langgraph.graph import StateGraph, END
from typing import Optional, Dict, Any, List
import re
import hashlib
import time
import logging
import asyncio
from dataclasses import dataclass, field

# 性能监控
from src.utils.metrics import track_performance

# 缓存管理
from src.research_core.cache_manager import cache_manager

# 延迟初始化组件
llm = None
search_tool = None
multimodal_search_tool = None

logger = logging.getLogger(__name__)

def _initialize_components():
    """Lazily initialize the shared LLM and search-tool singletons.

    Imports are deferred to the first call so importing this module stays
    cheap; each module-level global (`llm`, `search_tool`,
    `multimodal_search_tool`) is created at most once, so repeated calls
    are inexpensive no-ops.
    """
    global llm, search_tool, multimodal_search_tool
    if llm is None:
        from src.research_core.model import get_llm
        llm = get_llm()
    if search_tool is None:
        from src.tools.search_tool import get_search_tool
        search_tool = get_search_tool()
    if multimodal_search_tool is None:
        from src.tools.search_tool import MultimodalSearchTool
        multimodal_search_tool = MultimodalSearchTool()

def _sanitize_content(content: str, max_length: int = 3000) -> str:
    """
    清理内容以避免触发内容安全策略
    
    Args:
        content: 需要清理的内容
        max_length: 最大长度限制
        
    Returns:
        str: 清理后的内容
    """
    if not content:
        return ""
    
    # 移除多余的空白字符
    sanitized = re.sub(r'\s+', ' ', content)
    
    # 移除可能导致安全问题的特殊字符组合
    sanitized = re.sub(r'[<>{}[\]]+', '', sanitized)
    
    # 移除潜在的脚本标签和危险关键字
    dangerous_patterns = [
        r'(?i)<script.*?>.*?</script>',
        r'(?i)(javascript|vbscript|data):',
        r'(?i)on\w+\s*=',
        r'(?i)(script|alert|eval|exec|system|os\.)',
    ]
    
    for pattern in dangerous_patterns:
        sanitized = re.sub(pattern, '', sanitized)
    
    # 限制长度
    if len(sanitized) > max_length:
        sanitized = sanitized[:max_length] + "...(内容已截断)"
    
    return sanitized.strip()

# State definition
@dataclass
class EnhancedMultiAgentState:
    """State shared by every agent in the enhanced multi-agent workflow.

    Agents return partial dicts that the graph merges back into this
    state.  Several fields are declared but never referenced in this
    module (noted below) — presumably consumed elsewhere; verify before
    removing.
    """
    question: str = ""  # original user question
    search_query: str = ""  # not referenced in this module
    search_results: str = ""  # sanitized text results from the search expert
    research_complete: bool = False  # not referenced in this module
    final_answer: str = ""  # answer produced by the writing expert
    images: Optional[List[dict]] = None  # image dicts ('description', 'url') from multimodal search
    tables: Optional[List[dict]] = None  # table dicts ('title') from multimodal search
    multimodal_content: Optional[dict] = None  # not referenced in this module
    search_strategy: str = ""  # current strategy from the strategist / improver
    analysis_results: Dict[str, Any] = field(default_factory=dict)  # {'content': ...} from the analysis expert
    agent_assignments: Dict[str, Any] = field(default_factory=dict)  # not referenced in this module
    previous_states: List[Dict[str, Any]] = field(default_factory=list)  # recent state summaries (coordinator stores an int length, so values are Any)
    cache_key: Optional[str] = None  # not referenced in this module
    iteration_count: int = 0  # coordinator loop counter
    reflection: Optional[str] = None  # QA verdict: "sufficient" / "insufficient"
    visualizations: Dict[str, Any] = field(default_factory=dict)  # {'content': ...} from the visualization expert
    next_step: Optional[str] = None  # routing decision made by the coordinator
    content_quality_score: float = 0.5  # heuristic 0-1 quality score
    decision_history: List[str] = field(default_factory=list)  # last coordinator decisions (bounded to 5)
    recursion_depth: int = 0  # incremented on each coordinator pass
    previous_strategy: str = ""  # strategy fed back into the strategist prompt
    quality_score: float = 0.5  # not referenced in this module
    needs_improvement: bool = False  # not referenced in this module

class WorkflowOptimizer:
    """Guards the workflow against runaway loops and duplicate states."""

    def __init__(self):
        # Upper bound on should_terminate() calls before forcing a stop.
        self.max_iterations = 5
        # Hard cap on the recursion depth reported by the coordinator.
        self.max_recursion_limit = 50
        self.iteration_count = 0
        self.previous_states = set()
        self.recursion_depth = 0

    def check_recursion_limit(self, state: Dict[str, Any]) -> bool:
        """Return True once the state's recursion depth reaches the cap."""
        return state.get("recursion_depth", 0) >= self.max_recursion_limit

    def should_terminate(self, state: Dict[str, Any]) -> bool:
        """Return True when the workflow should stop iterating."""
        self.iteration_count += 1

        # Stop after the configured number of iterations.
        if self.iteration_count >= self.max_iterations:
            return True

        # Stop when the exact same state has been seen before.
        fingerprint = hashlib.md5(str(sorted(state.items())).encode()).hexdigest()
        if fingerprint in self.previous_states:
            return True
        self.previous_states.add(fingerprint)

        # Stop when the answer is long enough and contains at least one
        # paragraph break.
        answer = state.get("final_answer", state.get("answer", ""))
        if len(answer) > 200 and answer.count("\n\n") >= 1:
            return True

        return False

workflow_optimizer = WorkflowOptimizer()

# Agent functions
@track_performance("search_strategist")
async def async_search_strategist_agent(state: EnhancedMultiAgentState) -> Dict[str, Any]:
    """Async search strategist: devise the best search strategy for the question."""
    _initialize_components()
    from src.research_core.prompts import SEARCH_STRATEGIST_PROMPT

    question = _sanitize_content(state.question)

    # An empty question gets a default strategy.
    if not question:
        return {"search_strategy": "人工智能 最新发展"}

    # Pull in the previous strategy and analysis, when available.
    prior = state.previous_strategy
    if isinstance(state.analysis_results, dict):
        analysis = state.analysis_results.get('content', '')
    else:
        analysis = ''

    iteration = state.iteration_count
    stamp = int(time.time())

    messages = SEARCH_STRATEGIST_PROMPT.format_messages(
        question=question,
        previous_strategy=f"先前的搜索策略：\n{prior}\n" if prior else "",
        analysis=f"分析结果：\n{analysis}\n" if analysis else "",
    )

    try:
        if llm is None:
            _initialize_components()
        if llm is None:
            return {"search_strategy": question}
        response = await asyncio.to_thread(llm.invoke, messages)
        strategy = response.content if hasattr(response, 'content') else str(response)
        cleaned = _sanitize_content(str(strategy))
        # Iteration/timestamp suffix keeps repeated strategies cache-distinct.
        return {"search_strategy": f"{cleaned.strip()} (迭代{iteration}, 时间戳{stamp})"}
    except Exception as e:
        logger.error(f"搜索策略专家执行失败: {e}")
        # Fall back to the (sanitized) question as the strategy.
        return {"search_strategy": question}

@track_performance("search_expert")
async def async_search_expert_agent(state: EnhancedMultiAgentState) -> Dict[str, Any]:
    """Async search expert: run the (cached) multimodal search."""
    _initialize_components()

    try:
        # Fall back to the question (or a generic query) when no strategy is set.
        query = state.search_strategy
        if not query or not query.strip():
            query = state.question or '最新资讯'

        # Serve repeated queries from the cache.
        cache_key = hashlib.md5(query.encode('utf-8')).hexdigest()
        cached = cache_manager.get(cache_key)
        if cached:
            logger.info(f"缓存命中: {query[:30]}...")
            return cached

        if multimodal_search_tool is None:
            _initialize_components()
        if multimodal_search_tool is None:
            return {
                "search_results": "搜索工具初始化失败",
                "images": [],
                "tables": []
            }

        data = await asyncio.to_thread(multimodal_search_tool.invoke, query)
        # Sanitize the textual portion before it reaches downstream agents.
        result = {
            "search_results": _sanitize_content(data["text_results"], max_length=5000),
            "images": data.get("images", []),
            "tables": data.get("tables", [])
        }
        cache_manager.set(cache_key, result)
        return result
    except Exception as e:
        logger.error(f"搜索专家执行失败: {e}")
        return {
            "search_results": f"搜索执行失败: {str(e)}",
            "images": [],
            "tables": []
        }

@track_performance("analysis_expert")
async def async_analysis_expert_agent(state: EnhancedMultiAgentState) -> Dict[str, Any]:
    """Async analysis expert: perform a deep analysis of the search results."""
    _initialize_components()
    from src.research_core.prompts import ANALYST_PROMPT

    # Sanitize all inputs before they are placed in the prompt.
    question = _sanitize_content(state.question)
    strategy = _sanitize_content(state.search_strategy)
    results = _sanitize_content(state.search_results)

    # Trim very long results to keep the LLM call fast.
    if len(results) > 4000:
        results = results[:4000] + "...(内容已截断以提高性能)"

    # Bail out when there is nothing substantial to analyze.
    if not results or len(results.strip()) < 10:
        return {"analysis_results": {"content": "搜索结果内容不足，无法进行深度分析。"}}

    # Assemble the multimodal context: text plus image/table descriptions.
    context = f"搜索结果:\n{results}"

    image_lines = [
        f"图像 {i+1}: {img.get('description', '无描述')}"
        for i, img in enumerate(state.images or [])
        if img and isinstance(img, dict)
    ]
    if image_lines:
        context += f"\n\n相关图像:\n" + "\n".join(image_lines)

    table_lines = [
        f"表格 {i+1}: {tbl.get('title', '无标题')}"
        for i, tbl in enumerate(state.tables or [])
        if tbl and isinstance(tbl, dict)
    ]
    if table_lines:
        context += f"\n\n相关表格:\n" + "\n".join(table_lines)

    messages = ANALYST_PROMPT.format_messages(
        question=question,
        search_strategy=strategy,
        search_results=context
    )

    try:
        if llm is None:
            _initialize_components()
        if llm is None:
            return {"analysis_results": {"content": "语言模型初始化失败"}}
        response = await asyncio.to_thread(llm.invoke, messages)
        raw = response.content if hasattr(response, 'content') else str(response)
        return {"analysis_results": {"content": _sanitize_content(str(raw))}}
    except Exception as e:
        logger.error(f"分析专家执行失败: {e}")
        return {"analysis_results": {"content": f"分析执行失败: {str(e)}"}}

@track_performance("quality_assurance")
async def async_quality_assurance_agent(state: EnhancedMultiAgentState) -> Dict[str, Any]:
    """Async QA expert: judge whether the gathered results are sufficient.

    Returns ``{"reflection": "sufficient"}`` or
    ``{"reflection": "insufficient"}``.

    Bug fix: "sufficient" is a substring of "insufficient", so the old
    check ``"sufficient" in verdict`` classified an "insufficient" model
    verdict as sufficient.  The "insufficient" test now runs first.
    """
    _initialize_components()
    from src.research_core.prompts import REFLECTION_PROMPT

    # Sanitize prompt inputs.
    question = _sanitize_content(state.question)
    results = _sanitize_content(state.search_results, max_length=3000)

    # Nothing substantial to evaluate -> treat the research as insufficient.
    if not results or len(results.strip()) < 10:
        return {"reflection": "insufficient"}

    # Build the multimodal evaluation context.
    context = f"搜索结果:\n{results}"

    image_lines = [
        f"图像 {i+1}: {img.get('description', '无描述')}"
        for i, img in enumerate(state.images or [])
        if img and isinstance(img, dict)
    ]
    if image_lines:
        context += f"\n\n相关图像:\n" + "\n".join(image_lines)

    table_lines = [
        f"表格 {i+1}: {tbl.get('title', '无标题')}"
        for i, tbl in enumerate(state.tables or [])
        if tbl and isinstance(tbl, dict)
    ]
    if table_lines:
        context += f"\n\n相关表格:\n" + "\n".join(table_lines)

    messages = REFLECTION_PROMPT.format_messages(
        question=question,
        search_results=context
    )

    try:
        if llm is None:
            _initialize_components()
        if llm is None:
            return {"reflection": "insufficient"}
        response = await asyncio.to_thread(llm.invoke, messages)
        raw = response.content if hasattr(response, 'content') else str(response)
        verdict = _sanitize_content(str(raw)).strip().lower()

        # Check "insufficient" BEFORE "sufficient" (substring relationship).
        if "insufficient" in verdict:
            return {"reflection": "insufficient"}
        if "sufficient" in verdict:
            return {"reflection": "sufficient"}
        # Unexpected output: a long answer is taken as "sufficient".
        return {"reflection": "sufficient" if len(verdict) > 100 else "insufficient"}
    except Exception as e:
        logger.error(f"质量保证专家执行失败: {e}")
        # Any failure defaults to "insufficient".
        return {"reflection": "insufficient"}

def _evaluate_content_quality(analysis_content: str, search_results: str) -> float:
    """评估内容质量，返回0-1之间的分数"""
    score = 0.5  # 默认中等质量
    
    # 1. 内容长度评估
    if analysis_content and len(analysis_content) > 300:
        score += 0.2
    elif analysis_content and len(analysis_content) > 150:
        score += 0.1
    
    # 2. 关键词丰富度评估
    if search_results and analysis_content:
        # 简单的关键词匹配
        search_words = set(search_results.lower().split())
        analysis_words = set(analysis_content.lower().split())
        if search_words and analysis_words:
            keyword_coverage = len(search_words.intersection(analysis_words)) / max(1, len(search_words))
            score += keyword_coverage * 0.2
    
    # 3. 结构完整性评估
    if analysis_content:
        if "总结" in analysis_content or "conclusion" in analysis_content.lower():
            score += 0.1
        if "分析" in analysis_content or "analysis" in analysis_content.lower():
            score += 0.05
        if "建议" in analysis_content or "recommendation" in analysis_content.lower():
            score += 0.05
    
    # 4. 缓存友好性评估 - 避免重复内容
    if analysis_content:
        # 检查内容的多样性
        unique_sentences = len(set(analysis_content.split('.')))
        total_sentences = len(analysis_content.split('.'))
        if total_sentences > 0:
            diversity_ratio = unique_sentences / total_sentences
            score += diversity_ratio * 0.1
    
    # 确保分数在0-1之间
    return max(0, min(1, score))

@track_performance("improve_search")
async def async_improve_search_strategy(state: EnhancedMultiAgentState) -> Dict[str, Any]:
    """Async strategy refiner: improve the search strategy using prior analysis."""
    _initialize_components()
    from src.research_core.prompts import IMPROVE_SEARCH_PROMPT

    question = _sanitize_content(state.question)
    strategy = _sanitize_content(state.search_strategy)

    # Analysis results may be missing or malformed; default to empty text.
    analysis_results = state.analysis_results or {}
    if isinstance(analysis_results, dict):
        analysis = _sanitize_content(analysis_results.get('content', ''))
    else:
        analysis = ''

    # With no prior strategy or analysis there is nothing to improve;
    # return a timestamped fallback so the strategy string still changes.
    if not strategy and not analysis:
        fallback = question if question else '通用搜索策略'
        return {"search_strategy": f"{fallback} {int(time.time())}"}

    iteration = state.iteration_count
    stamp = int(time.time())

    messages = IMPROVE_SEARCH_PROMPT.format_messages(
        question=question,
        previous_strategy=strategy,
        analysis=analysis
    )

    try:
        if llm is None:
            _initialize_components()
        if llm is None:
            return {"search_strategy": f"{state.search_strategy or question} {stamp}"}
        response = await asyncio.to_thread(llm.invoke, messages)
        raw = response.content if hasattr(response, 'content') else str(response)
        improved = _sanitize_content(str(raw))
        # Iteration/timestamp suffix keeps repeated strategies cache-distinct.
        return {"search_strategy": f"{improved.strip()} (迭代{iteration}, 时间戳{stamp})"}
    except Exception as e:
        logger.error(f"改进搜索策略执行失败: {e}")
        # On failure, fall back to the prior strategy plus a timestamp.
        return {"search_strategy": f"{state.search_strategy or question} {int(time.time())}"}

@track_performance("data_visualization")
async def async_data_visualization_expert_agent(state: EnhancedMultiAgentState) -> Dict[str, Any]:
    """Async data-visualization expert: describe visualizations for the findings."""
    _initialize_components()
    from src.research_core.prompts import DATA_VISUALIZATION_PROMPT

    question = _sanitize_content(state.question)
    results = _sanitize_content(state.search_results, max_length=3000)

    # Analysis results may be missing or malformed; default to empty text.
    analysis_results = state.analysis_results or {}
    if isinstance(analysis_results, dict):
        analysis = _sanitize_content(analysis_results.get('content', ''), max_length=2000)
    else:
        analysis = ''

    # Visualization needs both search results and an analysis.
    if not results or not analysis:
        return {"visualizations": {"content": "没有足够的数据进行可视化。"}}

    # Keep prompt sizes bounded.
    if len(results) > 3000:
        results = results[:3000] + "...(内容已截断以提高性能)"
    if len(analysis) > 2000:
        analysis = analysis[:2000] + "...(内容已截断以提高性能)"

    messages = DATA_VISUALIZATION_PROMPT.format_messages(
        question=question,
        search_results=results,
        analysis=analysis
    )

    try:
        if llm is None:
            _initialize_components()
        if llm is None:
            return {"visualizations": {"content": "语言模型初始化失败"}}
        response = await asyncio.to_thread(llm.invoke, messages)
        raw = response.content if hasattr(response, 'content') else str(response)
        return {"visualizations": {"content": _sanitize_content(str(raw))}}
    except Exception as e:
        logger.error(f"数据可视化专家执行失败: {e}")
        return {"visualizations": {"content": f"可视化生成失败: {str(e)}"}}

@track_performance("writing_expert")
async def async_writing_expert_agent(state: EnhancedMultiAgentState) -> Dict[str, Any]:
    """Async writing expert: compose the final answer from all gathered material."""
    _initialize_components()
    from src.research_core.prompts import WRITER_PROMPT

    strategy = state.search_strategy or ''
    results = state.search_results or ''

    # Analysis results may be missing or malformed; default to empty text.
    analysis_results = state.analysis_results or {}
    if isinstance(analysis_results, dict):
        analysis = analysis_results.get('content', '') or ''
    else:
        analysis = ''

    # Base context: strategy + results + analysis ('无' marks missing pieces).
    context = f"搜索策略:\n{strategy or '无'}\n\n搜索结果:\n{results or '无'}\n\n分析结果:\n{analysis or '无'}"

    # Optional visualization section.
    visualizations = state.visualizations or {}
    if isinstance(visualizations, dict) and visualizations.get('content'):
        context += f"\n\n数据可视化:\n{visualizations.get('content')}"

    # Optional image section (with source URLs).
    image_lines = [
        f"图像 {i+1}: {img.get('description', '无描述')} [URL: {img.get('url', '无链接')}]"
        for i, img in enumerate(state.images or [])
        if img and isinstance(img, dict)
    ]
    if image_lines:
        context += f"\n\n相关图像:\n" + "\n".join(image_lines)

    # Optional table section.
    table_lines = [
        f"表格 {i+1}: {tbl.get('title', '无标题')}"
        for i, tbl in enumerate(state.tables or [])
        if tbl and isinstance(tbl, dict)
    ]
    if table_lines:
        context += f"\n\n相关表格:\n" + "\n".join(table_lines)

    # Sanitize everything that goes into the prompt.
    safe_strategy = _sanitize_content(strategy)
    safe_context = _sanitize_content(context, max_length=8000)
    safe_question = _sanitize_content(state.question)

    # Provide a default when sanitization left nothing usable.
    if not safe_context or len(safe_context.strip()) < 10:
        safe_context = "未能获取到足够的研究材料，请尝试重新查询。"

    messages = WRITER_PROMPT.format_messages(
        search_strategy=safe_strategy,
        context=safe_context,
        question=safe_question
    )

    try:
        if llm is None:
            _initialize_components()
        if llm is None:
            return {"final_answer": "语言模型初始化失败"}
        response = await asyncio.to_thread(llm.invoke, messages)
        raw = response.content if hasattr(response, 'content') else str(response)
        return {"final_answer": _sanitize_content(str(raw))}
    except Exception as e:
        logger.error(f"写作专家执行失败: {e}")
        return {"final_answer": f"答案生成失败: {str(e)}"}

@track_performance("coordinator")
async def async_coordinator_agent(state: EnhancedMultiAgentState) -> Dict[str, Any]:
    """Async coordinator: decide the next workflow step.

    Weighs the QA verdict, a heuristic content-quality score, iteration and
    recursion counters, and decision history to pick one of
    "improve_search", "generate_answer" or "visualize_data".

    Bug fix: the reflection is now compared for equality with
    "sufficient" — "sufficient" is a substring of "insufficient", so the
    old containment check treated an insufficient verdict as sufficient.
    """
    _initialize_components()

    # Bump the loop counters carried through the state.
    iteration_count = state.iteration_count + 1
    max_iterations = 3  # hard cap on coordinator-driven iterations
    recursion_depth = state.recursion_depth + 1

    # Sanitize inputs used by the quality heuristics.
    sanitized_results = _sanitize_content(state.search_results)
    analysis_results = state.analysis_results or {}
    sanitized_analysis = _sanitize_content(analysis_results.get('content', '') if isinstance(analysis_results, dict) else '')
    sanitized_strategy = _sanitize_content(state.search_strategy)
    reflection = state.reflection or ''

    # Exact match: "sufficient" is a substring of "insufficient", so a
    # containment test would misclassify an insufficient verdict.
    is_sufficient = reflection.strip().lower() == "sufficient"

    # Heuristic 0-1 quality score for the analysis text.
    content_quality_score = _evaluate_content_quality(sanitized_analysis, sanitized_results)

    # Detect a stuck decision loop (same choice repeated).
    decision_history = state.decision_history or []
    repeated_decision = len(decision_history) >= 2 and len(set(decision_history[-3:])) == 1

    # Placeholder: visualization potential is not yet estimated from the data.
    visualization_potential = 0.5

    # Decision cascade (first matching rule wins).
    if iteration_count >= max_iterations:
        next_step = "generate_answer"  # iteration cap reached
    elif workflow_optimizer.check_recursion_limit({"recursion_depth": recursion_depth}):
        next_step = "generate_answer"  # recursion cap reached
    elif is_sufficient and content_quality_score > 0.7 and visualization_potential > 0.6:
        next_step = "visualize_data"  # high quality and worth visualizing
    elif is_sufficient and content_quality_score > 0.7:
        next_step = "generate_answer"  # high quality, verdict sufficient
    elif content_quality_score > 0.8:
        next_step = "generate_answer"  # very high quality regardless of verdict
    elif content_quality_score < 0.3 and iteration_count < max_iterations:
        next_step = "improve_search"  # low quality: refine the search
    elif repeated_decision:
        next_step = "generate_answer"  # break decision loops
    else:
        next_step = "generate_answer" if iteration_count >= 2 else "improve_search"

    # Avoid re-searching when the state has not materially changed.
    prev_states = state.previous_states or []
    if iteration_count > 1:
        current_state_summary = {
            "search_strategy": sanitized_strategy[:30] if sanitized_strategy else "",
            "search_results_length": len(sanitized_results)
        }
        if current_state_summary in prev_states:
            next_step = "generate_answer"
        else:
            prev_states.append(current_state_summary)
            # Keep only the three most recent summaries.
            if len(prev_states) > 3:
                prev_states = prev_states[-3:]

    # Defensive: never emit an unknown step.
    valid_steps = ["improve_search", "generate_answer", "visualize_data"]
    if next_step not in valid_steps:
        next_step = "generate_answer"

    # Absolute safety net against infinite loops.
    if iteration_count > max_iterations + 2:
        next_step = "generate_answer"

    # Record the decision (history bounded to 5 entries).
    decision_history.append(next_step)
    if len(decision_history) > 5:
        decision_history = decision_history[-5:]

    result = {
        "next_step": next_step,
        "iteration_count": iteration_count,
        "decision_history": decision_history,
        "content_quality_score": content_quality_score,
        "recursion_depth": recursion_depth
    }

    # Only report state tracking after the first iteration.
    if iteration_count > 1:
        result["previous_states"] = prev_states

    logger.debug(f"协调器决策: 迭代次数={iteration_count}, 递归深度={recursion_depth}, 下一步={next_step}")
    return result

def create_enhanced_multi_agent_workflow():
    """Build and compile the enhanced multi-agent research workflow graph."""
    _initialize_components()

    workflow = StateGraph(EnhancedMultiAgentState)

    # Register the agent nodes.
    nodes = {
        "search_strategist": async_search_strategist_agent,
        "search_expert": async_search_expert_agent,
        "analysis_expert": async_analysis_expert_agent,
        "quality_assurance": async_quality_assurance_agent,
        "improve_search": async_improve_search_strategy,
        "data_visualization_expert": async_data_visualization_expert_agent,
        "writing_expert": async_writing_expert_agent,
        "coordinator": async_coordinator_agent,
    }
    for node_name, agent in nodes.items():
        workflow.add_node(node_name, agent)

    # Fixed pipeline edges.
    workflow.set_entry_point("search_strategist")
    for src, dst in [
        ("search_strategist", "search_expert"),
        ("search_expert", "analysis_expert"),
        ("analysis_expert", "quality_assurance"),
        ("quality_assurance", "coordinator"),
        ("improve_search", "search_strategist"),
        ("data_visualization_expert", "writing_expert"),
    ]:
        workflow.add_edge(src, dst)

    # Route the coordinator's decision.  Both "generate_answer" and
    # "visualize_data" pass through the visualization expert, which then
    # feeds the writing expert; anything else loops back to improve_search.
    def route_after_coordinator(state: EnhancedMultiAgentState) -> str:
        next_step = state.next_step if state.next_step else "improve_search"
        if next_step in ("generate_answer", "visualize_data"):
            return "data_visualization_expert"
        return "improve_search"

    workflow.add_conditional_edges(
        "coordinator",
        route_after_coordinator,
        {
            "improve_search": "improve_search",
            "data_visualization_expert": "data_visualization_expert"
        }
    )

    # The writing expert terminates the graph.
    workflow.add_edge("writing_expert", END)

    return workflow.compile()

# Global workflow instance (created lazily by the getter below).
_enhanced_workflow_instance = None

def get_enhanced_multi_agent_workflow():
    """Return the shared compiled workflow, building it on first use."""
    global _enhanced_workflow_instance
    if _enhanced_workflow_instance is None:
        _enhanced_workflow_instance = create_enhanced_multi_agent_workflow()
    return _enhanced_workflow_instance

# Module-level `graph` export for external consumers.  Reuses the cached
# singleton so the graph is compiled only once (previously a second,
# independent instance was compiled here at import time).
graph = get_enhanced_multi_agent_workflow()