"""
项目管理工作流
基于LangGraph构建项目管理专用的状态图工作流
"""

from langgraph.graph import StateGraph, END
from langgraph.checkpoint.memory import MemorySaver
from langchain_core.runnables import RunnableConfig
from typing import Dict, Any, Optional, List
from src.research_core.project_state import ProjectManagementState
from src.research_core.project_agents import (
    project_coordinator_agent, product_manager_agent,
    ui_ux_designer_agent, developer_agent, tester_agent,
    collaboration_agent, feedback_agent, phase_transition_agent,
    finalize_prompt
)
from src.research_core.iteration_utils import (
    add_iteration_record, add_decision_record,
    calculate_quality_metrics, should_proceed_to_next_phase,
    get_feedback_based_decisions, update_adaptive_threshold
)
from src.config.settings import settings
import asyncio
import logging
from functools import wraps
from datetime import datetime
import json

logger = logging.getLogger(__name__)

def create_project_workflow_parallel():
    """
    Create the project-management workflow with parallel-capable branches.

    Wires a serial main path (coordinator -> product manager -> UI/UX
    designer -> developer -> tester -> phase transition), two branches
    (collaboration, feedback) that fan out from the coordinator, and
    conditional routing for phase transitions and test results.

    Returns:
        The compiled workflow graph (runnable).
    """
    # Build the state graph over the shared project-management state.
    workflow = StateGraph(ProjectManagementState)

    def wrapped_agent(agent_func, agent_type):
        """Wrap an agent node so every invocation is recorded in the iteration history."""
        @wraps(agent_func)
        def wrapper(state):
            result = agent_func(state)
            # Record this iteration.
            # NOTE(review): the value returned by add_iteration_record is bound
            # to the local `state`, but only `result` is returned to the graph,
            # so the record persists only if add_iteration_record mutates the
            # state in place -- confirm against iteration_utils.
            state = add_iteration_record(state, agent_type, result)
            return result
        return wrapper

    # Nodes (each wrapped to record iteration history).
    workflow.add_node("project_coordinator", wrapped_agent(project_coordinator_agent, "coordinator"))
    workflow.add_node("product_manager", wrapped_agent(product_manager_agent, "product_manager"))
    workflow.add_node("ui_ux_designer", wrapped_agent(ui_ux_designer_agent, "ui_ux_designer"))
    workflow.add_node("developer", wrapped_agent(developer_agent, "developer"))
    workflow.add_node("tester", wrapped_agent(tester_agent, "tester"))
    workflow.add_node("collaboration", wrapped_agent(collaboration_agent, "collaboration"))
    workflow.add_node("feedback", wrapped_agent(feedback_agent, "feedback"))
    workflow.add_node("phase_transition", wrapped_agent(phase_transition_agent, "phase_transition"))

    # Entry point.
    workflow.set_entry_point("project_coordinator")

    # Serial main path.
    workflow.add_edge("project_coordinator", "product_manager")
    workflow.add_edge("product_manager", "ui_ux_designer")
    workflow.add_edge("ui_ux_designer", "developer")
    workflow.add_edge("developer", "tester")
    workflow.add_edge("tester", "phase_transition")

    # Branches that may execute in parallel with the main path:
    # collaboration and feedback fan out from the coordinator.
    workflow.add_edge("project_coordinator", "collaboration")
    workflow.add_edge("project_coordinator", "feedback")

    def _get_current_phase_node(phase: str) -> str:
        """Map a project phase name to the node responsible for it."""
        phase_node_mapping = {
            "requirements": "product_manager",
            "design": "ui_ux_designer",
            "development": "developer",
            "testing": "tester"
        }
        return phase_node_mapping.get(phase, "project_coordinator")

    # Conditional routing after the phase-transition agent (enhanced).
    # Note: the redundant function-local imports of add_decision_record /
    # get_feedback_based_decisions were removed -- these names are already
    # imported at module scope.
    def route_phase_transition(state: ProjectManagementState) -> str:
        """Decide which node runs next after a phase-transition evaluation."""
        next_phase = state.get("next_phase", "")
        current_phase = state.get("project_phase", "requirements")

        # Unprocessed user feedback forces a re-run of the current phase.
        user_feedback = state.get("user_feedback", "")
        feedback_processed = state.get("feedback_processed", True)

        if user_feedback and not feedback_processed:
            # Record the decision in the history.
            decision_updates = add_decision_record(state, "user_feedback_handled",
                                      {"user_feedback": user_feedback},
                                      "处理用户反馈并重新执行当前阶段")
            if "decision_history" in decision_updates:
                state["decision_history"] = decision_updates["decision_history"]

            # Mark the feedback as handled so routing does not loop on it.
            state["feedback_processed"] = True

            # Re-run the node that owns the current phase.
            return _get_current_phase_node(current_phase)

        # Incorporate team-feedback-driven routing decisions.
        feedback_decisions = get_feedback_based_decisions(state)

        if "development" in feedback_decisions and current_phase == "testing":
            # Feedback says: roll back from testing to development.
            decision_updates = add_decision_record(state, "feedback_driven_rollback",
                                      {"feedback_decisions": feedback_decisions},
                                      "基于团队反馈返回开发阶段")
            if "decision_history" in decision_updates:
                state["decision_history"] = decision_updates["decision_history"]
            return "developer"

        if "design" in feedback_decisions and current_phase in ["development", "testing"]:
            # Feedback says: a redesign is needed.
            decision_updates = add_decision_record(state, "feedback_driven_redesign",
                                      {"feedback_decisions": feedback_decisions},
                                      "基于团队反馈需要重新设计")
            if "decision_history" in decision_updates:
                state["decision_history"] = decision_updates["decision_history"]
            return "ui_ux_designer"

        if next_phase == "completed":
            return "end"
        elif next_phase == current_phase:
            # More work needed within the current phase.
            return _get_current_phase_node(current_phase)
        else:
            # Advance: hand control back to the coordinator.
            return "project_coordinator"

    workflow.add_conditional_edges(
        "phase_transition",
        route_phase_transition,
        {
            "end": END,
            "project_coordinator": "project_coordinator",
            "product_manager": "product_manager",
            "ui_ux_designer": "ui_ux_designer",
            "developer": "developer",
            "tester": "tester"
        }
    )

    # Conditional routing after testing (enhanced).
    def route_after_testing(state: ProjectManagementState) -> str:
        """Decide the next node based on test results, with iteration caps."""
        test_results = state.get("test_results", {})
        current_results = {"test_results": test_results}

        # Adaptive-threshold gate for moving to the next phase.
        if should_proceed_to_next_phase(state, current_results):
            return "phase_transition"
        else:
            # Guard against infinite develop<->test loops.
            iteration_count = state.get("iteration_count", 0)
            max_iteration_limit = getattr(settings, 'WORKFLOW_MAX_ITERATION_LIMIT', 2)  # read from settings

            # Phase-specific iteration limit, when configured.
            current_phase = state.get("project_phase", "requirements")
            phase_specific_strategies = getattr(settings, 'WORKFLOW_PHASE_SPECIFIC_STRATEGIES', {})
            phase_max_iterations = phase_specific_strategies.get(current_phase, {}).get("max_iterations", max_iteration_limit)

            if iteration_count >= min(phase_max_iterations, max_iteration_limit):
                return "project_coordinator"  # escalate to the coordinator
            return "developer"

    workflow.add_conditional_edges(
        "tester",
        route_after_testing,
        {
            "phase_transition": "phase_transition",
            "developer": "developer",
            "project_coordinator": "project_coordinator"
        }
    )

    return workflow.compile()


# Module-level compiled graph instance (e.g. a LangGraph deployment entry point).
graph = create_project_workflow_parallel()

def create_project_workflow_pool():
    """
    Create an (initially empty) pool for managing multiple projects' workflows.

    Returns:
        Dict[str, StateGraph]: a fresh, empty workflow pool.
    """
    # Each call returns a brand-new mapping; callers own its lifetime.
    return dict()

def get_project_workflow(workflow_pool: Dict[str, Any], project_id: str, parallel: bool = False) -> StateGraph:
    """
    Fetch a project's workflow from the pool, creating it on first use.

    Args:
        workflow_pool: pool mapping workflow keys to workflow instances.
        project_id: project identifier.
        parallel: when True, the entry is keyed as "<project_id>_parallel".

    Returns:
        StateGraph: the cached or newly created workflow instance.
    """
    key = f"{project_id}_parallel" if parallel else project_id
    try:
        return workflow_pool[key]
    except KeyError:
        # Only the parallel-capable builder is supported; cache and return it.
        instance = create_project_workflow_parallel()
        workflow_pool[key] = instance
        return instance

# Global workflow pool instance shared across the process.
project_workflow_pool = create_project_workflow_pool()

def get_global_workflow_pool() -> Dict[str, Any]:
    """Return the process-wide project workflow pool (the shared module-level dict)."""
    # Callers receive the shared mapping itself, not a copy.
    return project_workflow_pool

def execute_project_workflow(workflow, initial_state: ProjectManagementState, config: Optional[Dict[str, Any]] = None):
    """
    Execute the project workflow synchronously by streaming its steps.

    Args:
        workflow: a compiled workflow instance (used as-is, not re-compiled).
        initial_state: initial project state; must contain "project_id".
        config: execution config; defaults to a thread_id keyed on project_id.

    Returns:
        Dict[str, Any]: on success {"success": True, "final_state", "project_id",
        "execution_info"}; on failure {"success": False, "error", "project_id"}
        (plus "timeout": True when the wall-clock budget was exceeded).
    """
    if config is None:
        config = {"configurable": {"thread_id": initial_state["project_id"]}}
    
    try:
        # Use the already-compiled workflow object directly; avoid re-compiling.
        compiled_workflow = workflow
        
        # Convert the plain dict config into a RunnableConfig.
        runnable_config = RunnableConfig(**config) if config else RunnableConfig()
        
        # Record when the run started.
        start_time = datetime.now()
        logger.info(f"开始执行项目工作流: {initial_state['project_id']} at {start_time}")
        
        # Stream the workflow, tracking the last step observed.
        final_state = None
        last_step = None
        step_count = 0
        
        # Wall-clock budget for the whole run (default: 1 hour).
        max_execution_time = getattr(settings, 'WORKFLOW_MAX_EXECUTION_TIME', 3600)  # default: 1 hour
        
        for step in compiled_workflow.stream(initial_state, runnable_config):
            last_step = step
            step_count += 1
            # Abort once the run exceeds the budget.
            # NOTE(review): this check only fires between steps, so a single
            # long-running step can still overshoot the budget.
            if (datetime.now() - start_time).total_seconds() > max_execution_time:
                raise TimeoutError(f"工作流执行超时，超过{max_execution_time}秒")
                
            # Log per-step progress.
            logger.debug(f"工作流步骤 {step_count} 执行完成: {list(step.keys()) if isinstance(step, dict) else 'step data'}")
            
            # Surface per-node errors embedded in the step payload.
            if isinstance(step, dict):
                for node_result in step.values():
                    if isinstance(node_result, dict) and "error" in node_result:
                        logger.warning(f"节点执行出错: {node_result['error']}")
        
        # Prefer the checkpointer-backed final state when available.
        if hasattr(compiled_workflow, 'get_state'):
            final_state = compiled_workflow.get_state(config=runnable_config)
            final_values = final_state.values if hasattr(final_state, 'values') else final_state
        else:
            # Without get_state, fall back to the last streamed step.
            final_values = last_step if last_step else initial_state
            
        # Record when the run finished and how long it took.
        end_time = datetime.now()
        execution_duration = (end_time - start_time).total_seconds()
        logger.info(f"项目工作流执行完成: {initial_state['project_id']} at {end_time}, 耗时: {execution_duration:.2f}秒, 共执行 {step_count} 步")
        
        # Collect execution metrics for later analysis.
        execution_info = {
            "start_time": start_time.isoformat(),
            "end_time": end_time.isoformat(),
            "duration_seconds": execution_duration,
            "step_count": step_count
        }
        collect_workflow_metrics(initial_state["project_id"], execution_info)
            
        return {
            "success": True,
            "final_state": final_values,
            "project_id": initial_state["project_id"],
            "execution_info": execution_info
        }
    except TimeoutError as e:
        logger.error(f"执行项目工作流超时: {str(e)}", exc_info=True)
        return {
            "success": False,
            "error": str(e),
            "project_id": initial_state["project_id"],
            "timeout": True
        }
    except Exception as e:
        logger.error(f"执行项目工作流失败: {str(e)}", exc_info=True)
        return {
            "success": False,
            "error": str(e),
            "project_id": initial_state["project_id"]
        }

def execute_project_workflow_with_feedback(workflow, initial_state: ProjectManagementState, 
                                         user_feedback: str, config: Optional[Dict[str, Any]] = None):
    """
    Run the project workflow after injecting a piece of user feedback.

    The feedback is stored on the state (flagged unprocessed) and appended to
    the feedback history before delegating to execute_project_workflow.

    Args:
        workflow: workflow instance.
        initial_state: initial project state (mutated in place).
        user_feedback: the user's feedback text.
        config: optional execution config.

    Returns:
        Dict[str, Any]: execution result from execute_project_workflow.
    """
    # Attach the feedback and flag it as not-yet-processed for routing.
    initial_state["user_feedback"] = user_feedback
    initial_state["feedback_processed"] = False

    # Append to a copied history list so the original list object is untouched.
    history = list(initial_state.get("feedback_history", []))
    history.append({
        "feedback": user_feedback,
        "timestamp": datetime.now().isoformat(),
        "type": "user_feedback"
    })
    initial_state["feedback_history"] = history

    # Delegate to the standard (already-compiled) execution path.
    return execute_project_workflow(workflow, initial_state, config)

def get_workflow_status(workflow, config: Dict[str, Any]) -> Dict[str, Any]:
    """
    Inspect the current status of a workflow.

    Args:
        workflow: compiled workflow instance.
        config: execution config (e.g. thread id under "configurable").

    Returns:
        Dict[str, Any]: status/current_node/values/metadata on success,
        {"status": "unknown", ...} when the workflow exposes no get_state,
        or {"status": "error", ...} on failure.
    """
    try:
        # Convert the plain dict config into a RunnableConfig.
        runnable_config = RunnableConfig(**config) if config else RunnableConfig()

        # Guard clause: workflows without get_state cannot be inspected.
        if not hasattr(workflow, 'get_state'):
            return {
                "status": "unknown",
                "error": "无法获取工作流状态"
            }

        state = workflow.get_state(config=runnable_config)
        pending = state.next
        return {
            "status": "active" if pending else "completed",
            "current_node": pending[0] if pending else "end",
            "values": state.values if hasattr(state, 'values') else state,
            "metadata": state.metadata if hasattr(state, 'metadata') else {}
        }
    except Exception as e:
        logger.error(f"获取工作流状态失败: {str(e)}", exc_info=True)
        return {
            "status": "error",
            "error": str(e)
        }

def reset_workflow(workflow, config: Dict[str, Any]) -> bool:
    """
    Reset a workflow's persisted state via its checkpointer.

    Args:
        workflow: compiled workflow instance.
        config: config containing configurable.thread_id.

    Returns:
        bool: True when the state was cleared, False otherwise.
    """
    try:
        # Guard clause: no checkpointer means nothing to reset.
        checkpointer = getattr(workflow, 'checkpointer', None)
        if not checkpointer:
            logger.warning("工作流不支持状态重置操作")
            return False

        # Guard clause: a thread id is required to address the stored state.
        thread_id = config.get("configurable", {}).get("thread_id")
        if not thread_id:
            logger.warning("未提供线程ID，无法重置工作流状态")
            return False

        # Clear the persisted state for this thread.
        checkpointer.delete({"configurable": {"thread_id": thread_id}})
        logger.info(f"工作流 {thread_id} 状态已重置")
        return True
    except Exception as e:
        logger.error(f"重置工作流失败: {str(e)}", exc_info=True)
        return False

def export_workflow_state(workflow, config: Dict[str, Any]) -> Dict[str, Any]:
    """
    Export the current workflow status together with an export timestamp.

    Args:
        workflow: workflow instance.
        config: execution config.

    Returns:
        Dict[str, Any]: {"exported_at", "workflow_status"} on success, or an
        error payload ({"status": "error", "error": ...}).
    """
    try:
        status = get_workflow_status(workflow, config)
        if status["status"] == "error":
            # Propagate status-read failures unchanged.
            return status

        # Wrap the status snapshot with an export timestamp.
        export_data = {
            "exported_at": datetime.now().isoformat(),
            "workflow_status": status
        }
        logger.info("工作流状态导出成功")
        return export_data
    except Exception as e:
        logger.error(f"导出工作流状态失败: {str(e)}", exc_info=True)
        return {
            "status": "error",
            "error": str(e)
        }

def import_workflow_state(workflow, state_data: Dict[str, Any], config: Dict[str, Any]) -> bool:
    """
    Import a workflow state snapshot (stub implementation).

    Args:
        workflow: workflow instance.
        state_data: previously exported state payload.
        config: execution config.

    Returns:
        bool: True when the (currently log-only) import succeeds.
    """
    try:
        # Stub: real restoration logic would rebuild state from state_data.
        logger.info(f"导入工作流状态: {state_data.get('exported_at', 'unknown')}")
        return True
    except Exception as e:
        logger.error(f"导入工作流状态失败: {str(e)}", exc_info=True)
        return False

async def async_execute_project_workflow(workflow, initial_state: ProjectManagementState, config: Optional[Dict[str, Any]] = None):
    """
    Execute the project workflow asynchronously.

    Offloads the blocking execute_project_workflow_concurrent call to the
    default thread-pool executor so the event loop is not blocked.

    Args:
        workflow: workflow instance.
        initial_state: initial state.
        config: execution config.

    Returns:
        Dict[str, Any]: execution result.
    """
    # Fix: asyncio.get_event_loop() is deprecated inside coroutines since
    # Python 3.10 and may not return the running loop; get_running_loop()
    # is the correct, always-valid call from within an async function.
    loop = asyncio.get_running_loop()
    result = await loop.run_in_executor(
        None, 
        lambda: execute_project_workflow_concurrent(workflow, initial_state, config)
    )
    return result

class WorkflowVersionManager:
    """
    Workflow version manager.

    Keeps an in-memory, bounded history of workflow-state snapshots per
    project and supports lookup and (logical) rollback.
    """
    
    def __init__(self):
        # project_id -> list of version records (oldest first).
        self.versions: Dict[str, List[Dict[str, Any]]] = {}
        # Keep at most this many versions per project.
        self.max_versions_per_project = 10
        # Monotonic per-project counter so version ids stay unique even after
        # old versions are trimmed. Fixes a bug where the previous len()-based
        # scheme produced the same id (e.g. "v11") for every save once the
        # list was capped at max_versions_per_project.
        self._version_counters: Dict[str, int] = {}
    
    def save_version(self, project_id: str, workflow_state: Dict[str, Any], version_name: Optional[str] = None) -> str:
        """
        Save a workflow-state snapshot as a new version.
        
        Args:
            project_id: project identifier.
            workflow_state: workflow state to snapshot.
            version_name: optional human-readable name.
            
        Returns:
            str: the newly assigned version id (e.g. "v3").
        """
        if project_id not in self.versions:
            self.versions[project_id] = []
        
        # Seed the counter from any pre-existing list so ids continue the old
        # numbering, then advance it monotonically (never reused after trims).
        seq = self._version_counters.get(project_id, len(self.versions[project_id])) + 1
        self._version_counters[project_id] = seq
        
        version_id = f"v{seq}"
        version_data = {
            "version_id": version_id,
            "version_name": version_name or f"Version {seq}",
            "timestamp": datetime.now().isoformat(),
            "workflow_state": workflow_state
        }
        
        self.versions[project_id].append(version_data)
        
        # Keep only the most recent versions.
        if len(self.versions[project_id]) > self.max_versions_per_project:
            self.versions[project_id] = self.versions[project_id][-self.max_versions_per_project:]
        
        logger.info(f"项目 {project_id} 的工作流版本 {version_id} 已保存")
        return version_id
    
    def get_versions(self, project_id: str) -> List[Dict[str, Any]]:
        """
        Return all stored versions for a project (empty list if none).
        
        Args:
            project_id: project identifier.
            
        Returns:
            List[Dict[str, Any]]: version records, oldest first.
        """
        return self.versions.get(project_id, [])
    
    def get_version(self, project_id: str, version_id: str) -> Optional[Dict[str, Any]]:
        """
        Look up one version by id.
        
        Args:
            project_id: project identifier.
            version_id: version id (e.g. "v3").
            
        Returns:
            Optional[Dict[str, Any]]: the version record, or None if absent.
        """
        versions = self.versions.get(project_id, [])
        for version in versions:
            if version["version_id"] == version_id:
                return version
        return None
    
    def rollback_to_version(self, project_id: str, version_id: str) -> bool:
        """
        Roll back to the given version.
        
        This is a logical rollback: it only validates that the version exists
        and logs the action -- no state is restored here.
        
        Args:
            project_id: project identifier.
            version_id: version id to roll back to.
            
        Returns:
            bool: True when the version exists, False otherwise.
        """
        version = self.get_version(project_id, version_id)
        if not version:
            logger.error(f"项目 {project_id} 的版本 {version_id} 不存在")
            return False
        
        logger.info(f"项目 {project_id} 已回滚到版本 {version_id}")
        return True

# Global version-manager instance shared across the process.
workflow_version_manager = WorkflowVersionManager()

def save_workflow_version(project_id: str, workflow_state: Dict[str, Any], version_name: Optional[str] = None) -> str:
    """
    Save a workflow version snapshot via the global version manager.

    Args:
        project_id: project identifier.
        workflow_state: workflow state snapshot to store.
        version_name: optional human-readable version name.

    Returns:
        str: the assigned version id.
    """
    # Thin module-level facade over the shared manager instance.
    return workflow_version_manager.save_version(project_id, workflow_state, version_name)

def get_workflow_versions(project_id: str) -> List[Dict[str, Any]]:
    """
    List all workflow versions of a project via the global version manager.

    Args:
        project_id: project identifier.

    Returns:
        List[Dict[str, Any]]: version records (empty list if none).
    """
    # Thin module-level facade over the shared manager instance.
    return workflow_version_manager.get_versions(project_id)

def rollback_workflow_to_version(project_id: str, version_id: str) -> bool:
    """
    Roll a project's workflow back to a version via the global version manager.

    Args:
        project_id: project identifier.
        version_id: version id to roll back to.

    Returns:
        bool: True when the rollback succeeded.
    """
    # Thin module-level facade over the shared manager instance.
    return workflow_version_manager.rollback_to_version(project_id, version_id)

class WorkflowMetricsCollector:
    """
    Workflow metrics collector.

    Stores execution metrics in memory, capped at the 100 most recent
    records per project, and computes simple aggregates over them.
    """

    def __init__(self):
        # project_id -> list of {"timestamp", "execution_info"} records.
        self.metrics: Dict[str, List[Dict[str, Any]]] = {}

    def record_execution(self, project_id: str, execution_info: Dict[str, Any]):
        """
        Append one execution record for the given project.

        Args:
            project_id: project identifier.
            execution_info: execution details (duration, step count, ...).
        """
        bucket = self.metrics.setdefault(project_id, [])
        bucket.append({
            "timestamp": datetime.now().isoformat(),
            "execution_info": execution_info
        })

        # Cap the history at the 100 most recent records.
        if len(bucket) > 100:
            self.metrics[project_id] = bucket[-100:]

        logger.debug(f"项目 {project_id} 的执行指标已记录")

    def get_metrics(self, project_id: str) -> List[Dict[str, Any]]:
        """
        Return all recorded metrics for a project.

        Args:
            project_id: project identifier.

        Returns:
            List[Dict[str, Any]]: metric records (empty list if none).
        """
        return self.metrics.get(project_id, [])

    def _average(self, project_id: str, field: str) -> float:
        """Average of execution_info[field] over all records; 0.0 when empty."""
        records = self.metrics.get(project_id, [])
        if not records:
            return 0.0
        total = sum(record["execution_info"].get(field, 0) for record in records)
        return total / len(records)

    def get_average_execution_time(self, project_id: str) -> float:
        """
        Average execution duration across recorded runs.

        Args:
            project_id: project identifier.

        Returns:
            float: average duration in seconds (0.0 when no records exist).
        """
        return self._average(project_id, "duration_seconds")

    def get_average_step_count(self, project_id: str) -> float:
        """
        Average step count across recorded runs.

        Args:
            project_id: project identifier.

        Returns:
            float: average number of steps (0.0 when no records exist).
        """
        return self._average(project_id, "step_count")

# Global metrics-collector instance shared across the process.
workflow_metrics_collector = WorkflowMetricsCollector()

def collect_workflow_metrics(project_id: str, execution_info: Dict[str, Any]):
    """
    Record one workflow execution's metrics via the global collector.

    Args:
        project_id: project identifier.
        execution_info: execution details (duration, step count, ...).
    """
    # Thin module-level facade over the shared collector instance.
    workflow_metrics_collector.record_execution(project_id, execution_info)

def get_workflow_metrics(project_id: str) -> List[Dict[str, Any]]:
    """
    Fetch a project's recorded workflow metrics via the global collector.

    Args:
        project_id: project identifier.

    Returns:
        List[Dict[str, Any]]: metric records (empty list if none).
    """
    # Thin module-level facade over the shared collector instance.
    return workflow_metrics_collector.get_metrics(project_id)

def get_average_execution_time(project_id: str) -> float:
    """
    Average execution time of a project's runs, via the global collector.

    Args:
        project_id: project identifier.

    Returns:
        float: average duration in seconds (0.0 when no records exist).
    """
    # Thin module-level facade over the shared collector instance.
    return workflow_metrics_collector.get_average_execution_time(project_id)

def get_average_step_count(project_id: str) -> float:
    """
    Average step count of a project's runs, via the global collector.

    Args:
        project_id: project identifier.

    Returns:
        float: average number of steps (0.0 when no records exist).
    """
    # Thin module-level facade over the shared collector instance.
    return workflow_metrics_collector.get_average_step_count(project_id)

def pause_workflow(workflow, config: Dict[str, Any]) -> Dict[str, Any]:
    """
    Pause a workflow by snapshotting its current status.

    Args:
        workflow: workflow instance.
        config: execution config.

    Returns:
        Dict[str, Any]: {"status": "paused", "data": ...} on success, the
        underlying error payload when status lookup failed, or
        {"status": "error", ...} on unexpected failure.
    """
    try:
        status = get_workflow_status(workflow, config)
        if status["status"] == "error":
            # Propagate status-read failures unchanged.
            return status

        # Snapshot the status along with a pause timestamp.
        pause_data = {
            "paused_at": datetime.now().isoformat(),
            "workflow_status": status,
            "message": "工作流已暂停"
        }

        logger.info("工作流已暂停")
        return {
            "status": "paused",
            "data": pause_data
        }
    except Exception as e:
        logger.error(f"暂停工作流失败: {str(e)}", exc_info=True)
        return {
            "status": "error",
            "error": str(e)
        }

def resume_workflow(workflow, config: Dict[str, Any], state_data: Optional[Dict[str, Any]] = None) -> bool:
    """
    Resume workflow execution, optionally from saved pause data.

    Args:
        workflow: workflow instance.
        config: execution config.
        state_data: optional snapshot produced by pause_workflow.

    Returns:
        bool: True when the resume succeeded.
    """
    try:
        if isinstance(state_data, dict) and state_data:
            logger.info(f"尝试从保存的状态恢复工作流: {state_data.get('paused_at', 'unknown')}")
            # Actual state restoration is not implemented (LangGraph
            # limitation); the attempt is only logged for traceability.

        logger.info("工作流恢复执行")
        return True
    except Exception as e:
        logger.error(f"恢复工作流失败: {str(e)}", exc_info=True)
        return False

def execute_project_workflow_selector(workflow, initial_state: ProjectManagementState, config: Optional[Dict[str, Any]] = None, parallel: bool = False):
    """
    Select how to execute the project workflow.

    Args:
        workflow: workflow instance.
        initial_state: initial state.
        config: execution config.
        parallel: requested execution mode.
            NOTE(review): currently ignored -- every call is routed to the
            concurrent executor; kept for interface compatibility.

    Returns:
        Dict[str, Any]: execution result.
    """
    # Default to the concurrent execution path.
    return execute_project_workflow_concurrent(workflow, initial_state, config)

def execute_project_workflow_concurrent(workflow, initial_state: ProjectManagementState, config: Optional[Dict[str, Any]] = None):
    """
    Execute the project workflow via its async streaming API, driven from
    synchronous code by an event loop.

    Args:
        workflow: compiled workflow instance (used as-is, not re-compiled).
        initial_state: initial project state; must contain "project_id".
        config: execution config; defaults to a thread_id keyed on project_id.

    Returns:
        Dict[str, Any]: success payload with final state and execution info,
        or an error payload ("timeout": True when the budget was exceeded).
    """
    if config is None:
        config = {"configurable": {"thread_id": initial_state["project_id"]}}
    
    try:
        # Use the already-compiled workflow object directly; avoid re-compiling.
        compiled_workflow = workflow
        
        # Convert the plain dict config into a RunnableConfig.
        runnable_config = RunnableConfig(**config) if config else RunnableConfig()
        
        # Record when the run started.
        start_time = datetime.now()
        logger.info(f"开始并发执行项目工作流: {initial_state['project_id']} at {start_time}")
        
        # Drive the workflow through its async stream.
        async def run_workflow():
            final_state = None
            last_step = None
            step_count = 0
            
            # Wall-clock budget for the whole run (default: 1 hour).
            max_execution_time = getattr(settings, 'WORKFLOW_MAX_EXECUTION_TIME', 3600)  # default: 1 hour
            
            # Consume the async stream step by step.
            async for step in compiled_workflow.astream(initial_state, runnable_config):
                last_step = step
                step_count += 1
                # Abort once the run exceeds the budget.
                # NOTE(review): checked only between steps, so one long step
                # can still overshoot the budget.
                if (datetime.now() - start_time).total_seconds() > max_execution_time:
                    raise TimeoutError(f"工作流执行超时，超过{max_execution_time}秒")
                    
                # Log per-step progress.
                logger.debug(f"工作流步骤 {step_count} 并发执行完成: {list(step.keys()) if isinstance(step, dict) else 'step data'}")
                
                # Surface per-node errors embedded in the step payload.
                if isinstance(step, dict):
                    for node_result in step.values():
                        if isinstance(node_result, dict) and "error" in node_result:
                            logger.warning(f"节点执行出错: {node_result['error']}")
            
            return last_step, step_count
        
        # Obtain (or create) an event loop for this thread.
        # NOTE(review): run_until_complete raises RuntimeError if this thread
        # already has a *running* loop (e.g. when called from async code);
        # that error is caught below and reported as a failed execution.
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            
        last_step, step_count = loop.run_until_complete(run_workflow())
        
        # Prefer the checkpointer-backed final state when available.
        if hasattr(compiled_workflow, 'get_state'):
            final_state = compiled_workflow.get_state(config=runnable_config)
            final_values = final_state.values if hasattr(final_state, 'values') else final_state
        else:
            # Without get_state, fall back to the last streamed step.
            final_values = last_step if last_step else initial_state
            
        # Record when the run finished and how long it took.
        end_time = datetime.now()
        execution_duration = (end_time - start_time).total_seconds()
        logger.info(f"项目工作流并发执行完成: {initial_state['project_id']} at {end_time}, 耗时: {execution_duration:.2f}秒, 共执行 {step_count} 步")
        
        # Collect execution metrics for later analysis.
        execution_info = {
            "start_time": start_time.isoformat(),
            "end_time": end_time.isoformat(),
            "duration_seconds": execution_duration,
            "step_count": step_count,
            "concurrent_execution": True
        }
        collect_workflow_metrics(initial_state["project_id"], execution_info)
            
        return {
            "success": True,
            "final_state": final_values,
            "project_id": initial_state["project_id"],
            "execution_info": execution_info
        }
    except TimeoutError as e:
        logger.error(f"并发执行项目工作流超时: {str(e)}", exc_info=True)
        return {
            "success": False,
            "error": str(e),
            "project_id": initial_state["project_id"],
            "timeout": True
        }
    except Exception as e:
        logger.error(f"并发执行项目工作流失败: {str(e)}", exc_info=True)
        return {
            "success": False,
            "error": str(e),
            "project_id": initial_state["project_id"]
        }

async def async_execute_project_workflow_concurrent(workflow, initial_state: ProjectManagementState, config: Optional[Dict[str, Any]] = None):
    """
    Execute the project workflow asynchronously via its async streaming API.

    Unlike execute_project_workflow_concurrent, this coroutine is awaited by
    the caller's own event loop rather than driving a loop itself.

    Args:
        workflow: compiled workflow instance (used as-is, not re-compiled).
        initial_state: initial project state; must contain "project_id".
        config: execution config; defaults to a thread_id keyed on project_id.

    Returns:
        Dict[str, Any]: success payload with final state and execution info,
        or an error payload ("timeout": True when the budget was exceeded).
    """
    if config is None:
        config = {"configurable": {"thread_id": initial_state["project_id"]}}
    
    try:
        # Use the already-compiled workflow object directly; avoid re-compiling.
        compiled_workflow = workflow
        
        # Convert the plain dict config into a RunnableConfig.
        runnable_config = RunnableConfig(**config) if config else RunnableConfig()
        
        # Record when the run started.
        start_time = datetime.now()
        logger.info(f"开始异步并发执行项目工作流: {initial_state['project_id']} at {start_time}")
        
        # Stream the workflow, tracking the last step observed.
        final_state = None
        last_step = None
        step_count = 0
        
        # Wall-clock budget for the whole run (default: 1 hour).
        max_execution_time = getattr(settings, 'WORKFLOW_MAX_EXECUTION_TIME', 3600)  # default: 1 hour
        
        # Consume the async stream step by step.
        async for step in compiled_workflow.astream(initial_state, runnable_config):
            last_step = step
            step_count += 1
            # Abort once the run exceeds the budget.
            # NOTE(review): checked only between steps, so one long step can
            # still overshoot the budget.
            if (datetime.now() - start_time).total_seconds() > max_execution_time:
                raise TimeoutError(f"工作流执行超时，超过{max_execution_time}秒")
                
            # Log per-step progress.
            logger.debug(f"工作流步骤 {step_count} 异步并发执行完成: {list(step.keys()) if isinstance(step, dict) else 'step data'}")
            
            # Surface per-node errors embedded in the step payload.
            if isinstance(step, dict):
                for node_result in step.values():
                    if isinstance(node_result, dict) and "error" in node_result:
                        logger.warning(f"节点执行出错: {node_result['error']}")
        
        # Prefer the checkpointer-backed final state when available.
        if hasattr(compiled_workflow, 'get_state'):
            final_state = compiled_workflow.get_state(config=runnable_config)
            final_values = final_state.values if hasattr(final_state, 'values') else final_state
        else:
            # Without get_state, fall back to the last streamed step.
            final_values = last_step if last_step else initial_state
            
        # Record when the run finished and how long it took.
        end_time = datetime.now()
        execution_duration = (end_time - start_time).total_seconds()
        logger.info(f"项目工作流异步并发执行完成: {initial_state['project_id']} at {end_time}, 耗时: {execution_duration:.2f}秒, 共执行 {step_count} 步")
        
        # Collect execution metrics for later analysis.
        execution_info = {
            "start_time": start_time.isoformat(),
            "end_time": end_time.isoformat(),
            "duration_seconds": execution_duration,
            "step_count": step_count,
            "concurrent_execution": True
        }
        collect_workflow_metrics(initial_state["project_id"], execution_info)
            
        return {
            "success": True,
            "final_state": final_values,
            "project_id": initial_state["project_id"],
            "execution_info": execution_info
        }
    except TimeoutError as e:
        logger.error(f"异步并发执行项目工作流超时: {str(e)}", exc_info=True)
        return {
            "success": False,
            "error": str(e),
            "project_id": initial_state["project_id"],
            "timeout": True
        }
    except Exception as e:
        logger.error(f"异步并发执行项目工作流失败: {str(e)}", exc_info=True)
        return {
            "success": False,
            "error": str(e),
            "project_id": initial_state["project_id"]
        }
