#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
多Agent调度策略LangGraph版 - 汇率预测系统
集成LangGraph框架，实现图状态管理、声明式工作流和智能调度
"""

import asyncio
import json
import logging
import os
import time
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Optional, TypedDict, Annotated
import operator

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score

# LangGraph imports
from langgraph.graph import StateGraph, END
from langgraph.checkpoint.memory import MemorySaver
from typing_extensions import TypedDict

# Logging configuration: mirror every record to a UTF-8 log file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('langgraph_multi_agent.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Configure matplotlib for CJK text: use fonts that contain Chinese glyphs and
# keep the minus sign renderable under those fonts.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei']
plt.rcParams['axes.unicode_minus'] = False

# ==================== State definitions ====================

def update_dict(left: Optional[Dict], right: Optional[Dict]) -> Optional[Dict]:
    """Reducer for dict state fields: merge, with right-hand keys winning.

    A ``None`` update leaves the existing value in place; neither input
    dict is modified.
    """
    if right is None:
        return left
    if left is None:
        return right
    return {**left, **right}

def update_string(left: Optional[str], right: Optional[str]) -> Optional[str]:
    """Reducer for string state fields: keep the old value when the new one is None."""
    if right is None:
        return left
    return right

def update_int(left: int, right: int) -> int:
    """Reducer for int state fields: the newest value always wins."""
    return right

def update_float(left: float, right: float) -> float:
    """Reducer for float state fields: the newest value always wins."""
    return right

class AgentState(TypedDict):
    """LangGraph graph state shared by every node in the workflow.

    Each field is paired (via ``Annotated``) with the reducer LangGraph
    uses to merge updates: last-write-wins for scalars, dict-merge for
    mapping payloads, and list concatenation (``operator.add``) for
    ``messages``.
    """
    # Basic run configuration
    currency_pair: Annotated[str, update_string]
    prediction_days: Annotated[int, update_int]
    
    # Raw data stores (dict-of-lists format so the state stays serializable)
    exchange_rate_data: Annotated[Optional[Dict[str, Any]], update_dict]
    sentiment_data: Annotated[Optional[Dict[str, Any]], update_dict]
    policy_data: Annotated[Optional[Dict[str, Any]], update_dict]
    event_data: Annotated[Optional[Dict[str, Any]], update_dict]
    
    # Analysis results
    predictions: Annotated[Optional[Dict[str, Any]], update_dict]
    impact_analysis: Annotated[Optional[Dict[str, Any]], update_dict]
    visualizations: Annotated[Optional[Dict[str, str]], update_dict]
    
    # Execution status
    messages: Annotated[List[str], operator.add]
    error_message: Annotated[Optional[str], update_string]
    retry_count: Annotated[int, update_int]
    execution_start_time: Annotated[float, update_float]
    
    # Per-agent performance metrics (see record_agent_metric)
    agent_metrics: Annotated[Dict[str, Dict[str, Any]], update_dict]
    
    # Final report produced by the coordinator node
    final_report: Annotated[Optional[Dict[str, Any]], update_dict]

class SchedulingStrategy(Enum):
    """Supported multi-agent scheduling strategies."""
    SEQUENTIAL = "sequential"  # run agents one after another
    PARALLEL = "parallel"  # run independent agents concurrently
    PIPELINE = "pipeline"  # pipelined execution
    ADAPTIVE = "adaptive"  # adaptive execution (default fallback)
    EXPERT_COMMITTEE = "expert_committee"  # expert-committee style execution

# ==================== Utility functions ====================

def add_message(state: "AgentState", message: str) -> "AgentState":
    """Return a copy of *state* with a timestamped message appended.

    Bug fix: the original appended to the list object stored inside the
    incoming state, mutating shared graph state in place; we now copy the
    list first so the caller's state is untouched.

    NOTE(review): ``messages`` uses an ``operator.add`` reducer, so
    returning the full history may duplicate entries when LangGraph merges
    the update — confirm against the graph's merge semantics.
    """
    messages = list(state.get("messages", []))
    messages.append(f"[{datetime.now().strftime('%H:%M:%S')}] {message}")
    updated_state = state.copy()
    updated_state["messages"] = messages
    return updated_state

def record_agent_metric(state: "AgentState", agent_name: str,
                       execution_time: float, success: bool,
                       error_message: Optional[str] = None) -> "AgentState":
    """Return a copy of *state* with updated performance stats for one agent.

    Tracks execution counts, cumulative/average time, success rate, last
    execution timestamp and last error per agent.

    Bug fix: the original updated the dicts stored inside the incoming
    state in place; both the metrics map and the per-agent record are now
    copied before modification. The ``error_message`` default was also
    annotated ``str`` while defaulting to ``None``; it is now Optional.
    """
    metrics = dict(state.get("agent_metrics", {}))
    agent_metrics = dict(metrics.get(agent_name, {
        "total_executions": 0,
        "successful_executions": 0,
        "total_time": 0.0,
        "average_time": 0.0,
        "success_rate": 0.0,
        "last_execution_time": None,
        "last_error": None
    }))

    agent_metrics["total_executions"] += 1
    agent_metrics["total_time"] += execution_time
    agent_metrics["average_time"] = agent_metrics["total_time"] / agent_metrics["total_executions"]
    agent_metrics["last_execution_time"] = datetime.now().isoformat()

    if success:
        agent_metrics["successful_executions"] += 1
        agent_metrics["last_error"] = None
    else:
        agent_metrics["last_error"] = error_message

    agent_metrics["success_rate"] = agent_metrics["successful_executions"] / agent_metrics["total_executions"]

    metrics[agent_name] = agent_metrics
    updated_state = state.copy()
    updated_state["agent_metrics"] = metrics
    return updated_state

# ==================== LangGraph node functions ====================

async def data_collector_node(state: "AgentState") -> "AgentState":
    """Collect exchange-rate, sentiment, policy and event data in parallel.

    On success the four data keys are filled in a copied state; on failure
    the error is recorded and ``retry_count`` is incremented so the retry
    router (``should_retry_data_collection``) can bound the attempts.
    """
    start_time = time.time()
    logger.info("开始执行数据收集节点...")

    try:
        # Fetch all four sources concurrently.
        tasks = [
            _generate_exchange_rate_data_async(state["currency_pair"]),
            _generate_sentiment_data_async(),
            _generate_policy_data_async(),
            _generate_event_data_async()
        ]

        exchange_data, sentiment_data, policy_data, event_data = await asyncio.gather(*tasks)

        execution_time = time.time() - start_time
        state = record_agent_metric(state, "data_collector", execution_time, True)
        state = add_message(state, f"数据收集完成，耗时 {execution_time:.2f}秒")

        # Only touch the keys this node owns.
        updated_state = state.copy()
        updated_state["exchange_rate_data"] = exchange_data
        updated_state["sentiment_data"] = sentiment_data
        updated_state["policy_data"] = policy_data
        updated_state["event_data"] = event_data

        return updated_state

    except Exception as e:
        execution_time = time.time() - start_time
        error_msg = f"数据收集失败: {str(e)}"
        logger.error(error_msg)

        state = record_agent_metric(state, "data_collector", execution_time, False, error_msg)
        state = add_message(state, error_msg)

        updated_state = state.copy()
        updated_state["error_message"] = error_msg
        # Bug fix: should_retry_data_collection() caps retries via
        # retry_count, but nothing ever incremented it, so a persistent
        # failure would be retried indefinitely.
        updated_state["retry_count"] = state.get("retry_count", 0) + 1
        return updated_state

async def prediction_node(state: "AgentState") -> "AgentState":
    """Train the forecasting models in parallel and blend them into an ensemble.

    Requires ``exchange_rate_data`` to be present. On failure the error is
    recorded and ``retry_count`` is incremented so the retry router
    (``should_retry_prediction``) can bound the attempts.
    """
    start_time = time.time()
    logger.info("开始执行预测分析节点...")

    try:
        if state.get("exchange_rate_data") is None:
            raise ValueError("缺少汇率数据")

        # Train both models concurrently.
        tasks = [
            _train_linear_regression_async(state),
            _train_random_forest_async(state)
        ]

        lr_result, rf_result = await asyncio.gather(*tasks)

        # Blend the two forecasts into an R²-weighted ensemble.
        ensemble_result = _create_ensemble_prediction(lr_result, rf_result)

        predictions = {
            'linear_regression': lr_result,
            'random_forest': rf_result,
            'ensemble': ensemble_result
        }

        execution_time = time.time() - start_time
        state = record_agent_metric(state, "prediction", execution_time, True)
        state = add_message(state, f"预测分析完成，耗时 {execution_time:.2f}秒")

        updated_state = state.copy()
        updated_state["predictions"] = predictions
        return updated_state

    except Exception as e:
        execution_time = time.time() - start_time
        error_msg = f"预测分析失败: {str(e)}"
        logger.error(error_msg)

        state = record_agent_metric(state, "prediction", execution_time, False, error_msg)
        state = add_message(state, error_msg)

        updated_state = state.copy()
        updated_state["error_message"] = error_msg
        # Bug fix: should_retry_prediction() caps retries via retry_count,
        # but nothing ever incremented it.
        updated_state["retry_count"] = state.get("retry_count", 0) + 1
        return updated_state

async def impact_analysis_node(state: AgentState) -> AgentState:
    """Measure how sentiment, policy and event factors relate to the rate.

    Requires all four data sets; runs the three factor analyses
    concurrently, then merges them into a combined impact summary.
    """
    started = time.time()
    logger.info("开始执行影响因素分析节点...")

    try:
        for key in ("exchange_rate_data", "sentiment_data", "policy_data", "event_data"):
            if state.get(key) is None:
                raise ValueError("缺少必要的数据")

        # Analyze each factor concurrently.
        sentiment_impact, policy_impact, event_impact = await asyncio.gather(
            _analyze_sentiment_impact_async(state),
            _analyze_policy_impact_async(state),
            _analyze_event_impact_async(state),
        )

        # Fold the individual results into one combined analysis.
        combined = await _combine_impact_analysis_async(
            sentiment_impact, policy_impact, event_impact
        )

        elapsed = time.time() - started
        state = record_agent_metric(state, "impact_analysis", elapsed, True)
        state = add_message(state, f"影响因素分析完成，耗时 {elapsed:.2f}秒")

        result = state.copy()
        result["impact_analysis"] = combined
        return result

    except Exception as e:
        elapsed = time.time() - started
        error_msg = f"影响因素分析失败: {str(e)}"
        logger.error(error_msg)

        state = record_agent_metric(state, "impact_analysis", elapsed, False, error_msg)
        state = add_message(state, error_msg)

        result = state.copy()
        result["error_message"] = error_msg
        return result

async def visualization_node(state: "AgentState") -> "AgentState":
    """Render the prediction, impact and model-comparison charts.

    Chart files are written under ``output_images_langgraph/`` and their
    paths are stored in ``state["visualizations"]``.
    """
    start_time = time.time()
    logger.info("开始执行可视化节点...")

    try:
        # Bug fix: exist_ok avoids the check-then-create race of the
        # original os.path.exists()/os.makedirs() pair.
        output_dir = 'output_images_langgraph'
        os.makedirs(output_dir, exist_ok=True)

        # Render the three charts concurrently.
        tasks = [
            _create_prediction_chart_async(state, output_dir),
            _create_impact_analysis_chart_async(state, output_dir),
            _create_model_comparison_chart_async(state, output_dir)
        ]

        chart_paths = await asyncio.gather(*tasks)

        visualizations = {
            'prediction_chart': chart_paths[0],
            'impact_chart': chart_paths[1],
            'comparison_chart': chart_paths[2]
        }

        execution_time = time.time() - start_time
        state = record_agent_metric(state, "visualization", execution_time, True)
        state = add_message(state, f"可视化生成完成，耗时 {execution_time:.2f}秒")

        updated_state = state.copy()
        updated_state["visualizations"] = visualizations
        return updated_state

    except Exception as e:
        execution_time = time.time() - start_time
        error_msg = f"可视化生成失败: {str(e)}"
        logger.error(error_msg)

        state = record_agent_metric(state, "visualization", execution_time, False, error_msg)
        state = add_message(state, error_msg)

        updated_state = state.copy()
        updated_state["error_message"] = error_msg
        return updated_state

async def coordinator_node(state: AgentState) -> AgentState:
    """Wrap up the run: build the final report and log the total runtime."""
    started = time.time()
    logger.info("开始执行协调节点...")

    try:
        final_report = await _generate_final_report_async(state)

        elapsed = time.time() - started
        overall = time.time() - state["execution_start_time"]
        state = record_agent_metric(state, "coordinator", elapsed, True)
        state = add_message(state, f"协调完成，总耗时 {overall:.2f}秒")

        result = state.copy()
        result["final_report"] = final_report
        return result

    except Exception as e:
        elapsed = time.time() - started
        error_msg = f"协调失败: {str(e)}"
        logger.error(error_msg)

        state = record_agent_metric(state, "coordinator", elapsed, False, error_msg)
        state = add_message(state, error_msg)

        result = state.copy()
        result["error_message"] = error_msg
        return result

# ==================== Conditional routing functions ====================

def should_retry_data_collection(state: AgentState) -> str:
    """Route back to data collection when it failed and retries remain (max 2)."""
    error = state.get("error_message") or ""
    if "数据收集" in error:
        attempts = state.get("retry_count", 0)
        if attempts < 2:
            logger.info(f"数据收集失败，准备第 {attempts + 1} 次重试")
            return "retry_data_collection"
    return "continue_to_prediction"

def should_retry_prediction(state: AgentState) -> str:
    """Route back to prediction when it failed and retries remain (max 2)."""
    error = state.get("error_message") or ""
    if "预测" in error:
        attempts = state.get("retry_count", 0)
        if attempts < 2:
            logger.info(f"预测分析失败，准备第 {attempts + 1} 次重试")
            return "retry_prediction"
    return "continue_to_impact_analysis"

def check_parallel_completion(state: AgentState) -> str:
    """Decide the next step once the parallel analysis branches report in.

    Both predictions and impact analysis must be present before moving to
    visualization; an error short-circuits to the error handler.
    """
    if state.get("predictions") is not None and state.get("impact_analysis") is not None:
        return "continue_to_visualization"
    if state.get("error_message"):
        return "handle_error"
    return "wait_for_completion"

def should_continue_workflow(state: AgentState) -> str:
    """End the workflow on error, otherwise hand off to the coordinator."""
    return "end_with_error" if state.get("error_message") else "continue_to_coordinator"

# ==================== Helper functions ====================

async def _generate_exchange_rate_data_async(currency_pair: str) -> Dict[str, Any]:
    """Load the most recent 100 days of rate history from CSV, or synthesize it.

    Returns a serializable dict-of-lists with 'date' (YYYY-MM-DD strings)
    and 'rate' keys.
    """
    await asyncio.sleep(0.5)  # simulated API latency

    # Prefer real data: try a pair-specific file first, then the generic ones.
    candidates = [
        f'{currency_pair.lower().replace("/", "_to_")}_data.csv',
        'exchange_rates.csv',
        'usd_to_cny_data.csv',
    ]
    try:
        for filename in candidates:
            if not os.path.exists(filename):
                continue
            df = pd.read_csv(filename)
            if 'date' in df.columns and 'rate' in df.columns:
                df['date'] = pd.to_datetime(df['date'])
                df = df.sort_values('date').reset_index(drop=True).tail(100)
                return {
                    'date': df['date'].dt.strftime('%Y-%m-%d').tolist(),
                    'rate': df['rate'].tolist(),
                }
    except Exception as e:
        logger.warning(f"加载真实汇率数据失败: {e}，使用模拟数据")

    # Fallback: a 100-day random walk anchored at a pair-appropriate level.
    dates = pd.date_range(end=datetime.now(), periods=100, freq='D')
    anchor = 7.2 if "CNY" in currency_pair else 1.1
    simulated = anchor + np.cumsum(np.random.normal(0, 0.01, 100))

    return {
        'date': dates.strftime('%Y-%m-%d').tolist(),
        'rate': simulated.tolist(),
    }

async def _generate_sentiment_data_async() -> Dict[str, Any]:
    """Produce a simulated 100-day market-sentiment series."""
    await asyncio.sleep(0.3)  # simulated API latency

    day_index = pd.date_range(end=datetime.now(), periods=100, freq='D')
    scores = np.random.normal(0, 0.5, 100)

    return {
        'date': day_index.strftime('%Y-%m-%d').tolist(),
        'sentiment_score': scores.tolist(),
    }

async def _generate_policy_data_async() -> Dict[str, Any]:
    """Produce a simulated 100-day policy-impact series."""
    await asyncio.sleep(0.3)  # simulated API latency

    day_index = pd.date_range(end=datetime.now(), periods=100, freq='D')
    impacts = np.random.normal(0, 0.3, 100)

    return {
        'date': day_index.strftime('%Y-%m-%d').tolist(),
        'policy_impact': impacts.tolist(),
    }

async def _generate_event_data_async() -> Dict[str, Any]:
    """Produce a simulated 100-day event-impact series."""
    await asyncio.sleep(0.3)  # simulated API latency

    day_index = pd.date_range(end=datetime.now(), periods=100, freq='D')
    impacts = np.random.normal(0, 0.2, 100)

    return {
        'date': day_index.strftime('%Y-%m-%d').tolist(),
        'event_impact': impacts.tolist(),
    }

async def _train_linear_regression_async(state: "AgentState") -> Dict[str, Any]:
    """Fit a lag/moving-average linear model and roll it forward.

    Features: the previous two rates plus a 5-day moving average. The
    fitted model is applied iteratively to its own output to produce
    ``state["prediction_days"]`` future values.

    Returns a dict with 'predictions', in-sample 'r2_score' and
    'model_type'.
    """
    await asyncio.sleep(0.5)

    # Rebuild a DataFrame from the serialized dict-of-lists state.
    df = pd.DataFrame(state["exchange_rate_data"])
    df['date'] = pd.to_datetime(df['date'])
    df['rate_lag1'] = df['rate'].shift(1)
    df['rate_lag2'] = df['rate'].shift(2)
    df['rate_ma5'] = df['rate'].rolling(window=5).mean()
    df = df.dropna()

    X = df[['rate_lag1', 'rate_lag2', 'rate_ma5']]
    y = df['rate']

    model = LinearRegression()
    model.fit(X, y)

    # Roll the model forward one day at a time on its own output.
    last_values = X.iloc[-1].values
    future_predictions = []

    for _ in range(state["prediction_days"]):
        # Fix: predict() expects a 2-D array; reshape(1, -1) is the
        # documented single-sample form, instead of wrapping the 1-D row
        # in a list (which also triggers sklearn's feature-name warning).
        pred = float(model.predict(last_values.reshape(1, -1))[0])
        future_predictions.append(pred)
        # NOTE(review): the "ma5" feature is refreshed from only three
        # values here (pred + two lags) — confirm this simplification is
        # intended.
        last_values = np.array([pred, last_values[0], np.mean([pred] + list(last_values[:2]))])

    r2 = float(r2_score(y, model.predict(X)))

    return {
        'predictions': future_predictions,
        'r2_score': r2,
        'model_type': 'Linear Regression'
    }

async def _train_random_forest_async(state: "AgentState") -> Dict[str, Any]:
    """Fit a random-forest model on lag/rolling features and roll it forward.

    Features: the previous two rates, a 5-day rolling mean and a 5-day
    rolling std. Returns 'predictions', in-sample 'r2_score' and
    'model_type'.
    """
    await asyncio.sleep(1.0)

    # Rebuild a DataFrame from the serialized dict-of-lists state.
    df = pd.DataFrame(state["exchange_rate_data"])
    df['date'] = pd.to_datetime(df['date'])
    df['rate_lag1'] = df['rate'].shift(1)
    df['rate_lag2'] = df['rate'].shift(2)
    df['rate_ma5'] = df['rate'].rolling(window=5).mean()
    df['rate_std5'] = df['rate'].rolling(window=5).std()
    df = df.dropna()

    X = df[['rate_lag1', 'rate_lag2', 'rate_ma5', 'rate_std5']]
    y = df['rate']

    model = RandomForestRegressor(n_estimators=50, random_state=42)
    model.fit(X, y)

    # Roll the model forward one day at a time on its own output.
    last_values = X.iloc[-1].values
    future_predictions = []

    for _ in range(state["prediction_days"]):
        # Fix: use the documented single-sample form reshape(1, -1)
        # instead of wrapping the 1-D row in a list (which also triggers
        # sklearn's feature-name warning).
        pred = float(model.predict(last_values.reshape(1, -1))[0])
        future_predictions.append(pred)
        # NOTE(review): the rolling features are refreshed from only three
        # values, and np.std uses ddof=0 while the training feature used
        # pandas' default ddof=1 — confirm this is intended.
        last_values = np.array([
            pred, 
            last_values[0], 
            np.mean([pred] + list(last_values[:2])),
            np.std([pred] + list(last_values[:2]))
        ])

    r2 = float(r2_score(y, model.predict(X)))

    return {
        'predictions': future_predictions,
        'r2_score': r2,
        'model_type': 'Random Forest'
    }

def _create_ensemble_prediction(lr_result: Dict, rf_result: Dict) -> Dict[str, Any]:
    """Blend the two model forecasts, weighting by non-negative R² score.

    Bug fix: ``r2_score`` can be negative (a model worse than predicting
    the mean). The original summed raw scores, which could produce
    weights outside [0, 1] (e.g. lr=-0.5, rf=1.0 -> weights -1.0 and
    2.0). Scores are clamped at zero before weighting, so weights always
    lie in [0, 1]; behavior is unchanged when both scores are >= 0.
    """
    lr_score = max(lr_result['r2_score'], 0.0)
    rf_score = max(rf_result['r2_score'], 0.0)
    total_score = lr_score + rf_score
    if total_score > 0:
        lr_weight = lr_score / total_score
        rf_weight = rf_score / total_score
    else:
        # Neither model beats the mean baseline: fall back to an even split.
        lr_weight = rf_weight = 0.5

    ensemble_predictions = [
        lr_weight * lr_pred + rf_weight * rf_pred
        for lr_pred, rf_pred in zip(lr_result['predictions'], rf_result['predictions'])
    ]

    return {
        'predictions': ensemble_predictions,
        'r2_score': max(lr_result['r2_score'], rf_result['r2_score']),
        'model_type': 'Ensemble',
        'weights': {'linear_regression': lr_weight, 'random_forest': rf_weight}
    }

async def _analyze_sentiment_impact_async(state: AgentState) -> Dict[str, Any]:
    """Correlate the exchange rate with the sentiment-score series."""
    await asyncio.sleep(0.3)

    # Rebuild DataFrames from the serialized dict state and join on date.
    rates = pd.DataFrame(state["exchange_rate_data"])
    sentiment = pd.DataFrame(state["sentiment_data"])
    joined = rates.merge(sentiment, on='date', how='inner')

    corr = joined['rate'].corr(joined['sentiment_score'])
    corr = 0.0 if pd.isna(corr) else float(corr)

    return {
        'correlation': corr,
        'impact_strength': abs(corr),
        'factor_type': 'sentiment'
    }

async def _analyze_policy_impact_async(state: AgentState) -> Dict[str, Any]:
    """Correlate the exchange rate with the policy-impact series."""
    await asyncio.sleep(0.3)

    # Rebuild DataFrames from the serialized dict state and join on date.
    rates = pd.DataFrame(state["exchange_rate_data"])
    policy = pd.DataFrame(state["policy_data"])
    joined = rates.merge(policy, on='date', how='inner')

    corr = joined['rate'].corr(joined['policy_impact'])
    corr = 0.0 if pd.isna(corr) else float(corr)

    return {
        'correlation': corr,
        'impact_strength': abs(corr),
        'factor_type': 'policy'
    }

async def _analyze_event_impact_async(state: AgentState) -> Dict[str, Any]:
    """Correlate the exchange rate with the event-impact series."""
    await asyncio.sleep(0.3)

    # Rebuild DataFrames from the serialized dict state and join on date.
    rates = pd.DataFrame(state["exchange_rate_data"])
    events = pd.DataFrame(state["event_data"])
    joined = rates.merge(events, on='date', how='inner')

    corr = joined['rate'].corr(joined['event_impact'])
    corr = 0.0 if pd.isna(corr) else float(corr)

    return {
        'correlation': corr,
        'impact_strength': abs(corr),
        'factor_type': 'event'
    }

async def _combine_impact_analysis_async(sentiment_impact: Dict, policy_impact: Dict, 
                                       event_impact: Dict) -> Dict[str, Any]:
    """Aggregate the per-factor impacts into relative contributions."""
    await asyncio.sleep(0.2)

    strengths = {
        'sentiment': sentiment_impact['impact_strength'],
        'policy': policy_impact['impact_strength'],
        'event': event_impact['impact_strength'],
    }
    total_impact = sum(strengths.values())

    if total_impact > 0:
        contributions = {name: value / total_impact for name, value in strengths.items()}
    else:
        # No measurable impact anywhere: report a near-even split.
        contributions = {'sentiment': 0.33, 'policy': 0.33, 'event': 0.34}

    return {
        'individual_impacts': {
            'sentiment': sentiment_impact,
            'policy': policy_impact,
            'event': event_impact
        },
        'contributions': contributions,
        'total_impact_strength': total_impact
    }

async def _create_prediction_chart_async(state: AgentState, output_dir: str) -> str:
    """Plot the last 30 days of history plus the ensemble forecast.

    Draws a 95% confidence band sized from the std-dev of the last 10
    historical rates. Returns the path of the saved PNG.
    """
    await asyncio.sleep(0.5)
    
    plt.figure(figsize=(12, 8))
    
    # Rebuild a DataFrame from the serialized dict state; plot the most
    # recent 30 observations as the historical series.
    exchange_df = pd.DataFrame(state["exchange_rate_data"])
    exchange_df['date'] = pd.to_datetime(exchange_df['date'])
    historical_data = exchange_df.tail(30)
    plt.plot(historical_data['date'], historical_data['rate'], 
            label='历史汇率', linewidth=2, color='blue')
    
    # Forecast series (ensemble model only).
    if state.get("predictions") and 'ensemble' in state["predictions"]:
        future_dates = pd.date_range(
            start=historical_data['date'].iloc[-1] + timedelta(days=1),
            periods=state["prediction_days"],
            freq='D'
        )
        
        predictions = state["predictions"]['ensemble']['predictions']
        plt.plot(future_dates, predictions, 
                label=f'未来{state["prediction_days"]}天预测', 
                linewidth=2, color='red', linestyle='--')
        
        # 95% confidence band: +/- 1.96 sigma of the 10 most recent rates.
        std_dev = np.std(historical_data['rate'].tail(10))
        upper_bound = np.array(predictions) + 1.96 * std_dev
        lower_bound = np.array(predictions) - 1.96 * std_dev
        
        plt.fill_between(future_dates, lower_bound, upper_bound, 
                       alpha=0.3, color='red', label='95%置信区间')
    
    plt.title(f'{state["currency_pair"]} 汇率预测 (LangGraph版)', fontsize=16, fontweight='bold')
    plt.xlabel('日期', fontsize=12)
    plt.ylabel('汇率', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.xticks(rotation=45)
    plt.tight_layout()
    
    chart_path = f'{output_dir}/prediction_chart_{state["prediction_days"]}天.png'
    plt.savefig(chart_path, dpi=300, bbox_inches='tight')
    plt.close()
    
    return chart_path

async def _create_impact_analysis_chart_async(state: AgentState, output_dir: str) -> str:
    """Draw a contribution pie chart and an impact-strength bar chart.

    Returns the saved PNG path, or "" when no impact analysis exists.
    """
    await asyncio.sleep(0.3)
    
    if not state.get("impact_analysis"):
        return ""
    
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
    
    # Pie chart: relative contribution of each factor.
    contributions = state["impact_analysis"]['contributions']
    labels = ['情感因素', '政策因素', '事件因素']
    values = [contributions['sentiment'], contributions['policy'], contributions['event']]
    colors = ['#ff9999', '#66b3ff', '#99ff99']
    
    ax1.pie(values, labels=labels, colors=colors, autopct='%1.1f%%', startangle=90)
    ax1.set_title('影响因素贡献度分析', fontsize=14, fontweight='bold')
    
    # Bar chart: absolute impact strength per factor.
    impacts = state["impact_analysis"]['individual_impacts']
    factors = ['情感', '政策', '事件']
    strengths = [impacts['sentiment']['impact_strength'],
                impacts['policy']['impact_strength'],
                impacts['event']['impact_strength']]
    
    # Guard against NaN correlations.
    strengths = [0.0 if pd.isna(s) else s for s in strengths]
    
    bars = ax2.bar(factors, strengths, color=colors)
    ax2.set_title('各因素影响强度', fontsize=14, fontweight='bold')
    ax2.set_ylabel('影响强度')
    
    # Annotate each bar with its numeric value.
    for bar, strength in zip(bars, strengths):
        height = bar.get_height()
        ax2.text(bar.get_x() + bar.get_width()/2., height + 0.01,
                f'{strength:.3f}', ha='center', va='bottom')
    
    plt.tight_layout()
    
    chart_path = f'{output_dir}/impact_analysis_chart.png'
    plt.savefig(chart_path, dpi=300, bbox_inches='tight')
    plt.close()
    
    return chart_path

async def _create_model_comparison_chart_async(state: AgentState, output_dir: str) -> str:
    """Plot the forecasts of all trained models on one axis for comparison.

    Returns the saved PNG path, or "" when no predictions exist.
    """
    await asyncio.sleep(0.4)
    
    if not state.get("predictions"):
        return ""
    
    plt.figure(figsize=(12, 8))
    
    # NOTE(review): dates here are anchored at "now" rather than at the
    # last historical observation used by the prediction chart — confirm
    # the intended x-axis alignment between the two charts.
    future_dates = pd.date_range(
        start=datetime.now() + timedelta(days=1),
        periods=state["prediction_days"],
        freq='D'
    )
    
    colors = {'linear_regression': 'blue', 'random_forest': 'green', 'ensemble': 'red'}
    labels = {'linear_regression': '线性回归', 'random_forest': '随机森林', 'ensemble': '集成模型'}
    
    for model_name, result in state["predictions"].items():
        if 'predictions' in result:
            plt.plot(future_dates, result['predictions'], 
                    label=f"{labels[model_name]} (R²={result['r2_score']:.3f})",
                    color=colors[model_name], linewidth=2)
    
    plt.title(f'多模型预测比较 - {state["currency_pair"]}', fontsize=16, fontweight='bold')
    plt.xlabel('日期', fontsize=12)
    plt.ylabel('预测汇率', fontsize=12)
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.xticks(rotation=45)
    plt.tight_layout()
    
    chart_path = f'{output_dir}/model_comparison_{state["prediction_days"]}天.png'
    plt.savefig(chart_path, dpi=300, bbox_inches='tight')
    plt.close()
    
    return chart_path

async def _generate_final_report_async(state: AgentState) -> Dict[str, Any]:
    """Assemble the run summary: data quality, model scores, impacts, metrics."""
    await asyncio.sleep(0.2)

    def _record_count(key: str) -> int:
        # Number of rows in a dict-of-lists data set, 0 when absent.
        data = state.get(key)
        return len(data["date"]) if data is not None else 0

    report = {
        'timestamp': datetime.now().isoformat(),
        'currency_pair': state["currency_pair"],
        'prediction_days': state["prediction_days"],
        'total_execution_time': time.time() - state["execution_start_time"],
        'data_quality': {
            'exchange_rate_records': _record_count("exchange_rate_data"),
            'sentiment_records': _record_count("sentiment_data"),
            'policy_records': _record_count("policy_data"),
            'event_records': _record_count("event_data")
        }
    }

    predictions = state.get("predictions")
    if predictions:
        report['model_performance'] = {
            name: {
                'r2_score': result.get('r2_score', 0),
                'model_type': result.get('model_type', 'Unknown')
            }
            for name, result in predictions.items()
        }

    if state.get("impact_analysis"):
        report['impact_summary'] = state["impact_analysis"]['contributions']

    if state.get("agent_metrics"):
        report['agent_performance'] = state["agent_metrics"]

    return report

# ==================== LangGraph workflow builder ====================

class LangGraphMultiAgentScheduler:
    """Multi-agent scheduler built on LangGraph.

    Compiles a workflow graph whose topology is selected by ``strategy``:

    - SEQUENTIAL / PIPELINE: the five agents run in a straight line.
    - PARALLEL / EXPERT_COMMITTEE: prediction and impact analysis fan out
      in parallel after data collection, joining at visualization.
    - ADAPTIVE: linear flow augmented with retry loops and error routing
      driven by conditional edges.

    All compiled graphs share one MemorySaver checkpointer.
    """

    def __init__(self, strategy: SchedulingStrategy = SchedulingStrategy.ADAPTIVE):
        self.strategy = strategy
        # In-memory checkpointer required by compile(); shared across builds.
        self.memory = MemorySaver()
        self.workflow = self._build_workflow()

    def _build_workflow(self):
        """Create a StateGraph and dispatch to the strategy-specific builder.

        Returns the compiled (runnable) graph, not the raw StateGraph.
        """
        workflow = StateGraph(AgentState)
        builders = {
            SchedulingStrategy.SEQUENTIAL: self._build_sequential_workflow,
            SchedulingStrategy.PARALLEL: self._build_parallel_workflow,
            SchedulingStrategy.PIPELINE: self._build_pipeline_workflow,
            SchedulingStrategy.EXPERT_COMMITTEE: self._build_expert_committee_workflow,
        }
        # ADAPTIVE (and anything unexpected) falls back to the adaptive build.
        builder = builders.get(self.strategy, self._build_adaptive_workflow)
        return builder(workflow)

    def _register_agents(self, workflow: StateGraph) -> None:
        """Add the five standard agent nodes and set data collection as entry point."""
        workflow.add_node("data_collector", data_collector_node)
        workflow.add_node("prediction", prediction_node)
        workflow.add_node("impact_analyzer", impact_analysis_node)
        workflow.add_node("visualization", visualization_node)
        workflow.add_node("coordinator", coordinator_node)
        workflow.set_entry_point("data_collector")

    def _wire_linear(self, workflow: StateGraph) -> None:
        """Chain the agents strictly one after another, ending at END."""
        workflow.add_edge("data_collector", "prediction")
        workflow.add_edge("prediction", "impact_analyzer")
        workflow.add_edge("impact_analyzer", "visualization")
        workflow.add_edge("visualization", "coordinator")
        workflow.add_edge("coordinator", END)

    def _wire_fan_out(self, workflow: StateGraph) -> None:
        """Run prediction and impact analysis in parallel after data collection."""
        workflow.add_edge("data_collector", "prediction")
        workflow.add_edge("data_collector", "impact_analyzer")
        # Visualization waits for BOTH parallel branches to complete.
        workflow.add_edge(["prediction", "impact_analyzer"], "visualization")
        workflow.add_edge("visualization", "coordinator")
        workflow.add_edge("coordinator", END)

    def _build_sequential_workflow(self, workflow: StateGraph):
        """Sequential strategy: strictly linear execution."""
        logger.info("构建顺序执行工作流")
        self._register_agents(workflow)
        self._wire_linear(workflow)
        return workflow.compile(checkpointer=self.memory)

    def _build_parallel_workflow(self, workflow: StateGraph):
        """Parallel strategy: fan analysis out after data collection."""
        logger.info("构建并行执行工作流")
        self._register_agents(workflow)
        self._wire_fan_out(workflow)
        return workflow.compile(checkpointer=self.memory)

    def _build_pipeline_workflow(self, workflow: StateGraph):
        """Pipeline strategy.

        Currently identical in topology to sequential; kept as a separate
        builder for its distinct log line and future divergence.
        """
        logger.info("构建流水线执行工作流")
        self._register_agents(workflow)
        self._wire_linear(workflow)
        return workflow.compile(checkpointer=self.memory)

    def _build_expert_committee_workflow(self, workflow: StateGraph):
        """Expert-committee strategy: the expert analyses run in parallel."""
        logger.info("构建专家委员会工作流")
        self._register_agents(workflow)
        self._wire_fan_out(workflow)
        return workflow.compile(checkpointer=self.memory)

    def _build_adaptive_workflow(self, workflow: StateGraph):
        """Adaptive strategy: linear flow plus retry loops and error routing."""
        logger.info("构建自适应工作流")
        self._register_agents(workflow)

        # Dedicated retry nodes wrap the two flaky stages.
        workflow.add_node("retry_data_collection", self._retry_data_collection_node)
        workflow.add_node("retry_prediction", self._retry_prediction_node)

        # Route to a retry node when data collection reports a recoverable failure.
        workflow.add_conditional_edges(
            "data_collector",
            should_retry_data_collection,
            {
                "retry_data_collection": "retry_data_collection",
                "continue_to_prediction": "prediction"
            }
        )
        workflow.add_edge("retry_data_collection", "prediction")

        workflow.add_conditional_edges(
            "prediction",
            should_retry_prediction,
            {
                "retry_prediction": "retry_prediction",
                "continue_to_impact_analysis": "impact_analyzer"
            }
        )
        workflow.add_edge("retry_prediction", "impact_analyzer")
        workflow.add_edge("impact_analyzer", "visualization")

        # Visualization may abort the run early on unrecoverable errors.
        workflow.add_conditional_edges(
            "visualization",
            should_continue_workflow,
            {
                "continue_to_coordinator": "coordinator",
                "end_with_error": END
            }
        )
        workflow.add_edge("coordinator", END)

        return workflow.compile(checkpointer=self.memory)

    @staticmethod
    def _prepare_retry_state(state: AgentState, task_label: str) -> AgentState:
        """Return a copy of ``state`` with the retry counter bumped, the
        previous error cleared, and a retry message logged for ``task_label``."""
        retry_count = state.get("retry_count", 0) + 1
        updated_state = state.copy()
        updated_state["retry_count"] = retry_count
        updated_state["error_message"] = None
        return add_message(updated_state, f"开始第 {retry_count} 次{task_label}重试")

    async def _retry_data_collection_node(self, state: AgentState) -> AgentState:
        """Retry wrapper around the data-collection agent."""
        return await data_collector_node(self._prepare_retry_state(state, "数据收集"))

    async def _retry_prediction_node(self, state: AgentState) -> AgentState:
        """Retry wrapper around the prediction agent."""
        return await prediction_node(self._prepare_retry_state(state, "预测"))

    async def execute_workflow(self, initial_state: AgentState) -> AgentState:
        """Run the compiled workflow to completion.

        Seeds runtime bookkeeping fields (start time, message log, retry
        counter, agent metrics) on a copy of ``initial_state``. On failure
        the seeded state is returned with ``error_message`` set instead of
        the exception propagating to the caller.
        """
        logger.info(f"开始执行LangGraph工作流，策略: {self.strategy.value}")

        execution_state = initial_state.copy()
        execution_state["execution_start_time"] = time.time()
        execution_state["messages"] = []
        execution_state["retry_count"] = 0
        execution_state["agent_metrics"] = {}

        try:
            # Fixed thread id for the MemorySaver checkpointer.
            config = {"configurable": {"thread_id": "main_thread"}}
            result = await self.workflow.ainvoke(execution_state, config=config)

            logger.info("LangGraph工作流执行完成")
            return result

        except Exception as e:
            error_msg = f"工作流执行失败: {str(e)}"
            logger.error(error_msg)
            updated_state = execution_state.copy()
            updated_state["error_message"] = error_msg
            return updated_state

    def get_workflow_graph(self) -> str:
        """Return a Mermaid rendering of the compiled graph, or a failure notice."""
        try:
            return self.workflow.get_graph().draw_mermaid()
        except Exception as e:
            logger.warning(f"无法生成工作流图: {e}")
            return "工作流图生成失败"

# ==================== 结果展示函数 ====================

def print_results(state: AgentState, scheduler: LangGraphMultiAgentScheduler):
    """Print a formatted console report of the final workflow state.

    Each section is emitted only when its data is present in ``state``:
    basic run info, the tail of the execution log, data-quality counts,
    per-model prediction results, a day-by-day ensemble forecast table with
    trend markers and a simple investment hint, impact-factor contributions,
    generated chart paths, and per-agent performance metrics. Ends with a
    success or error banner. Read-only: ``state`` is never modified.
    """
    print("\n" + "="*80)
    print("🚀 LangGraph多Agent调度系统 - 汇率预测结果")
    print("="*80)
    
    # Basic run configuration
    print(f"\n📊 预测信息:")
    print(f"   货币对: {state['currency_pair']}")
    print(f"   预测天数: {state['prediction_days']}天")
    print(f"   调度策略: {scheduler.strategy.value}")
    
    # Execution log (tail only, to keep the report compact)
    if state.get("messages"):
        print(f"\n📝 执行日志:")
        for message in state["messages"][-5:]:  # show the last 5 messages
            print(f"   {message}")
    
    # Data-quality summary from the final report
    if state.get("final_report"):
        report = state["final_report"]
        data_quality = report.get('data_quality', {})
        print(f"\n📈 数据质量:")
        print(f"   汇率数据: {data_quality.get('exchange_rate_records', 0)} 条记录")
        print(f"   情感数据: {data_quality.get('sentiment_records', 0)} 条记录")
        print(f"   政策数据: {data_quality.get('policy_records', 0)} 条记录")
        print(f"   事件数据: {data_quality.get('event_records', 0)} 条记录")
        print(f"   总执行时间: {report.get('total_execution_time', 0):.2f}秒")
    
    # Per-model prediction results
    if state.get("predictions"):
        print(f"\n🎯 预测结果:")
        for model_name, result in state["predictions"].items():
            if 'predictions' in result:
                # NOTE(review): this local shadows sklearn's r2_score import
                # inside this function scope — harmless here, but renaming
                # would be cleaner.
                r2_score = result.get('r2_score', 0)
                model_type = result.get('model_type', 'Unknown')
                predictions = result['predictions']
                
                print(f"\n   {model_type}:")
                print(f"     R² 分数: {r2_score:.4f}")
                # Only the first 3 predicted values are shown as a preview.
                print(f"     未来{len(predictions)}天预测: {[f'{p:.4f}' for p in predictions[:3]]}...")
                
                # The ensemble model additionally carries its component weights.
                if model_name == 'ensemble' and 'weights' in result:
                    weights = result['weights']
                    print(f"     模型权重: LR={weights['linear_regression']:.3f}, RF={weights['random_forest']:.3f}")
    
    # Detailed day-by-day forecast table (ensemble model only)
    if state.get("predictions") and 'ensemble' in state["predictions"]:
        print(f"\n💰 详细预测汇率结果 ({state['currency_pair']}):")
        ensemble_predictions = state["predictions"]['ensemble']['predictions']
        
        # Generate the future dates starting from tomorrow.
        # NOTE(review): redundant local import — datetime/timedelta are already
        # imported at module level.
        from datetime import datetime, timedelta
        start_date = datetime.now() + timedelta(days=1)
        
        print(f"\n   📅 未来{len(ensemble_predictions)}天汇率预测:")
        print(f"   {'日期':<12} {'预测汇率':<10} {'变化趋势':<8}")
        print(f"   {'-'*12} {'-'*10} {'-'*8}")
        
        for i, prediction in enumerate(ensemble_predictions):
            date = start_date + timedelta(days=i)
            date_str = date.strftime('%m-%d')
            
            # Compute the day-over-day change for the trend marker.
            if i == 0:
                # Compare against the current rate (assumed to be the last
                # value of the historical series).
                if state.get("exchange_rate_data"):
                    current_rate = state["exchange_rate_data"]["rate"][-1]
                    change = prediction - current_rate
                else:
                    change = 0
            else:
                change = prediction - ensemble_predictions[i-1]
            
            trend = "📈" if change > 0 else "📉" if change < 0 else "➡️"
            change_str = f"{change:+.4f}"
            
            print(f"   {date_str:<12} {prediction:<10.4f} {trend} {change_str}")
        
        # Forecast summary statistics
        min_rate = min(ensemble_predictions)
        max_rate = max(ensemble_predictions)
        avg_rate = sum(ensemble_predictions) / len(ensemble_predictions)
        
        print(f"\n   📊 预测统计:")
        print(f"   最低预测汇率: {min_rate:.4f}")
        print(f"   最高预测汇率: {max_rate:.4f}")
        print(f"   平均预测汇率: {avg_rate:.4f}")
        print(f"   汇率波动范围: {max_rate - min_rate:.4f}")
        
        # Simple investment hint based on a ±1% threshold around the current rate
        if state.get("exchange_rate_data"):
            current_rate = state["exchange_rate_data"]["rate"][-1]
            future_avg = avg_rate
            
            print(f"\n   💡 投资建议:")
            if future_avg > current_rate * 1.01:  # expected rise above 1%
                print(f"   📈 汇率预期上涨 {((future_avg/current_rate-1)*100):.2f}%，建议关注买入机会")
            elif future_avg < current_rate * 0.99:  # expected drop above 1%
                print(f"   📉 汇率预期下跌 {((1-future_avg/current_rate)*100):.2f}%，建议谨慎操作")
            else:
                print(f"   ➡️ 汇率预期相对稳定，波动幅度较小")
    
    # Impact-factor contribution breakdown
    if state.get("impact_analysis"):
        print(f"\n🔍 影响因素分析:")
        contributions = state["impact_analysis"]['contributions']
        print(f"   情感因素贡献: {contributions['sentiment']:.1%}")
        print(f"   政策因素贡献: {contributions['policy']:.1%}")
        print(f"   事件因素贡献: {contributions['event']:.1%}")
        
        total_impact = state["impact_analysis"].get('total_impact_strength', 0)
        print(f"   总体影响强度: {total_impact:.4f}")
    
    # Paths of generated chart files (only those that exist on disk)
    if state.get("visualizations"):
        print(f"\n📊 生成的图表:")
        for chart_type, path in state["visualizations"].items():
            if path and os.path.exists(path):
                print(f"   {chart_type}: {path}")
    
    # Per-agent performance metrics
    if state.get("agent_metrics"):
        print(f"\n⚡ Agent性能指标:")
        for agent_name, metrics in state["agent_metrics"].items():
            print(f"   {agent_name}:")
            print(f"     平均执行时间: {metrics['average_time']:.2f}秒")
            print(f"     成功率: {metrics['success_rate']:.1%}")
            print(f"     执行次数: {metrics['total_executions']}")
    
    print("\n" + "="*80)
    
    if state.get("error_message"):
        print(f"❌ 执行过程中出现错误: {state['error_message']}")
    else:
        print("✅ LangGraph多Agent调度系统执行完成！")
    
    print("="*80)

# ==================== 主函数 ====================

async def main():
    """Interactive entry point for the scheduler.

    Prompts for scheduling strategy, prediction horizon and currency pair
    (falling back to defaults on bad input, EOF or Ctrl-C), executes the
    LangGraph workflow, prints the results, and then offers optional views
    of the workflow graph (Mermaid) and the detailed JSON report.
    """
    print("🚀 欢迎使用LangGraph多Agent调度系统！")
    print("\n可用的调度策略:")
    print("1. sequential - 顺序执行")
    print("2. parallel - 并行执行")
    print("3. pipeline - 流水线执行")
    print("4. expert_committee - 专家委员会")
    print("5. adaptive - 自适应调度")
    
    # Gather user configuration; any failure falls back to defaults.
    try:
        strategy_input = input("\n请选择调度策略 (1-5, 默认为5-自适应): ").strip()
        strategy_map = {
            '1': SchedulingStrategy.SEQUENTIAL,
            '2': SchedulingStrategy.PARALLEL,
            '3': SchedulingStrategy.PIPELINE,
            '4': SchedulingStrategy.EXPERT_COMMITTEE,
            '5': SchedulingStrategy.ADAPTIVE
        }
        strategy = strategy_map.get(strategy_input, SchedulingStrategy.ADAPTIVE)
        
        prediction_days = input("请输入预测天数 (默认7天): ").strip()
        # Require a strictly positive horizon: a "0" input previously slipped
        # through isdigit() and produced an empty forecast downstream.
        prediction_days = int(prediction_days) if prediction_days.isdigit() and int(prediction_days) > 0 else 7
        
        currency_pair = input("请输入货币对 (默认USD/CNY): ").strip()
        currency_pair = currency_pair if currency_pair else "USD/CNY"
        
    except KeyboardInterrupt:
        print("\n用户取消操作")
        return
    except Exception as e:
        print(f"输入错误: {e}，使用默认设置")
        strategy = SchedulingStrategy.ADAPTIVE
        prediction_days = 7
        currency_pair = "USD/CNY"
    
    # Build the scheduler and a fully-initialized workflow state.
    scheduler = LangGraphMultiAgentScheduler(strategy)
    initial_state: AgentState = {
        "currency_pair": currency_pair,
        "prediction_days": prediction_days,
        "exchange_rate_data": None,
        "sentiment_data": None,
        "policy_data": None,
        "event_data": None,
        "predictions": None,
        "impact_analysis": None,
        "visualizations": None,
        "messages": [],
        "error_message": None,
        "retry_count": 0,
        "execution_start_time": 0.0,
        "agent_metrics": {},
        "final_report": None
    }
    
    # Execute the workflow.
    print(f"\n🔄 开始执行LangGraph工作流...")
    print(f"调度策略: {strategy.value}")
    print(f"货币对: {currency_pair}")
    print(f"预测天数: {prediction_days}")
    
    final_state = await scheduler.execute_workflow(initial_state)
    
    # Present the results.
    print_results(final_state, scheduler)
    
    # Optionally show the workflow graph.
    try:
        show_graph = input("\n是否查看工作流图? (y/n, 默认n): ").strip().lower()
        if show_graph == 'y':
            print("\n📊 工作流图 (Mermaid格式):")
            print(scheduler.get_workflow_graph())
    except KeyboardInterrupt:
        pass
    
    # Optionally show the detailed JSON report.
    try:
        show_detailed = input("\n是否查看详细报告? (y/n, 默认n): ").strip().lower()
        if show_detailed == 'y' and final_state.get("final_report"):
            print("\n📊 详细报告:")
            print(json.dumps(final_state["final_report"], indent=2, ensure_ascii=False, default=str))
    except KeyboardInterrupt:
        pass

if __name__ == '__main__':
    # Script entry point: run the async main loop and surface any failure
    # both to the log file and to the console.
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        print("\n程序被用户中断")
    except Exception as exc:
        failure = f"程序执行出错: {str(exc)}"
        logger.error(failure)
        print(f"\n❌ {failure}")