# scripts/evaluate_models.py
"""
模型评估脚本
"""

import argparse
import sys
from pathlib import Path
import torch
import pandas as pd
import numpy as np
from datetime import datetime, timedelta

# Add the project root to sys.path so the `examples` package imports below
# resolve when this file is run directly as a script (not via `-m`).
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from examples.reinforcement_learning_trading_torch import (
    TradingEnvironment,
    RLEvaluator,
    DQNAgent,
    PPOAgent,
    A2CAgent,
    DDPGAgent,
    create_rl_strategy_backtest,
    plot_backtest_results
)

def load_model(model_path: str, model_type: str, state_dim: int, device: torch.device):
    """Load a trained RL agent from a checkpoint file.

    Args:
        model_path: Path to the saved checkpoint (.pth).
        model_type: One of 'DQN', 'PPO', 'A2C', 'DDPG' (case-insensitive).
        state_dim: Dimensionality of the environment observation space.
        device: Torch device the checkpoint tensors are mapped onto.

    Returns:
        The restored agent, or None if the file is missing, the type is
        unsupported, or loading fails.
    """
    model_file = Path(model_path)

    if not model_file.exists():
        print(f"❌ 模型文件不存在: {model_path}")
        return None

    # Normalize once instead of re-uppercasing in every branch.
    kind = model_type.upper()

    try:
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints from trusted sources.
        checkpoint = torch.load(model_file, map_location=device)

        if kind == 'DQN':
            agent = DQNAgent(state_dim=state_dim, action_dim=21, device=device)
            agent.q_network.load_state_dict(checkpoint['q_network'])
            agent.target_network.load_state_dict(checkpoint['target_network'])
            # Force greedy behavior: evaluation must not explore, even when
            # the checkpoint stored a non-zero training epsilon.
            agent.epsilon = 0.0

        elif kind == 'PPO':
            agent = PPOAgent(state_dim=state_dim, device=device)
            agent.network.load_state_dict(checkpoint['network'])

        elif kind == 'A2C':
            agent = A2CAgent(state_dim=state_dim, device=device)
            agent.network.load_state_dict(checkpoint['network'])

        elif kind == 'DDPG':
            agent = DDPGAgent(state_dim=state_dim, device=device)
            agent.actor.load_state_dict(checkpoint['actor'])
            agent.critic.load_state_dict(checkpoint['critic'])

        else:
            print(f"❌ 不支持的模型类型: {model_type}")
            return None

        print(f"✅ 模型加载成功: {model_path}")
        return agent

    except Exception as e:
        # Best-effort CLI tool: report the failure and let the caller bail out.
        print(f"❌ 模型加载失败: {e}")
        return None

def evaluate_single_model(model_path: str, model_type: str, test_data: pd.DataFrame,
                          device: torch.device, episodes: int = 100):
    """Evaluate one trained agent on held-out data and run a detailed backtest.

    Args:
        model_path: Checkpoint file to restore the agent from.
        model_type: Agent family ('DQN', 'PPO', 'A2C', 'DDPG').
        test_data: OHLCV-style DataFrame used to build the test environment.
        device: Torch device for inference.
        episodes: Number of evaluation episodes to average over.

    Returns:
        Dict with 'evaluation_results', 'backtest_results' and 'agent',
        or None when the model could not be loaded.
    """
    print(f"\n📊 评估 {model_type} 模型")
    print("=" * 40)

    # Build the evaluation environment over the held-out data.
    env = TradingEnvironment(
        data=test_data,
        initial_balance=100000,
        transaction_cost=0.001,
        max_position=1.0,
        lookback_window=30,
    )

    # Restore the trained agent; bail out early if that fails.
    agent = load_model(model_path, model_type, env.state_dim, device)
    if agent is None:
        return None

    # Episode-level evaluation plus a printable report.
    evaluator = RLEvaluator(env)
    metrics = evaluator.evaluate_agent(agent, episodes=episodes)
    print(evaluator.generate_evaluation_report(metrics, model_type))

    # Full backtest pass and its chart.
    backtest = create_rl_strategy_backtest(agent, test_data)
    plot_backtest_results(backtest, f"{model_type} 策略回测")

    return {
        'evaluation_results': metrics,
        'backtest_results': backtest,
        'agent': agent,
    }

def compare_models(model_configs: list, test_data: pd.DataFrame, device: torch.device):
    """Evaluate each configured model and print a cross-model comparison.

    Args:
        model_configs: List of dicts with keys 'path', 'type' and optional 'name'.
        test_data: DataFrame shared by every evaluation run.
        device: Torch device for inference.

    Returns:
        Mapping from model display name to its evaluation results
        (models that failed to load are omitted).
    """
    print("\n🔥 模型对比分析")
    print("=" * 60)

    results_by_name = {}
    for cfg in model_configs:
        label = cfg.get('name', cfg['type'])
        outcome = evaluate_single_model(cfg['path'], cfg['type'], test_data, device)
        if outcome:
            results_by_name[label] = outcome['evaluation_results']

    # A comparison report only makes sense with two or more successful runs.
    if len(results_by_name) > 1:
        from examples.reinforcement_learning_trading_torch import compare_rl_algorithms
        print(compare_rl_algorithms(results_by_name))

    return results_by_name

def _load_test_data(data_path):
    """Return the evaluation DataFrame: the user's CSV if given, else default AAPL data.

    Returns None when the default-data download fails.
    """
    if data_path:
        test_data = pd.read_csv(data_path, index_col=0, parse_dates=True)
        print(f"📊 加载测试数据: {len(test_data)}条记录")
        return test_data

    print("📊 使用默认测试数据")
    # Lazy import: only needed on the download fallback path.
    from examples.reinforcement_learning_trading_torch import MarketDataService

    market_service = MarketDataService()
    end_date = datetime.now()
    start_date = end_date - timedelta(days=180)
    # May return None if the data source is unavailable; caller handles it.
    return market_service.get_stock_data(
        symbol="AAPL",
        start_date=start_date.strftime('%Y-%m-%d'),
        end_date=end_date.strftime('%Y-%m-%d')
    )


def _to_jsonable(obj):
    """Recursively convert numpy scalars/arrays so json.dump cannot fail on them."""
    if isinstance(obj, dict):
        return {key: _to_jsonable(value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [_to_jsonable(value) for value in obj]
    if isinstance(obj, np.generic):
        return obj.item()
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    return obj


def _save_evaluation_report(result, args):
    """Write the single-model evaluation summary to args.output as UTF-8 JSON."""
    import json

    output_path = Path(args.output)
    output_path.parent.mkdir(parents=True, exist_ok=True)

    backtest = result['backtest_results']
    evaluation_data = {
        'model_type': args.type,
        'model_path': args.model,
        # Evaluator metrics often contain numpy scalars, which the stock
        # json encoder rejects — sanitize before dumping.
        'evaluation_results': _to_jsonable(result['evaluation_results']),
        'backtest_summary': _to_jsonable({
            'total_return': backtest['total_return'],
            'annualized_return': backtest['annualized_return'],
            'sharpe_ratio': backtest['sharpe_ratio'],
            'max_drawdown': backtest['max_drawdown'],
            'total_trades': backtest['total_trades']
        }),
        'timestamp': datetime.now().isoformat()
    }

    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(evaluation_data, f, indent=2, ensure_ascii=False)

    print(f"💾 评估结果已保存: {output_path}")


def main():
    """Parse CLI arguments, then run single-model evaluation or multi-model comparison."""
    parser = argparse.ArgumentParser(description="强化学习模型评估脚本")

    parser.add_argument(
        '--model', '-m',
        required=True,
        help='模型文件路径'
    )
    parser.add_argument(
        '--type', '-t',
        choices=['DQN', 'PPO', 'A2C', 'DDPG'],
        required=True,
        help='模型类型'
    )
    parser.add_argument(
        '--data', '-d',
        help='测试数据路径'
    )
    parser.add_argument(
        '--episodes',
        type=int,
        default=100,
        help='评估回合数'
    )
    parser.add_argument(
        '--compare',
        action='store_true',
        help='比较多个模型'
    )
    parser.add_argument(
        '--output',
        help='输出报告路径'
    )

    args = parser.parse_args()

    # Prefer GPU when available; all tensors are mapped onto this device.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"🔥 使用设备: {device}")

    test_data = _load_test_data(args.data)
    if test_data is None:
        print("❌ 无法获取测试数据")
        return

    if args.compare:
        # Comparison mode: fixed set of checkpoint paths; missing files are skipped.
        model_configs = [
            {'path': 'models/dqn_episode_500.pth', 'type': 'DQN', 'name': 'DQN-500'},
            {'path': 'models/ppo_episode_500.pth', 'type': 'PPO', 'name': 'PPO-500'},
            {'path': 'models/a2c_episode_500.pth', 'type': 'A2C', 'name': 'A2C-500'},
            {'path': 'models/ddpg_episode_500.pth', 'type': 'DDPG', 'name': 'DDPG-500'}
        ]

        existing_configs = []
        for config in model_configs:
            if Path(config['path']).exists():
                existing_configs.append(config)
            else:
                print(f"⚠️ 模型文件不存在: {config['path']}")

        if existing_configs:
            compare_models(existing_configs, test_data, device)
        else:
            print("❌ 没有找到可比较的模型文件")
    else:
        # Single-model evaluation, optionally persisted as a JSON report.
        result = evaluate_single_model(args.model, args.type, test_data, device, args.episodes)
        if result and args.output:
            _save_evaluation_report(result, args)

# Script entry point: run the evaluator only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()