# scripts/run_rl_training.py
"""
运行强化学习训练脚本
"""

import argparse
import sys
from pathlib import Path
import yaml
import torch
import numpy as np
from datetime import datetime

# Add the project root to sys.path so the `examples` package imports resolve
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from examples.reinforcement_learning_trading_torch import (
    main as run_main,
    run_rl_trading_demo,
    TradingEnvironment,
    RLTrainer,
    DQNAgent,
    PPOAgent,
    A2CAgent,
    DDPGAgent
)

def load_config(config_path: str = "config/rl_config.yaml"):
    """Load a YAML configuration file.

    Args:
        config_path: Path to the YAML config file.

    Returns:
        The parsed configuration dict, or None when the file is missing
        or cannot be parsed.
    """
    path = Path(config_path)

    # Missing file: report and bail out instead of raising.
    if not path.exists():
        print(f"❌ 配置文件不存在: {config_path}")
        return None

    try:
        with open(path, 'r', encoding='utf-8') as fh:
            parsed = yaml.safe_load(fh)
    except Exception as exc:
        print(f"❌ 配置文件加载失败: {exc}")
        return None

    print(f"✅ 配置文件加载成功: {config_path}")
    return parsed

def setup_device(config: dict):
    """Choose the torch compute device from the config.

    Uses CUDA when the config requests it (default True) and a GPU is
    available; otherwise falls back to CPU.

    Args:
        config: Config dict with an optional 'device' section
            ('use_cuda', 'cuda_device').

    Returns:
        The selected torch.device.
    """
    device_cfg = config.get('device', {})

    if device_cfg.get('use_cuda', True) and torch.cuda.is_available():
        idx = device_cfg.get('cuda_device', 0)
        print(f"🔥 使用GPU: {torch.cuda.get_device_name(idx)}")
        return torch.device(f'cuda:{idx}')

    print("💻 使用CPU")
    return torch.device('cpu')

def train_single_algorithm(algo_name: str, config: dict, env: TradingEnvironment, device: torch.device):
    """Train a single RL algorithm on the given trading environment.

    Args:
        algo_name: One of 'DQN', 'PPO', 'A2C', 'DDPG' (case-insensitive).
        config: Config dict; reads 'training', 'ppo' and 'a2c' sections
            with defaults.
        env: Prepared trading environment.
        device: Torch device passed to the trainer.

    Returns:
        The trainer's results on success; None for an unsupported algorithm
        or when training raises.
    """
    print(f"\n🎯 训练 {algo_name} 算法")
    print("=" * 50)

    trainer = RLTrainer(env, device)

    # Hoist values the original recomputed in every branch.
    algo = algo_name.upper()
    training_cfg = config.get('training', {})
    episodes = training_cfg.get('episodes', 1000)

    try:
        if algo == 'DQN':
            results = trainer.train_dqn(
                episodes=episodes,
                save_interval=training_cfg.get('save_interval', 100)
            )
        elif algo == 'PPO':
            results = trainer.train_ppo(
                episodes=episodes,
                update_interval=config.get('ppo', {}).get('update_interval', 20)
            )
        elif algo == 'A2C':
            results = trainer.train_a2c(
                episodes=episodes,
                update_interval=config.get('a2c', {}).get('update_interval', 10)
            )
        elif algo == 'DDPG':
            results = trainer.train_ddpg(episodes=episodes)
        else:
            print(f"❌ 不支持的算法: {algo_name}")
            return None

        print(f"✅ {algo_name} 训练完成")
        return results

    except Exception as e:
        # Keep the driver script alive when one algorithm fails; report and
        # let the caller decide whether to continue with others.
        print(f"❌ {algo_name} 训练失败: {e}")
        import traceback
        traceback.print_exc()
        return None

def run_training(args, config: dict):
    """Run the full training pipeline: fetch data, build the env, train.

    Falls back to the built-in demo when market data cannot be obtained.

    Args:
        args: Parsed CLI namespace; uses .seed and .algorithm.
        config: Config dict with optional 'device', 'data', 'environment'
            and 'training' sections (defaults applied throughout).
    """
    print("🚀 开始强化学习训练")
    print("=" * 60)

    device = setup_device(config)

    # Seed torch and numpy for reproducible runs (skipped when seed is None).
    if args.seed is not None:
        torch.manual_seed(args.seed)
        np.random.seed(args.seed)
        print(f"🎲 设置随机种子: {args.seed}")

    try:
        # Lazy import: the example data service is only needed here.
        from examples.reinforcement_learning_trading_torch import MarketDataService
        from datetime import timedelta

        # Read the 'data' section once instead of per lookup.
        data_cfg = config.get('data', {})

        market_service = MarketDataService()
        end_date = datetime.now()
        start_date = end_date - timedelta(days=data_cfg.get('train_period', 730))

        data = market_service.get_stock_data(
            symbol=data_cfg.get('symbol', 'AAPL'),
            start_date=start_date.strftime('%Y-%m-%d'),
            end_date=end_date.strftime('%Y-%m-%d')
        )

        # Fewer than 100 rows is too little history to train on -> demo data.
        if data is None or len(data) < 100:
            print("❌ 数据获取失败，使用演示数据")
            return run_rl_trading_demo()

        env_cfg = config.get('environment', {})
        env = TradingEnvironment(
            data=data,
            initial_balance=env_cfg.get('initial_balance', 100000),
            transaction_cost=env_cfg.get('transaction_cost', 0.001),
            max_position=env_cfg.get('max_position', 1.0),
            lookback_window=env_cfg.get('lookback_window', 30)
        )

        print(f"📊 数据准备完成: {len(data)}条记录")

        if args.algorithm:
            # A single algorithm was requested on the command line.
            results = train_single_algorithm(args.algorithm, config, env, device)
            if results:
                print(f"🎉 {args.algorithm} 训练成功完成！")
            else:
                print(f"❌ {args.algorithm} 训练失败")
        else:
            # No algorithm given: train every supported one in turn.
            for algo in ('DQN', 'PPO', 'A2C', 'DDPG'):
                if train_single_algorithm(algo, config, env, device):
                    print(f"✅ {algo} 完成")
                else:
                    print(f"❌ {algo} 失败")

        print("\n🎯 训练任务完成！")

    except Exception as e:
        print(f"❌ 训练过程出错: {e}")
        import traceback
        traceback.print_exc()

def main():
    """CLI entry point: parse arguments, then run demo mode or training.

    Bug fix vs. the original: when a config file loads successfully but has
    no 'training' section, `--episodes` used to raise KeyError on
    `config['training']`; `setdefault` now creates the section on demand.
    """
    parser = argparse.ArgumentParser(description="强化学习交易系统训练脚本")

    parser.add_argument(
        '--algorithm', '-a',
        choices=['DQN', 'PPO', 'A2C', 'DDPG'],
        help='指定训练算法'
    )

    parser.add_argument(
        '--config', '-c',
        default='config/rl_config.yaml',
        help='配置文件路径'
    )

    parser.add_argument(
        '--demo',
        action='store_true',
        help='运行演示模式'
    )

    parser.add_argument(
        '--seed',
        type=int,
        default=42,
        help='随机种子'
    )

    parser.add_argument(
        '--episodes',
        type=int,
        help='训练回合数'
    )

    args = parser.parse_args()

    # Demo mode bypasses config loading and data fetching entirely.
    if args.demo:
        print("🎮 运行演示模式")
        run_rl_trading_demo()
        return

    # Fall back to built-in defaults when the config file is unusable.
    config = load_config(args.config)
    if config is None:
        print("❌ 配置文件加载失败，使用默认配置")
        config = {
            'environment': {
                'initial_balance': 100000,
                'transaction_cost': 0.001,
                'max_position': 1.0,
                'lookback_window': 30
            },
            'training': {
                'episodes': args.episodes or 1000,
                'save_interval': 100
            },
            'device': {
                'use_cuda': True,
                'cuda_device': 0
            }
        }

    # CLI override; setdefault guards configs that lack a 'training' section.
    if args.episodes:
        config.setdefault('training', {})['episodes'] = args.episodes

    run_training(args, config)

if __name__ == "__main__":
    main()
