# examples/reinforcement_learning_trading_torch.py
"""
强化学习交易系统 - PyTorch版本
使用多种强化学习算法进行智能交易决策
"""

import sys
import numpy as np
import pandas as pd
from pathlib import Path
from datetime import datetime, timedelta
from typing import Dict, List, Tuple, Optional, Any
import warnings
from collections import deque, namedtuple
import random
import math
warnings.filterwarnings('ignore')

# PyTorch相关库
try:
    import torch
    import torch.nn as nn
    import torch.optim as optim
    import torch.nn.functional as F
    from torch.distributions import Categorical, Normal
    from torch.utils.tensorboard import SummaryWriter
    import gymnasium as gym
    from gymnasium import spaces
    PYTORCH_AVAILABLE = True
    
    # 设置设备
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"🔥 使用设备: {device}")
    
except ImportError:
    PYTORCH_AVAILABLE = False
    print("⚠️ PyTorch或gymnasium未安装，强化学习模型将无法使用")

# 添加项目路径
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from src.services.market_data_service import MarketDataService

# 经验回放元组
Experience = namedtuple('Experience', ['state', 'action', 'reward', 'next_state', 'done'])

class TradingEnvironment(gym.Env):
    """Single-asset trading environment (gymnasium-compatible).

    The agent emits one continuous action in [-1, 1]: the *target position*
    as a signed fraction of equity (negative = short, positive = long).
    Each step the environment re-prices the account at the next close,
    charges proportional transaction costs on position changes, and returns
    a shaped reward combining P&L, cost, over-trading and drawdown terms.
    """
    
    def __init__(self, data: pd.DataFrame, initial_balance: float = 100000,
                 transaction_cost: float = 0.001, max_position: float = 1.0,
                 lookback_window: int = 30):
        """Build the environment.

        Args:
            data: OHLCV frame with columns 'open', 'high', 'low', 'close',
                'volume'. The index is discarded (reset to a RangeIndex).
            initial_balance: starting account equity.
            transaction_cost: proportional cost charged on |position change|.
            max_position: cap on |position| (fraction of equity).
            lookback_window: warm-up rows skipped at the start of each
                episode so rolling indicators are populated.
        """
        super().__init__()
        
        self.data = data.reset_index(drop=True)
        self.initial_balance = initial_balance
        self.transaction_cost = transaction_cost
        self.max_position = max_position
        self.lookback_window = lookback_window
        
        # State space: price features + technical indicators + account state
        self.state_dim = self._calculate_state_dimension()
        
        # Action space: buy / sell / hold expressed as one continuous value
        self.action_space = spaces.Box(
            low=-1.0, high=1.0, shape=(1,), dtype=np.float32
        )
        
        # Observation space (unbounded; features are only roughly normalized)
        self.observation_space = spaces.Box(
            low=-np.inf, high=np.inf, shape=(self.state_dim,), dtype=np.float32
        )
        
        # Initialize the episode state
        self.reset()
    
    def _calculate_state_dimension(self) -> int:
        """Return the flat observation length (price + technical + account + time).

        Must stay in sync with the feature columns used in _get_observation.
        """
        # Raw price features
        price_features = ['open', 'high', 'low', 'close', 'volume']
        
        # Technical indicators
        technical_features = [
            'returns', 'volatility', 'rsi', 'macd', 'bb_position',
            'sma_5', 'sma_20', 'ema_12', 'ema_26'
        ]
        
        # Account-state features
        account_features = ['balance', 'position', 'unrealized_pnl', 'total_trades']
        
        # Time features
        time_features = ['day_of_week', 'month', 'quarter']
        
        return len(price_features) + len(technical_features) + len(account_features) + len(time_features)
    
    def _create_features(self) -> pd.DataFrame:
        """Derive normalized market features from the raw OHLCV frame.

        Returns a copy of the data with price columns replaced by relative
        changes, plus rolling technical indicators. NaNs from the rolling
        warm-up are filled with 0.
        """
        df = self.data.copy()
        
        # Normalize prices as relative change vs. the previous close
        for col in ['open', 'high', 'low', 'close']:
            df[col] = df[col] / df['close'].shift(1) - 1
        
        # Volume as a 20-period rolling z-score
        df['volume'] = (df['volume'] - df['volume'].rolling(20).mean()) / df['volume'].rolling(20).std()
        
        # Technical indicators
        df['returns'] = df['close'].pct_change()
        df['volatility'] = df['returns'].rolling(20).std()
        
        # RSI (14-period, simple moving averages of gains/losses)
        delta = df['close'].diff()
        gain = delta.where(delta > 0, 0)
        loss = -delta.where(delta < 0, 0)
        avg_gain = gain.rolling(14).mean()
        avg_loss = loss.rolling(14).mean()
        rs = avg_gain / avg_loss
        df['rsi'] = 100 - (100 / (1 + rs))
        df['rsi'] = (df['rsi'] - 50) / 50  # rescale to [-1, 1]
        
        # MACD, scaled by price.  NOTE(review): at this point df['close'] has
        # already been overwritten with relative changes above, so the EMAs
        # and the divisor operate on returns, not raw prices — confirm intent.
        ema12 = df['close'].ewm(span=12).mean()
        ema26 = df['close'].ewm(span=26).mean()
        df['macd'] = (ema12 - ema26) / df['close']
        
        # Bollinger-band position: distance from the 20-SMA in 2-sigma units
        sma20 = df['close'].rolling(20).mean()
        std20 = df['close'].rolling(20).std()
        df['bb_position'] = (df['close'] - sma20) / (2 * std20)
        
        # Moving averages expressed relative to the current value
        df['sma_5'] = df['close'].rolling(5).mean() / df['close'] - 1
        df['sma_20'] = df['close'].rolling(20).mean() / df['close'] - 1
        df['ema_12'] = df['close'].ewm(span=12).mean() / df['close'] - 1
        df['ema_26'] = df['close'].ewm(span=26).mean() / df['close'] - 1
        
        # Synthetic time features: derived from the row index modulo fixed
        # periods, not from real calendar dates.
        df['day_of_week'] = (df.index % 7) / 7
        df['month'] = (df.index % 30) / 30
        df['quarter'] = (df.index % 90) / 90
        
        return df.fillna(0)
    
    def reset(self, seed=None):
        """Reset the episode; returns (observation, info) per the gymnasium API.

        NOTE(review): gymnasium's Env.reset also accepts an ``options``
        keyword argument — confirm no caller passes it.
        """
        super().reset(seed=seed)
        
        # Recompute features (data is fixed, so this is idempotent per episode)
        self.features = self._create_features()
        
        # Reset account state
        self.balance = self.initial_balance
        self.position = 0.0  # signed position fraction in [-1, 1]
        self.total_trades = 0
        self.total_reward = 0
        self.max_drawdown = 0
        self.peak_balance = self.initial_balance
        
        # Start after the warm-up window so indicators are valid
        self.current_step = self.lookback_window
        self.max_steps = len(self.data) - 1
        
        # Per-episode history buffers
        self.trade_history = []
        self.balance_history = [self.initial_balance]
        self.position_history = [0.0]
        self.reward_history = []
        
        return self._get_observation(), {}
    
    def _get_observation(self) -> np.ndarray:
        """Assemble the flat float32 observation for the current step."""
        if self.current_step >= len(self.features):
            # Clamp to the last valid row (also mutates current_step)
            self.current_step = len(self.features) - 1
        
        # Market features — order must match _calculate_state_dimension
        feature_cols = [
            'open', 'high', 'low', 'close', 'volume',
            'returns', 'volatility', 'rsi', 'macd', 'bb_position',
            'sma_5', 'sma_20', 'ema_12', 'ema_26',
            'day_of_week', 'month', 'quarter'
        ]
        
        market_features = self.features[feature_cols].iloc[self.current_step].values
        
        # Account-state features, normalized to dimensionless ranges
        account_features = np.array([
            self.balance / self.initial_balance - 1,  # relative equity change
            self.position,  # current position fraction
            self._calculate_unrealized_pnl() / self.initial_balance,  # open P&L
            self.total_trades / 100  # scaled trade count
        ])
        
        # Concatenate market and account features
        observation = np.concatenate([market_features, account_features])
        
        return observation.astype(np.float32)
    
    def _calculate_unrealized_pnl(self) -> float:
        """Mark-to-market P&L of the open position, or 0 when flat/no entry price."""
        if self.position == 0:
            return 0
        
        current_price = self.data.iloc[self.current_step]['close']
        if hasattr(self, 'entry_price') and self.entry_price is not None:
            return self.position * self.balance * (current_price / self.entry_price - 1)
        return 0
    
    def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, bool, Dict]:
        """Advance one bar: trade toward the target position and re-price.

        Args:
            action: 1-element array; action[0] is the target position.

        Returns:
            (observation, reward, terminated, truncated, info). truncated is
            always False; terminated when data is exhausted or equity <= 0.
        """
        if self.current_step >= self.max_steps:
            return self._get_observation(), 0, True, False, {}
        
        # Clip the requested position to the allowed range
        target_position = np.clip(action[0], -self.max_position, self.max_position)
        
        # Trade and collect the shaped reward
        reward = self._execute_trade(target_position)
        
        # Advance the clock
        self.current_step += 1
        self.total_reward += reward
        
        # Record history
        self.balance_history.append(self.balance)
        self.position_history.append(self.position)
        self.reward_history.append(reward)
        
        # Track the equity peak for drawdown computation
        if self.balance > self.peak_balance:
            self.peak_balance = self.balance
        
        current_drawdown = (self.peak_balance - self.balance) / self.peak_balance
        self.max_drawdown = max(self.max_drawdown, current_drawdown)
        
        # Episode ends on data exhaustion or bankruptcy
        done = self.current_step >= self.max_steps or self.balance <= 0
        
        return self._get_observation(), reward, done, False, self._get_info()
    
    def _execute_trade(self, target_position: float) -> float:
        """Move to target_position, charge costs, mark to market, return reward."""
        current_price = self.data.iloc[self.current_step]['close']
        prev_price = self.data.iloc[self.current_step - 1]['close']
        
        # Signed change in position fraction
        position_change = target_position - self.position
        
        # Proportional transaction cost on the traded amount
        trade_cost = abs(position_change) * self.transaction_cost * self.balance
        
        # Only book a trade above a tiny threshold (float noise guard)
        if abs(position_change) > 1e-6:  # an actual trade happened
            self.total_trades += 1
            self.balance -= trade_cost
            
            # Record the trade
            self.trade_history.append({
                'step': self.current_step,
                'action': 'buy' if position_change > 0 else 'sell',
                'position_change': position_change,
                'price': current_price,
                'cost': trade_cost
            })
            
            # Maintain the average entry price for unrealized-P&L marking.
            # NOTE(review): the float equality check and the abs-based
            # average-cost formula look wrong when the position flips sign
            # or is merely reduced — verify against the intended accounting.
            if self.position == 0:
                self.entry_price = current_price
            elif target_position == 0:
                self.entry_price = None
            else:
                # Average-cost blend of old and newly traded size
                self.entry_price = (self.entry_price * abs(self.position) + 
                                  current_price * abs(position_change)) / abs(target_position)
        
        # Apply the new position
        self.position = target_position
        
        # Mark to market: the position fraction scales the whole balance,
        # so P&L compounds on total equity (effectively fractional leverage).
        if self.position != 0:
            position_return = self.position * (current_price / prev_price - 1)
            self.balance += position_return * self.balance
        
        # Shaped reward for the RL agent
        reward = self._calculate_reward(current_price, prev_price, position_change, trade_cost)
        
        return reward
    
    def _calculate_reward(self, current_price: float, prev_price: float, 
                         position_change: float, trade_cost: float) -> float:
        """Shaped reward: amplified P&L minus cost, churn and drawdown penalties."""
        # Directional P&L term
        if self.position != 0:
            price_change = (current_price / prev_price - 1)
            profit_reward = self.position * price_change * 100  # amplify the profit signal
        else:
            profit_reward = 0
        
        # Transaction-cost penalty (amplified)
        cost_penalty = trade_cost / self.balance * 1000  # amplify the cost signal
        
        # Penalize churning (large position swings)
        overtrading_penalty = abs(position_change) * 0.1
        
        # Small bonus for keeping the position stable
        stability_reward = 0.01 if abs(position_change) < 0.1 else 0
        
        # Penalize the running maximum drawdown
        drawdown_penalty = self.max_drawdown * 10
        
        # Combine all terms
        total_reward = (profit_reward 
                       - cost_penalty 
                       - overtrading_penalty 
                       + stability_reward 
                       - drawdown_penalty)
        
        return total_reward
    
    def _get_info(self) -> Dict:
        """Diagnostic info dict returned alongside each step."""
        return {
            'balance': self.balance,
            'position': self.position,
            'total_trades': self.total_trades,
            'total_reward': self.total_reward,
            'max_drawdown': self.max_drawdown,
            'current_step': self.current_step,
            'unrealized_pnl': self._calculate_unrealized_pnl()
        }
    
    def render(self, mode='human'):
        """Print a one-line summary of the current account state."""
        if mode == 'human':
            print(f"Step: {self.current_step}, Balance: ${self.balance:.2f}, "
                  f"Position: {self.position:.2f}, Trades: {self.total_trades}")

class ReplayBuffer:
    """经验回放缓冲区"""
    
    def __init__(self, capacity: int = 100000):
        self.buffer = deque(maxlen=capacity)
        self.capacity = capacity
    
    def push(self, state: np.ndarray, action: np.ndarray, reward: float, 
             next_state: np.ndarray, done: bool):
        """添加经验"""
        experience = Experience(state, action, reward, next_state, done)
        self.buffer.append(experience)
    
    def sample(self, batch_size: int) -> List[Experience]:
        """采样批次"""
        return random.sample(self.buffer, batch_size)
    
    def __len__(self):
        return len(self.buffer)

class DQNNetwork(nn.Module):
    """Feed-forward Q-network: maps a state vector to one Q-value per action."""

    def __init__(self, state_dim: int, action_dim: int, hidden_dim: int = 256):
        super(DQNNetwork, self).__init__()

        # Two dropout-regularized hidden blocks, a narrowing layer, and a
        # linear Q-value head.
        layers = [
            nn.Linear(state_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, action_dim),
        ]
        self.network = nn.Sequential(*layers)

        # Apply Xavier initialization to every linear layer
        self.apply(self._init_weights)

    def _init_weights(self, module):
        # Xavier-uniform weights plus a small positive bias
        if isinstance(module, nn.Linear):
            torch.nn.init.xavier_uniform_(module.weight)
            module.bias.data.fill_(0.01)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return Q-values of shape (batch, action_dim)."""
        return self.network(x)

class DQNAgent:
    """DQN agent over a discretized version of the continuous action space.

    The continuous position target in [-1, 1] is discretized into
    ``action_dim`` evenly spaced levels; the Q-network scores each level and
    actions are mapped back to the continuous space for the environment.
    """
    
    def __init__(self, state_dim: int, action_dim: int, device: torch.device,
                 learning_rate: float = 0.001, gamma: float = 0.99,
                 epsilon: float = 1.0, epsilon_decay: float = 0.995,
                 epsilon_min: float = 0.01, batch_size: int = 32,
                 target_update_freq: int = 1000):
        
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.device = device
        self.gamma = gamma
        self.epsilon = epsilon
        self.epsilon_decay = epsilon_decay
        self.epsilon_min = epsilon_min
        self.batch_size = batch_size
        self.target_update_freq = target_update_freq
        
        # Online and target Q-networks
        self.q_network = DQNNetwork(state_dim, action_dim).to(device)
        self.target_network = DQNNetwork(state_dim, action_dim).to(device)
        self.optimizer = optim.Adam(self.q_network.parameters(), lr=learning_rate)
        
        # Start with the target network identical to the online network
        self.update_target_network()
        
        # Experience replay
        self.replay_buffer = ReplayBuffer()
        
        # Training statistics
        self.train_steps = 0
        self.losses = []
        
        # Discrete action levels mapped back to the continuous action space
        self.action_space = np.linspace(-1, 1, action_dim)
    
    def update_target_network(self):
        """Hard-copy the online weights into the target network."""
        self.target_network.load_state_dict(self.q_network.state_dict())
    
    def get_action(self, state: np.ndarray, training: bool = True) -> np.ndarray:
        """Epsilon-greedy selection; returns a 1-element continuous action array."""
        if training and random.random() < self.epsilon:
            # Explore: uniform random discrete action
            action_idx = random.randint(0, self.action_dim - 1)
        else:
            # Exploit: greedy w.r.t. the online Q-network
            with torch.no_grad():
                state_tensor = torch.FloatTensor(state).unsqueeze(0).to(self.device)
                q_values = self.q_network(state_tensor)
                action_idx = q_values.argmax().item()
        
        return np.array([self.action_space[action_idx]])
    
    def store_experience(self, state: np.ndarray, action: np.ndarray, reward: float,
                        next_state: np.ndarray, done: bool):
        """Store a transition; the continuous action is snapped to its nearest level."""
        action_idx = np.argmin(np.abs(self.action_space - action[0]))
        self.replay_buffer.push(state, action_idx, reward, next_state, done)
    
    def train(self):
        """One gradient step on a replay minibatch (no-op until the buffer is warm)."""
        if len(self.replay_buffer) < self.batch_size:
            return
        
        # Sample a minibatch of transitions
        experiences = self.replay_buffer.sample(self.batch_size)
        
        # Stack into contiguous ndarrays first: torch.FloatTensor on a list of
        # ndarrays is extremely slow (element-wise copy) and emits a warning;
        # np.stack + torch.as_tensor is the fast path.
        states = torch.as_tensor(
            np.stack([e.state for e in experiences]),
            dtype=torch.float32, device=self.device)
        actions = torch.as_tensor(
            np.asarray([e.action for e in experiences]),
            dtype=torch.int64, device=self.device)
        rewards = torch.as_tensor(
            np.asarray([e.reward for e in experiences]),
            dtype=torch.float32, device=self.device)
        next_states = torch.as_tensor(
            np.stack([e.next_state for e in experiences]),
            dtype=torch.float32, device=self.device)
        dones = torch.as_tensor(
            np.asarray([e.done for e in experiences]),
            dtype=torch.bool, device=self.device)
        
        # Q(s, a) for the actions actually taken
        current_q_values = self.q_network(states).gather(1, actions.unsqueeze(1))
        
        # Bootstrapped target from the frozen target network
        with torch.no_grad():
            max_next_q_values = self.target_network(next_states).max(1)[0]
            target_q_values = rewards + (self.gamma * max_next_q_values * ~dones)
        
        # squeeze(1), not squeeze(): a bare squeeze would collapse a
        # batch-of-one to a scalar and silently broadcast in the MSE.
        loss = F.mse_loss(current_q_values.squeeze(1), target_q_values)
        
        # Backprop with gradient clipping for stability
        self.optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.q_network.parameters(), 1.0)
        self.optimizer.step()
        
        # Bookkeeping
        self.losses.append(loss.item())
        self.train_steps += 1
        
        # Periodic hard target-network update
        if self.train_steps % self.target_update_freq == 0:
            self.update_target_network()
        
        # Anneal exploration toward epsilon_min
        self.epsilon = max(self.epsilon_min, self.epsilon * self.epsilon_decay)

class PPONetwork(nn.Module):
    """Shared-backbone PPO network with Gaussian policy and value heads."""

    def __init__(self, state_dim: int, action_dim: int = 1, hidden_dim: int = 256):
        super(PPONetwork, self).__init__()

        # Common feature extractor used by both heads
        self.shared = nn.Sequential(
            nn.Linear(state_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU()
        )

        # Policy head: per-dimension mean and (pre-softplus) std
        self.policy_mean = nn.Linear(hidden_dim, action_dim)
        self.policy_std = nn.Linear(hidden_dim, action_dim)

        # State-value head
        self.value = nn.Linear(hidden_dim, 1)

        # Orthogonal initialization with a small gain on every linear layer
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.orthogonal_(module.weight, gain=0.01)
            module.bias.data.fill_(0.0)

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Return (action_mean in [-1, 1], action_std > 0, state value)."""
        features = self.shared(x)

        # tanh bounds the mean; softplus (+eps) keeps the std strictly positive
        mean = torch.tanh(self.policy_mean(features))
        std = F.softplus(self.policy_std(features)) + 1e-6
        state_value = self.value(features)

        return mean, std, state_value

    def get_action_and_value(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Sample an action; return (action, log_prob, value, entropy)."""
        mean, std, state_value = self.forward(x)

        # Sample from the diagonal Gaussian defined by the policy head
        policy = Normal(mean, std)
        sampled = policy.sample()
        sampled_log_prob = policy.log_prob(sampled)

        return sampled, sampled_log_prob, state_value, policy.entropy()

class PPOAgent:
    """PPO agent with a clipped surrogate objective over a Gaussian policy."""
    
    def __init__(self, state_dim: int, device: torch.device,
                 learning_rate: float = 3e-4, gamma: float = 0.99,
                 clip_epsilon: float = 0.2, value_coef: float = 0.5,
                 entropy_coef: float = 0.01, max_grad_norm: float = 0.5):
        
        self.device = device
        self.gamma = gamma
        self.clip_epsilon = clip_epsilon
        self.value_coef = value_coef
        self.entropy_coef = entropy_coef
        self.max_grad_norm = max_grad_norm
        
        # Shared policy/value network
        self.network = PPONetwork(state_dim).to(device)
        self.optimizer = optim.Adam(self.network.parameters(), lr=learning_rate)
        
        # On-policy rollout storage (cleared after each train() call)
        self.states = []
        self.actions = []
        self.rewards = []
        self.values = []
        self.log_probs = []
        self.dones = []
        
        # Training statistics
        self.policy_losses = []
        self.value_losses = []
        self.entropy_losses = []
    
    def get_action(self, state: np.ndarray) -> Tuple[np.ndarray, float, float]:
        """Sample an action; return (action, log_prob, value_estimate)."""
        with torch.no_grad():
            state_tensor = torch.FloatTensor(state).unsqueeze(0).to(self.device)
            action, log_prob, value, entropy = self.network.get_action_and_value(state_tensor)
            
            # Clamp to the environment's action bounds.
            # NOTE(review): log_prob is for the unclamped sample; usually
            # acceptable, but strictly the clamped action's density differs.
            action = torch.clamp(action, -1, 1)
            
            return action.cpu().numpy()[0], log_prob.cpu().item(), value.cpu().item()
    
    def store_experience(self, state: np.ndarray, action: np.ndarray, reward: float,
                        value: float, log_prob: float, done: bool):
        """Append one transition to the current rollout."""
        self.states.append(state)
        self.actions.append(action)
        self.rewards.append(reward)
        self.values.append(value)
        self.log_probs.append(log_prob)
        self.dones.append(done)
    
    def compute_returns_and_advantages(self, next_value: float = 0) -> Tuple[List[float], List[float]]:
        """Discounted returns and (return - value) advantages for the rollout.

        Args:
            next_value: bootstrap value for the state after the last stored
                transition (0 when the episode ended).
        """
        returns = []
        advantages = []
        
        # Backward pass of discounted returns; dones reset the accumulator
        R = next_value
        for i in reversed(range(len(self.rewards))):
            R = self.rewards[i] + self.gamma * R * (1 - self.dones[i])
            returns.insert(0, R)
        
        # Simple advantage: return minus the stored value estimate
        for i in range(len(returns)):
            advantage = returns[i] - self.values[i]
            advantages.append(advantage)
        
        return returns, advantages
    
    def train(self, next_value: float = 0, epochs: int = 10):
        """Run `epochs` clipped-PPO updates on the stored rollout, then clear it."""
        if len(self.states) == 0:
            return
        
        # Returns and advantages for the whole rollout
        returns, advantages = self.compute_returns_and_advantages(next_value)
        
        # np.stack first: torch.FloatTensor on a list of ndarrays is slow
        states = torch.as_tensor(np.stack(self.states),
                                 dtype=torch.float32, device=self.device)
        actions = torch.as_tensor(np.stack(self.actions),
                                  dtype=torch.float32, device=self.device)
        old_log_probs = torch.FloatTensor(self.log_probs).to(self.device)  # shape (N,)
        returns = torch.FloatTensor(returns).to(self.device)
        advantages = torch.FloatTensor(advantages).to(self.device)
        
        # Normalize advantages for update stability
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
        
        # Several epochs over the same rollout (standard PPO)
        for epoch in range(epochs):
            action_mean, action_std, values = self.network(states)
            
            # Log-prob of the stored actions under the current policy.
            # Sum over the action dimension and flatten to (N,): the original
            # kept (N, 1) log-probs, which broadcast against the (N,) old
            # log-probs into an (N, N) ratio matrix and corrupted the loss.
            dist = Normal(action_mean, action_std)
            new_log_probs = dist.log_prob(actions).sum(dim=-1)
            entropy = dist.entropy()
            
            # Probability ratio between new and old policies, elementwise (N,)
            ratio = torch.exp(new_log_probs - old_log_probs)
            
            # Clipped surrogate policy loss
            surr1 = ratio * advantages
            surr2 = torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon) * advantages
            policy_loss = -torch.min(surr1, surr2).mean()
            
            # Value regression loss; squeeze(-1) keeps the batch dim at N == 1
            value_loss = F.mse_loss(values.squeeze(-1), returns)
            
            # Entropy bonus (negated so minimizing encourages exploration)
            entropy_loss = -entropy.mean()
            
            # Combined objective
            total_loss = policy_loss + self.value_coef * value_loss + self.entropy_coef * entropy_loss
            
            # Backprop with gradient clipping
            self.optimizer.zero_grad()
            total_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.network.parameters(), self.max_grad_norm)
            self.optimizer.step()
            
            # Record losses
            self.policy_losses.append(policy_loss.item())
            self.value_losses.append(value_loss.item())
            self.entropy_losses.append(entropy_loss.item())
        
        # Rollout is consumed; PPO is on-policy
        self.clear_experience()
    
    def clear_experience(self):
        """Drop the stored rollout."""
        self.states.clear()
        self.actions.clear()
        self.rewards.clear()
        self.values.clear()
        self.log_probs.clear()
        self.dones.clear()

class ActorCriticNetwork(nn.Module):
    """Actor-critic network with a shared backbone and two small heads."""

    def __init__(self, state_dim: int, hidden_dim: int = 256):
        super(ActorCriticNetwork, self).__init__()

        # Backbone shared by actor and critic
        self.shared_layers = nn.Sequential(
            nn.Linear(state_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU()
        )

        # Actor head: tanh-bounded action in [-1, 1]
        self.actor = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, 1),
            nn.Tanh()
        )

        # Critic head: scalar state value
        self.critic = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, 1)
        )

        # Orthogonal init (gain sqrt(2)) on every linear layer
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.orthogonal_(module.weight, gain=np.sqrt(2))
            module.bias.data.fill_(0.0)

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return (action in [-1, 1], state value), both shaped (batch, 1)."""
        features = self.shared_layers(x)
        return self.actor(features), self.critic(features)

class A2CAgent:
    """A2C-style agent over a deterministic actor with Gaussian exploration noise."""
    
    def __init__(self, state_dim: int, device: torch.device,
                 learning_rate: float = 0.001, gamma: float = 0.99,
                 value_coef: float = 0.5, entropy_coef: float = 0.01):
        
        self.device = device
        self.gamma = gamma
        self.value_coef = value_coef
        # NOTE(review): entropy_coef is accepted but unused — the policy is
        # deterministic-plus-noise, so there is no entropy term in train().
        self.entropy_coef = entropy_coef
        
        # Shared actor-critic network
        self.network = ActorCriticNetwork(state_dim).to(device)
        self.optimizer = optim.Adam(self.network.parameters(), lr=learning_rate)
        
        # On-policy rollout storage (cleared after each train() call)
        self.states = []
        self.actions = []
        self.rewards = []
        self.values = []
        self.log_probs = []
        self.dones = []
        
        # Training statistics
        self.losses = []
        self.actor_losses = []
        self.critic_losses = []
    
    def get_action(self, state: np.ndarray) -> Tuple[np.ndarray, float, float]:
        """Actor output plus N(0, 0.1) exploration noise; returns (action, log_prob, value)."""
        state_tensor = torch.FloatTensor(state).unsqueeze(0).to(self.device)
        
        with torch.no_grad():
            action, value = self.network(state_tensor)
            
            # Gaussian exploration noise, clamped back into the action bounds
            noise = torch.randn_like(action) * 0.1
            action = torch.clamp(action + noise, -1, 1)
            
            # Rough (unnormalized) log-prob of the sampled noise; kept for
            # interface compatibility, not used by train() below.
            log_prob = -0.5 * (noise ** 2).sum(dim=1)
            
            return action.cpu().numpy()[0], log_prob.cpu().item(), value.cpu().item()
    
    def store_experience(self, state: np.ndarray, action: np.ndarray, reward: float,
                        value: float, log_prob: float, done: bool):
        """Append one transition to the current rollout."""
        self.states.append(state)
        self.actions.append(action)
        self.rewards.append(reward)
        self.values.append(value)
        self.log_probs.append(log_prob)
        self.dones.append(done)
    
    def compute_returns(self, next_value: float = 0) -> List[float]:
        """Discounted returns for the rollout; dones reset the accumulator."""
        returns = []
        R = next_value
        
        for i in reversed(range(len(self.rewards))):
            R = self.rewards[i] + self.gamma * R * (1 - self.dones[i])
            returns.insert(0, R)
        
        return returns
    
    def train(self, next_value: float = 0):
        """One actor-critic update on the stored rollout, then clear it."""
        if len(self.states) == 0:
            return
        
        # Discounted returns
        returns = self.compute_returns(next_value)
        
        # np.stack first: torch.FloatTensor on a list of ndarrays is slow
        states = torch.as_tensor(np.stack(self.states),
                                 dtype=torch.float32, device=self.device)
        actions = torch.as_tensor(np.stack(self.actions),
                                  dtype=torch.float32, device=self.device)
        returns = torch.FloatTensor(returns).to(self.device)
        old_values = torch.FloatTensor(self.values).to(self.device)
        
        # Current policy output and value estimates
        pred_actions, pred_values = self.network(states)
        
        # Advantage from stored values (detached targets)
        advantages = returns - old_values
        
        # Gaussian log-prob of the stored (noisy) actions under the current
        # actor with the exploration std used in get_action. This carries
        # gradient through pred_actions — the original multiplied stored
        # no-grad log-probs by detached advantages, making the actor loss a
        # constant, so the actor never received a learning signal.
        exploration_std = 0.1
        new_log_probs = -0.5 * (((actions - pred_actions) / exploration_std) ** 2).sum(dim=1)
        actor_loss = -(new_log_probs * advantages.detach()).mean()
        
        # Critic regression loss; squeeze(-1) keeps the batch dim at N == 1
        critic_loss = F.mse_loss(pred_values.squeeze(-1), returns)
        
        # Combined objective
        total_loss = actor_loss + self.value_coef * critic_loss
        
        # Backprop with gradient clipping
        self.optimizer.zero_grad()
        total_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.network.parameters(), 1.0)
        self.optimizer.step()
        
        # Record losses
        self.losses.append(total_loss.item())
        self.actor_losses.append(actor_loss.item())
        self.critic_losses.append(critic_loss.item())
        
        # Rollout is consumed; A2C is on-policy
        self.clear_experience()
    
    def clear_experience(self):
        """Drop the stored rollout."""
        self.states.clear()
        self.actions.clear()
        self.rewards.clear()
        self.values.clear()
        self.log_probs.clear()
        self.dones.clear()

class DDPGActor(nn.Module):
    """Deterministic DDPG policy: state -> action in [-1, 1]."""

    def __init__(self, state_dim: int, action_dim: int = 1, hidden_dim: int = 256):
        super(DDPGActor, self).__init__()

        # Two hidden ReLU layers; tanh bounds the output action
        layers = [
            nn.Linear(state_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, action_dim),
            nn.Tanh(),
        ]
        self.network = nn.Sequential(*layers)

        # Small uniform init keeps early outputs near zero
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.uniform_(module.weight, -0.003, 0.003)
            module.bias.data.fill_(0.0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return actions of shape (batch, action_dim), bounded in [-1, 1]."""
        return self.network(x)

class DDPGCritic(nn.Module):
    """DDPG Q-function: (state, action) -> scalar Q-value."""

    def __init__(self, state_dim: int, action_dim: int = 1, hidden_dim: int = 256):
        super(DDPGCritic, self).__init__()

        # State and action are concatenated before the first layer
        layers = [
            nn.Linear(state_dim + action_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        ]
        self.network = nn.Sequential(*layers)

        # Small uniform init keeps early Q-estimates near zero
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.uniform_(module.weight, -0.003, 0.003)
            module.bias.data.fill_(0.0)

    def forward(self, state: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
        """Return Q-values of shape (batch, 1)."""
        joint = torch.cat([state, action], dim=1)
        return self.network(joint)

class OUNoise:
    """Ornstein-Uhlenbeck process for temporally correlated exploration noise."""

    def __init__(self, size: int, mu: float = 0.0, theta: float = 0.15, sigma: float = 0.2):
        self.mu = mu * np.ones(size)
        self.theta = theta
        self.sigma = sigma
        self.state = None
        self.reset()

    def reset(self):
        """Restart the process at its long-run mean."""
        self.state = np.copy(self.mu)

    def sample(self) -> np.ndarray:
        """Advance one step: x += theta * (mu - x) + sigma * N(0, 1)."""
        drift = self.theta * (self.mu - self.state)
        shock = self.sigma * np.random.randn(len(self.state))
        self.state = self.state + drift + shock
        return self.state

class DDPGAgent:
    """DDPG agent: deterministic actor + Q-critic with target networks,
    Ornstein-Uhlenbeck exploration noise, and an experience replay buffer.

    Fix vs. previous version: the TD target in ``train`` is now computed with
    (batch, 1)-shaped rewards/dones. The old (batch,) vectors broadcast
    against the critic's (batch, 1) output into a (batch, batch) matrix,
    silently corrupting the critic loss.
    """

    def __init__(self, state_dim: int, device: torch.device,
                 actor_lr: float = 0.001, critic_lr: float = 0.002,
                 gamma: float = 0.99, tau: float = 0.005,
                 batch_size: int = 64, buffer_size: int = 100000):
        self.device = device
        self.gamma = gamma          # discount factor
        self.tau = tau              # Polyak averaging coefficient
        self.batch_size = batch_size

        # Online and target networks
        self.actor = DDPGActor(state_dim).to(device)
        self.critic = DDPGCritic(state_dim).to(device)
        self.target_actor = DDPGActor(state_dim).to(device)
        self.target_critic = DDPGCritic(state_dim).to(device)

        # Optimizers
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_lr)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=critic_lr)

        # Start the targets as exact copies of the online networks
        self.hard_update(self.target_actor, self.actor)
        self.hard_update(self.target_critic, self.critic)

        # Exploration noise (1-D action)
        self.noise = OUNoise(1)

        # Experience replay
        self.replay_buffer = ReplayBuffer(buffer_size)

        # Training statistics
        self.actor_losses = []
        self.critic_losses = []

    def hard_update(self, target: nn.Module, source: nn.Module):
        """Copy source parameters into target verbatim."""
        for target_param, param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_(param.data)

    def soft_update(self, target: nn.Module, source: nn.Module):
        """Polyak-average source into target: t <- (1 - tau) * t + tau * s."""
        for target_param, param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_(target_param.data * (1.0 - self.tau) + param.data * self.tau)

    def get_action(self, state: np.ndarray, training: bool = True) -> np.ndarray:
        """Return the actor's action for ``state``; adds OU noise when training."""
        with torch.no_grad():
            state_tensor = torch.FloatTensor(state).unsqueeze(0).to(self.device)
            action = self.actor(state_tensor).cpu().numpy()[0]

            if training:
                # float64 OU noise would promote the action; clip back into
                # the valid range and cast to float32 to match the env's
                # Box(dtype=float32) action space.
                action = np.clip(action + self.noise.sample(), -1, 1).astype(np.float32)

            return action

    def store_experience(self, state: np.ndarray, action: np.ndarray, reward: float,
                        next_state: np.ndarray, done: bool):
        """Append one transition to the replay buffer."""
        self.replay_buffer.push(state, action, reward, next_state, done)

    def train(self):
        """Run one critic + actor gradient step from a replay mini-batch.

        No-op until the buffer holds at least ``batch_size`` transitions.
        """
        if len(self.replay_buffer) < self.batch_size:
            return

        experiences = self.replay_buffer.sample(self.batch_size)

        # Stack into contiguous numpy arrays first — much faster than letting
        # torch convert a Python list of per-step arrays element by element.
        states = torch.FloatTensor(np.array([e.state for e in experiences])).to(self.device)
        actions = torch.FloatTensor(np.array([e.action for e in experiences])).to(self.device)
        next_states = torch.FloatTensor(np.array([e.next_state for e in experiences])).to(self.device)
        # Keep rewards/dones as (batch, 1) columns so they broadcast against
        # the critic's (batch, 1) output instead of exploding to (batch, batch).
        rewards = torch.FloatTensor([e.reward for e in experiences]).unsqueeze(1).to(self.device)
        dones = torch.BoolTensor([e.done for e in experiences]).unsqueeze(1).to(self.device)

        # --- Critic update: TD target from the target networks ---
        with torch.no_grad():
            next_actions = self.target_actor(next_states)
            next_q_values = self.target_critic(next_states, next_actions)
            # Terminal transitions (~dones == False) get no bootstrap term.
            target_q_values = rewards + (self.gamma * next_q_values * ~dones)

        current_q_values = self.critic(states, actions)
        critic_loss = F.mse_loss(current_q_values, target_q_values)

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        # --- Actor update: maximize Q of the actor's own actions ---
        pred_actions = self.actor(states)
        actor_loss = -self.critic(states, pred_actions).mean()

        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()

        # Polyak-average the target networks toward the online ones
        self.soft_update(self.target_actor, self.actor)
        self.soft_update(self.target_critic, self.critic)

        # Record losses for diagnostics
        self.actor_losses.append(actor_loss.item())
        self.critic_losses.append(critic_loss.item())

class RLTrainer:
    """Orchestrates training of the four RL agents (DQN/PPO/A2C/DDPG) on a
    TradingEnvironment and logs per-episode metrics to TensorBoard."""
    
    def __init__(self, env: TradingEnvironment, device: torch.device):
        self.env = env
        self.device = device
        self.training_history = {}
        
        # TensorBoard event log.
        # NOTE(review): the writer is never closed; consider calling
        # self.writer.close() once training finishes.
        self.writer = SummaryWriter('logs/rl_training')
    
    def train_dqn(self, episodes: int = 1000, save_interval: int = 100) -> Dict:
        """Train a DQN agent; returns the agent plus per-episode statistics."""
        print(f"🚀 开始训练DQN智能体 ({episodes}轮)...")
        
        # Discrete-action agent. The 21 actions presumably discretize the
        # env's continuous [-1, 1] action range — TODO confirm in DQNAgent.
        agent = DQNAgent(
            state_dim=self.env.state_dim,
            action_dim=21,  # 21 discrete actions
            device=self.device
        )
        
        # Per-episode statistics
        episode_rewards = []
        episode_balances = []
        episode_trades = []
        
        for episode in range(episodes):
            state, _ = self.env.reset()
            total_reward = 0
            steps = 0
            
            while True:
                # Epsilon-greedy action (training=True enables exploration)
                action = agent.get_action(state, training=True)
                
                # Step the environment (gymnasium 5-tuple API)
                next_state, reward, done, _, info = self.env.step(action)
                
                # Store the transition in the replay buffer
                agent.store_experience(state, action, reward, next_state, done)
                
                # One gradient update per environment step
                agent.train()
                
                # Advance to the next state
                state = next_state
                total_reward += reward
                steps += 1
                
                if done:
                    break
            
            # Record statistics (`info` comes from the final env step)
            episode_rewards.append(total_reward)
            episode_balances.append(info['balance'])
            episode_trades.append(info['total_trades'])
            
            # TensorBoard scalars
            self.writer.add_scalar('DQN/Episode_Reward', total_reward, episode)
            self.writer.add_scalar('DQN/Final_Balance', info['balance'], episode)
            self.writer.add_scalar('DQN/Total_Trades', info['total_trades'], episode)
            self.writer.add_scalar('DQN/Max_Drawdown', info['max_drawdown'], episode)
            self.writer.add_scalar('DQN/Epsilon', agent.epsilon, episode)
            
            # Periodic progress output (last-100-episode averages)
            if (episode + 1) % 100 == 0:
                avg_reward = np.mean(episode_rewards[-100:])
                avg_balance = np.mean(episode_balances[-100:])
                print(f"   Episode {episode + 1}/{episodes}, "
                      f"平均奖励: {avg_reward:.2f}, "
                      f"平均余额: ${avg_balance:.2f}, "
                      f"Epsilon: {agent.epsilon:.3f}")
            
            # Periodic checkpoint
            if (episode + 1) % save_interval == 0:
                self.save_model(agent, f'dqn_episode_{episode + 1}')
        
        return {
            'agent': agent,
            'episode_rewards': episode_rewards,
            'episode_balances': episode_balances,
            'episode_trades': episode_trades
        }
    
    def train_ppo(self, episodes: int = 1000, update_interval: int = 20) -> Dict:
        """Train a PPO agent; returns the agent plus per-episode statistics."""
        print(f"🚀 开始训练PPO智能体 ({episodes}轮)...")
        
        # Create the agent
        agent = PPOAgent(
            state_dim=self.env.state_dim,
            device=self.device
        )
        
        # Per-episode statistics
        episode_rewards = []
        episode_balances = []
        episode_trades = []
        
        for episode in range(episodes):
            state, _ = self.env.reset()
            total_reward = 0
            steps = 0
            
            while True:
                # Sample an action with its log-prob and value estimate
                action, log_prob, value = agent.get_action(state)
                
                # Step the environment
                next_state, reward, done, _, info = self.env.step(action)
                
                # Store the transition.
                # NOTE(review): experiences presumably accumulate inside the
                # agent across episodes until train() — confirm in PPOAgent.
                agent.store_experience(state, action, reward, value, log_prob, done)
                
                # Advance to the next state
                state = next_state
                total_reward += reward
                steps += 1
                
                if done:
                    break
            
            # On-policy batch update every `update_interval` episodes
            if (episode + 1) % update_interval == 0:
                agent.train()
            
            # Record statistics
            episode_rewards.append(total_reward)
            episode_balances.append(info['balance'])
            episode_trades.append(info['total_trades'])
            
            # TensorBoard scalars
            self.writer.add_scalar('PPO/Episode_Reward', total_reward, episode)
            self.writer.add_scalar('PPO/Final_Balance', info['balance'], episode)
            self.writer.add_scalar('PPO/Total_Trades', info['total_trades'], episode)
            self.writer.add_scalar('PPO/Max_Drawdown', info['max_drawdown'], episode)
            
            # Periodic progress output
            if (episode + 1) % 100 == 0:
                avg_reward = np.mean(episode_rewards[-100:])
                avg_balance = np.mean(episode_balances[-100:])
                print(f"   Episode {episode + 1}/{episodes}, "
                      f"平均奖励: {avg_reward:.2f}, "
                      f"平均余额: ${avg_balance:.2f}")
        
        return {
            'agent': agent,
            'episode_rewards': episode_rewards,
            'episode_balances': episode_balances,
            'episode_trades': episode_trades
        }
    
    def train_a2c(self, episodes: int = 1000, update_interval: int = 10) -> Dict:
        """Train an A2C agent; returns the agent plus per-episode statistics."""
        print(f"🚀 开始训练A2C智能体 ({episodes}轮)...")
        
        # Create the agent
        agent = A2CAgent(
            state_dim=self.env.state_dim,
            device=self.device
        )
        
        # Per-episode statistics
        episode_rewards = []
        episode_balances = []
        episode_trades = []
        
        for episode in range(episodes):
            state, _ = self.env.reset()
            total_reward = 0
            steps = 0
            
            while True:
                # Sample an action with its log-prob and value estimate
                action, log_prob, value = agent.get_action(state)
                
                # Step the environment
                next_state, reward, done, _, info = self.env.step(action)
                
                # Store the transition for the next batch update
                agent.store_experience(state, action, reward, value, log_prob, done)
                
                # Advance to the next state
                state = next_state
                total_reward += reward
                steps += 1
                
                if done:
                    break
            
            # Batch update every `update_interval` episodes
            if (episode + 1) % update_interval == 0:
                agent.train()
            
            # Record statistics
            episode_rewards.append(total_reward)
            episode_balances.append(info['balance'])
            episode_trades.append(info['total_trades'])
            
            # TensorBoard scalars.
            # NOTE(review): unlike the other trainers, no periodic console
            # progress is printed here — intentional? 
            self.writer.add_scalar('A2C/Episode_Reward', total_reward, episode)
            self.writer.add_scalar('A2C/Final_Balance', info['balance'], episode)
            self.writer.add_scalar('A2C/Total_Trades', info['total_trades'], episode)
            self.writer.add_scalar('A2C/Max_Drawdown', info['max_drawdown'], episode)
        
        return {
            'agent': agent,
            'episode_rewards': episode_rewards,
            'episode_balances': episode_balances,
            'episode_trades': episode_trades
        }
    
    def train_ddpg(self, episodes: int = 1000) -> Dict:
        """Train a DDPG agent; returns the agent plus per-episode statistics."""
        print(f"🚀 开始训练DDPG智能体 ({episodes}轮)...")
        
        # Create the agent
        agent = DDPGAgent(
            state_dim=self.env.state_dim,
            device=self.device
        )
        
        # Per-episode statistics
        episode_rewards = []
        episode_balances = []
        episode_trades = []
        
        for episode in range(episodes):
            state, _ = self.env.reset()
            # Restart the OU process so exploration noise is uncorrelated
            # across episodes
            agent.noise.reset()
            total_reward = 0
            steps = 0
            
            while True:
                # Deterministic action plus OU exploration noise
                action = agent.get_action(state, training=True)
                
                # Step the environment
                next_state, reward, done, _, info = self.env.step(action)
                
                # Store the transition in the replay buffer
                agent.store_experience(state, action, reward, next_state, done)
                
                # One off-policy gradient update per environment step
                agent.train()
                
                # Advance to the next state
                state = next_state
                total_reward += reward
                steps += 1
                
                if done:
                    break
            
            # Record statistics
            episode_rewards.append(total_reward)
            episode_balances.append(info['balance'])
            episode_trades.append(info['total_trades'])
            
            # TensorBoard scalars
            self.writer.add_scalar('DDPG/Episode_Reward', total_reward, episode)
            self.writer.add_scalar('DDPG/Final_Balance', info['balance'], episode)
            self.writer.add_scalar('DDPG/Total_Trades', info['total_trades'], episode)
            self.writer.add_scalar('DDPG/Max_Drawdown', info['max_drawdown'], episode)
            
            # Periodic progress output
            if (episode + 1) % 100 == 0:
                avg_reward = np.mean(episode_rewards[-100:])
                avg_balance = np.mean(episode_balances[-100:])
                print(f"   Episode {episode + 1}/{episodes}, "
                      f"平均奖励: {avg_reward:.2f}, "
                      f"平均余额: ${avg_balance:.2f}")
        
        return {
            'agent': agent,
            'episode_rewards': episode_rewards,
            'episode_balances': episode_balances,
            'episode_trades': episode_trades
        }
    
    def save_model(self, agent: Any, name: str):
        """Checkpoint an agent's networks/optimizers to models/<name>.pth.

        The saved dict layout depends on the agent type; unknown agent types
        are silently not saved (only the confirmation line is printed).
        """
        Path("models").mkdir(exist_ok=True)
        
        if isinstance(agent, DQNAgent):
            torch.save({
                'q_network': agent.q_network.state_dict(),
                'target_network': agent.target_network.state_dict(),
                'optimizer': agent.optimizer.state_dict(),
                'epsilon': agent.epsilon
            }, f"models/{name}.pth")
        
        elif isinstance(agent, PPOAgent):
            torch.save({
                'network': agent.network.state_dict(),
                'optimizer': agent.optimizer.state_dict()
            }, f"models/{name}.pth")
        
        elif isinstance(agent, A2CAgent):
            torch.save({
                'network': agent.network.state_dict(),
                'optimizer': agent.optimizer.state_dict()
            }, f"models/{name}.pth")
        
        elif isinstance(agent, DDPGAgent):
            torch.save({
                'actor': agent.actor.state_dict(),
                'critic': agent.critic.state_dict(),
                'target_actor': agent.target_actor.state_dict(),
                'target_critic': agent.target_critic.state_dict(),
                'actor_optimizer': agent.actor_optimizer.state_dict(),
                'critic_optimizer': agent.critic_optimizer.state_dict()
            }, f"models/{name}.pth")
        
        print(f"   💾 模型已保存: {name}.pth")

class RLEvaluator:
    """Runs greedy (no-exploration) rollouts of a trained agent on a held-out
    environment and summarizes performance into metrics and a text report."""

    def __init__(self, env: TradingEnvironment):
        self.env = env

    def evaluate_agent(self, agent: Any, episodes: int = 100) -> Dict:
        """Roll the agent through full episodes and aggregate reward, balance,
        return, Sharpe-ratio and drawdown statistics into a metrics dict."""
        print(f"📊 评估智能体 ({episodes}轮)...")

        rewards_log = []
        balances_log = []
        trades_log = []
        returns_log = []
        sharpe_log = []
        drawdown_log = []

        for _ in range(episodes):
            state, _ = self.env.reset()
            cumulative_reward = 0
            done = False

            while not done:
                # Greedy action selection — no exploration at evaluation time.
                if isinstance(agent, (DQNAgent, DDPGAgent)):
                    action = agent.get_action(state, training=False)
                elif isinstance(agent, (PPOAgent, A2CAgent)):
                    action, _, _ = agent.get_action(state)
                else:
                    # Unknown agent type: hold (neutral action).
                    action = np.array([0.0])

                state, reward, done, _, info = self.env.step(action)
                cumulative_reward += reward

            rewards_log.append(cumulative_reward)
            balances_log.append(info['balance'])
            trades_log.append(info['total_trades'])

            # Episode return relative to starting capital.
            returns_log.append(
                (info['balance'] - self.env.initial_balance) / self.env.initial_balance
            )

            # Annualized Sharpe ratio from the per-step balance curve.
            history = np.asarray(self.env.balance_history)
            sharpe = 0
            if history.size > 1:
                step_returns = np.diff(history) / history[:-1]
                volatility = np.std(step_returns)
                if volatility > 0:
                    sharpe = np.mean(step_returns) / volatility * np.sqrt(252)
            sharpe_log.append(sharpe)

            drawdown_log.append(info['max_drawdown'])

        # Aggregate across all evaluation episodes.
        return {
            'avg_reward': np.mean(rewards_log),
            'std_reward': np.std(rewards_log),
            'avg_balance': np.mean(balances_log),
            'std_balance': np.std(balances_log),
            'avg_trades': np.mean(trades_log),
            'avg_return': np.mean(returns_log),
            'std_return': np.std(returns_log),
            'avg_sharpe_ratio': np.mean(sharpe_log),
            'avg_max_drawdown': np.mean(drawdown_log),
            'win_rate': np.mean(np.array(returns_log) > 0),
            'episode_rewards': rewards_log,
            'episode_balances': balances_log,
            'episode_returns': returns_log
        }

    def generate_evaluation_report(self, results: Dict, agent_name: str) -> str:
        """Render an evaluation-metrics dict as a human-readable report,
        ending with a 0-8 point star rating."""
        report = f"""
🎯 {agent_name} 智能体评估报告
{'='*60}
📊 基础指标:
   平均奖励: {results['avg_reward']:.2f} ± {results['std_reward']:.2f}
   平均余额: ${results['avg_balance']:.2f} ± ${results['std_balance']:.2f}
   平均交易次数: {results['avg_trades']:.1f}

💰 收益指标:
   平均收益率: {results['avg_return']:.2%}
   收益波动率: {results['std_return']:.2%}
   夏普比率: {results['avg_sharpe_ratio']:.3f}
   胜率: {results['win_rate']:.1%}

⚠️ 风险指标:
   平均最大回撤: {results['avg_max_drawdown']:.2%}

📈 表现评级:
"""

        # Two points for the strong threshold, one for the weak one, over four
        # metrics (drawdown is scored lower-is-better).
        score = 2 if results['avg_return'] > 0.1 else 1 if results['avg_return'] > 0.05 else 0
        score += 2 if results['avg_sharpe_ratio'] > 1.0 else 1 if results['avg_sharpe_ratio'] > 0.5 else 0
        score += 2 if results['avg_max_drawdown'] < 0.1 else 1 if results['avg_max_drawdown'] < 0.2 else 0
        score += 2 if results['win_rate'] > 0.6 else 1 if results['win_rate'] > 0.5 else 0

        # Map the total score onto a star rating.
        for threshold, label in ((7, "优秀 ⭐⭐⭐⭐⭐"), (5, "良好 ⭐⭐⭐⭐"), (3, "一般 ⭐⭐⭐")):
            if score >= threshold:
                rating = label
                break
        else:
            rating = "需改进 ⭐⭐"

        report += f"   {rating}\n"

        return report

def compare_rl_algorithms(results_dict: Dict[str, Dict]) -> str:
    """Build a human-readable comparison report across RL algorithms.

    Args:
        results_dict: Mapping of algorithm name to its evaluation-metrics dict
            (must contain 'avg_return', 'avg_sharpe_ratio', 'avg_max_drawdown',
            'win_rate' and 'avg_trades').

    Returns:
        A formatted multi-line report; the table is sorted by average return,
        descending.
    """
    # BUGFIX: this header must be an f-string so {'='*60} renders as a rule
    # line — the previous plain string printed the braces literally.
    comparison_report = f"""
🔥 强化学习算法对比分析
{'='*60}
"""
    
    # One pre-formatted row per algorithm.
    comparison_data = []
    for algo_name, results in results_dict.items():
        comparison_data.append([
            algo_name,
            f"{results['avg_return']:.2%}",
            f"{results['avg_sharpe_ratio']:.3f}",
            f"{results['avg_max_drawdown']:.2%}",
            f"{results['win_rate']:.1%}",
            f"{results['avg_trades']:.1f}"
        ])
    
    # Sort by average return, descending (strip the '%' added above).
    comparison_data.sort(key=lambda x: float(x[1].strip('%')), reverse=True)
    
    comparison_report += f"""
📊 算法对比表:
{'算法':<10} {'平均收益率':<10} {'夏普比率':<10} {'最大回撤':<10} {'胜率':<10} {'交易次数':<10}
{'-'*70}
"""
    
    for row in comparison_data:
        comparison_report += f"{row[0]:<10} {row[1]:<10} {row[2]:<10} {row[3]:<10} {row[4]:<10} {row[5]:<10}\n"
    
    # Qualitative notes on each algorithm family.
    comparison_report += f"""
🎯 算法特点分析:
   DQN: 离散动作空间，适合明确的买卖信号
   PPO: 策略梯度方法，训练稳定，适合连续控制
   A2C: 快速收敛，但可能不如PPO稳定
   DDPG: 连续动作空间，适合精确的仓位控制

💡 使用建议:
   • 对于初学者，建议使用PPO
   • 对于精确控制，建议使用DDPG
   • 对于快速原型，建议使用A2C
   • 对于简单策略，建议使用DQN
"""
    
    return comparison_report

def main():
    """Entry point: fetch data, build train/test environments, train all four
    RL algorithms, evaluate, compare, visualize, and save the best model."""
    if not PYTORCH_AVAILABLE:
        print("❌ PyTorch或相关库未安装")
        return
    
    print("🚀 强化学习交易系统启动 - PyTorch版本")
    print(f"🔥 使用设备: {device}")
    
    try:
        # 1. Data preparation
        print("\n📊 准备训练数据...")
        market_service = MarketDataService()
        
        # Fetch 2 years of daily history for AAPL
        end_date = datetime.now()
        start_date = end_date - timedelta(days=365*2)  # 2 years of data
        
        data = market_service.get_stock_data(
            symbol="AAPL",
            start_date=start_date.strftime('%Y-%m-%d'),
            end_date=end_date.strftime('%Y-%m-%d')
        )
        
        # Bail out early if the service returned nothing usable
        if data is None or len(data) < 100:
            print("❌ 数据获取失败或数据量不足")
            return
        
        print(f"   ✅ 获取数据: {len(data)}条记录")
        
        # Chronological 80/20 train/test split (no shuffling)
        train_size = int(len(data) * 0.8)
        train_data = data[:train_size].copy()
        test_data = data[train_size:].copy()
        
        print(f"   📈 训练数据: {len(train_data)}条")
        print(f"   📉 测试数据: {len(test_data)}条")
        
        # 2. Build separate train/test environments with identical settings
        print("\n🏗️ 创建交易环境...")
        train_env = TradingEnvironment(
            data=train_data,
            initial_balance=100000,
            transaction_cost=0.001,
            max_position=1.0,
            lookback_window=30
        )
        
        test_env = TradingEnvironment(
            data=test_data,
            initial_balance=100000,
            transaction_cost=0.001,
            max_position=1.0,
            lookback_window=30
        )
        
        print(f"   ✅ 状态维度: {train_env.state_dim}")
        print(f"   ✅ 动作空间: {train_env.action_space}")
        
        # 3. Trainer works on the train env, evaluator on the held-out test env
        trainer = RLTrainer(train_env, device)
        evaluator = RLEvaluator(test_env)
        
        # 4. Train each algorithm in turn; failures skip to the next one
        algorithms_to_train = ['DQN', 'PPO', 'A2C', 'DDPG']
        training_results = {}
        evaluation_results = {}
        
        for algo in algorithms_to_train:
            print(f"\n{'='*60}")
            print(f"🎯 训练 {algo} 算法")
            print(f"{'='*60}")
            
            try:
                if algo == 'DQN':
                    results = trainer.train_dqn(episodes=500)
                elif algo == 'PPO':
                    results = trainer.train_ppo(episodes=500)
                elif algo == 'A2C':
                    results = trainer.train_a2c(episodes=500)
                elif algo == 'DDPG':
                    results = trainer.train_ddpg(episodes=500)
                
                training_results[algo] = results
                
                # Evaluate the trained agent on the test environment
                print(f"\n📊 评估 {algo} 智能体...")
                eval_results = evaluator.evaluate_agent(results['agent'], episodes=50)
                evaluation_results[algo] = eval_results
                
                # Per-algorithm evaluation report
                report = evaluator.generate_evaluation_report(eval_results, algo)
                print(report)
                
            except Exception as e:
                # Best-effort: a failed algorithm must not abort the rest
                print(f"❌ {algo} 训练失败: {str(e)}")
                continue
        
        # 5. Cross-algorithm comparison (needs at least two successes)
        if len(evaluation_results) > 1:
            print("\n" + "="*60)
            comparison_report = compare_rl_algorithms(evaluation_results)
            print(comparison_report)
        
        # 6. Charts (training curves, comparisons, distributions)
        print("\n📈 生成可视化图表...")
        visualize_rl_results(training_results, evaluation_results)
        
        # 7. Persist the model with the highest average test return
        if evaluation_results:
            best_algo = max(evaluation_results.keys(), 
                          key=lambda x: evaluation_results[x]['avg_return'])
            best_agent = training_results[best_algo]['agent']
            trainer.save_model(best_agent, f'best_{best_algo.lower()}_model')
            print(f"🏆 最佳算法: {best_algo}")
            print(f"💾 最佳模型已保存")
        
        print("\n✅ 强化学习交易系统训练完成！")
        
    except Exception as e:
        # Top-level boundary: report and dump the traceback instead of crashing
        print(f"❌ 系统运行错误: {str(e)}")
        import traceback
        traceback.print_exc()

def visualize_rl_results(training_results: Dict, evaluation_results: Dict):
    """Render training curves, metric comparisons, return distributions and a
    risk/return scatter to PNG files under charts/.

    Best-effort: missing matplotlib or any plotting error is reported, not
    raised. NOTE(review): the 2x2 subplot grids assume at most 4 algorithms.
    """
    try:
        import matplotlib.pyplot as plt
        import seaborn as sns
        
        # CJK-capable fonts and seaborn styling
        plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'DejaVu Sans']
        plt.rcParams['axes.unicode_minus'] = False
        sns.set_style("whitegrid")
        
        # Output directory for all charts
        Path("charts").mkdir(exist_ok=True)
        
        # 1. Training reward curves (one subplot per algorithm)
        plt.figure(figsize=(15, 10))
        
        for i, (algo, results) in enumerate(training_results.items(), 1):
            plt.subplot(2, 2, i)
            rewards = results['episode_rewards']
            
            # Moving average smooths the noisy per-episode rewards;
            # skipped when there are too few episodes (window <= 1)
            window = min(50, len(rewards) // 10)
            if window > 1:
                moving_avg = pd.Series(rewards).rolling(window=window).mean()
                plt.plot(moving_avg, label=f'{algo} (移动平均)', linewidth=2)
            
            plt.plot(rewards, alpha=0.3, label=f'{algo} (原始)', linewidth=1)
            plt.title(f'{algo} 训练奖励曲线')
            plt.xlabel('Episode')
            plt.ylabel('Total Reward')
            plt.legend()
            plt.grid(True, alpha=0.3)
        
        plt.tight_layout()
        plt.savefig('charts/rl_training_rewards.png', dpi=300, bbox_inches='tight')
        plt.show()
        
        # 2. Bar-chart comparison of six evaluation metrics across algorithms
        if len(evaluation_results) > 1:
            fig, axes = plt.subplots(2, 3, figsize=(18, 12))
            
            algos = list(evaluation_results.keys())
            metrics = ['avg_return', 'avg_sharpe_ratio', 'avg_max_drawdown', 
                      'win_rate', 'avg_trades', 'std_return']
            metric_names = ['平均收益率', '夏普比率', '最大回撤', '胜率', '平均交易次数', '收益波动率']
            
            for i, (metric, name) in enumerate(zip(metrics, metric_names)):
                ax = axes[i//3, i%3]
                values = [evaluation_results[algo][metric] for algo in algos]
                
                bars = ax.bar(algos, values, alpha=0.7, 
                             color=plt.cm.Set3(np.linspace(0, 1, len(algos))))
                
                # Value labels on top of each bar (percent vs. plain format)
                for bar, value in zip(bars, values):
                    height = bar.get_height()
                    if metric in ['avg_return', 'avg_max_drawdown', 'win_rate', 'std_return']:
                        label = f'{value:.2%}'
                    else:
                        label = f'{value:.3f}' if metric == 'avg_sharpe_ratio' else f'{value:.1f}'
                    
                    ax.text(bar.get_x() + bar.get_width()/2., height,
                           label, ha='center', va='bottom', fontweight='bold')
                
                ax.set_title(name, fontsize=12, fontweight='bold')
                ax.set_ylabel(name)
                
                # Percent formatting on the y axis for ratio-type metrics
                if metric in ['avg_return', 'avg_max_drawdown', 'win_rate', 'std_return']:
                    ax.yaxis.set_major_formatter(plt.FuncFormatter(lambda x, p: f'{x:.1%}'))
                
                ax.grid(True, alpha=0.3)
            
            plt.tight_layout()
            plt.savefig('charts/rl_algorithms_comparison.png', dpi=300, bbox_inches='tight')
            plt.show()
        
        # 3. Per-algorithm distribution of episode returns
        plt.figure(figsize=(15, 8))
        
        for i, (algo, results) in enumerate(evaluation_results.items()):
            plt.subplot(2, 2, i+1)
            returns = results['episode_returns']
            
            # Histogram of evaluation-episode returns
            plt.hist(returns, bins=20, alpha=0.7, density=True, 
                    color=plt.cm.Set3(i), edgecolor='black', linewidth=0.5)
            
            # Mean and +/- one-standard-deviation reference lines
            mean_return = np.mean(returns)
            std_return = np.std(returns)
            plt.axvline(mean_return, color='red', linestyle='--', 
                       label=f'均值: {mean_return:.2%}', linewidth=2)
            plt.axvline(mean_return + std_return, color='orange', linestyle=':', 
                       label=f'+1σ: {mean_return + std_return:.2%}', linewidth=1.5)
            plt.axvline(mean_return - std_return, color='orange', linestyle=':', 
                       label=f'-1σ: {mean_return - std_return:.2%}', linewidth=1.5)
            
            plt.title(f'{algo} 收益率分布')
            plt.xlabel('收益率')
            plt.ylabel('密度')
            plt.legend()
            plt.grid(True, alpha=0.3)
            
            # Percent formatting on the x axis
            plt.gca().xaxis.set_major_formatter(plt.FuncFormatter(lambda x, p: f'{x:.1%}'))
        
        plt.tight_layout()
        plt.savefig('charts/rl_returns_distribution.png', dpi=300, bbox_inches='tight')
        plt.show()
        
        # 4. Risk (return volatility) vs. reward (average return) scatter
        if len(evaluation_results) > 1:
            plt.figure(figsize=(10, 8))
            
            for algo, results in evaluation_results.items():
                plt.scatter(results['std_return'], results['avg_return'], 
                           s=200, alpha=0.7, label=algo)
                
                # Name each point directly on the chart
                plt.annotate(algo, 
                           (results['std_return'], results['avg_return']),
                           xytext=(5, 5), textcoords='offset points',
                           fontsize=10, fontweight='bold')
            
            plt.xlabel('收益波动率 (风险)', fontsize=12)
            plt.ylabel('平均收益率 (收益)', fontsize=12)
            plt.title('风险-收益散点图', fontsize=14, fontweight='bold')
            plt.legend()
            plt.grid(True, alpha=0.3)
            
            # Percent formatting on both axes
            plt.gca().xaxis.set_major_formatter(plt.FuncFormatter(lambda x, p: f'{x:.1%}'))
            plt.gca().yaxis.set_major_formatter(plt.FuncFormatter(lambda x, p: f'{x:.1%}'))
            
            # Illustrative linear reference line.
            # NOTE(review): plotted after plt.legend(), so its label never
            # appears in the legend — move the legend() call if that matters.
            x_line = np.linspace(plt.xlim()[0], plt.xlim()[1], 100)
            y_line = x_line * 2  # simple linear relation as a reference
            plt.plot(x_line, y_line, 'k--', alpha=0.3, label='理论效率前沿')
            
            plt.tight_layout()
            plt.savefig('charts/rl_risk_return_scatter.png', dpi=300, bbox_inches='tight')
            plt.show()
        
        print("   ✅ 图表已保存到 charts/ 目录")
        
    except ImportError:
        print("   ⚠️ matplotlib未安装，跳过可视化")
    except Exception as e:
        print(f"   ❌ 可视化生成失败: {str(e)}")

def create_rl_strategy_backtest(agent: Any, test_data: pd.DataFrame, 
                               initial_balance: float = 100000) -> Dict:
    """Run a deterministic (no-exploration) backtest of a trained RL agent.

    Args:
        agent: Trained agent (DQNAgent / PPOAgent / A2CAgent / DDPGAgent).
            Unrecognized agent types fall back to a constant "hold" action.
        test_data: OHLCV DataFrame that drives the TradingEnvironment.
        initial_balance: Starting cash for the simulated account.

    Returns:
        Dict of performance metrics (total/annualized return, volatility,
        Sharpe, max drawdown, trade count, win rate) plus the per-step
        'backtest_data' time series for plotting.
    """
    print("🔄 执行强化学习策略回测...")
    
    # Fresh environment so the backtest is independent of training state.
    test_env = TradingEnvironment(
        data=test_data,
        initial_balance=initial_balance,
        transaction_cost=0.001,
        max_position=1.0,
        lookback_window=30
    )
    
    state, _ = test_env.reset()
    
    # Per-step series recorded for later analysis/plotting.
    backtest_data = {
        'dates': [],
        'prices': [],
        'positions': [],
        'balances': [],
        'actions': [],
        'rewards': []
    }
    
    total_reward = 0
    step = 0
    
    while True:
        # Dispatch on agent type: actor-critic agents also return
        # log-prob / value estimates, which the backtest ignores.
        if isinstance(agent, DQNAgent):
            action = agent.get_action(state, training=False)
        elif isinstance(agent, (PPOAgent, A2CAgent)):
            action, _, _ = agent.get_action(state)
        elif isinstance(agent, DDPGAgent):
            action = agent.get_action(state, training=False)
        else:
            action = np.array([0.0])
        
        next_state, reward, done, _, info = test_env.step(action)
        
        # current_step has already advanced inside step(), hence the -1.
        current_price = test_data.iloc[test_env.current_step - 1]['close']
        backtest_data['dates'].append(test_data.index[test_env.current_step - 1])
        backtest_data['prices'].append(current_price)
        backtest_data['positions'].append(test_env.position)
        backtest_data['balances'].append(test_env.balance)
        backtest_data['actions'].append(action[0])
        backtest_data['rewards'].append(reward)
        
        state = next_state
        total_reward += reward
        step += 1
        
        if done:
            break
    
    # Performance metrics from the equity curve (252 trading days/year).
    balance_series = pd.Series(backtest_data['balances'])
    returns = balance_series.pct_change().dropna()
    
    # Max drawdown = largest *relative* drop from the running peak.
    # (The previous formula divided the largest absolute drop by the
    # global peak, which understates drawdowns at lower local peaks.)
    running_peak = balance_series.cummax()
    max_drawdown = ((running_peak - balance_series) / running_peak).max()
    
    backtest_results = {
        'total_return': (balance_series.iloc[-1] - initial_balance) / initial_balance,
        'annualized_return': ((balance_series.iloc[-1] / initial_balance) ** (252 / len(balance_series))) - 1,
        'volatility': returns.std() * np.sqrt(252),
        'sharpe_ratio': (returns.mean() * 252) / (returns.std() * np.sqrt(252)) if returns.std() > 0 else 0,
        'max_drawdown': max_drawdown,
        'total_trades': test_env.total_trades,
        # NOTE(review): this is the fraction of positive *daily* returns,
        # not a per-trade win rate — confirm the intended semantics.
        'win_rate': np.mean(returns > 0),
        'backtest_data': backtest_data
    }
    
    print(f"   📊 回测完成:")
    print(f"      总收益率: {backtest_results['total_return']:.2%}")
    print(f"      年化收益率: {backtest_results['annualized_return']:.2%}")
    print(f"      夏普比率: {backtest_results['sharpe_ratio']:.3f}")
    print(f"      最大回撤: {backtest_results['max_drawdown']:.2%}")
    print(f"      交易次数: {backtest_results['total_trades']}")
    
    return backtest_results

def plot_backtest_results(backtest_results: Dict, title: str = "强化学习策略回测"):
    """Render a 4-panel backtest chart (price/position, equity curve,
    action signals, cumulative reward) and save it under charts/."""
    try:
        import matplotlib.pyplot as plt
        
        series = backtest_results['backtest_data']
        dates = series['dates']
        
        fig, (ax_price, ax_equity, ax_signal, ax_reward) = plt.subplots(
            4, 1, figsize=(15, 16))
        
        # Panel 1: price on the left axis, position on a twin right axis.
        pos_axis = ax_price.twinx()
        ax_price.plot(dates, series['prices'], 'b-', label='价格', linewidth=1.5)
        pos_axis.plot(dates, series['positions'], 'r-', label='仓位',
                      linewidth=2, alpha=0.7)
        ax_price.set_ylabel('价格', color='b')
        pos_axis.set_ylabel('仓位', color='r')
        ax_price.set_title(f'{title} - 价格与仓位')
        ax_price.grid(True, alpha=0.3)
        ax_price.legend(loc='upper left')
        pos_axis.legend(loc='upper right')
        
        # Panel 2: equity curve against the initial-capital baseline.
        ax_equity.plot(dates, series['balances'], 'g-', linewidth=2, label='账户余额')
        ax_equity.axhline(y=100000, color='k', linestyle='--', alpha=0.5, label='初始资金')
        ax_equity.set_ylabel('账户余额 ($)')
        ax_equity.set_title('账户余额变化')
        ax_equity.grid(True, alpha=0.3)
        ax_equity.legend()
        ax_equity.yaxis.set_major_formatter(
            plt.FuncFormatter(lambda x, p: f'${x:,.0f}'))
        
        # Panel 3: raw action values, colored by sell/buy/hold zones.
        action_values = np.array(series['actions'])
        point_colors = [
            'red' if a < -0.1 else 'green' if a > 0.1 else 'gray'
            for a in action_values
        ]
        ax_signal.scatter(dates, action_values, c=point_colors, alpha=0.6, s=10)
        ax_signal.axhline(y=0, color='k', linestyle='-', alpha=0.3)
        ax_signal.axhline(y=0.5, color='g', linestyle='--', alpha=0.3, label='买入阈值')
        ax_signal.axhline(y=-0.5, color='r', linestyle='--', alpha=0.3, label='卖出阈值')
        ax_signal.set_ylabel('动作信号')
        ax_signal.set_title('交易动作信号')
        ax_signal.grid(True, alpha=0.3)
        ax_signal.legend()
        
        # Panel 4: running sum of per-step rewards.
        ax_reward.plot(dates, np.cumsum(series['rewards']), 'purple',
                       linewidth=2, label='累计奖励')
        ax_reward.set_ylabel('累计奖励')
        ax_reward.set_xlabel('日期')
        ax_reward.set_title('累计奖励变化')
        ax_reward.grid(True, alpha=0.3)
        ax_reward.legend()
        
        plt.tight_layout()
        plt.savefig(f'charts/rl_backtest_{title.lower().replace(" ", "_")}.png', 
                   dpi=300, bbox_inches='tight')
        plt.show()
        
    except ImportError:
        print("   ⚠️ matplotlib未安装，跳过回测图表生成")
    except Exception as e:
        print(f"   ❌ 回测图表生成失败: {str(e)}")

def run_rl_trading_demo():
    """Run a quick self-contained demo: synthetic price data stepped
    through the TradingEnvironment with a uniformly random agent.

    Returns a dict of recorded balances, positions, and prices.
    """
    print("🎮 强化学习交易演示")
    print("="*50)
    
    # Deterministic synthetic market: a geometric random walk from 100.
    # (The order of np.random calls below is part of the output contract
    # once the seed is fixed.)
    np.random.seed(42)
    dates = pd.date_range('2023-01-01', periods=252, freq='D')
    
    daily_returns = np.random.normal(0.001, 0.02, 252)
    price_path = [100]
    for daily_ret in daily_returns:
        price_path.append(price_path[-1] * (1 + daily_ret))
    
    demo_data = pd.DataFrame({
        'open': price_path[:-1],
        'high': [p * (1 + abs(np.random.normal(0, 0.01))) for p in price_path[:-1]],
        'low': [p * (1 - abs(np.random.normal(0, 0.01))) for p in price_path[:-1]],
        'close': price_path[1:],
        'volume': np.random.randint(1000000, 5000000, 252)
    }, index=dates)
    
    print(f"📊 生成演示数据: {len(demo_data)}条记录")
    
    env = TradingEnvironment(demo_data, initial_balance=10000)
    
    # Baseline agent: a uniformly random action in [-1, 1] each step.
    class RandomAgent:
        def get_action(self, state, training=False):
            return np.array([np.random.uniform(-1, 1)])
    
    agent = RandomAgent()
    
    state, _ = env.reset()
    demo_results = {
        'balances': [env.balance],
        'positions': [env.position],
        'prices': []
    }
    
    # Step at most 100 bars (or until the environment signals done).
    max_steps = min(100, len(demo_data) - 1)
    for step in range(max_steps):
        action = agent.get_action(state)
        next_state, reward, done, _, info = env.step(action)
        
        demo_results['balances'].append(env.balance)
        demo_results['positions'].append(env.position)
        # NOTE(review): indexes demo_data by the loop counter, which may
        # lag env.current_step if the env starts past a lookback window —
        # confirm against TradingEnvironment.reset().
        demo_results['prices'].append(demo_data.iloc[step]['close'])
        
        state = next_state
        if done:
            break
    
    final_balance = demo_results['balances'][-1]
    total_return = (final_balance - 10000) / 10000
    
    print(f"🎯 演示结果:")
    print(f"   初始资金: $10,000")
    print(f"   最终余额: ${final_balance:.2f}")
    print(f"   总收益率: {total_return:.2%}")
    print(f"   交易次数: {env.total_trades}")
    
    return demo_results

if __name__ == "__main__":
    print("🎯 强化学习交易系统 - PyTorch版本")
    print("🔥 支持算法: DQN, PPO, A2C, DDPG")
    # Guard on PYTORCH_AVAILABLE: when the top-of-file import failed,
    # the name `torch` is undefined and the original unguarded
    # torch.cuda.is_available() call raised NameError at startup.
    if PYTORCH_AVAILABLE and torch.cuda.is_available():
        print("💡 使用GPU加速训练")
    else:
        print("💻 使用CPU训练")
    print("="*60)
    
    # `sys` is already imported at the top of the file.
    # Run the lightweight demo with `--demo`, otherwise full training.
    if len(sys.argv) > 1 and sys.argv[1] == '--demo':
        run_rl_trading_demo()
    else:
        main()