# examples/deep_learning_forecasting_torch.py
"""
深度学习时间序列预测示例 - PyTorch版本
使用多种深度学习模型预测股价和金融时间序列
"""

import sys
import numpy as np
import pandas as pd
from pathlib import Path
from datetime import datetime, timedelta
from typing import Dict, List, Tuple, Optional
import warnings
warnings.filterwarnings('ignore')

# PyTorch相关库
try:
    import torch
    import torch.nn as nn
    import torch.optim as optim
    from torch.utils.data import Dataset, DataLoader, TensorDataset
    import torch.nn.functional as F
    from sklearn.preprocessing import MinMaxScaler, StandardScaler
    from sklearn.metrics import mean_squared_error, mean_absolute_error
    PYTORCH_AVAILABLE = True
    
    # 设置设备
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"🔥 使用设备: {device}")
    
except ImportError:
    PYTORCH_AVAILABLE = False
    print("⚠️ PyTorch未安装，深度学习模型将无法使用")

# 添加项目路径
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from src.services.market_data_service import MarketDataService

class TimeSeriesDataset(Dataset):
    """Wraps (sequence, target) numpy arrays as float32 tensors for a DataLoader.

    Each item is a tuple ``(sequence, target)`` where ``sequence`` has shape
    (sequence_length, n_features) and ``target`` is a scalar tensor.
    """

    def __init__(self, sequences: np.ndarray, targets: np.ndarray):
        # Convert once up front so __getitem__ is a cheap index operation.
        self.sequences = torch.FloatTensor(sequences)
        self.targets = torch.FloatTensor(targets)

    def __len__(self):
        return self.sequences.shape[0]

    def __getitem__(self, idx):
        return self.sequences[idx], self.targets[idx]

class TimeSeriesPreprocessor:
    """Feature engineering and windowing for financial time series.

    Builds technical-indicator features from OHLCV data, scales everything to
    [0, 1] with a MinMaxScaler, and slices the result into fixed-length input
    windows whose target lies ``prediction_horizon`` steps ahead.
    """

    def __init__(self, sequence_length: int = 60, prediction_horizon: int = 1):
        self.sequence_length = sequence_length          # timesteps per input window
        self.prediction_horizon = prediction_horizon    # steps ahead to predict
        self.scaler = None                              # fitted lazily in create_sequences
        self.feature_names: List[str] = []

    def create_features(self, data: pd.DataFrame) -> pd.DataFrame:
        """Return a copy of ``data`` augmented with technical-indicator columns.

        Expects OHLCV columns ('open', 'high', 'low', 'close', 'volume') and a
        DatetimeIndex (required by the calendar features at the bottom).
        Leading rows contain NaNs from the rolling windows; they are filled
        later in create_sequences.
        """
        df = data.copy()

        # Basic price features
        df['returns'] = df['close'].pct_change()
        df['log_returns'] = np.log(df['close'] / df['close'].shift(1))
        df['high_low_ratio'] = df['high'] / df['low']
        df['open_close_ratio'] = df['open'] / df['close']

        # Simple and exponential moving averages
        for window in [5, 10, 20, 50]:
            df[f'sma_{window}'] = df['close'].rolling(window).mean()
            df[f'ema_{window}'] = df['close'].ewm(span=window).mean()

        # Return volatility over short and medium windows
        df['volatility_5'] = df['returns'].rolling(5).std()
        df['volatility_20'] = df['returns'].rolling(20).std()

        # RSI (14-period, simple-moving-average variant)
        delta = df['close'].diff()
        gain = delta.where(delta > 0, 0)
        loss = -delta.where(delta < 0, 0)
        avg_gain = gain.rolling(14).mean()
        avg_loss = loss.rolling(14).mean()
        rs = avg_gain / avg_loss
        df['rsi'] = 100 - (100 / (1 + rs))

        # MACD (12/26 EMA difference with a 9-period signal line)
        ema12 = df['close'].ewm(span=12).mean()
        ema26 = df['close'].ewm(span=26).mean()
        df['macd'] = ema12 - ema26
        df['macd_signal'] = df['macd'].ewm(span=9).mean()
        df['macd_histogram'] = df['macd'] - df['macd_signal']

        # Bollinger bands (20-period, 2 standard deviations)
        sma20 = df['close'].rolling(20).mean()
        std20 = df['close'].rolling(20).std()
        df['bb_upper'] = sma20 + (std20 * 2)
        df['bb_lower'] = sma20 - (std20 * 2)
        df['bb_width'] = df['bb_upper'] - df['bb_lower']
        # NOTE(review): bb_width can be 0 on flat price stretches, yielding inf here.
        df['bb_position'] = (df['close'] - df['bb_lower']) / df['bb_width']

        # Volume features
        df['volume_sma'] = df['volume'].rolling(20).mean()
        df['volume_ratio'] = df['volume'] / df['volume_sma']

        # Calendar features (requires a DatetimeIndex)
        df['day_of_week'] = df.index.dayofweek
        df['month'] = df.index.month
        df['quarter'] = df.index.quarter

        return df

    def create_sequences(self, data: pd.DataFrame, target_column: str = 'close') -> Tuple[np.ndarray, np.ndarray]:
        """Scale the feature matrix and slice it into supervised (X, y) arrays.

        Returns:
            X: array of shape (n_samples, sequence_length, n_features), scaled.
            y: array of shape (n_samples,) with the scaled target value
               ``prediction_horizon`` steps after each window.
        """
        # Drop metadata columns and fill gaps. fillna(method=...) was
        # deprecated in pandas 2.1 and removed in 3.0 — use ffill/bfill.
        feature_cols = [col for col in data.columns if col not in ['symbol', 'source']]
        df = data[feature_cols].ffill().bfill()

        # Fit the scaler on the full feature matrix.
        self.scaler = MinMaxScaler()
        scaled_data = self.scaler.fit_transform(df)

        self.feature_names = feature_cols

        target_idx = feature_cols.index(target_column)

        X, y = [], []

        for i in range(self.sequence_length, len(scaled_data) - self.prediction_horizon + 1):
            # Input window ending just before index i
            X.append(scaled_data[i - self.sequence_length:i])
            # Scaled target value `prediction_horizon` steps ahead
            y.append(scaled_data[i + self.prediction_horizon - 1, target_idx])

        return np.array(X), np.array(y)

    def inverse_transform_predictions(self, predictions: np.ndarray, target_column: str = 'close') -> np.ndarray:
        """Map scaled target predictions back to original units.

        Raises:
            ValueError: if the scaler has not been fitted via create_sequences.
        """
        if self.scaler is None:
            raise ValueError("数据未经过标准化处理")

        # Pad the predictions into the full feature width so the fitted
        # scaler can invert them, then pull the target column back out.
        target_idx = self.feature_names.index(target_column)
        dummy_array = np.zeros((len(predictions), len(self.feature_names)))
        dummy_array[:, target_idx] = predictions

        inverse_transformed = self.scaler.inverse_transform(dummy_array)
        return inverse_transformed[:, target_idx]

class LSTMModel(nn.Module):
    """Stacked LSTM regressor.

    Encodes a (batch, seq, features) tensor and maps the final timestep's
    hidden state through a small two-layer head to a single scalar.
    """

    def __init__(self, input_size: int, hidden_size: int = 50, num_layers: int = 3, dropout: float = 0.2):
        super().__init__()

        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Recurrent encoder over the input sequence.
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            dropout=dropout,
            batch_first=True
        )

        # Two-layer regression head with dropout in between.
        self.fc1 = nn.Linear(hidden_size, 25)
        self.dropout = nn.Dropout(dropout)
        self.fc2 = nn.Linear(25, 1)

    def forward(self, x):
        encoded, _ = self.lstm(x)
        final_step = encoded[:, -1, :]  # hidden state at the last timestep
        head = self.dropout(F.relu(self.fc1(final_step)))
        return self.fc2(head)

class GRUModel(nn.Module):
    """Stacked GRU regressor.

    Same structure as LSTMModel but with GRU cells: sequence encoder
    followed by a small two-layer regression head.
    """

    def __init__(self, input_size: int, hidden_size: int = 50, num_layers: int = 3, dropout: float = 0.2):
        super().__init__()

        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Recurrent encoder over (batch, seq, features) input.
        self.gru = nn.GRU(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            dropout=dropout,
            batch_first=True
        )

        # Regression head.
        self.fc1 = nn.Linear(hidden_size, 25)
        self.dropout = nn.Dropout(dropout)
        self.fc2 = nn.Linear(25, 1)

    def forward(self, x):
        encoded, _ = self.gru(x)
        final_step = encoded[:, -1, :]  # last timestep's hidden state
        head = self.dropout(F.relu(self.fc1(final_step)))
        return self.fc2(head)

class CNNLSTMModel(nn.Module):
    """CNN + LSTM hybrid regressor.

    Two 1-D convolutions extract local patterns along the time axis, max
    pooling halves the sequence length, and an LSTM encodes the result
    before a small regression head emits a scalar.
    """

    def __init__(self, input_size: int, cnn_filters: int = 64, lstm_hidden: int = 50, dropout: float = 0.2):
        super().__init__()

        # Convolutional front-end (padding=1 keeps sequence length).
        self.conv1 = nn.Conv1d(in_channels=input_size, out_channels=cnn_filters, kernel_size=3, padding=1)
        self.conv2 = nn.Conv1d(in_channels=cnn_filters, out_channels=cnn_filters, kernel_size=3, padding=1)
        self.pool = nn.MaxPool1d(kernel_size=2, stride=2)

        # Recurrent encoder over the pooled feature sequence.
        self.lstm = nn.LSTM(
            input_size=cnn_filters,
            hidden_size=lstm_hidden,
            num_layers=2,
            dropout=dropout,
            batch_first=True
        )

        # Regression head.
        self.fc1 = nn.Linear(lstm_hidden, 25)
        self.dropout = nn.Dropout(dropout)
        self.fc2 = nn.Linear(25, 1)

    def forward(self, x):
        # Conv1d expects (batch, channels, time).
        feats = x.permute(0, 2, 1)
        feats = F.relu(self.conv1(feats))
        feats = self.pool(F.relu(self.conv2(feats)))

        # LSTM expects (batch, time, channels).
        encoded, _ = self.lstm(feats.permute(0, 2, 1))

        head = self.dropout(F.relu(self.fc1(encoded[:, -1, :])))
        return self.fc2(head)

class BiLSTMModel(nn.Module):
    """Bidirectional LSTM regressor.

    Encodes the sequence in both directions; the final timestep's
    concatenated forward/backward state (2 * hidden_size wide) feeds a
    small regression head.
    """

    def __init__(self, input_size: int, hidden_size: int = 50, num_layers: int = 2, dropout: float = 0.2):
        super().__init__()

        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Bidirectional encoder.
        self.bilstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            dropout=dropout,
            batch_first=True,
            bidirectional=True
        )

        # Head input is doubled because both directions are concatenated.
        self.fc1 = nn.Linear(hidden_size * 2, 25)
        self.dropout = nn.Dropout(dropout)
        self.fc2 = nn.Linear(25, 1)

    def forward(self, x):
        encoded, _ = self.bilstm(x)
        final_step = encoded[:, -1, :]  # fwd+bwd states at the last timestep
        head = self.dropout(F.relu(self.fc1(final_step)))
        return self.fc2(head)

class TransformerModel(nn.Module):
    """Transformer-encoder regressor.

    Projects inputs to d_model, adds sinusoidal positional encodings,
    runs a Transformer encoder, mean-pools over time and regresses a scalar.
    """

    def __init__(self, input_size: int, d_model: int = 64, nhead: int = 4, num_layers: int = 2, dropout: float = 0.2):
        super().__init__()

        self.d_model = d_model
        self.input_projection = nn.Linear(input_size, d_model)

        # Sinusoidal position information (class defined elsewhere in this module).
        self.pos_encoding = PositionalEncoding(d_model, dropout)

        # Stacked self-attention encoder.
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dropout=dropout,
            batch_first=True
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

        # Regression head.
        self.fc1 = nn.Linear(d_model, 25)
        self.dropout = nn.Dropout(dropout)
        self.fc2 = nn.Linear(25, 1)

    def forward(self, x):
        # Project and scale by sqrt(d_model), as in the original Transformer.
        projected = self.input_projection(x) * (self.d_model ** 0.5)

        encoded = self.transformer(self.pos_encoding(projected))

        # Global average pooling over the time dimension.
        pooled = encoded.mean(dim=1)

        head = self.dropout(F.relu(self.fc1(pooled)))
        return self.fc2(head)

class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017) added to embeddings."""

    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Precompute the (max_len, d_model) table: sin on even columns,
        # cos on odd columns, with geometrically spaced frequencies.
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-np.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)

        # Stored as a buffer so it moves with the module but is not a parameter.
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        # Add the first seq_len rows of the table to every batch element.
        x = x + self.pe[:x.size(1), :].transpose(0, 1)
        return self.dropout(x)

class EnsembleModel(nn.Module):
    """Learned linear fusion of three recurrent sub-models.

    Each sub-model emits one scalar per sample; a single Linear(3, 1)
    layer learns how to combine them.
    """

    def __init__(self, input_size: int):
        super().__init__()

        # Sub-models (classes defined elsewhere in this module).
        self.lstm = LSTMModel(input_size, hidden_size=50, num_layers=2)
        self.gru = GRUModel(input_size, hidden_size=50, num_layers=2)
        self.bilstm = BiLSTMModel(input_size, hidden_size=25, num_layers=1)

        # Learned combination of the three scalar outputs.
        self.fusion = nn.Linear(3, 1)

    def forward(self, x):
        # Each sub-model yields shape (batch, 1); concatenate to (batch, 3).
        stacked = torch.cat([self.lstm(x), self.gru(x), self.bilstm(x)], dim=1)
        return self.fusion(stacked)

class ModelTrainer:
    """Trains the PyTorch forecasting models with early stopping.

    Keeps every trained model and its loss history so that the models can be
    compared afterwards via compare_models / plot_training_history.
    """

    def __init__(self, device: Optional[torch.device] = None):
        # Resolve the device here instead of using a module-level default,
        # which would be captured once at class-definition time and fail
        # when that global is not defined.
        if device is None:
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.device = device
        self.models = {}            # model_name -> trained nn.Module
        self.training_history = {}  # model_name -> {'train_losses', 'val_losses'}

    def train_model(self, model: nn.Module, train_loader: DataLoader,
                   val_loader: DataLoader, model_name: str,
                   epochs: int = 100, learning_rate: float = 0.001) -> Optional[Dict]:
        """Train ``model`` and return metrics/predictions, or None on failure.

        Uses MSE loss, Adam, ReduceLROnPlateau and early stopping with
        patience 10. The best checkpoint is written to
        ``best_<model_name>.pth`` in the current working directory.
        """

        print(f"🧠 训练 {model_name} 模型...")

        model = model.to(self.device)

        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, factor=0.5)

        train_losses = []
        val_losses = []
        best_val_loss = float('inf')
        patience = 10
        patience_counter = 0

        try:
            for epoch in range(epochs):
                # --- training pass ---
                model.train()
                train_loss = 0.0

                for batch_x, batch_y in train_loader:
                    batch_x, batch_y = batch_x.to(self.device), batch_y.to(self.device)

                    optimizer.zero_grad()
                    outputs = model(batch_x)
                    # view(-1) instead of squeeze(): squeeze() collapses a
                    # size-1 batch to a 0-d tensor which then broadcasts
                    # against the 1-d target and distorts the loss.
                    loss = criterion(outputs.view(-1), batch_y.view(-1))
                    loss.backward()
                    optimizer.step()

                    train_loss += loss.item()

                train_loss /= len(train_loader)

                # --- validation pass ---
                model.eval()
                val_loss = 0.0

                with torch.no_grad():
                    for batch_x, batch_y in val_loader:
                        batch_x, batch_y = batch_x.to(self.device), batch_y.to(self.device)
                        outputs = model(batch_x)
                        loss = criterion(outputs.view(-1), batch_y.view(-1))
                        val_loss += loss.item()

                val_loss /= len(val_loader)

                train_losses.append(train_loss)
                val_losses.append(val_loss)

                # Reduce the learning rate when validation loss plateaus.
                scheduler.step(val_loss)

                # Early stopping: checkpoint on improvement, stop after
                # `patience` consecutive non-improving epochs.
                if val_loss < best_val_loss:
                    best_val_loss = val_loss
                    patience_counter = 0
                    torch.save(model.state_dict(), f'best_{model_name}.pth')
                else:
                    patience_counter += 1

                if patience_counter >= patience:
                    print(f"     早停在第 {epoch + 1} 轮")
                    break

                # Progress report every 10 epochs.
                if (epoch + 1) % 10 == 0:
                    print(f"     Epoch {epoch + 1}/{epochs}: Train Loss = {train_loss:.6f}, Val Loss = {val_loss:.6f}")

            # Restore the best checkpoint before computing final metrics.
            model.load_state_dict(torch.load(f'best_{model_name}.pth'))

            train_predictions = self._predict(model, train_loader)
            val_predictions = self._predict(model, val_loader)

            train_targets = self._get_targets(train_loader)
            val_targets = self._get_targets(val_loader)

            train_mse = mean_squared_error(train_targets, train_predictions)
            val_mse = mean_squared_error(val_targets, val_predictions)
            train_mae = mean_absolute_error(train_targets, train_predictions)
            val_mae = mean_absolute_error(val_targets, val_predictions)

            # Keep the model and its history for later comparison/plotting.
            self.models[model_name] = model
            self.training_history[model_name] = {
                'train_losses': train_losses,
                'val_losses': val_losses
            }

            print(f"     ✅ {model_name}: Val MSE = {val_mse:.6f}, Val MAE = {val_mae:.6f}")

            return {
                'model': model,
                'train_mse': train_mse,
                'val_mse': val_mse,
                'train_mae': train_mae,
                'val_mae': val_mae,
                'train_predictions': train_predictions,
                'val_predictions': val_predictions,
                'training_history': {
                    'train_losses': train_losses,
                    'val_losses': val_losses
                }
            }

        except Exception as e:
            # Best-effort training loop: report the failure and let the
            # caller skip this model rather than aborting the whole run.
            print(f"     ❌ {model_name} 训练失败: {e}")
            return None

    def _predict(self, model: nn.Module, data_loader: DataLoader) -> np.ndarray:
        """Run ``model`` over ``data_loader`` and return a flat 1-D prediction array."""
        model.eval()
        batches = []

        with torch.no_grad():
            for batch_x, _ in data_loader:
                batch_x = batch_x.to(self.device)
                # view(-1) keeps a 1-D shape even for a final batch of size 1;
                # squeeze() would produce a 0-d array that cannot be iterated.
                batches.append(model(batch_x).view(-1).cpu().numpy())

        return np.concatenate(batches) if batches else np.array([])

    def _get_targets(self, data_loader: DataLoader) -> np.ndarray:
        """Collect all target values from ``data_loader`` into a flat 1-D array."""
        targets = [batch_y.view(-1).numpy() for _, batch_y in data_loader]
        return np.concatenate(targets) if targets else np.array([])

    def compare_models(self, results: Dict) -> pd.DataFrame:
        """Tabulate train/val MSE/MAE and an overfitting ratio per model.

        ``results`` maps model name -> the dict returned by train_model
        (entries that are None are skipped).
        """
        comparison_data = []

        for model_name, result in results.items():
            if result is not None:
                comparison_data.append({
                    'Model': model_name,
                    'Train MSE': f"{result['train_mse']:.6f}",
                    'Val MSE': f"{result['val_mse']:.6f}",
                    'Train MAE': f"{result['train_mae']:.6f}",
                    'Val MAE': f"{result['val_mae']:.6f}",
                    # Relative val/train MSE gap as a crude overfitting signal.
                    'Overfitting': f"{(result['val_mse'] / result['train_mse'] - 1) * 100:.1f}%"
                })

        return pd.DataFrame(comparison_data)

    def plot_training_history(self, model_name: str):
        """Plot the stored train/val loss curves for ``model_name`` (best-effort)."""
        if model_name not in self.training_history:
            print(f"❌ 模型 {model_name} 的训练历史不存在")
            return

        try:
            # Imported lazily so the trainer works without matplotlib.
            import matplotlib.pyplot as plt

            history = self.training_history[model_name]

            fig, ax = plt.subplots(1, 1, figsize=(12, 6))

            ax.plot(history['train_losses'], label='Training Loss')
            ax.plot(history['val_losses'], label='Validation Loss')
            ax.set_title(f'{model_name} - Training History')
            ax.set_xlabel('Epoch')
            ax.set_ylabel('Loss')
            ax.legend()
            ax.grid(True)

            plt.tight_layout()
            plt.show()

        except ImportError:
            print("⚠️ matplotlib未安装，无法绘制训练历史")

class MultiStepPredictor:
    """多步预测器 - PyTorch版本"""
    
    def __init__(self, model: nn.Module, preprocessor: TimeSeriesPreprocessor, device: torch.device = device):
        self.model = model.to(device)
        self.preprocessor = preprocessor
        self.device = device
    
    def predict_sequence(self, last_sequence: np.ndarray, steps: int) -> np.ndarray:
        """递归预测多个时间步"""
        self.model.eval()
        predictions = []
        
        # 转换为tensor
        current_sequence = torch.FloatTensor(last_sequence).unsqueeze(0).to(self.device)
        
        with torch.no_grad():
            for _ in range(steps):
                # 预测下一个值
                next_pred = self.model(current_sequence).cpu().numpy()[0, 0]
                predictions.append(next_pred)
                
                # 更新序列：移除第一个时间步，添加预测值到最后
                new_sequence = current_sequence.clone()
                new_sequence = torch.roll(new_sequence, -1, dims=1)
                new_sequence[0, -1, 0] = next_pred  # 假设price是第一个特征
                current_sequence = new_sequence
        
        return np.array(predictions)
    
    def predict_multiple_horizons(self, last_sequence: np.ndarray, horizons: List[int]) -> Dict[int, float]:
        """预测多个时间范围"""
        results = {}
        
        for horizon in horizons:
            predictions = self.predict_sequence(last_sequence, horizon)
            # 反向转换预测结果
            pred_prices = self.preprocessor.inverse_transform_predictions(predictions)
            results[horizon] = pred_prices[-1]  # 最后一个预测值
        
        return results

class RiskAnalyzer:
    """Static risk metrics for model predictions: VaR, expected shortfall, MC simulation."""

    @staticmethod
    def calculate_var(returns: np.ndarray, confidence_level: float = 0.05) -> float:
        """Value-at-Risk: the ``confidence_level`` quantile of the returns."""
        return np.percentile(returns, confidence_level * 100)

    @staticmethod
    def calculate_expected_shortfall(returns: np.ndarray, confidence_level: float = 0.05) -> float:
        """Expected shortfall: mean of the returns at or below the VaR threshold."""
        threshold = RiskAnalyzer.calculate_var(returns, confidence_level)
        tail = returns[returns <= threshold]
        return tail.mean()

    @staticmethod
    def monte_carlo_simulation(model: nn.Module, last_sequence: np.ndarray,
                             device: torch.device, simulations: int = 1000) -> np.ndarray:
        """Sample model predictions over Gaussian-perturbed copies of ``last_sequence``."""
        model.eval()
        outcomes = []

        with torch.no_grad():
            for _ in range(simulations):
                # Perturb the input window with small Gaussian noise.
                perturbed = last_sequence + np.random.normal(0, 0.01, last_sequence.shape)

                tensor_in = torch.FloatTensor(perturbed).unsqueeze(0).to(device)
                outcomes.append(model(tensor_in).cpu().numpy()[0, 0])

        return np.array(outcomes)

    @staticmethod
    def analyze_prediction_uncertainty(predictions: np.ndarray) -> Dict:
        """Summary statistics describing the spread of a prediction sample."""
        lower, upper = np.percentile(predictions, 2.5), np.percentile(predictions, 97.5)
        return {
            'mean': np.mean(predictions),
            'std': np.std(predictions),
            'var_95': np.percentile(predictions, 5),
            'var_99': np.percentile(predictions, 1),
            'confidence_interval_95': (lower, upper)
        }

def create_prediction_report(symbol: str, current_price: float, 
                           predictions: Dict, uncertainty_analysis: Dict) -> str:
    """Render a human-readable (Chinese) forecast report string.

    Args:
        symbol: ticker symbol used in the heading.
        current_price: latest observed price.
        predictions: mapping of horizon (days) -> predicted price.
        uncertainty_analysis: stats dict as produced by
            RiskAnalyzer.analyze_prediction_uncertainty (mean/std/var_95/
            var_99/confidence_interval_95).

    Returns:
        The formatted multi-line report.
    """
    
    report = f"""
📈 {symbol} 股价预测报告 (PyTorch版本)
{'='*50}

当前价格: ${current_price:.2f}

🔮 预测结果:
"""
    
    # One line per horizon with the percent change vs the current price.
    for horizon, pred_price in predictions.items():
        change_pct = ((pred_price - current_price) / current_price) * 100
        trend = "📈" if change_pct > 0 else "📉"
        report += f"   {horizon}日后: ${pred_price:.2f} ({change_pct:+.1f}%) {trend}\n"
    
    # Append the uncertainty / risk / advice sections.
    report += f"""
🎯 不确定性分析:
   预测均值: ${uncertainty_analysis['mean']:.2f}
   标准差: ${uncertainty_analysis['std']:.2f}
   95%置信区间: ${uncertainty_analysis['confidence_interval_95'][0]:.2f} - ${uncertainty_analysis['confidence_interval_95'][1]:.2f}
   
⚠️  风险提示:
   5%分位数 (VaR 95%): ${uncertainty_analysis['var_95']:.2f}
   1%分位数 (VaR 99%): ${uncertainty_analysis['var_99']:.2f}

💡 投资建议:
   基于PyTorch深度学习模型的预测结果仅供参考
   请结合基本面分析和市场环境做出投资决策
   注意控制风险，合理配置资产
"""
    
    return report

def main():
    """Entry point: end-to-end demo of PyTorch deep-learning time-series forecasting.

    Pipeline: fetch OHLCV data -> feature engineering/windowing -> train six
    model architectures -> compare, pick the best by validation MSE ->
    test-set evaluation, multi-step forecasting, Monte-Carlo uncertainty,
    interpretability, and a final textual report.
    """
    
    if not PYTORCH_AVAILABLE:
        print("❌ PyTorch未安装，无法运行深度学习示例")
        print("   请运行: pip install torch torchvision")
        return
    
    print("🔥 PyTorch深度学习时间序列预测系统")
    print("=" * 50)
    print(f"🎯 设备: {device}")
    
    # Initialise the market-data service (project-local dependency).
    market_service = MarketDataService()
    
    # Fetch historical prices
    symbol = 'AAPL'
    print(f"📊 获取 {symbol} 历史数据...")
    
    start_date = datetime.now() - timedelta(days=1000)  # longer history for deep learning
    end_date = datetime.now()
    
    data = market_service.get_stock_price(
        symbol,
        start_date=start_date,
        end_date=end_date,
        interval='1d'
    )
    
    if data.empty:
        print("❌ 未能获取数据")
        return
    
    print(f"✅ 获取到 {len(data)} 条数据记录")
    
    try:
        # Preprocessing: feature engineering + sequence windowing
        print("\n🔧 数据预处理...")
        preprocessor = TimeSeriesPreprocessor(sequence_length=60, prediction_horizon=1)
        
        # Feature engineering
        processed_data = preprocessor.create_features(data)
        print(f"   创建了 {len(processed_data.columns)} 个特征")
        
        # Build supervised (X, y) sequences
        X, y = preprocessor.create_sequences(processed_data, target_column='close')
        print(f"   序列数据形状: X={X.shape}, y={y.shape}")
        
        # Chronological 70/15/15 train/val/test split
        train_size = int(0.7 * len(X))
        val_size = int(0.15 * len(X))
        
        X_train = X[:train_size]
        y_train = y[:train_size]
        X_val = X[train_size:train_size + val_size]
        y_val = y[train_size:train_size + val_size]
        X_test = X[train_size + val_size:]
        y_test = y[train_size + val_size:]
        
        print(f"   训练集: {X_train.shape[0]} 样本")
        print(f"   验证集: {X_val.shape[0]} 样本")
        print(f"   测试集: {X_test.shape[0]} 样本")
        
        # Wrap the splits in DataLoaders
        batch_size = 32
        
        train_dataset = TimeSeriesDataset(X_train, y_train)
        val_dataset = TimeSeriesDataset(X_val, y_val)
        test_dataset = TimeSeriesDataset(X_test, y_test)
        
        # NOTE(review): shuffle=True reorders windows within the training
        # split; the windows themselves stay intact, but confirm this is
        # intended for time-series training.
        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
        val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
        test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
        
        print(f"   DataLoader已创建，批次大小: {batch_size}")
        
        # Instantiate the candidate architectures
        input_size = X.shape[2]  # number of features per timestep
        
        models_config = {
            'LSTM': LSTMModel(input_size, hidden_size=64, num_layers=3, dropout=0.2),
            'GRU': GRUModel(input_size, hidden_size=64, num_layers=3, dropout=0.2),
            'CNN_LSTM': CNNLSTMModel(input_size, cnn_filters=64, lstm_hidden=50, dropout=0.2),
            'BiLSTM': BiLSTMModel(input_size, hidden_size=32, num_layers=2, dropout=0.2),
            'Transformer': TransformerModel(input_size, d_model=64, nhead=4, num_layers=2, dropout=0.2),
            'Ensemble': EnsembleModel(input_size)
        }
        
        # Train every model
        print(f"\n🧠 训练 {len(models_config)} 个PyTorch深度学习模型...")
        trainer = ModelTrainer(device)
        results = {}
        
        for model_name, model in models_config.items():
            print(f"\n🎯 开始训练 {model_name}...")
            
            # Report parameter counts before training
            total_params = sum(p.numel() for p in model.parameters())
            trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
            print(f"   模型参数总数: {total_params:,}")
            print(f"   可训练参数: {trainable_params:,}")
            
            result = trainer.train_model(
                model, train_loader, val_loader, 
                model_name, epochs=100, learning_rate=0.001
            )
            
            # train_model returns None on failure; skip such models
            if result is not None:
                results[model_name] = result
        
        # Compare model performance
        if results:
            print(f"\n📊 模型性能对比:")
            comparison_df = trainer.compare_models(results)
            print(comparison_df.to_string(index=False))
            
            # Pick the model with the lowest validation MSE
            best_model_name = min(results.keys(), 
                                key=lambda x: results[x]['val_mse'])
            best_model = results[best_model_name]['model']
            
            print(f"\n🏆 最佳模型: {best_model_name}")
            
            # Held-out test-set evaluation
            print(f"\n🎯 测试集评估:")
            best_model.eval()
            test_predictions = []
            test_targets = []
            
            with torch.no_grad():
                for batch_x, batch_y in test_loader:
                    batch_x, batch_y = batch_x.to(device), batch_y.to(device)
                    outputs = best_model(batch_x)
                    # NOTE(review): squeeze() yields a 0-d array for a final
                    # batch of size 1, which .extend() cannot iterate.
                    test_predictions.extend(outputs.squeeze().cpu().numpy())
                    test_targets.extend(batch_y.cpu().numpy())
            
            test_predictions = np.array(test_predictions)
            test_targets = np.array(test_targets)
            
            test_mse = mean_squared_error(test_targets, test_predictions)
            test_mae = mean_absolute_error(test_targets, test_predictions)
            
            print(f"   测试集 MSE: {test_mse:.6f}")
            print(f"   测试集 MAE: {test_mae:.6f}")
            
            # Directional accuracy: did we predict the sign of the next move?
            if len(test_targets) > 1:
                actual_direction = np.diff(test_targets) > 0
                pred_direction = np.diff(test_predictions) > 0
                direction_accuracy = np.mean(actual_direction == pred_direction)
                print(f"   方向预测准确率: {direction_accuracy:.3f}")
            
            # Recursive multi-step forecasting from the most recent window
            print(f"\n🔮 多步预测分析...")
            last_sequence = X_test[-1]
            multi_step_predictor = MultiStepPredictor(best_model, preprocessor, device)
            
            horizons = [1, 3, 5, 10, 20]
            predictions = multi_step_predictor.predict_multiple_horizons(last_sequence, horizons)
            
            current_price = processed_data['close'].iloc[-1]
            
            # Uncertainty via Monte-Carlo simulation on perturbed inputs
            print(f"   进行蒙特卡洛模拟...")
            mc_predictions = RiskAnalyzer.monte_carlo_simulation(
                best_model, last_sequence, device, simulations=1000
            )
            
            # Map the scaled MC outputs back to price units
            mc_prices = preprocessor.inverse_transform_predictions(mc_predictions)
            uncertainty_analysis = RiskAnalyzer.analyze_prediction_uncertainty(mc_prices)
            
            # Render and print the forecast report
            report = create_prediction_report(symbol, current_price, predictions, uncertainty_analysis)
            print(report)
            
            # Model interpretability
            print(f"\n🔍 模型解释性分析:")
            
            # Feature importance via zero-out perturbation
            print(f"   计算特征重要性...")
            feature_importance = analyze_feature_importance(
                best_model, X_test[:100], preprocessor.feature_names, device
            )
            
            print(f"   前10个重要特征:")
            for i, (feature, importance) in enumerate(feature_importance[:10]):
                print(f"     {i+1}. {feature}: {importance:.4f}")
            
            # Prediction confidence via MC-dropout spread
            print(f"\n📊 预测置信度分析:")
            confidence_scores = calculate_prediction_confidence(
                best_model, X_test[-50:], device
            )
            
            avg_confidence = np.mean(confidence_scores)
            print(f"   平均置信度: {avg_confidence:.3f}")
            print(f"   置信度范围: {np.min(confidence_scores):.3f} - {np.max(confidence_scores):.3f}")
            
            # Plot training curves (best-effort)
            print(f"\n📈 训练历史分析:")
            try:
                trainer.plot_training_history(best_model_name)
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt; prefer `except Exception`
                print("   无法显示训练历史图表")
            
            # Persist the best model plus preprocessing state
            model_save_path = f"models/{symbol}_{best_model_name}_pytorch.pth"
            Path("models").mkdir(exist_ok=True)
            
            try:
                torch.save({
                    'model_state_dict': best_model.state_dict(),
                    'model_class': best_model.__class__.__name__,
                    'input_size': input_size,
                    'preprocessor': preprocessor,
                    'feature_names': preprocessor.feature_names
                }, model_save_path)
                print(f"\n💾 最佳模型已保存到: {model_save_path}")
            except Exception as e:
                print(f"\n❌ 模型保存失败: {e}")
            
            # Trading-style commentary on the 1-day forecast
            print(f"\n💡 实时预测建议:")
            next_day_pred = predictions[1]  # horizon 1 is in `horizons` above
            if next_day_pred > current_price * 1.02:
                print(f"   🟢 强烈看涨: 预测明日上涨 {((next_day_pred/current_price)-1)*100:.1f}%")
                print(f"   📈 建议: 考虑适量买入，设置止损位于 ${current_price * 0.95:.2f}")
            elif next_day_pred > current_price * 1.005:
                print(f"   🟡 温和看涨: 预测明日上涨 {((next_day_pred/current_price)-1)*100:.1f}%")
                print(f"   📊 建议: 小幅增持，密切关注市场变化")
            elif next_day_pred < current_price * 0.98:
                print(f"   🔴 强烈看跌: 预测明日下跌 {((next_day_pred/current_price)-1)*100:.1f}%")
                print(f"   📉 建议: 考虑减仓或止损，等待更好入场机会")
            elif next_day_pred < current_price * 0.995:
                print(f"   🟠 温和看跌: 预测明日下跌 {((next_day_pred/current_price)-1)*100:.1f}%")
                print(f"   ⚖️ 建议: 保持观望，适当防御")
            else:
                print(f"   ⚪ 中性: 预测明日波动较小")
                print(f"   🔄 建议: 维持现有仓位，等待明确信号")
            
            # Risk assessment: forecast spread vs historical volatility
            print(f"\n⚠️  风险评估:")
            volatility = np.std(processed_data['returns'].dropna())
            print(f"   历史波动率: {volatility:.4f}")
            print(f"   预测不确定性: {uncertainty_analysis['std']:.4f}")
            
            risk_level = "高" if uncertainty_analysis['std'] > volatility * 2 else "中" if uncertainty_analysis['std'] > volatility else "低"
            print(f"   风险等级: {risk_level}")
            
            # Performance summary
            # NOTE(review): direction_accuracy is only bound when the test set
            # has more than one sample; this print would raise otherwise.
            print(f"\n📋 模型性能总结:")
            print(f"   最佳模型: {best_model_name}")
            print(f"   测试集MSE: {test_mse:.6f}")
            print(f"   方向准确率: {direction_accuracy:.1%}")
            print(f"   预测置信度: {avg_confidence:.3f}")
            
            # PyTorch selling points
            print(f"\n🔥 PyTorch优势:")
            print(f"   ✅ 动态计算图，便于调试")
            print(f"   ✅ 灵活的模型定义")
            print(f"   ✅ 丰富的预训练模型")
            print(f"   ✅ 强大的GPU加速支持")
            print(f"   ✅ 活跃的社区和生态")
        
        else:
            print("❌ 所有模型训练失败")
            
    except Exception as e:
        # Top-level boundary: report and dump the traceback for debugging.
        print(f"❌ PyTorch深度学习预测过程中发生错误: {e}")
        import traceback
        traceback.print_exc()

def analyze_feature_importance(model: nn.Module, X_sample: np.ndarray,
                               feature_names: List[str], device: torch.device) -> List[Tuple[str, float]]:
    """Rank features by how much ablating them changes the model's output.

    A simple occlusion-style importance measure: each feature channel is
    zeroed across every sample and timestep, and the mean absolute change
    in the prediction (versus the unmodified input) is that feature's score.

    Args:
        model: Trained network; expected to accept a float tensor shaped
            like ``X_sample`` (batch, seq_len, n_features).
        X_sample: Input batch used to probe the model.
        feature_names: One name per feature channel (last axis of X_sample).
        device: Device the model lives on.

    Returns:
        (name, score) pairs sorted by score, most important first.
    """
    model.eval()

    def _predict(batch: np.ndarray) -> np.ndarray:
        """Forward pass without gradients, result back on CPU as ndarray."""
        tensor = torch.FloatTensor(batch).to(device)
        with torch.no_grad():
            return model(tensor).cpu().numpy()

    # Reference prediction on the untouched input.
    reference = _predict(X_sample)

    scores = []
    for idx, name in enumerate(feature_names):
        ablated = X_sample.copy()
        ablated[:, :, idx] = 0  # occlude this feature everywhere
        delta = np.abs(reference - _predict(ablated)).mean()
        scores.append((name, delta))

    # Most impactful features first.
    return sorted(scores, key=lambda pair: pair[1], reverse=True)

def calculate_prediction_confidence(model: nn.Module, X_sample: np.ndarray, device: torch.device) -> np.ndarray:
    """计算预测置信度"""
    model.eval()
    
    # 启用dropout进行多次预测
    confidence_scores = []
    
    for sample in X_sample:
        sample_tensor = torch.FloatTensor(sample).unsqueeze(0).to(device)
        predictions = []
        
        # 多次预测（启用dropout）
        for _ in range(10):
            # 设置为训练模式以启用dropout
            model.train()
            with torch.no_grad():
                pred = model(sample_tensor).cpu().numpy()[0, 0]
                predictions.append(pred)
        
        # 计算预测的标准差作为不确定性指标
        uncertainty = np.std(predictions)
        # 置信度为不确定性的倒数（归一化）
        confidence = 1 / (1 + uncertainty)
        confidence_scores.append(confidence)
    
    model.eval()  # 恢复评估模式
    return np.array(confidence_scores)

def create_pytorch_model_comparison():
    """Build, print, and return a comparison table of the PyTorch model variants.

    Returns:
        pd.DataFrame: One row per architecture with qualitative ratings for
        parameter count, training speed, memory usage, performance, and the
        scenario each model suits best.
    """
    # (model, parameters, training speed, memory usage, performance, best for)
    rows = [
        ('LSTM',        'Medium',    'Fast',      'Low',       'Good',      'General time series'),
        ('GRU',         'Medium',    'Fast',      'Low',       'Good',      'Simple patterns'),
        ('CNN-LSTM',    'High',      'Medium',    'Medium',    'Very Good', 'Complex patterns'),
        ('BiLSTM',      'High',      'Slow',      'High',      'Very Good', 'Sequential data'),
        ('Transformer', 'High',      'Slow',      'High',      'Excellent', 'Long sequences'),
        ('Ensemble',    'Very High', 'Very Slow', 'Very High', 'Excellent', 'Maximum accuracy'),
    ]
    columns = ['Model', 'Parameters', 'Training Speed',
               'Memory Usage', 'Performance', 'Best For']
    df = pd.DataFrame(rows, columns=columns)

    separator = "=" * 80
    print("\n🔥 PyTorch模型对比表:")
    print(separator)
    print(df.to_string(index=False))
    print(separator)

    return df

if __name__ == "__main__":
    # Print the model comparison table before running the full demo.
    create_pytorch_model_comparison()
    
    # Run the main forecasting pipeline (defined earlier in this file).
    main()