"""
Demo 09: Transformer（时间序列分类/嵌入）
使用Transformer架构学习时间序列的表示
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from demo_01_raw import RawVectorizer
import matplotlib.pyplot as plt


class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017).

    Precomputes a table where
        pe[pos, 2i]   = sin(pos / 10000^(2i/d_model))
        pe[pos, 2i+1] = cos(pos / 10000^(2i/d_model))
    and adds it to sequence-first input in ``forward``.
    """

    def __init__(self, d_model, max_len=5000):
        """
        Args:
            d_model: embedding width (odd values are now supported)
            max_len: maximum sequence length the table covers
        """
        super(PositionalEncoding, self).__init__()

        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() *
                             (-math.log(10000.0) / d_model))

        pe[:, 0::2] = torch.sin(position * div_term)
        # For odd d_model there is one fewer cosine column than sine column;
        # trim the cosine table accordingly (the original assignment crashed
        # with a shape mismatch for odd d_model).
        pe[:, 1::2] = torch.cos(position * div_term)[:, :d_model // 2]
        # Shape (max_len, 1, d_model): broadcasts over the batch dimension of
        # sequence-first input (seq_len, batch, d_model).
        pe = pe.unsqueeze(0).transpose(0, 1)

        # Buffer (not a parameter): moves with .to(device), excluded from grads.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add positional encodings to ``x`` of shape (seq_len, batch, d_model)."""
        return x + self.pe[:x.size(0), :]


class TimeSeriesTransformer(nn.Module):
    """Transformer encoder that maps a time-series window to a fixed-size embedding."""

    def __init__(self, input_dim=1, d_model=32, nhead=4, num_layers=2,
                 dim_feedforward=64, latent_dim=32, dropout=0.1):
        """Build the encoder stack.

        Args:
            input_dim: number of features per time step
            d_model: internal model width
            nhead: number of attention heads
            num_layers: number of stacked encoder layers
            dim_feedforward: hidden size of each layer's feed-forward net
            latent_dim: size of the output embedding
            dropout: dropout probability
        """
        super(TimeSeriesTransformer, self).__init__()

        # Lift raw features up to the model width.
        self.input_projection = nn.Linear(input_dim, d_model)

        # Inject position information (sequence-first layout).
        self.pos_encoder = PositionalEncoding(d_model)

        # Stack of standard encoder layers.
        layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=False
        )
        self.transformer = nn.TransformerEncoder(layer, num_layers)

        # Pool over time, then project down to the latent size.
        self.pooling = nn.AdaptiveAvgPool1d(1)
        self.output_projection = nn.Linear(d_model, latent_dim)

        self.dropout = nn.Dropout(dropout)
        self.d_model = d_model

    def forward(self, x):
        """Encode a batch of windows.

        Args:
            x: tensor of shape (batch_size, seq_len, input_dim)

        Returns:
            tensor of shape (batch_size, latent_dim)
        """
        projected = self.input_projection(x)        # (batch, seq_len, d_model)
        seq_first = projected.permute(1, 0, 2)      # (seq_len, batch, d_model)
        encoded = self.transformer(self.pos_encoder(seq_first))
        # Rearrange to (batch, d_model, seq_len) for the 1-d pooling layer.
        channels_first = encoded.permute(1, 2, 0)
        pooled = self.pooling(channels_first).squeeze(-1)  # (batch, d_model)
        return self.output_projection(pooled)              # (batch, latent_dim)

    def get_attention_weights(self, x):
        """Placeholder: extracting attention maps would require a modified encoder."""
        return None


class TransformerVectorizer:
    """Pipeline: slide windows over a price series, train a Transformer encoder
    with a simple self-supervised objective, and emit fixed-size embeddings."""

    def __init__(self, window_size=60, step_size=5, d_model=32, nhead=4,
                 num_layers=2, latent_dim=32, n_epochs=100, learning_rate=1e-3):
        """
        Initialize hyperparameters.

        Args:
            window_size: length of each sliding window (time steps)
            step_size: stride between consecutive windows
            d_model: Transformer model width
            nhead: number of attention heads
            num_layers: number of Transformer encoder layers
            latent_dim: dimensionality of the output embedding
            n_epochs: number of training epochs
            learning_rate: Adam learning rate
        """
        self.window_size = window_size
        self.step_size = step_size
        self.d_model = d_model
        self.nhead = nhead
        self.num_layers = num_layers
        self.latent_dim = latent_dim
        self.n_epochs = n_epochs
        self.learning_rate = learning_rate

        # Produces the raw (n_windows, window_size) matrix from price data.
        self.raw_vectorizer = RawVectorizer(window_size, step_size)
        self.model = None          # set by train_transformer()
        self.train_losses = []     # per-epoch average MSE

    def create_self_supervised_target(self, X):
        """
        Build a self-supervised regression target: each window's mean value.

        (Note: this is a crude stand-in target, not next-step prediction.)

        Args:
            X: input data of shape (n_samples, seq_len)

        Returns:
            1-D array of per-window means, shape (n_samples,)
        """
        return X.mean(axis=1)

    def train_transformer(self, X, batch_size=32, verbose=True):
        """
        Train the Transformer on the self-supervised target.

        Args:
            X: training windows, shape (n_samples, seq_len)
            batch_size: mini-batch size
            verbose: print progress every 20 epochs

        Returns:
            the trained model
        """
        self.model = TimeSeriesTransformer(
            input_dim=1,
            d_model=self.d_model,
            nhead=self.nhead,
            num_layers=self.num_layers,
            dim_feedforward=self.d_model * 2,
            latent_dim=self.latent_dim
        )

        # Add a trailing feature dimension: (n_samples, seq_len, 1).
        X_tensor = torch.FloatTensor(X).unsqueeze(-1)
        targets = self.create_self_supervised_target(X)
        targets_tensor = torch.FloatTensor(targets).unsqueeze(-1)

        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
        criterion = nn.MSELoss()

        self.train_losses = []
        self.model.train()

        for epoch in range(self.n_epochs):
            epoch_losses = []

            # Shuffle sample order each epoch.
            indices = torch.randperm(len(X_tensor))
            for i in range(0, len(X_tensor), batch_size):
                batch_idx = indices[i:i+batch_size]
                batch_x = X_tensor[batch_idx]
                batch_y = targets_tensor[batch_idx]

                optimizer.zero_grad()
                outputs = self.model(batch_x)  # (batch, latent_dim)
                # Explicitly expand the scalar target (batch, 1) to the model's
                # output shape: relying on implicit broadcasting inside MSELoss
                # is deprecated and emits a UserWarning. Values are unchanged.
                loss = criterion(outputs, batch_y.expand_as(outputs))

                loss.backward()
                optimizer.step()

                epoch_losses.append(loss.item())

            avg_loss = np.mean(epoch_losses)
            self.train_losses.append(avg_loss)

            if verbose and (epoch + 1) % 20 == 0:
                print(f"Epoch {epoch+1}/{self.n_epochs} - Loss: {avg_loss:.6f}")

        return self.model

    def encode_data(self, X):
        """
        Encode windows with the trained Transformer.

        Args:
            X: input windows, shape (n_samples, seq_len)

        Returns:
            embeddings as a numpy array, shape (n_samples, latent_dim)

        Raises:
            ValueError: if the model has not been trained yet
        """
        if self.model is None:
            raise ValueError("模型尚未训练")

        self.model.eval()
        with torch.no_grad():
            X_tensor = torch.FloatTensor(X).unsqueeze(-1)
            embeddings = self.model(X_tensor)
            return embeddings.numpy()

    def fit_transform(self, ticker="AAPL", start="2020-01-01", end="2024-12-31"):
        """
        Full pipeline: download data, train the model, and encode the windows.

        Args:
            ticker: stock ticker symbol
            start: start date (YYYY-MM-DD)
            end: end date (YYYY-MM-DD)

        Returns:
            tuple: (raw window matrix, embedding matrix)
        """
        # Raw sliding windows from the downloaded price series.
        X_raw = self.raw_vectorizer.fit_transform(ticker, start, end)

        print("训练Transformer模型...")
        self.train_transformer(X_raw)

        embeddings = self.encode_data(X_raw)

        return X_raw, embeddings

    def analyze_embeddings(self, embeddings):
        """
        Summarize statistical properties of the embedding matrix.

        Args:
            embeddings: embedding matrix, shape (n_samples, latent_dim)

        Returns:
            dict with per-dimension stats, sparsity, correlation matrix, and
            mean/std of pairwise distances over (up to) the first 100 samples
        """
        analysis = {
            'mean': embeddings.mean(axis=0),
            'std': embeddings.std(axis=0),
            'min': embeddings.min(axis=0),
            'max': embeddings.max(axis=0),
            'sparsity': (embeddings == 0).mean(),
            'correlation_matrix': np.corrcoef(embeddings.T)
        }

        # Pairwise Euclidean distances over a capped subset, computed in one
        # vectorized pass instead of the original O(n^2) Python double loop
        # (identical values, same 100-sample cap).
        subset = embeddings[:min(100, len(embeddings))]
        diffs = subset[:, None, :] - subset[None, :, :]
        dist_matrix = np.linalg.norm(diffs, axis=-1)
        distances = dist_matrix[np.triu_indices(len(subset), k=1)]

        analysis['avg_distance'] = np.mean(distances) if distances.size else 0
        analysis['std_distance'] = np.std(distances) if distances.size else 0

        return analysis

    def plot_training_loss(self):
        """Plot the per-epoch training loss curve."""
        if not self.train_losses:
            print("尚未训练")
            return

        plt.figure(figsize=(10, 4))
        plt.plot(self.train_losses)
        plt.xlabel('Epoch')
        plt.ylabel('MSE Loss')
        plt.title('Transformer Training Loss')
        plt.grid(True, alpha=0.3)
        plt.show()

    def visualize_embedding_distribution(self, embeddings):
        """Plot value histogram plus per-dimension mean and std of the embeddings."""
        plt.figure(figsize=(12, 4))

        # Distribution of all embedding values.
        plt.subplot(1, 3, 1)
        plt.hist(embeddings.flatten(), bins=50, alpha=0.7)
        plt.xlabel('Embedding Value')
        plt.ylabel('Frequency')
        plt.title('Embedding Value Distribution')
        plt.grid(True, alpha=0.3)

        # Mean per dimension.
        plt.subplot(1, 3, 2)
        dim_means = embeddings.mean(axis=0)
        plt.bar(range(len(dim_means)), dim_means)
        plt.xlabel('Dimension')
        plt.ylabel('Mean Value')
        plt.title('Mean Value per Dimension')
        plt.grid(True, alpha=0.3)

        # Std per dimension.
        plt.subplot(1, 3, 3)
        dim_stds = embeddings.std(axis=0)
        plt.bar(range(len(dim_stds)), dim_stds)
        plt.xlabel('Dimension')
        plt.ylabel('Std Value')
        plt.title('Std per Dimension')
        plt.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()


def main():
    """Entry point: build the vectorizer, run the full pipeline, report results."""
    print("=" * 60)
    print("Demo 09: Transformer（时间序列嵌入）")
    print("=" * 60)

    # Configure the Transformer vectorizer with the demo defaults.
    vectorizer = TransformerVectorizer(
        window_size=60,
        step_size=5,
        d_model=32,
        nhead=4,
        num_layers=2,
        latent_dim=32,
        n_epochs=100,
        learning_rate=1e-3
    )

    # Download price data, train the model, and encode the windows.
    print("正在下载AAPL股票数据并训练Transformer模型...")
    X_raw, embeddings = vectorizer.fit_transform(
        ticker="AAPL",
        start="2020-01-01",
        end="2024-12-31"
    )

    # Report output shapes.
    print(f"\n原始窗口 shape: {X_raw.shape}")
    print(f"Transformer嵌入 shape: {embeddings.shape}")

    # Summary statistics of the embeddings.
    analysis = vectorizer.analyze_embeddings(embeddings)
    print(f"\n嵌入分析:")
    print(f"  平均距离: {analysis['avg_distance']:.4f}")
    print(f"  距离标准差: {analysis['std_distance']:.4f}")
    print(f"  稀疏度: {analysis['sparsity']:.4f}")
    print(f"  维度均值范围: [{analysis['mean'].min():.3f}, {analysis['mean'].max():.3f}]")
    print(f"  维度标准差范围: [{analysis['std'].min():.3f}, {analysis['std'].max():.3f}]")

    # Show the first few embeddings (first 8 dimensions each).
    print(f"\n前3个窗口的嵌入（前8维）:")
    for i in range(min(3, embeddings.shape[0])):
        print(f"  窗口{i}: {embeddings[i, :8]}")

    # Cross-dimension correlation: compute the upper-triangle magnitudes once
    # instead of twice.
    corr_matrix = analysis['correlation_matrix']
    upper_abs = np.abs(corr_matrix[np.triu_indices_from(corr_matrix, k=1)])
    print(f"\n嵌入维度相关性:")
    print(f"  平均相关性: {np.mean(upper_abs):.4f}")
    print(f"  最大相关性: {np.max(upper_abs):.4f}")

    # Optional visualization; skipped if plotting fails (e.g. headless host).
    try:
        vectorizer.plot_training_loss()
        vectorizer.visualize_embedding_distribution(embeddings)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit
        # are no longer swallowed.
        print("\n(跳过可视化)")

    return X_raw, embeddings


# Script entry point: run the full demo when executed directly (not on import).
if __name__ == "__main__":
    X_raw, embeddings = main()
