"""
Demo 08: TS2Vec 对比学习嵌入（简化版）
使用对比学习方法学习时间序列的通用表示
注意：这是一个简化的实现，不依赖于ts2vec库
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from demo_01_raw import RawVectorizer
import matplotlib.pyplot as plt


class TemporalEncoder(nn.Module):
    """1D convolutional encoder for time-series windows.

    Three kernel-3 convolutions (each followed by batch norm and ReLU,
    with dropout between stages) are average-pooled over the time axis
    into a single fixed-size vector per sequence.
    """

    def __init__(self, input_dim=1, hidden_dim=64, output_dim=64, num_layers=2):
        """
        Args:
            input_dim: channels per time step
            hidden_dim: intermediate channel width
            output_dim: channels of the final embedding
            num_layers: accepted for interface compatibility (not used here)
        """
        super().__init__()

        # Channel widths along the three conv stages.
        widths = [input_dim, hidden_dim, hidden_dim, output_dim]
        self.convs = nn.ModuleList()
        self.norms = nn.ModuleList()
        for c_in, c_out in zip(widths[:-1], widths[1:]):
            self.convs.append(nn.Conv1d(c_in, c_out, kernel_size=3, padding=1))
            self.norms.append(nn.BatchNorm1d(c_out))

        self.dropout = nn.Dropout(0.1)

    def forward(self, x):
        """
        Encode a batch of sequences.

        Args:
            x: (batch_size, seq_len, input_dim)
        Returns:
            (batch_size, output_dim)
        """
        # Conv1d expects (batch, channels, time).
        h = x.transpose(1, 2)

        last = len(self.convs) - 1
        for i, (conv, norm) in enumerate(zip(self.convs, self.norms)):
            h = F.relu(norm(conv(h)))
            if i < last:
                # Dropout between conv stages, not after the final one.
                h = self.dropout(h)

        # Global average pooling over time, then drop the singleton axis.
        return F.adaptive_avg_pool1d(h, 1).squeeze(-1)


class ContrastiveLearner:
    """Contrastive (InfoNCE) trainer for the temporal encoder."""

    def __init__(self, input_dim=1, hidden_dim=64, output_dim=64,
                 temperature=0.1, learning_rate=1e-3):
        """
        Initialize the learner.

        Args:
            input_dim: number of input channels per time step
            hidden_dim: encoder hidden channel width
            output_dim: embedding dimensionality
            temperature: InfoNCE temperature (softmax sharpness)
            learning_rate: Adam learning rate
        """
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.temperature = temperature
        self.learning_rate = learning_rate

        # Encoder and its optimizer.
        self.encoder = TemporalEncoder(input_dim, hidden_dim, output_dim)
        self.optimizer = torch.optim.Adam(self.encoder.parameters(), lr=learning_rate)

    def augment_data(self, x, noise_level=0.01, mask_ratio=0.1):
        """
        Create a stochastic view of x via Gaussian noise plus random masking.

        Args:
            x: (batch_size, seq_len) input batch
            noise_level: std of additive Gaussian noise
            mask_ratio: approximate fraction of time steps zeroed out

        Returns:
            Augmented tensor of the same shape as x.
        """
        batch_size, seq_len = x.shape

        # Additive Gaussian jitter.
        x_aug = x + torch.randn_like(x) * noise_level

        # Randomly zero out ~mask_ratio of the time steps.
        keep = torch.rand(batch_size, seq_len) > mask_ratio
        return x_aug * keep.float()

    def contrastive_loss(self, z1, z2):
        """
        InfoNCE loss between two views; positive pairs sit on the diagonal.

        Args:
            z1: embeddings of the first view, (batch, dim)
            z2: embeddings of the second view, (batch, dim)

        Returns:
            Scalar loss tensor.
        """
        # Cosine similarity via L2-normalized dot products.
        z1 = F.normalize(z1, dim=1)
        z2 = F.normalize(z2, dim=1)

        sim_matrix = torch.mm(z1, z2.t()) / self.temperature

        # Positive pairs are on the diagonal.
        pos_sim = torch.diag(sim_matrix)

        # -log softmax of the positive against all candidates in the row.
        loss = -pos_sim + torch.logsumexp(sim_matrix, dim=1)

        return loss.mean()

    def train(self, X, n_epochs=100, batch_size=32):
        """
        Train the encoder with two augmented views per mini-batch.

        Args:
            X: (n_samples, seq_len) training windows
            n_epochs: number of epochs
            batch_size: mini-batch size

        Returns:
            list of per-epoch average losses
        """
        X_tensor = torch.FloatTensor(X)
        losses = []

        self.encoder.train()

        for epoch in range(n_epochs):
            epoch_losses = []

            # Shuffled mini-batches.
            indices = torch.randperm(len(X_tensor))
            for i in range(0, len(X_tensor), batch_size):
                batch = X_tensor[indices[i:i+batch_size]]

                # BatchNorm1d in training mode cannot normalize a single
                # sample; skip a degenerate trailing batch instead of crashing
                # (happens whenever len(X) % batch_size == 1).
                if len(batch) < 2:
                    continue

                # Two independent augmented views, with a channel dim added
                # to match the encoder's (batch, seq_len, 1) input.
                x1 = self.augment_data(batch).unsqueeze(-1)
                x2 = self.augment_data(batch).unsqueeze(-1)

                z1 = self.encoder(x1)
                z2 = self.encoder(x2)

                loss = self.contrastive_loss(z1, z2)

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                epoch_losses.append(loss.item())

            # Guard: every batch may have been skipped for tiny datasets.
            avg_loss = float(np.mean(epoch_losses)) if epoch_losses else float('nan')
            losses.append(avg_loss)

            if (epoch + 1) % 20 == 0:
                print(f"Epoch {epoch+1}/{n_epochs} - Loss: {avg_loss:.4f}")

        return losses

    def encode(self, X):
        """
        Embed data with the trained encoder (eval mode, no gradients).

        Args:
            X: (n_samples, seq_len) input windows

        Returns:
            (n_samples, output_dim) numpy array of embeddings
        """
        self.encoder.eval()
        with torch.no_grad():
            X_tensor = torch.FloatTensor(X).unsqueeze(-1)  # add channel dim
            return self.encoder(X_tensor).numpy()


class TS2VecVectorizer:
    """TS2Vec-style vectorizer: raw windows -> contrastively learned embeddings."""

    def __init__(self, window_size=60, step_size=5, output_dim=64, n_epochs=100):
        """
        Initialize parameters.

        Args:
            window_size: sliding-window length (forwarded to RawVectorizer)
            step_size: sliding-window stride (forwarded to RawVectorizer)
            output_dim: embedding dimensionality
            n_epochs: number of contrastive-training epochs
        """
        self.window_size = window_size
        self.step_size = step_size
        self.output_dim = output_dim
        self.n_epochs = n_epochs
        self.raw_vectorizer = RawVectorizer(window_size, step_size)
        self.learner = None       # created in fit_transform
        self.train_losses = []    # per-epoch losses from the last fit

    def fit_transform(self, ticker="AAPL", start="2020-01-01", end="2024-12-31"):
        """
        Full pipeline: download data, window it, train, and embed.

        Args:
            ticker: stock symbol
            start: start date (YYYY-MM-DD)
            end: end date (YYYY-MM-DD)

        Returns:
            tuple: (raw window matrix, embedding matrix)
        """
        # Raw sliding windows from the upstream demo-01 vectorizer.
        X_raw = self.raw_vectorizer.fit_transform(ticker, start, end)

        # Build the contrastive learner.
        self.learner = ContrastiveLearner(
            input_dim=1,
            hidden_dim=64,
            output_dim=self.output_dim,
            temperature=0.1
        )

        # Train and record losses for later plotting.
        print("训练TS2Vec模型...")
        self.train_losses = self.learner.train(X_raw, n_epochs=self.n_epochs)

        # Embed the raw windows with the trained encoder.
        embeddings = self.learner.encode(X_raw)

        return X_raw, embeddings

    def compute_similarity_matrix(self, embeddings, top_k=5):
        """
        Pairwise cosine similarities plus each sample's top-k neighbours.

        Args:
            embeddings: (n_samples, dim) embedding matrix
            top_k: number of most-similar samples to report per row

        Returns:
            dict with 'similarity_matrix' ((n, n) ndarray) and
            'top_k_similar' ({row: {'indices': [...], 'scores': [...]}})
        """
        # L2-normalize; clip the norms so an all-zero row yields zeros
        # instead of NaNs from a division by zero.
        norms = np.linalg.norm(embeddings, axis=1, keepdims=True)
        embeddings_norm = embeddings / np.clip(norms, 1e-12, None)

        # Cosine similarity = dot product of unit vectors.
        sim_matrix = np.dot(embeddings_norm, embeddings_norm.T)

        # Top-k most similar samples for each row.
        top_k_similar = {}
        for i in range(len(embeddings)):
            # Exclude self-similarity before ranking.
            sim_scores = sim_matrix[i].copy()
            sim_scores[i] = -1

            # Highest scores last in argsort; reverse for descending order.
            top_indices = np.argsort(sim_scores)[-top_k:][::-1]
            top_scores = sim_scores[top_indices]

            top_k_similar[i] = {
                'indices': top_indices.tolist(),
                'scores': top_scores.tolist()
            }

        return {
            'similarity_matrix': sim_matrix,
            'top_k_similar': top_k_similar
        }

    def plot_training_loss(self):
        """Plot the per-epoch contrastive-loss curve from the last fit."""
        if not self.train_losses:
            print("尚未训练")
            return

        plt.figure(figsize=(10, 4))
        plt.plot(self.train_losses)
        plt.xlabel('Epoch')
        plt.ylabel('Contrastive Loss')
        plt.title('TS2Vec Training Loss')
        plt.grid(True, alpha=0.3)
        plt.show()

    def visualize_embeddings_2d(self, embeddings, method='pca'):
        """
        Scatter-plot the embeddings reduced to 2D.

        Args:
            embeddings: (n_samples, dim) embedding matrix
            method: 'pca' for PCA; anything else uses t-SNE
        """
        from sklearn.decomposition import PCA
        from sklearn.manifold import TSNE

        # Reduce to two components with the requested method.
        if method == 'pca':
            embeddings_2d = PCA(n_components=2).fit_transform(embeddings)
        else:
            embeddings_2d = TSNE(n_components=2, random_state=42).fit_transform(embeddings)

        # Color points by window index to show temporal progression.
        plt.figure(figsize=(10, 8))
        scatter = plt.scatter(embeddings_2d[:, 0], embeddings_2d[:, 1],
                              c=range(len(embeddings)), cmap='viridis', alpha=0.6)
        plt.colorbar(scatter, label='Window Index')
        plt.xlabel('Component 1')
        plt.ylabel('Component 2')
        plt.title(f'TS2Vec Embeddings Visualization ({method.upper()})')
        plt.grid(True, alpha=0.3)
        plt.show()


def main():
    """Demo entry point: train TS2Vec embeddings on AAPL and report statistics.

    Returns:
        tuple: (raw window matrix, embedding matrix)
    """
    print("=" * 60)
    print("Demo 08: TS2Vec 对比学习嵌入（简化版）")
    print("=" * 60)

    # Build the vectorizer; fewer epochs keeps the demo fast.
    vectorizer = TS2VecVectorizer(
        window_size=60,
        step_size=5,
        output_dim=64,
        n_epochs=50
    )

    # Download data, train the encoder, and produce embeddings.
    print("正在下载AAPL股票数据并训练TS2Vec模型...")
    X_raw, embeddings = vectorizer.fit_transform(
        ticker="AAPL",
        start="2020-01-01",
        end="2024-12-31"
    )

    # Basic shape report.
    print(f"\n原始窗口 shape: {X_raw.shape}")
    print(f"TS2Vec嵌入 shape: {embeddings.shape}")

    # Per-dimension embedding statistics.
    print(f"\n嵌入统计:")
    print(f"  均值范围: [{embeddings.mean(axis=0).min():.3f}, {embeddings.mean(axis=0).max():.3f}]")
    print(f"  标准差范围: [{embeddings.std(axis=0).min():.3f}, {embeddings.std(axis=0).max():.3f}]")

    # Cosine-similarity analysis between window embeddings.
    sim_info = vectorizer.compute_similarity_matrix(embeddings, top_k=3)
    print(f"\n相似度分析:")
    print(f"  平均相似度: {sim_info['similarity_matrix'].mean():.4f}")
    print(f"  相似度标准差: {sim_info['similarity_matrix'].std():.4f}")

    # Most similar windows to window 0.
    top_similar = sim_info['top_k_similar'][0]
    print(f"\n窗口0的最相似窗口:")
    for idx, score in zip(top_similar['indices'], top_similar['scores']):
        print(f"  窗口{idx}: 相似度={score:.4f}")

    # Preview the first few embeddings.
    print(f"\n前3个窗口的嵌入（前8维）:")
    for i in range(min(3, embeddings.shape[0])):
        print(f"  窗口{i}: {embeddings[i, :8]}")

    # Optional visualization. A bare `except:` would also swallow
    # KeyboardInterrupt/SystemExit, so catch Exception only (covers e.g.
    # headless matplotlib backends or missing sklearn).
    try:
        vectorizer.plot_training_loss()
        vectorizer.visualize_embeddings_2d(embeddings, method='pca')
    except Exception:
        print("\n(跳过可视化)")

    return X_raw, embeddings


# Script entry point: run the full demo and keep the results in scope
# for interactive inspection.
if __name__ == "__main__":
    X_raw, embeddings = main()
