"""
Demo 07: 自编码器（Autoencoder）
使用神经网络自编码器进行非线性降维
"""

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from demo_01_raw import RawVectorizer
import matplotlib.pyplot as plt


class Autoencoder(nn.Module):
    """Symmetric MLP autoencoder for nonlinear dimensionality reduction.

    The encoder compresses an ``input_dim``-sized vector to a
    ``latent_dim``-sized code (non-negative, due to the final ReLU);
    the decoder maps the code back to the input space.
    """

    def __init__(self, input_dim=60, latent_dim=16, hidden_dim=32):
        """Build the encoder/decoder stacks.

        Args:
            input_dim: Size of each input vector.
            latent_dim: Size of the latent code.
            hidden_dim: Width of the single hidden layer on each side.
        """
        super(Autoencoder, self).__init__()

        # Encoder: input -> hidden (ReLU + BatchNorm) -> latent (ReLU).
        encoder_layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_dim),
            nn.Linear(hidden_dim, latent_dim),
            nn.ReLU(),
        ]
        # Decoder mirrors the encoder; the output layer is linear so
        # reconstructions are unbounded.
        decoder_layers = [
            nn.Linear(latent_dim, hidden_dim),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_dim),
            nn.Linear(hidden_dim, input_dim),
        ]
        self.encoder = nn.Sequential(*encoder_layers)
        self.decoder = nn.Sequential(*decoder_layers)

    def forward(self, x):
        """Encode then decode; return (reconstruction, latent code)."""
        latent = self.encode(x)
        return self.decode(latent), latent

    def encode(self, x):
        """Map inputs to their latent representation."""
        return self.encoder(x)

    def decode(self, z):
        """Map latent codes back to the input space."""
        return self.decoder(z)


class AutoencoderVectorizer:
    """Vectorize stock-price windows with a trained autoencoder.

    Extracts sliding windows via ``RawVectorizer``, trains an
    ``Autoencoder`` on them, and exposes encoding/decoding plus
    reconstruction-error-based anomaly detection.
    """

    def __init__(self, window_size=60, step_size=5, latent_dim=16,
                 hidden_dim=32, learning_rate=1e-3, n_epochs=200):
        """Store hyperparameters and create the raw-window extractor.

        Args:
            window_size: Window length (also the model's input dimension).
            step_size: Stride between consecutive windows.
            latent_dim: Dimension of the latent (encoded) space.
            hidden_dim: Width of the hidden layers.
            learning_rate: Adam learning rate.
            n_epochs: Number of training epochs.
        """
        self.window_size = window_size
        self.step_size = step_size
        self.latent_dim = latent_dim
        self.hidden_dim = hidden_dim
        self.learning_rate = learning_rate
        self.n_epochs = n_epochs
        self.raw_vectorizer = RawVectorizer(window_size, step_size)
        self.model = None          # set by train_autoencoder()
        self.train_losses = []     # per-epoch mean MSE, filled during training

    def train_autoencoder(self, X, batch_size=32, verbose=True):
        """Train a fresh autoencoder on the window matrix.

        Args:
            X: Array of shape (n_windows, window_size).
            batch_size: Mini-batch size.
            verbose: Print the average loss every 50 epochs when True.

        Returns:
            The trained Autoencoder model.
        """
        # Convert to a PyTorch tensor.
        X_tensor = torch.FloatTensor(X)

        # Fresh model each call.
        self.model = Autoencoder(
            input_dim=self.window_size,
            latent_dim=self.latent_dim,
            hidden_dim=self.hidden_dim
        )

        # Optimizer and loss.
        optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
        criterion = nn.MSELoss()

        self.train_losses = []
        self.model.train()

        for epoch in range(self.n_epochs):
            epoch_losses = []
            for i in range(0, len(X_tensor), batch_size):
                batch = X_tensor[i:i + batch_size]

                # BatchNorm1d cannot compute batch statistics from a
                # single sample in train mode ("Expected more than 1
                # value per channel"); skip a trailing singleton batch
                # instead of crashing.
                if batch.shape[0] < 2:
                    continue

                # Forward pass.
                optimizer.zero_grad()
                x_recon, _ = self.model(batch)
                loss = criterion(x_recon, batch)

                # Backward pass.
                loss.backward()
                optimizer.step()

                epoch_losses.append(loss.item())

            # Guard against an epoch where every batch was skipped
            # (fewer than 2 training samples): np.mean([]) would be NaN.
            if epoch_losses:
                avg_loss = np.mean(epoch_losses)
                self.train_losses.append(avg_loss)

                if verbose and (epoch + 1) % 50 == 0:
                    print(f"Epoch {epoch+1:3d}/{self.n_epochs} - Loss: {avg_loss:.6f}")

        return self.model

    def encode_data(self, X):
        """Encode data with the trained encoder.

        Args:
            X: Array of shape (n, window_size).

        Returns:
            numpy array of shape (n, latent_dim).

        Raises:
            ValueError: If the model has not been trained yet.
        """
        if self.model is None:
            raise ValueError("模型尚未训练")

        self.model.eval()
        with torch.no_grad():
            X_tensor = torch.FloatTensor(X)
            Z = self.model.encode(X_tensor)
            return Z.numpy()

    def decode_data(self, Z):
        """Decode latent representations back to the input space.

        Args:
            Z: Array of shape (n, latent_dim).

        Returns:
            numpy array of shape (n, window_size).

        Raises:
            ValueError: If the model has not been trained yet.
        """
        if self.model is None:
            raise ValueError("模型尚未训练")

        self.model.eval()
        with torch.no_grad():
            Z_tensor = torch.FloatTensor(Z)
            X_recon = self.model.decode(Z_tensor)
            return X_recon.numpy()

    def compute_reconstruction_error(self, X):
        """Compute per-sample Euclidean reconstruction errors.

        Args:
            X: Original data, shape (n, window_size).

        Returns:
            dict with 'mean_error', 'std_error', 'max_error',
            'min_error' scalars and the per-sample 'errors' array.
        """
        Z = self.encode_data(X)
        X_recon = self.decode_data(Z)

        # L2 norm of the residual per sample.
        errors = np.sqrt(np.sum((X - X_recon) ** 2, axis=1))

        return {
            'mean_error': np.mean(errors),
            'std_error': np.std(errors),
            'max_error': np.max(errors),
            'min_error': np.min(errors),
            'errors': errors
        }

    def detect_anomalies(self, X, threshold_percentile=95):
        """Flag samples whose reconstruction error exceeds a percentile.

        Args:
            X: Input data, shape (n, window_size).
            threshold_percentile: Percentile of the error distribution
                used as the anomaly threshold.

        Returns:
            dict with 'anomaly_indices', 'anomaly_scores',
            'threshold', and 'n_anomalies'.
        """
        error_stats = self.compute_reconstruction_error(X)
        errors = error_stats['errors']

        # Threshold from the empirical error distribution.
        threshold = np.percentile(errors, threshold_percentile)

        # Strictly greater than the threshold counts as anomalous.
        anomalies = errors > threshold
        anomaly_indices = np.where(anomalies)[0]

        return {
            'anomaly_indices': anomaly_indices,
            'anomaly_scores': errors,
            'threshold': threshold,
            'n_anomalies': len(anomaly_indices)
        }

    def fit_transform(self, ticker="AAPL", start="2020-01-01", end="2024-12-31"):
        """Full pipeline: download windows, train, and encode.

        Args:
            ticker: Stock ticker symbol.
            start: Start date (ISO format).
            end: End date (ISO format).

        Returns:
            tuple: (raw window matrix, encoded latent matrix)
        """
        # Extract raw windows (downloads price data via RawVectorizer).
        X_raw = self.raw_vectorizer.fit_transform(ticker, start, end)

        # Train the autoencoder.
        print("训练自编码器...")
        self.train_autoencoder(X_raw)

        # Encode the data.
        Z = self.encode_data(X_raw)

        return X_raw, Z

    def plot_training_loss(self):
        """Plot the per-epoch training loss curve."""
        if not self.train_losses:
            print("尚未训练")
            return

        plt.figure(figsize=(10, 4))
        plt.plot(self.train_losses)
        plt.xlabel('Epoch')
        plt.ylabel('MSE Loss')
        plt.title('Autoencoder Training Loss')
        plt.grid(True, alpha=0.3)
        plt.show()

    def visualize_reconstruction(self, X, sample_idx=0):
        """Plot one sample's reconstruction and its latent code.

        Args:
            X: Original data, shape (n, window_size).
            sample_idx: Index of the sample to visualize.
        """
        # Keep 2-D shape (1, window_size) for the model.
        x_original = X[sample_idx:sample_idx+1]

        # Round-trip through the autoencoder.
        z = self.encode_data(x_original)
        x_recon = self.decode_data(z)

        plt.figure(figsize=(12, 4))

        plt.subplot(1, 2, 1)
        plt.plot(x_original[0], label='Original', alpha=0.7)
        plt.plot(x_recon[0], label='Reconstructed', alpha=0.7)
        plt.xlabel('Time')
        plt.ylabel('Value')
        plt.title(f'Sample {sample_idx} Reconstruction')
        plt.legend()
        plt.grid(True, alpha=0.3)

        plt.subplot(1, 2, 2)
        plt.bar(range(len(z[0])), z[0])
        plt.xlabel('Latent Dimension')
        plt.ylabel('Value')
        plt.title(f'Latent Representation (dim={self.latent_dim})')
        plt.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()


def main():
    """Run the demo: train an autoencoder on AAPL windows and report stats.

    Returns:
        tuple: (raw window matrix, encoded latent matrix)
    """
    print("=" * 60)
    print("Demo 07: 自编码器（Autoencoder）")
    print("=" * 60)

    # Build the autoencoder-based vectorizer.
    vectorizer = AutoencoderVectorizer(
        window_size=60,
        step_size=5,
        latent_dim=16,
        hidden_dim=32,
        learning_rate=1e-3,
        n_epochs=200
    )

    # Download data and run the full pipeline.
    print("正在下载AAPL股票数据并训练自编码器...")
    X_raw, Z = vectorizer.fit_transform(
        ticker="AAPL",
        start="2020-01-01",
        end="2024-12-31"
    )

    # Report shapes and compression ratio.
    print(f"\n原始窗口 shape: {X_raw.shape}")
    print(f"编码向量 shape: {Z.shape}")
    print(f"压缩比: {Z.shape[1] / X_raw.shape[1]:.2%}")

    # Reconstruction-error statistics.
    error_stats = vectorizer.compute_reconstruction_error(X_raw)
    print(f"\n重构误差统计:")
    print(f"  平均误差: {error_stats['mean_error']:.6f}")
    print(f"  标准差: {error_stats['std_error']:.6f}")
    print(f"  最大误差: {error_stats['max_error']:.6f}")
    print(f"  最小误差: {error_stats['min_error']:.6f}")

    # Anomaly detection via reconstruction error.
    anomaly_results = vectorizer.detect_anomalies(X_raw, threshold_percentile=95)
    print(f"\n异常检测结果:")
    print(f"  异常数量: {anomaly_results['n_anomalies']}")
    print(f"  异常阈值: {anomaly_results['threshold']:.6f}")
    if anomaly_results['n_anomalies'] > 0:
        print(f"  异常窗口索引（前5个）: {anomaly_results['anomaly_indices'][:5]}")

    # Show the first few encoded feature vectors.
    print(f"\n前3个窗口的编码特征:")
    for i in range(min(3, Z.shape[0])):
        print(f"  窗口{i}: {Z[i, :8]}...")  # only the first 8 dimensions

    # Summarize the latent space.
    print(f"\n潜在空间分析:")
    z_mean = Z.mean(axis=0)
    z_std = Z.std(axis=0)
    print(f"  各维度均值范围: [{z_mean.min():.3f}, {z_mean.max():.3f}]")
    print(f"  各维度标准差范围: [{z_std.min():.3f}, {z_std.max():.3f}]")

    # Optional plots; visualization is best-effort (e.g. headless
    # environments). Catch Exception, not a bare except, so
    # KeyboardInterrupt/SystemExit still propagate.
    try:
        vectorizer.plot_training_loss()
        vectorizer.visualize_reconstruction(X_raw, sample_idx=0)
    except Exception:
        print("\n(跳过可视化)")

    return X_raw, Z


if __name__ == "__main__":
    X_raw, Z = main()
