"""
Demo 06: 线性降维（PCA）
使用主成分分析对高维时间序列窗口进行降维
"""

import numpy as np
from sklearn.decomposition import PCA
from demo_01_raw import RawVectorizer
import matplotlib.pyplot as plt


class PCAVectorizer:
    """Vectorize time-series windows and reduce their dimensionality with PCA."""

    def __init__(self, window_size=60, step_size=5, n_components=10):
        """
        Initialize parameters.

        Args:
            window_size: sliding-window length (number of samples per window)
            step_size: stride between consecutive windows
            n_components: number of principal components to keep
        """
        self.window_size = window_size
        self.step_size = step_size
        self.n_components = n_components
        self.raw_vectorizer = RawVectorizer(window_size, step_size)
        # Fixed random_state keeps the (randomized-solver) results reproducible.
        self.pca = PCA(n_components=n_components, random_state=42)

    def fit_pca(self, X):
        """
        Fit the PCA model and project X onto the principal components.

        Args:
            X: input data matrix, shape (n_samples, n_features)

        Returns:
            numpy.ndarray: reduced data, shape (n_samples, n_components)
        """
        X_pca = self.pca.fit_transform(X)
        return X_pca

    def transform_pca(self, X):
        """
        Transform data using the already-fitted PCA model.

        Args:
            X: input data matrix, shape (n_samples, n_features)

        Returns:
            numpy.ndarray: reduced data, shape (n_samples, n_components)
        """
        return self.pca.transform(X)

    def get_explained_variance(self):
        """
        Return explained-variance statistics of the fitted model.

        Returns:
            dict: explained variance per component, per-component ratio,
                  and cumulative ratio. Requires the model to be fitted.
        """
        return {
            'explained_variance': self.pca.explained_variance_,
            'explained_variance_ratio': self.pca.explained_variance_ratio_,
            'cumulative_variance_ratio': np.cumsum(self.pca.explained_variance_ratio_)
        }

    def get_principal_components(self):
        """
        Return the principal components (loading matrix).

        Returns:
            numpy.ndarray: component matrix, shape (n_components, n_features)
        """
        return self.pca.components_

    def reconstruct(self, X_pca):
        """
        Reconstruct the original-space data from the reduced representation.

        Args:
            X_pca: PCA-reduced data

        Returns:
            numpy.ndarray: reconstruction in the original feature space
        """
        return self.pca.inverse_transform(X_pca)

    def compute_reconstruction_error(self, X_original, X_pca):
        """
        Compute per-sample Euclidean reconstruction error statistics.

        Args:
            X_original: original data matrix
            X_pca: PCA-reduced data

        Returns:
            dict: mean/std/max/min of the per-row reconstruction error
        """
        X_reconstructed = self.reconstruct(X_pca)
        # Row-wise L2 norm of the reconstruction residual.
        errors = np.sqrt(np.sum((X_original - X_reconstructed) ** 2, axis=1))

        return {
            'mean_error': np.mean(errors),
            'std_error': np.std(errors),
            'max_error': np.max(errors),
            'min_error': np.min(errors)
        }

    def find_optimal_components(self, X, variance_threshold=0.95):
        """
        Find the minimum number of principal components needed to explain
        the requested share of total variance.

        Args:
            X: input data matrix
            variance_threshold: target cumulative explained-variance ratio

        Returns:
            int: smallest component count whose cumulative ratio reaches the
                 threshold; if the threshold is never reached, the total
                 number of available components.
        """
        pca_full = PCA()
        pca_full.fit(X)
        cumsum = np.cumsum(pca_full.explained_variance_ratio_)
        # NOTE: np.argmax(cumsum >= t) would silently return 0 (i.e. "1
        # component") when the threshold is never reached; searchsorted
        # clipped to the component count handles that edge case correctly.
        idx = np.searchsorted(cumsum, variance_threshold)
        return int(min(idx + 1, len(cumsum)))

    def fit_transform(self, ticker="AAPL", start="2020-01-01", end="2024-12-31"):
        """
        Run the full pipeline: download windows, then fit-and-apply PCA.

        Args:
            ticker: stock ticker symbol
            start: start date (ISO format)
            end: end date (ISO format)

        Returns:
            tuple: (raw window matrix, PCA-reduced matrix)
        """
        # Build the raw sliding-window matrix.
        X_raw = self.raw_vectorizer.fit_transform(ticker, start, end)

        # Fit PCA and project.
        X_pca = self.fit_pca(X_raw)

        return X_raw, X_pca

    def plot_variance_explained(self):
        """
        Plot per-component and cumulative explained-variance ratios.
        Prints a message and returns early if the model is not fitted.
        """
        if not hasattr(self.pca, 'explained_variance_ratio_'):
            print("PCA模型尚未拟合")
            return

        var_info = self.get_explained_variance()

        plt.figure(figsize=(12, 4))

        # Variance explained by each individual component.
        plt.subplot(1, 2, 1)
        plt.bar(range(1, self.n_components + 1),
                var_info['explained_variance_ratio'])
        plt.xlabel('Principal Component')
        plt.ylabel('Explained Variance Ratio')
        plt.title('Variance Explained by Each PC')
        plt.grid(True, alpha=0.3)

        # Cumulative explained variance with common reference thresholds.
        plt.subplot(1, 2, 2)
        plt.plot(range(1, self.n_components + 1),
                var_info['cumulative_variance_ratio'],
                'bo-')
        plt.axhline(y=0.9, color='r', linestyle='--', label='90% threshold')
        plt.axhline(y=0.95, color='g', linestyle='--', label='95% threshold')
        plt.xlabel('Number of Components')
        plt.ylabel('Cumulative Explained Variance Ratio')
        plt.title('Cumulative Variance Explained')
        plt.legend()
        plt.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()


def main():
    """Demo entry point: download data, run PCA, and report diagnostics.

    Returns:
        tuple: (raw window matrix, PCA-reduced matrix)
    """
    print("=" * 60)
    print("Demo 06: 线性降维（PCA）")
    print("=" * 60)

    # Build the PCA vectorizer.
    vectorizer = PCAVectorizer(window_size=60, step_size=5, n_components=10)

    # Run the full pipeline (downloads data over the network).
    print("正在下载AAPL股票数据并进行PCA降维...")
    X_raw, X_pca = vectorizer.fit_transform(
        ticker="AAPL",
        start="2020-01-01",
        end="2024-12-31"
    )

    # Report shapes and compression ratio.
    print(f"\n原始窗口 shape: {X_raw.shape}")
    print(f"PCA降维后 shape: {X_pca.shape}")
    print(f"降维比例: {X_pca.shape[1] / X_raw.shape[1]:.2%}")

    # Explained-variance summary.
    var_info = vectorizer.get_explained_variance()
    print(f"\n累计解释方差比率:")
    for i, ratio in enumerate(var_info['cumulative_variance_ratio']):
        print(f"  前{i+1}个主成分: {ratio:.4f}")

    # Minimum component counts for common variance targets.
    n_opt_90 = vectorizer.find_optimal_components(X_raw, 0.90)
    n_opt_95 = vectorizer.find_optimal_components(X_raw, 0.95)
    print(f"\n最优主成分数:")
    print(f"  解释90%方差需要: {n_opt_90}个主成分")
    print(f"  解释95%方差需要: {n_opt_95}个主成分")

    # Reconstruction-error statistics.
    error_stats = vectorizer.compute_reconstruction_error(X_raw, X_pca)
    print(f"\n重构误差统计:")
    print(f"  平均误差: {error_stats['mean_error']:.4f}")
    print(f"  标准差: {error_stats['std_error']:.4f}")
    print(f"  最大误差: {error_stats['max_error']:.4f}")
    print(f"  最小误差: {error_stats['min_error']:.4f}")

    # Inspect the leading principal components.
    components = vectorizer.get_principal_components()
    print(f"\n主成分分析:")
    print(f"  第一主成分最大载荷位置: {np.abs(components[0]).argmax()}")
    print(f"  第二主成分最大载荷位置: {np.abs(components[1]).argmax()}")

    # Show PCA features of the first few windows.
    print(f"\n前3个窗口的PCA特征（前5个主成分）:")
    for i in range(min(3, X_pca.shape[0])):
        print(f"  窗口{i}: {X_pca[i, :5]}")

    # Optional: plot explained variance. Best-effort — plotting may fail in
    # headless environments; catch Exception (not bare except, which would
    # also swallow KeyboardInterrupt/SystemExit).
    try:
        vectorizer.plot_variance_explained()
    except Exception:
        print("\n(跳过方差图绘制)")

    return X_raw, X_pca


if __name__ == "__main__":
    X_raw, X_pca = main()
