#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
高维数据PCA降维分析 - 从零实现
作者: QZ251_徐中秋_2025720841
日期: 2025/10/10

本文件包含了完整的PCA算法从零实现，用于分析5只科技股的高维数据降维。
"""

import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import json

class PCAFromScratch:
    """
    PCA implemented from scratch via eigendecomposition of the sample
    covariance matrix.

    Attributes set by fit():
        mean_: per-feature mean used for centering, shape (n_features,).
        components_: principal axes as rows, shape (n_components, n_features).
        explained_variance_: covariance eigenvalues, descending order.
        explained_variance_ratio_: fraction of total variance per component.
        singular_values_: singular values of the centered data matrix.
    """

    def __init__(self, n_components=None, random_state=None):
        # random_state is accepted for API symmetry with sklearn's PCA; the
        # deterministic eigendecomposition below never uses randomness.
        self.n_components = n_components
        self.random_state = random_state
        self.mean_ = None
        self.components_ = None
        self.explained_variance_ = None
        self.explained_variance_ratio_ = None
        self.singular_values_ = None

    def fit(self, X):
        """Fit the model to X of shape (n_samples, n_features). Returns self."""
        # 1. Validate input and resolve the number of components.
        X = np.asarray(X, dtype=np.float64)
        n_samples, n_features = X.shape

        max_components = min(n_samples, n_features)
        if self.n_components is None:
            self.n_components = max_components
        elif not 1 <= self.n_components <= max_components:
            raise ValueError(f"n_components={self.n_components} 必须在1和{max_components}之间")

        # 2. Center the data; principal axes are defined on centered data.
        self.mean_ = np.mean(X, axis=0)
        X_centered = X - self.mean_

        # 3. Sample covariance matrix (ddof=1). The original code branched on
        # n_samples > n_features, but both branches computed this same matrix.
        covariance_matrix = (X_centered.T @ X_centered) / (n_samples - 1)

        # 4. Numerically stable eigendecomposition.
        eigenvalues, eigenvectors = self._stable_eigendecomposition(covariance_matrix)

        # 5. Sort eigenpairs by eigenvalue, descending.
        idx = np.argsort(eigenvalues)[::-1]
        eigenvalues = eigenvalues[idx]
        eigenvectors = eigenvectors[:, idx]

        # 6. Keep the leading n_components axes (stored as rows).
        self.components_ = eigenvectors[:, :self.n_components].T
        self.explained_variance_ = eigenvalues[:self.n_components]

        # 7. Variance explained relative to ALL eigenvalues, not just the kept ones.
        total_variance = np.sum(eigenvalues)
        self.explained_variance_ratio_ = self.explained_variance_ / total_variance

        # 8. Singular values of the centered data; clip tiny negative
        # eigenvalues caused by round-off so sqrt never produces NaN.
        self.singular_values_ = np.sqrt(
            np.maximum(self.explained_variance_, 0.0) * (n_samples - 1)
        )

        return self

    def _stable_eigendecomposition(self, matrix):
        """Eigendecompose a symmetric matrix, falling back through methods."""
        try:
            # Preferred: eigh is specialized for symmetric matrices and
            # returns real eigenvalues with orthonormal eigenvectors.
            return np.linalg.eigh(matrix)
        except np.linalg.LinAlgError:
            try:
                # General eig may return complex arrays; the input is
                # symmetric, so imaginary parts are round-off noise — drop them.
                eigenvalues, eigenvectors = np.linalg.eig(matrix)
                return np.real(eigenvalues), np.real(eigenvectors)
            except np.linalg.LinAlgError:
                # For a symmetric PSD matrix the singular values ARE the
                # eigenvalues and U's columns the eigenvectors. (The previous
                # code wrongly computed S**2 / (n-1) here.)
                U, S, Vt = np.linalg.svd(matrix)
                return S, U

    def transform(self, X):
        """Project X onto the principal axes, shape (n_samples, n_components)."""
        if self.components_ is None:
            raise ValueError("PCA模型尚未拟合")
        X = np.asarray(X, dtype=np.float64)
        X_centered = X - self.mean_
        return X_centered @ self.components_.T

    def fit_transform(self, X):
        """Fit the model to X and return the projected data."""
        return self.fit(X).transform(X)

    def inverse_transform(self, X_transformed):
        """Map reduced data back to the original feature space."""
        if self.components_ is None:
            raise ValueError("PCA模型尚未拟合")
        X_transformed = np.asarray(X_transformed, dtype=np.float64)
        return X_transformed @ self.components_ + self.mean_

    def get_covariance(self):
        """Return the covariance matrix implied by the kept components."""
        if self.components_ is None:
            raise ValueError("PCA模型尚未拟合")

        return self.components_.T @ np.diag(self.explained_variance_) @ self.components_


def verify_implementation(pca_model, X_std):
    """
    Sanity-check a fitted PCA model against the standardized data and print
    each result.

    Returns a dict of plain Python bool/float values (JSON-serializable —
    the previous version returned np.bool_/np.float64, which made the
    caller's json.dump crash).
    """
    # 1. Pairwise orthogonality of the principal axes.
    components = pca_model.components_
    orthogonality_check = []
    for i in range(len(components)):
        for j in range(i + 1, len(components)):
            dot_product = np.dot(components[i], components[j])
            orthogonality_check.append(abs(dot_product) < 1e-10)

    orthogonality = bool(np.all(orthogonality_check))
    print(f"特征向量正交性检查: {orthogonality}")

    # 2. Each axis must be a unit vector.
    norm_check = bool(np.allclose(np.linalg.norm(components, axis=1), 1.0))
    print(f"特征向量单位长度检查: {norm_check}")

    # 3. Variance preservation. The model's eigenvalues come from the sample
    # covariance (ddof=1), so the data-side total must also use ddof=1 — the
    # previous ddof=0 comparison was off by a factor of (n-1)/n and failed
    # the rtol=1e-5 tolerance. Note: only expected to hold when ALL
    # components are kept.
    total_variance_original = np.sum(np.var(X_std, axis=0, ddof=1))
    total_variance_pca = np.sum(pca_model.explained_variance_)
    variance_preservation = bool(np.isclose(total_variance_original, total_variance_pca, rtol=1e-5))
    print(f"方差保持性检查: {variance_preservation}")

    # 4. Round-trip reconstruction error (0 when no components are dropped).
    X_transformed = pca_model.transform(X_std)
    X_reconstructed = pca_model.inverse_transform(X_transformed)
    reconstruction_error = float(np.mean(np.sum((X_std - X_reconstructed) ** 2, axis=1)))
    print(f"重构误差: {reconstruction_error:.6f}")

    # 5. Covariance eigenvalues must be non-negative.
    eigenvalues_nonnegative = bool(np.all(pca_model.explained_variance_ >= 0))
    print(f"特征值非负检查: {eigenvalues_nonnegative}")

    return {
        'orthogonality': orthogonality,
        'unit_norm': norm_check,
        'variance_preservation': variance_preservation,
        'reconstruction_error': reconstruction_error,
        'eigenvalues_nonnegative': eigenvalues_nonnegative
    }


def load_stock_data():
    """
    Load each stock's historical JSON file and return a long-format DataFrame
    with columns: date, symbol, open, high, low, close, volume.

    Missing files produce a warning; raises FileNotFoundError if none load.
    """
    stock_symbols = ['AAPL', 'GOOGL', 'MSFT', 'AMZN', 'TSLA']
    data_frames = []

    for symbol in stock_symbols:
        try:
            with open(f'{symbol}_historical.json', 'r', encoding='utf-8') as f:
                raw = json.load(f)
        except FileNotFoundError:
            print(f"警告: 未找到 {symbol}_historical.json 文件")
            continue

        # Tag each frame with its ticker so the merged frame stays traceable.
        frame = pd.DataFrame(raw['historical'])
        frame['symbol'] = symbol
        data_frames.append(frame)

    if not data_frames:
        raise FileNotFoundError("未找到任何股票数据文件")

    combined_df = pd.concat(data_frames, ignore_index=True)

    # Parse the date strings once, up front.
    combined_df['date'] = pd.to_datetime(combined_df['date'])

    # Keep only the modeling columns, in a fixed order.
    feature_columns = ['open', 'high', 'low', 'close', 'volume']
    return combined_df[['date', 'symbol'] + feature_columns].copy()


def create_feature_matrix(df):
    """
    Pivot the long-format stock frame into a wide matrix: one row per date,
    one column per (symbol, field) pair named '<symbol>_<field>'.

    Returns (X, feature_columns, dates) where X is a 2-D numpy array sorted
    by ascending date.
    """
    fields = ['open', 'high', 'low', 'close', 'volume']
    records = []

    # Assemble one wide record per trading date.
    for trade_date in df['date'].unique():
        snapshot = df[df['date'] == trade_date]
        record = {'date': trade_date}
        for _, quote in snapshot.iterrows():
            prefix = quote['symbol']
            record.update({f'{prefix}_{field}': quote[field] for field in fields})
        records.append(record)

    wide = pd.DataFrame(records).sort_values('date').reset_index(drop=True)

    # Every column except the date is a model feature.
    feature_columns = [col for col in wide.columns if col != 'date']
    return wide[feature_columns].values, feature_columns, wide['date']


def perform_pca_analysis():
    """Run the full PCA pipeline: load data, standardize, reduce, verify,
    persist results to CSV/JSON, and render charts.

    Returns:
        (pca_model, X_reduced, results) — the fitted PCAFromScratch model,
        the reduced data array, and the results dict that was written to
        pca_from_scratch_results.json.
    """
    print("=" * 60)
    print("高维数据PCA降维分析 - 从零实现")
    print("作者: QZ251_徐中秋_2025720841")
    print("日期: 2025/10/10")
    print("=" * 60)
    
    # 1. Load the per-stock JSON files into one long-format frame.
    print("\n1. 加载股票数据...")
    df = load_stock_data()
    print(f"数据形状: {df.shape}")
    print(f"股票代码: {df['symbol'].unique()}")
    
    # 2. Pivot into one row per date, one column per (stock, field).
    print("\n2. 创建特征矩阵...")
    X, feature_names, dates = create_feature_matrix(df)
    print(f"特征矩阵形状: {X.shape}")
    print(f"特征维度: {len(feature_names)}")
    
    # 3. Standardize so volume's scale doesn't dominate the price columns.
    print("\n3. 数据标准化...")
    scaler = StandardScaler()
    X_std = scaler.fit_transform(X)
    print("标准化完成")
    
    # 4. Fit and apply the from-scratch PCA.
    print("\n4. 执行PCA降维...")
    n_components = 7  # chosen to retain ~95% of the variance
    pca_model = PCAFromScratch(n_components=n_components)
    X_reduced = pca_model.fit_transform(X_std)
    
    print(f"降维后数据形状: {X_reduced.shape}")
    print(f"解释方差比例: {pca_model.explained_variance_ratio_}")
    print(f"累积解释方差: {np.sum(pca_model.explained_variance_ratio_):.2%}")
    
    # 5. Sanity-check the implementation (orthogonality, variance, etc.).
    print("\n5. 验证PCA实现...")
    verification_results = verify_implementation(pca_model, X_std)
    
    # 6. Persist the reduced data and the analysis summary.
    print("\n6. 保存分析结果...")
    
    # Reduced data as CSV: one PC column per component plus the date.
    reduced_df = pd.DataFrame(X_reduced, columns=[f'PC{i+1}' for i in range(n_components)])
    reduced_df['date'] = dates
    reduced_df.to_csv('pca_from_scratch_reduced_data.csv', index=False)
    print("降维数据已保存到: pca_from_scratch_reduced_data.csv")
    
    # Full analysis summary as JSON (metadata, variance stats, loadings).
    results = {
        'analysis_info': {
            'author': 'QZ251_徐中秋_2025720841',
            'date': '2025/10/10',
            'description': '高维数据PCA降维分析 - 从零实现',
            'stock_symbols': ['AAPL', 'GOOGL', 'MSFT', 'AMZN', 'TSLA'],
            'original_dimensions': X.shape[1],
            'reduced_dimensions': n_components,
            'total_samples': X.shape[0]
        },
        'pca_results': {
            'explained_variance_ratio': pca_model.explained_variance_ratio_.tolist(),
            'explained_variance': pca_model.explained_variance_.tolist(),
            'cumulative_variance_ratio': np.cumsum(pca_model.explained_variance_ratio_).tolist(),
            'total_variance_explained': float(np.sum(pca_model.explained_variance_ratio_)),
            'information_loss': float(1 - np.sum(pca_model.explained_variance_ratio_)),
            'dimensionality_reduction_ratio': float((X.shape[1] - n_components) / X.shape[1])
        },
        'verification_results': verification_results,
        'feature_names': feature_names,
        'component_analysis': {
            f'PC{i+1}': {
                'variance_explained': float(pca_model.explained_variance_ratio_[i]),
                'cumulative_variance': float(np.sum(pca_model.explained_variance_ratio_[:i+1])),
                'loadings': pca_model.components_[i].tolist()
            }
            for i in range(n_components)
        }
    }
    
    with open('pca_from_scratch_results.json', 'w', encoding='utf-8') as f:
        json.dump(results, f, ensure_ascii=False, indent=2)
    print("分析结果已保存到: pca_from_scratch_results.json")
    
    # 7. Render the diagnostic charts.
    print("\n7. 生成可视化图表...")
    create_visualization_plots(pca_model, X_reduced, results)
    
    print("\n" + "=" * 60)
    print("PCA分析完成！")
    print("=" * 60)
    
    return pca_model, X_reduced, results


def create_visualization_plots(pca_model, X_reduced, results, X_std=None):
    """
    Render the PCA diagnostic charts into the from_scratch_charts/ directory.

    Args:
        pca_model: fitted PCAFromScratch instance.
        X_reduced: data projected onto the principal components.
        results: analysis-result dict (accepted for API compatibility; not
            read here).
        X_std: optional standardized original data. When provided, the
            reconstruction-error curve is computed from it. (The previous
            version referenced an undefined name `X_std` here and always
            raised NameError.)
    """
    import os  # local import keeps the file-level import block untouched

    # savefig cannot create directories; make sure the target exists.
    os.makedirs('from_scratch_charts', exist_ok=True)

    plt.style.use('default')

    # Chinese-capable font, and keep minus signs rendering correctly.
    plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
    plt.rcParams['axes.unicode_minus'] = False

    # --- Figure 1: per-component and cumulative explained variance ---
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))

    n_components = len(pca_model.explained_variance_ratio_)
    components = range(1, n_components + 1)
    variance_ratios = pca_model.explained_variance_ratio_
    cumulative_ratios = np.cumsum(variance_ratios)

    bars = ax1.bar(components, variance_ratios, color='skyblue', alpha=0.7, edgecolor='navy')
    ax1.set_xlabel('主成分编号')
    ax1.set_ylabel('方差解释比例')
    ax1.set_title('PCA主成分方差解释比例')
    ax1.grid(True, alpha=0.3)

    # Value labels above each bar.
    for bar, ratio in zip(bars, variance_ratios):
        height = bar.get_height()
        ax1.text(bar.get_x() + bar.get_width()/2., height + 0.01,
                f'{ratio:.3f}', ha='center', va='bottom', fontsize=9)

    # Cumulative explained-variance curve with the 95% threshold marked.
    ax2.plot(components, cumulative_ratios, 'ro-', linewidth=2, markersize=6)
    ax2.axhline(y=0.95, color='red', linestyle='--', alpha=0.7, label='95%阈值')
    ax2.axvline(x=n_components, color='green', linestyle='--', alpha=0.7, label=f'{n_components}个主成分')
    ax2.set_xlabel('主成分编号')
    ax2.set_ylabel('累积方差解释比例')
    ax2.set_title('累积方差解释比例')
    ax2.grid(True, alpha=0.3)
    ax2.legend()

    for i, ratio in enumerate(cumulative_ratios):
        ax2.text(i+1, ratio + 0.02, f'{ratio:.3f}', ha='center', va='bottom', fontsize=9)

    plt.tight_layout()
    plt.savefig('from_scratch_charts/pca_variance_analysis_from_scratch.png', dpi=300, bbox_inches='tight')
    plt.close()

    # --- Figure 2: 2x2 comprehensive analysis ---
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 12))

    # Loadings heatmap (features x components).
    loadings = pca_model.components_.T
    im = ax1.imshow(loadings, cmap='RdBu_r', aspect='auto')
    ax1.set_title('主成分载荷矩阵')
    ax1.set_xlabel('主成分')
    ax1.set_ylabel('原始特征')
    ax1.set_xticks(range(n_components))
    ax1.set_xticklabels([f'PC{i+1}' for i in range(n_components)])

    cbar = plt.colorbar(im, ax=ax1)
    cbar.set_label('载荷系数')

    # Scatter of the first two principal components.
    ax2.scatter(X_reduced[:, 0], X_reduced[:, 1], alpha=0.6, c='blue')
    ax2.set_xlabel(f'第一主成分 (解释方差: {variance_ratios[0]:.1%})')
    ax2.set_ylabel(f'第二主成分 (解释方差: {variance_ratios[1]:.1%})')
    ax2.set_title('前两个主成分散点图')
    ax2.grid(True, alpha=0.3)

    # Reconstruction error vs component count — requires the original
    # standardized data, so only drawn when X_std was supplied.
    if X_std is not None:
        reconstruction_errors = []
        for i in range(1, min(n_components + 1, 11)):
            temp_pca = PCAFromScratch(n_components=i)
            temp_pca.fit(X_std)
            X_temp = temp_pca.transform(X_std)
            X_reconstructed = temp_pca.inverse_transform(X_temp)
            error = np.mean(np.sum((X_std - X_reconstructed) ** 2, axis=1))
            reconstruction_errors.append(error)

        ax3.plot(range(1, len(reconstruction_errors) + 1), reconstruction_errors, 'go-', linewidth=2)
    else:
        ax3.text(0.5, 0.5, 'X_std not provided', ha='center', va='center', transform=ax3.transAxes)
    ax3.set_xlabel('主成分数量')
    ax3.set_ylabel('重构误差')
    ax3.set_title('重构误差 vs 主成分数量')
    ax3.grid(True, alpha=0.3)

    # Information-retention curve (same data as cumulative variance).
    info_retention = np.cumsum(variance_ratios)
    ax4.plot(components, info_retention, 'mo-', linewidth=2, markersize=6)
    ax4.axhline(y=0.95, color='red', linestyle='--', alpha=0.7, label='95%阈值')
    ax4.set_xlabel('主成分数量')
    ax4.set_ylabel('信息保留率')
    ax4.set_title('信息保留率')
    ax4.grid(True, alpha=0.3)
    ax4.legend()

    plt.tight_layout()
    plt.savefig('from_scratch_charts/pca_comprehensive_analysis_from_scratch.png', dpi=300, bbox_inches='tight')
    plt.close()

    print("可视化图表已保存到 from_scratch_charts/ 目录")

if __name__ == "__main__":
    try:
        # Run the end-to-end pipeline.
        pca_model, X_reduced, results = perform_pca_analysis()

        # Echo the headline numbers from the saved results dict.
        info = results['analysis_info']
        pca_stats = results['pca_results']
        print(f"\n关键结果摘要:")
        print(f"原始维度: {info['original_dimensions']}")
        print(f"降维后维度: {info['reduced_dimensions']}")
        print(f"信息保留率: {pca_stats['total_variance_explained']:.2%}")
        print(f"信息损失: {pca_stats['information_loss']:.2%}")
        print(f"维度减少率: {pca_stats['dimensionality_reduction_ratio']:.2%}")

    except Exception as e:
        # Top-level boundary: report the failure with a full traceback.
        print(f"分析过程中出现错误: {e}")
        import traceback
        traceback.print_exc()
