import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE, Isomap, LocallyLinearEmbedding, SpectralEmbedding
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_score
import os
import time
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import ParameterGrid
import seaborn as sns

# Limit BLAS/OpenMP threading to avoid OpenBLAS multi-threading issues.
# NOTE(review): these variables are read when the BLAS runtime initializes;
# numpy is imported above, so they may be set too late to take effect —
# consider moving them above the numpy import.
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'

# Create the output directory for figures. exist_ok=True avoids the
# TOCTOU race of the previous exists()-then-makedirs() pattern.
os.makedirs('results', exist_ok=True)


def load_and_preprocess_data(sample_size=None, random_state=42):
    """Load the MNIST dataset from OpenML and standardize it.

    Args:
        sample_size: optional number of rows to keep; when given and
            smaller than the dataset, a random subsample is drawn
            without replacement.
        random_state: seed for the subsampling RNG.

    Returns:
        (X_scaled, y, feature_names): standardized features, uint8
        labels, and the OpenML feature names.
    """
    print("正在加载MNIST数据集...")
    mnist = fetch_openml('mnist_784', version=1, as_frame=False, parser='auto')
    X, y = mnist.data, mnist.target.astype(np.uint8)

    # Standardize every pixel feature (fit on the FULL dataset, before
    # any subsampling).
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)

    # Subsample if requested. A local RandomState avoids mutating the
    # global NumPy RNG as a side effect; RandomState(seed).choice yields
    # the same indices the previous seed-then-global-choice produced.
    if sample_size is not None and sample_size < len(X):
        rng = np.random.RandomState(random_state)
        sample_idx = rng.choice(len(X), sample_size, replace=False)
        X_scaled = X_scaled[sample_idx]
        y = y[sample_idx]

    return X_scaled, y, mnist.feature_names


def visualize_original_images(X, y, n_samples=30, save_path=None):
    """Display the first n_samples flattened 28x28 MNIST images in a grid.

    Args:
        X: array of flattened images, shape (n, 784).
        y: labels, shown as subplot titles.
        n_samples: number of images to display.
        save_path: optional path to save the figure to.
    """
    # Generalized: the previous hard-coded 5x6 grid raised for
    # n_samples > 30. The default still produces the same 5x6 layout.
    n_samples = min(n_samples, len(X))
    n_cols = 6
    n_rows = (n_samples + n_cols - 1) // n_cols  # ceiling division

    plt.figure(figsize=(12, 10))
    for i in range(n_samples):
        plt.subplot(n_rows, n_cols, i + 1)
        plt.imshow(X[i].reshape(28, 28), cmap='gray_r')
        plt.title(f"Label: {y[i]}", fontsize=10)
        plt.axis('off')
    plt.suptitle('Original MNIST Images', fontsize=20)
    plt.tight_layout()

    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.show()


def visualize_pca(X, y, save_path=None):
    """Project X onto its first two principal components and plot them.

    Prints the per-component and cumulative explained-variance ratios,
    optionally saves the figure, then shows it.

    Returns:
        (embedding, variance_ratio): the 2-D PCA embedding and the
        explained-variance ratios of both components.
    """
    print("正在执行PCA降维...")
    model = PCA(n_components=2)
    embedding = model.fit_transform(X)

    variance_ratio = model.explained_variance_ratio_
    print(f"PCA解释方差比例: {variance_ratio}")
    print(f"PCA累计解释方差: {sum(variance_ratio):.4f}")

    plt.figure(figsize=(14, 12))
    points = plt.scatter(embedding[:, 0], embedding[:, 1], c=y, cmap='tab10',
                         alpha=0.6, s=5, edgecolor='none')
    plt.colorbar(points, ticks=range(10), label='Digit Class')
    plt.xlabel(f'Principal Component 1 ({variance_ratio[0]:.2%})', fontsize=12)
    plt.ylabel(f'Principal Component 2 ({variance_ratio[1]:.2%})', fontsize=12)
    plt.title('PCA Projection of MNIST Dataset', fontsize=18)
    plt.grid(alpha=0.2)

    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.show()

    return embedding, variance_ratio


def visualize_pca_3d(X, y, save_path=None):
    """Project X onto its first three principal components and plot in 3-D.

    Prints the per-component and cumulative explained-variance ratios,
    optionally saves the figure, then shows it.

    Returns:
        (embedding, variance_ratio): the 3-D PCA embedding and the
        explained-variance ratios of the three components.
    """
    print("正在执行PCA 3D降维...")
    model = PCA(n_components=3)
    embedding = model.fit_transform(X)

    variance_ratio = model.explained_variance_ratio_
    print(f"PCA 3D解释方差比例: {variance_ratio}")
    print(f"PCA 3D累计解释方差: {sum(variance_ratio):.4f}")

    fig = plt.figure(figsize=(14, 12))
    ax = fig.add_subplot(111, projection='3d')

    points = ax.scatter(embedding[:, 0], embedding[:, 1], embedding[:, 2],
                        c=y, cmap='tab10', alpha=0.6, s=5)
    # Label each axis with its component's share of the variance.
    for set_label, label, ratio in ((ax.set_xlabel, 'PC1', variance_ratio[0]),
                                    (ax.set_ylabel, 'PC2', variance_ratio[1]),
                                    (ax.set_zlabel, 'PC3', variance_ratio[2])):
        set_label(f'{label} ({ratio:.2%})', fontsize=12)
    plt.title('3D PCA Projection of MNIST Dataset', fontsize=18)

    fig.colorbar(points, ax=ax, ticks=range(10), label='Digit Class')

    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.show()

    return embedding, variance_ratio


def optimize_tsne(X, y, n_iter=500, n_jobs=-1, random_state=42):
    """Grid-search t-SNE hyper-parameters on a subsample of X.

    Tries every (perplexity, learning_rate) combination, embeds a
    subsample of at most 5000 points, and scores each embedding with
    the silhouette coefficient against the labels y.

    Args:
        X: feature matrix.
        y: labels used for the silhouette score.
        n_iter: number of t-SNE optimization iterations.
        n_jobs: parallelism passed through to TSNE.
        random_state: seed for the subsampling RNG (new, defaulted
            parameter; previously the subsample was drawn from the
            unseeded global RNG and so was not reproducible).

    Returns:
        dict with the best 'perplexity', 'learning_rate', 'silhouette'
        and its embedding 'X_tsne', or None if every combination failed.
    """
    print("正在优化t-SNE参数...")

    # Hyper-parameter grid to explore.
    param_grid = {
        'perplexity': [10, 30, 50, 100],
        'learning_rate': ['auto', 50, 100, 200]
    }

    results = []

    # Subsample: t-SNE is expensive. A seeded local RandomState makes
    # the search reproducible without touching global RNG state.
    rng = np.random.RandomState(random_state)
    sample_idx = rng.choice(len(X), min(5000, len(X)), replace=False)
    X_sample = X[sample_idx]
    y_sample = y[sample_idx]

    for params in ParameterGrid(param_grid):
        print(f"尝试参数: {params}")
        try:
            # NOTE(review): TSNE's `n_iter` was renamed `max_iter` in
            # newer scikit-learn releases — confirm against the pinned
            # version before upgrading.
            tsne = TSNE(n_components=2, n_iter=n_iter, random_state=42,
                        n_jobs=n_jobs, **params)
            X_tsne = tsne.fit_transform(X_sample)

            # Higher silhouette => better-separated digit clusters.
            silhouette = silhouette_score(X_tsne, y_sample)

            results.append({
                'perplexity': params['perplexity'],
                'learning_rate': params['learning_rate'],
                'silhouette': silhouette,
                'X_tsne': X_tsne
            })

            print(f"  轮廓系数: {silhouette:.4f}")
        except Exception as e:
            # A failing combination is reported but does not abort the grid.
            print(f"  参数 {params} 尝试失败: {e}")

    # Pick the combination with the highest silhouette score.
    if results:
        best_result = max(results, key=lambda x: x['silhouette'])
        print(f"\n最佳参数: perplexity={best_result['perplexity']}, "
              f"learning_rate={best_result['learning_rate']}")
        print(f"最佳轮廓系数: {best_result['silhouette']:.4f}")
        return best_result
    else:
        print("所有参数尝试均失败，使用默认参数")
        return None


def compare_dimensionality_reduction(X, y, n_samples=2000, save_path=None):
    """Compare five dimensionality-reduction methods on a single figure.

    Runs PCA, Isomap, LLE, Laplacian Eigenmaps and t-SNE on (a subsample
    of) X, plots each 2-D embedding in its own subplot, and records wall
    time per method plus (except for t-SNE) the silhouette score of the
    embedding against the digit labels.

    Args:
        X: feature matrix.
        y: digit labels used to colour plots and score embeddings.
        n_samples: maximum number of rows to use (random subsample).
        save_path: optional path to save the figure to.

    Returns:
        list of dicts with keys 'name', 'X_red', 'time', 'score'
        ('X_red'/'time'/'score' are None for methods that failed).
    """
    # Subsample: the manifold methods scale badly with n. A local
    # RandomState yields the same indices as the previous
    # np.random.seed(42) + global choice, without mutating global state.
    if len(X) > n_samples:
        rng = np.random.RandomState(42)
        sample_idx = rng.choice(len(X), n_samples, replace=False)
        X_sample = X[sample_idx]
        y_sample = y[sample_idx]
    else:
        X_sample, y_sample = X, y

    # (name, estimator) pairs; each produces a 2-D embedding.
    methods = [
        ("PCA", PCA(n_components=2)),
        ("Isomap", Isomap(n_components=2, n_neighbors=10)),
        ("LLE", LocallyLinearEmbedding(n_components=2, n_neighbors=10, method='standard')),
        ("Laplacian Eigenmaps", SpectralEmbedding(n_components=2, n_neighbors=10)),
        ("t-SNE", TSNE(n_components=2, perplexity=30, n_iter=1000, random_state=42))
    ]

    results = []
    times = {}
    scores = {}

    plt.figure(figsize=(20, 16))
    print("正在进行多种降维方法可视化...")

    for i, (name, model) in enumerate(methods, 1):
        plt.subplot(2, 3, i)

        # Run the reduction and time it.
        print(f"正在执行 {name} 降维...")
        start_time = time.time()

        try:
            X_red = model.fit_transform(X_sample)
            elapsed = time.time() - start_time
            times[name] = elapsed
            print(f"{name} 完成，耗时: {elapsed:.2f}秒")

            # Silhouette score (skipped for t-SNE, which is very slow).
            if name != "t-SNE":
                try:
                    score = silhouette_score(X_red, y_sample)
                    scores[name] = score
                    print(f"{name} 轮廓系数: {score:.4f}")
                except Exception:
                    # Narrowed from a bare `except:` so KeyboardInterrupt
                    # and SystemExit are no longer swallowed.
                    scores[name] = None
                    print(f"{name} 无法计算轮廓系数")

            # Plot the embedding, coloured by digit label.
            scatter = plt.scatter(X_red[:, 0], X_red[:, 1], c=y_sample,
                                  cmap='tab10', alpha=0.8, s=25, edgecolor='none')
            plt.title(f'{name} Projection', fontsize=16)
            plt.xlabel('Component 1', fontsize=12)
            plt.ylabel('Component 2', fontsize=12)
            plt.grid(alpha=0.2)

            results.append({
                'name': name,
                'X_red': X_red,
                'time': elapsed,
                'score': scores.get(name)
            })

            # Only the last subplot gets a colorbar and the summary table.
            if i == len(methods):
                cbar = plt.colorbar(scatter, ticks=range(10), pad=0.05)
                cbar.set_label('Digit Class', fontsize=12)

                # Performance table. `method_name` (not `name`) so the
                # outer loop variable is no longer shadowed.
                table_data = []
                for method_name in times:
                    time_str = f"{times[method_name]:.2f}s"
                    score_str = (f"{scores[method_name]:.4f}"
                                 if scores.get(method_name) is not None else "N/A")
                    table_data.append([method_name, time_str, score_str])

                plt.figtext(0.5, 0.01,
                            "Performance Metrics:\nName\tTime\tSilhouette Score\n" +
                            "\n".join([f"{row[0]}\t{row[1]}\t{row[2]}" for row in table_data]),
                            ha="center", fontsize=10, bbox={"facecolor": "orange", "alpha": 0.5, "pad": 5})

        except Exception as e:
            # A failing method gets an error annotation instead of a plot.
            print(f"{name} 降维失败: {e}")
            plt.text(0.5, 0.5, f"Error: {str(e)}", ha='center', va='center')
            results.append({
                'name': name,
                'X_red': None,
                'time': None,
                'score': None
            })

    plt.suptitle('MNIST Visualization with Different Dimensionality Reduction Methods',
                 fontsize=24, y=0.98)
    plt.tight_layout(rect=[0, 0, 1, 0.96])

    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.show()

    return results


def visualize_explained_variance(X, max_components=100, save_path=None):
    """Plot per-component and cumulative PCA explained variance.

    Args:
        X: feature matrix.
        max_components: upper bound on the number of components to fit
            (capped at X.shape[1]).
        save_path: optional path to save the figure to.

    Returns:
        (n_components_95, explained_variance, cumulative_variance) where
        n_components_95 is the number of components needed to explain
        95% of the variance, or None when max_components is too small
        to reach that threshold.
    """
    print(f"正在计算PCA解释方差（最多{max_components}个主成分）...")
    pca = PCA(n_components=min(max_components, X.shape[1]))
    pca.fit(X)

    explained_variance = pca.explained_variance_ratio_
    cumulative_variance = np.cumsum(explained_variance)

    # Bug fix: np.argmax over an all-False mask returns 0, so the old
    # code silently reported "1 component" whenever the fitted
    # components never reached 95% cumulative variance.
    reached = cumulative_variance >= 0.95
    if reached.any():
        n_components_95 = int(np.argmax(reached)) + 1
        print(f"解释95%方差所需的主成分数量: {n_components_95}")
    else:
        n_components_95 = None
        print(f"前{len(explained_variance)}个主成分未能解释95%的方差")

    plt.figure(figsize=(12, 8))

    # Top panel: variance ratio per component.
    plt.subplot(2, 1, 1)
    plt.bar(range(1, len(explained_variance) + 1), explained_variance)
    if n_components_95 is not None:
        plt.axvline(x=n_components_95, color='r', linestyle='--',
                    label=f'95% variance: {n_components_95} components')
        plt.legend()
    plt.xlabel('Number of Components')
    plt.ylabel('Explained Variance Ratio')
    plt.title('Explained Variance Ratio by Principal Components')
    plt.grid(alpha=0.3)

    # Bottom panel: cumulative variance with the 95% threshold line.
    plt.subplot(2, 1, 2)
    plt.plot(range(1, len(cumulative_variance) + 1), cumulative_variance, 'o-')
    if n_components_95 is not None:
        plt.axvline(x=n_components_95, color='r', linestyle='--')
    plt.axhline(y=0.95, color='g', linestyle='--')
    plt.xlabel('Number of Components')
    plt.ylabel('Cumulative Explained Variance')
    plt.title('Cumulative Explained Variance by Principal Components')
    plt.grid(alpha=0.3)

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.show()

    return n_components_95, explained_variance, cumulative_variance


def visualize_feature_importance(X, feature_names, n_components=10, save_path=None):
    """Plot PCA component loadings as a heatmap and list the top features.

    Args:
        X: feature matrix; its columns must correspond to feature_names.
        feature_names: one name per column of X.
        n_components: number of principal components to inspect.
        save_path: optional path to save the figure to.

    Returns:
        loadings: (n_components, n_features) array of component loadings.
    """
    print(f"正在计算PCA特征重要性（前{n_components}个主成分）...")
    # fit() is enough here — the old fit_transform() produced an unused
    # embedding.
    pca = PCA(n_components=n_components)
    pca.fit(X)

    # Loadings: rows = components, columns = original features.
    loadings = pca.components_

    # Bug fix: the old code passed a 51-element xticklabels list for all
    # 784 heatmap columns, which seaborn rejects with a length-mismatch
    # error. A labelled DataFrame plus an integer xticklabels (label
    # every 50th column) keeps real feature names and stays readable.
    loadings_df = pd.DataFrame(
        loadings,
        index=[f'PC{i + 1}' for i in range(n_components)],
        columns=list(feature_names),
    )
    plt.figure(figsize=(14, 10))
    sns.heatmap(loadings_df, annot=False, cmap='viridis', xticklabels=50)
    plt.title('PCA Component Loadings (Feature Importance)', fontsize=16)
    plt.tight_layout()

    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.show()

    # Report the five largest-magnitude loadings for each component.
    for i in range(n_components):
        pc_loadings = np.abs(loadings[i])
        top_indices = np.argsort(pc_loadings)[-5:][::-1]
        print(f"PC{i + 1} 最重要的特征:")
        for idx in top_indices:
            print(f"  {feature_names[idx]}: {loadings[i, idx]:.4f}")
        print()

    return loadings


def main():
    """Run the complete MNIST dimensionality-reduction analysis."""
    # 1. Load and standardize the full dataset.
    X_scaled, y, feature_names = load_and_preprocess_data(sample_size=None)

    # 2. A grid of raw digit images.
    visualize_original_images(X_scaled, y, n_samples=30,
                              save_path='results/original_mnist.png')

    # 3-4. PCA projections in two and three dimensions.
    visualize_pca(X_scaled, y, save_path='results/pca_2d.png')
    visualize_pca_3d(X_scaled, y, save_path='results/pca_3d.png')

    # 5. How much variance the leading components capture.
    visualize_explained_variance(X_scaled, max_components=100,
                                 save_path='results/explained_variance.png')

    # 6. Which pixels drive each principal component.
    visualize_feature_importance(X_scaled, feature_names, n_components=10,
                                 save_path='results/feature_importance.png')

    # 7. Optional t-SNE hyper-parameter search (expensive; disabled).
    # optimize_tsne(X_scaled, y, n_iter=500)

    # 8. Side-by-side comparison of all reduction methods.
    compare_dimensionality_reduction(X_scaled, y, n_samples=2000,
                                     save_path='results/all_methods.png')

    print("\n分析完成! 所有结果已保存到'results'目录。")


if __name__ == "__main__":
    main()
