import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.manifold import Isomap, LocallyLinearEmbedding, TSNE
from sklearn.manifold import SpectralEmbedding
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import seaborn as sns
import os
# Use the SimHei font so the Chinese labels/titles in the figures render correctly.
plt.rcParams["font.family"] = ["SimHei"]
# Fix the NumPy RNG seed so the random subsampling below is reproducible.
np.random.seed(42)

# 模拟加载MNIST数据（实际使用时请从Kaggle下载并修改文件路径）
def load_mnist_data(n_samples=5000):
    """Download MNIST ('mnist_784') from OpenML, standardize it, and return a random subsample.

    Parameters
    ----------
    n_samples : int, default 5000
        Number of samples to keep (drawn without replacement) to bound the
        cost of the manifold-learning algorithms downstream.

    Returns
    -------
    tuple of ndarray
        ``(X, y)`` where ``X`` has shape ``(n_samples, 784)`` (standardized
        pixel features) and ``y`` has shape ``(n_samples,)`` (integer labels).

    Notes
    -----
    Requires network access on first call; ``fetch_openml`` caches locally
    afterwards.
    """
    from sklearn.datasets import fetch_openml
    print("正在加载MNIST数据集...")
    X, y = fetch_openml('mnist_784', version=1, return_X_y=True, as_frame=False)
    X = X.astype(float)
    y = y.astype(int)

    # Standardize each pixel column to zero mean / unit variance.
    X = StandardScaler().fit_transform(X)

    # Subsample (without replacement) to keep computation tractable.
    indices = np.random.choice(len(X), n_samples, replace=False)
    X, y = X[indices], y[indices]

    print(f"数据集加载完成，样本数: {len(X)}, 特征数: {X.shape[1]}")
    return X, y

# 显示原始图像
def display_original_images(X, y, n=10):
    """Plot ``n`` randomly chosen samples from ``X`` as 28x28 grayscale images.

    Saves the figure to 'original_mnist_images.png' and shows it.

    Parameters
    ----------
    X : ndarray of shape (m, 784)
        Flattened 28x28 images. NOTE: upstream standardization means values
        are z-scores, not raw intensities — digits are still recognizable.
    y : ndarray of shape (m,)
        Integer labels used for the subplot titles.
    n : int, default 10
        Number of images to display (clamped to ``len(X)``).
    """
    # Guard against asking for more images than there are samples.
    n = min(n, len(X))
    # Draw distinct indices so the same image is never shown twice
    # (the original per-slot randint could repeat a sample).
    chosen = np.random.choice(len(X), n, replace=False)
    plt.figure(figsize=(12, 4))
    for i, idx in enumerate(chosen):
        img = X[idx].reshape(28, 28)
        plt.subplot(1, n, i + 1)
        plt.imshow(img, cmap='gray')
        plt.title(f"数字: {y[idx]}")
        plt.axis('off')
    plt.tight_layout()
    plt.savefig('original_mnist_images.png')
    plt.show()
    print("原始图像显示完成")

# 使用PCA降维并可视化
def pca_visualization(X, y):
    """Project ``X`` to 2-D with PCA, scatter-plot colored by label, annotate class centers.

    Saves the figure to 'pca_mnist_visualization.png' and shows it.

    Parameters
    ----------
    X : ndarray of shape (m, d)
        Feature matrix.
    y : ndarray of shape (m,)
        Integer digit labels in 0..9 used for coloring and annotation.

    Returns
    -------
    X_pca : ndarray of shape (m, 2)
        The 2-D PCA embedding.
    """
    print("正在执行PCA降维...")
    # Fit a 2-component PCA and project the data.
    pca = PCA(n_components=2)
    X_pca = pca.fit_transform(X)

    # Total fraction of variance captured by the two components.
    explained_variance = pca.explained_variance_ratio_.sum()
    print(f"PCA降维完成，解释方差比例: {explained_variance:.4f}")

    plt.figure(figsize=(10, 8))
    scatter = plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y, cmap='viridis', alpha=0.7)
    plt.colorbar(scatter, label='数字类别')
    plt.title(f'PCA降维结果 (解释方差: {explained_variance:.2%})')
    plt.xlabel('主成分1')
    plt.ylabel('主成分2')

    # Annotate the mean embedding position of each digit class. Skip classes
    # absent from the subsample so np.mean never runs on an empty slice
    # (which would produce NaN and a RuntimeWarning).
    for digit in range(10):
        mask = np.where(y == digit)
        if len(mask[0]) == 0:
            continue
        # Distinct names (cx, cy) — the original unpacked into `y`,
        # shadowing the label array.
        cx, cy = np.mean(X_pca[mask], axis=0)
        plt.annotate(str(digit), (cx, cy), fontsize=12, fontweight='bold')

    plt.savefig('pca_mnist_visualization.png')
    plt.show()
    print("PCA降维可视化完成")
    return X_pca

# 实现多种降维算法并可视化
def visualize_dimensionality_reduction(X, y):
    """Run several 2-D embeddings and plot them side by side in one figure.

    Algorithms: PCA, Isomap, LLE, Laplacian eigenmaps (SpectralEmbedding),
    and t-SNE. t-SNE is run on a random subset of 1000 points to keep its
    runtime reasonable. Saves the figure to
    'dimensionality_reduction_comparison.png' and shows it.

    Parameters
    ----------
    X : ndarray of shape (m, d)
        Feature matrix.
    y : ndarray of shape (m,)
        Integer digit labels in 0..9 used for coloring.
    """
    reducers = [
        ('PCA', PCA(n_components=2)),
        ('Isomap', Isomap(n_components=2, n_neighbors=10)),
        ('LLE', LocallyLinearEmbedding(n_components=2, n_neighbors=10, method='standard')),
        ('拉普拉斯特征映射', SpectralEmbedding(n_components=2, n_neighbors=10)),
        ('t-SNE', TSNE(n_components=2, perplexity=30, random_state=42))
    ]

    plt.figure(figsize=(15, 12))

    # Use a dedicated subplot index; the original reused `i` for both the
    # reducer loop and the t-SNE center loop, clobbering the outer counter.
    for plot_idx, (name, reducer) in enumerate(reducers):
        print(f"正在执行{name}降维...")
        try:
            if name == 't-SNE':
                # t-SNE scales poorly with sample count — subsample 1000 points.
                subset_size = 1000
                indices = np.random.choice(len(X), subset_size, replace=False)
                X_subset, y_subset = X[indices], y[indices]
                X_reduced = reducer.fit_transform(X_subset)
                # BUGFIX: colors must match the 1000-point subset; the original
                # passed the full `y`, causing a length-mismatch error that was
                # silently caught and rendered the t-SNE panel as an error box.
                colors = y_subset
            else:
                X_reduced = reducer.fit_transform(X)
                colors = y

            plt.subplot(2, 3, plot_idx + 1)
            scatter = plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=colors, cmap='viridis', alpha=0.7)
            plt.colorbar(scatter, label='数字类别')
            plt.title(name)
            plt.xlabel('维度1')
            plt.ylabel('维度2')

            # For t-SNE, annotate the mean embedding position of each class.
            if name == 't-SNE':
                for digit in range(10):
                    mask = np.where(y_subset == digit)
                    if len(mask[0]) == 0:  # class absent from the subset
                        continue
                    # Distinct names (cx, cy) — the original unpacked into `y`,
                    # shadowing the label array.
                    cx, cy = np.mean(X_reduced[mask], axis=0)
                    plt.annotate(str(digit), (cx, cy), fontsize=10, fontweight='bold')

            print(f"{name}降维完成")
        except Exception as e:
            # Best-effort: render the failure message in the panel and move on.
            print(f"执行{name}降维时出错: {e}")
            plt.subplot(2, 3, plot_idx + 1)
            plt.text(0.5, 0.5, f"错误: {e}", ha='center', va='center', transform=plt.gca().transAxes)
            plt.axis('off')

    plt.tight_layout()
    plt.savefig('dimensionality_reduction_comparison.png')
    plt.show()
    print("所有降维算法可视化完成")

# 主函数
def main():
    """Entry point: load the MNIST subsample, then run each visualization stage in order."""
    features, labels = load_mnist_data()

    # Show a sample of the (standardized) digit images.
    display_original_images(features, labels)

    # 2-D PCA projection with per-class center annotations.
    pca_visualization(features, labels)

    # Side-by-side comparison of several embedding algorithms.
    visualize_dimensionality_reduction(features, labels)


if __name__ == "__main__":
    main()