import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from torchvision.datasets import FashionMNIST
from torchvision.transforms import ToTensor
import torch

# Register CJK-capable fonts so the Chinese plot titles/labels render
# correctly (matplotlib falls through the list in order).
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]

# Human-readable Fashion-MNIST class names (Chinese), indexed by label 0-9.
CLASS_NAMES = [
    "T恤/上衣", "裤子", "套衫", "连衣裙", "外套",
    "凉鞋", "衬衫", "运动鞋", "包", "短靴"
]


def load_fashion_mnist(sample_size=1000):
    """Download (if needed) the Fashion-MNIST training set and return the
    first ``sample_size`` samples as numpy arrays.

    Returns:
        (images, labels): ``images`` has shape (sample_size, 28, 28) with
        pixel values scaled into [0, 1]; ``labels`` has shape (sample_size,).
    """
    dataset = FashionMNIST(root='./data', train=True, download=True,
                           transform=ToTensor())

    # Slice out the requested number of samples and scale uint8 pixels
    # into the [0, 1] float range.
    images = dataset.data.numpy()[:sample_size] / 255.0
    labels = dataset.targets.numpy()[:sample_size]

    print(f"成功加载Fashion-MNIST数据集，采样 {sample_size} 个样本")
    print(f"图像形状: {images.shape} (样本数, 高度, 宽度)")
    print(f"标签形状: {labels.shape}")

    return images, labels


class CustomPCA:
    """Principal component analysis implemented from scratch with numpy.

    Input data is standardized (zero mean, unit variance per feature)
    before the eigendecomposition of its covariance matrix.

    Args:
        n_components: if a float in (0, 1], keep the smallest number of
            components whose cumulative explained-variance ratio reaches
            it; if an int, keep that many components (capped at the
            number of features).
    """

    def __init__(self, n_components=0.95):
        self.n_components = n_components
        self.components_ = None          # (n_components_, n_features), rows are PCs
        self.explained_variance_ = None  # eigenvalues, descending
        self.explained_variance_ratio_ = None  # eigenvalue / total variance
        self.mean_ = None                # per-feature mean of the training data
        self.std_ = None                 # per-feature std (zeros replaced by 1)
        self.n_components_ = None        # number of components actually kept

    def fit(self, X):
        """Fit the model on X of shape (n_samples, n_features); return self."""
        # 1. Standardize each feature to zero mean / unit variance.
        self.mean_ = np.mean(X, axis=0)
        self.std_ = np.std(X, axis=0)
        self.std_[self.std_ == 0] = 1.0  # constant features: avoid divide-by-zero
        X_scaled = (X - self.mean_) / self.std_

        # 2. Covariance matrix between features, shape (n_features, n_features).
        cov_matrix = np.cov(X_scaled.T)

        # 3. Eigendecomposition. eigh is the right choice for a symmetric
        # matrix; it returns eigenvalues in ascending order. Numerical noise
        # can make near-zero eigenvalues slightly negative, so clamp them to
        # keep the variance ratios in [0, 1].
        eigenvalues, eigenvectors = np.linalg.eigh(cov_matrix)
        eigenvalues = np.clip(eigenvalues, 0.0, None)

        # 4. Sort descending by eigenvalue.
        order = np.argsort(eigenvalues)[::-1]
        self.explained_variance_ = eigenvalues[order]
        self.components_ = eigenvectors[:, order].T  # row i = i-th principal axis

        # 5. Fraction of total variance explained by each component.
        total_variance = np.sum(self.explained_variance_)
        self.explained_variance_ratio_ = self.explained_variance_ / total_variance

        # 6. Decide how many components to keep.
        if isinstance(self.n_components, float):
            # Float: treat as the target cumulative variance ratio.
            cumulative = np.cumsum(self.explained_variance_ratio_)
            reached = np.nonzero(cumulative >= self.n_components)[0]
            # Fall back to all components when floating-point rounding keeps
            # the cumulative sum just below the requested ratio (e.g. 1.0).
            self.n_components_ = int(reached[0]) + 1 if reached.size else len(cumulative)
        else:
            # Int: an explicit component count, capped at n_features.
            self.n_components_ = min(self.n_components, X.shape[1])

        # Truncate everything to the retained components.
        self.components_ = self.components_[:self.n_components_]
        self.explained_variance_ = self.explained_variance_[:self.n_components_]
        self.explained_variance_ratio_ = self.explained_variance_ratio_[:self.n_components_]

        return self

    def transform(self, X):
        """Project X onto the principal components.

        Returns an array of shape (n_samples, n_components_).
        """
        X_scaled = (X - self.mean_) / self.std_
        # (n_samples, n_features) @ (n_features, n_components) -> scores
        return np.dot(X_scaled, self.components_.T)

    def inverse_transform(self, X_pca):
        """Map projected data back to the original feature space (approximate
        unless all components were kept)."""
        # (n_samples, n_components) @ (n_components, n_features)
        X_reconstructed_scaled = np.dot(X_pca, self.components_)
        # Undo the standardization applied in fit().
        return X_reconstructed_scaled * self.std_ + self.mean_


def apply_pca(images, n_components=0.95):
    """Fit PCA on a stack of images and reconstruct them from the
    reduced representation.

    Args:
        images: array of shape (n_samples, height, width), values in [0, 1].
        n_components: forwarded to CustomPCA.

    Returns:
        (reconstructed_images, fitted_pca, flattened_X)
    """
    n_samples, height, width = images.shape

    # Flatten each image into a single feature vector.
    X = images.reshape(n_samples, -1)

    # Fit the model, then round-trip the data through the reduced space.
    pca = CustomPCA(n_components=n_components)
    X_pca = pca.fit(X).transform(X)
    X_back = pca.inverse_transform(X_pca)

    # Restore the image shape and keep pixel values inside [0, 1].
    reconstructed_images = np.clip(X_back.reshape(n_samples, height, width), 0, 1)

    return reconstructed_images, pca, X


def visualize_results(original, reconstructed, labels, pca, num_display=10):
    """Show randomly chosen original vs. PCA-reconstructed image pairs,
    then plot the cumulative explained-variance curve.

    Args:
        original, reconstructed: arrays of shape (n_samples, H, W).
        labels: integer class labels indexing CLASS_NAMES.
        pca: fitted CustomPCA (explained_variance_ratio_ is read).
        num_display: how many random samples to display.
    """
    # Pick num_display distinct samples at random.
    indices = np.random.choice(len(original), num_display, replace=False)

    # Each sample occupies two slots (original + reconstruction), four
    # images per row. Round the row count up so an odd num_display still
    # fits — num_display // 2 rows would overflow the grid and make
    # plt.subplot raise for e.g. num_display=5.
    n_rows = (num_display + 1) // 2
    plt.figure(figsize=(15, 4 * num_display // 2))

    for i, idx in enumerate(indices):
        # Original image
        plt.subplot(n_rows, 4, 2 * i + 1)
        plt.imshow(original[idx], cmap='gray')
        plt.title(f"原始图像: {CLASS_NAMES[labels[idx]]}")
        plt.axis('off')

        # Reconstructed image
        plt.subplot(n_rows, 4, 2 * i + 2)
        plt.imshow(reconstructed[idx], cmap='gray')
        plt.title("PCA重构图像")
        plt.axis('off')

    plt.tight_layout()
    plt.show()

    # Cumulative explained variance over the retained components.
    plt.figure(figsize=(10, 6))
    plt.plot(np.cumsum(pca.explained_variance_ratio_), 'b-')
    plt.xlabel('主成分数量')
    plt.ylabel('累计解释方差比例')
    plt.title(f'PCA累计解释方差 (保留{sum(pca.explained_variance_ratio_):.2%}方差)')
    plt.grid(True)
    plt.axhline(y=sum(pca.explained_variance_ratio_), color='r', linestyle='--',
                label=f'阈值: {sum(pca.explained_variance_ratio_):.2%}')
    plt.legend()
    plt.show()


def main():
    """Entry point: load data, run PCA, report dimensions, visualize."""
    images, labels = load_fashion_mnist(sample_size=1000)

    # Keep enough principal components to explain 95% of the variance.
    variance_target = 0.95

    reconstructed_images, pca, X = apply_pca(images, variance_target)

    # Report how much the feature space shrank.
    print(f"原始特征维度: {X.shape[1]} (28x28=784)")
    print(f"降维后特征维度: {pca.n_components_}")
    print(f"保留的方差比例: {sum(pca.explained_variance_ratio_):.2%}")

    visualize_results(images, reconstructed_images, labels, pca, num_display=10)


# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
