import numpy as np
import math


class PCAManual:
    """Principal Component Analysis implemented from scratch with NumPy.

    The covariance matrix, the eigendecomposition (power iteration with
    Hotelling deflation) and the projection are all computed manually; NumPy
    is used only for basic array arithmetic.  ``fit`` is intentionally
    verbose: it prints every intermediate result for teaching purposes.
    """

    def __init__(self, n_components, random_state=None):
        """Configure the model.

        Args:
            n_components: number of principal components to keep.
            random_state: optional int seed for the power-iteration start
                vectors; set it to make results (including the arbitrary
                eigenvector signs) reproducible.  The default ``None`` keeps
                the previous unseeded behaviour.
        """
        self.n_components = n_components
        # (n_components, n_features) projection matrix, set by fit().
        self.components = None
        # Per-feature mean of the training data, set by fit().
        self.mean = None
        # Variance fraction per component (descending), set by fit().
        self.explained_variance_ratio = None
        self.random_state = random_state

    def fit(self, X):
        """Fit the PCA model on ``X`` of shape (n_samples, n_features).

        Prints every intermediate step (in Chinese, as in the original demo)
        and returns ``self`` for chaining.
        """
        # 1. Center the data (subtract the per-feature mean).
        self.mean = np.mean(X, axis=0)
        X_normalized = X - self.mean

        print("原始数据:")
        self._print_matrix(X)
        print(f"\n均值: {self.mean}")
        print("\n标准化后的数据:")
        self._print_matrix(X_normalized)

        # 2. Covariance matrix of the centered data.
        covariance_matrix = self._calculate_covariance(X_normalized)
        print("\n协方差矩阵:")
        self._print_matrix(covariance_matrix)

        # 3. Eigenvalues / eigenvectors via power iteration + deflation.
        eigenvalues, eigenvectors = self._eigen_decomposition(covariance_matrix)

        print("\n特征值:")
        for i, val in enumerate(eigenvalues):
            print(f"λ{i + 1} = {val:.6f}")

        print("\n特征向量:")
        for i, vec in enumerate(eigenvectors.T):
            print(f"v{i + 1} = [{', '.join(f'{x:.6f}' for x in vec)}]")

        # 4. Keep the top n_components eigenvectors as the projection basis.
        self.components = self._select_components(eigenvalues, eigenvectors)

        print("\n选择的主成分:")
        self._print_matrix(self.components)

        # 5. Fraction of total variance carried by each component.
        self.explained_variance_ratio = self._calculate_variance_ratio(eigenvalues)

        print("\n解释方差比例:")
        for i, ratio in enumerate(self.explained_variance_ratio):
            print(f"主成分 {i + 1}: {ratio:.4f} ({ratio * 100:.2f}%)")

        total_variance = sum(self.explained_variance_ratio[:self.n_components])
        print(f"\n前{self.n_components}个主成分保留的方差: {total_variance:.4f} ({total_variance * 100:.2f}%)")

        return self

    def transform(self, X):
        """Project ``X`` onto the fitted components -> (n_samples, n_components).

        Raises:
            ValueError: if ``fit`` has not been called yet.
        """
        if self.mean is None or self.components is None:
            raise ValueError("必须先调用fit方法训练模型")

        X_normalized = X - self.mean
        # components is (k, f): project the centered data onto its rows.
        return np.dot(X_normalized, self.components.T)

    def fit_transform(self, X):
        """Fit on ``X``, then return its projection."""
        self.fit(X)
        return self.transform(X)

    def _calculate_covariance(self, X):
        """Sample covariance matrix of already-centered ``X``, shape (f, f).

        Uses the unbiased estimator (divides by n_samples - 1).  Only the
        upper triangle is computed; the lower triangle is filled by symmetry.
        """
        n_samples, n_features = X.shape
        covariance = np.zeros((n_features, n_features))

        for i in range(n_features):
            for j in range(i, n_features):
                # X is centered, so the (i, j) covariance reduces to a dot
                # product of the two feature columns.
                cov = np.dot(X[:, i], X[:, j]) / (n_samples - 1)
                covariance[i, j] = cov
                covariance[j, i] = cov

        return covariance

    def _eigen_decomposition(self, matrix, max_iter=1000, tolerance=1e-10):
        """Eigendecomposition of a symmetric matrix by power iteration.

        Each dominant eigenpair is found by power iteration, then removed from
        the working matrix with Hotelling deflation so the next round
        converges to the next-largest pair.

        Returns:
            (eigenvalues, eigenvectors) sorted by descending eigenvalue,
            eigenvectors as columns.
        """
        n = matrix.shape[0]
        eigenvalues = []
        eigenvectors = []

        # Work on a copy so the caller's matrix is left untouched.
        current_matrix = matrix.copy()
        # Seedable RNG: with random_state set, start vectors (and therefore
        # the arbitrary eigenvector signs) are reproducible.
        rng = np.random.RandomState(self.random_state)

        for comp in range(n):
            print(f"  正在计算第 {comp + 1} 个特征向量...")

            # Random unit start vector.
            eigenvector = rng.rand(n)
            eigenvector = eigenvector / np.linalg.norm(eigenvector)

            for iteration in range(max_iter):
                new_vector = np.dot(current_matrix, eigenvector)

                new_vector_norm = np.linalg.norm(new_vector)
                if new_vector_norm < 1e-12:
                    # Deflated matrix is numerically zero: every remaining
                    # eigenvalue is ~0, so stop iterating.
                    break

                new_vector = new_vector / new_vector_norm

                # Converged when the direction stops changing.  Compare both
                # signs so a negative eigenvalue (which flips the sign each
                # step) also registers as converged.
                if min(np.linalg.norm(new_vector - eigenvector),
                       np.linalg.norm(new_vector + eigenvector)) < tolerance:
                    eigenvector = new_vector
                    break

                eigenvector = new_vector

            # Rayleigh quotient on the DEFLATED matrix.  For a converged
            # eigenvector this equals the original matrix's eigenvalue (the
            # deflation terms vanish on orthogonal directions), and when the
            # deflated matrix is ~zero it correctly yields ~0.  Using the
            # original matrix here would produce a spurious nonzero value
            # for the unconverged random vector left by the early break.
            eigenvalue = np.dot(eigenvector, np.dot(current_matrix, eigenvector))

            eigenvalues.append(eigenvalue)
            eigenvectors.append(eigenvector)

            print(f"    找到特征值: {eigenvalue:.6f}")

            # Hotelling deflation: remove the found component so the next
            # power iteration converges to the next-largest eigenpair.
            if comp < n - 1:
                current_matrix = current_matrix - eigenvalue * np.outer(eigenvector, eigenvector)

        # Sort by descending eigenvalue.
        order = np.argsort(eigenvalues)[::-1]
        eigenvalues = np.array(eigenvalues)[order]
        eigenvectors = np.array(eigenvectors)[order]

        return eigenvalues, eigenvectors.T

    def _select_components(self, eigenvalues, eigenvectors):
        """Return the top ``n_components`` eigenvectors as rows, shape (k, f)."""
        order = np.argsort(eigenvalues)[::-1]
        selected = order[:self.n_components]
        return eigenvectors[:, selected].T

    def _calculate_variance_ratio(self, eigenvalues):
        """Fraction of the total variance carried by each eigenvalue."""
        total_variance = np.sum(eigenvalues)
        if total_variance <= 0:
            # Degenerate (zero-variance) data: avoid dividing by zero.
            return np.zeros(len(eigenvalues))
        return eigenvalues / total_variance

    def _print_matrix(self, matrix):
        """Pretty-print a 1-D or 2-D array with aligned 4-decimal columns."""
        if len(matrix.shape) == 1:
            print(f"[{', '.join(f'{x:8.4f}' for x in matrix)}]")
        else:
            for row in matrix:
                print(f"[{', '.join(f'{x:8.4f}' for x in row)}]")


def main():
    """Demo: run the hand-rolled PCA on a fixed dataset, reducing 3-D to 2-D."""
    # Fixed three-dimensional sample data (10 observations, 3 features).
    samples = np.array([
        [2.5, 2.4, 1.8],
        [0.5, 0.7, 1.2],
        [2.2, 2.9, 2.1],
        [1.9, 2.2, 1.9],
        [3.1, 3.0, 2.8],
        [2.3, 2.7, 2.3],
        [2.0, 1.6, 1.5],
        [1.0, 1.1, 0.9],
        [1.5, 1.6, 1.4],
        [1.1, 0.9, 1.0],
    ])

    banner = "=" * 60
    print(banner)
    print("PCA手动实现：三维数据降维到二维")
    print(banner)

    # Fit the manual PCA and project the data in one call.
    model = PCAManual(n_components=2)
    projected = model.fit_transform(samples)

    print("\n" + banner)
    print("最终结果")
    print(banner)
    print("降维后的二维数据:")
    model._print_matrix(projected)

    print(f"\n原始数据形状: {samples.shape}")
    print(f"降维后形状: {projected.shape}")
    retained = sum(model.explained_variance_ratio[:2])
    print(f"信息保留率: {retained:.4f} ({retained * 100:.2f}%)")

    print(f"\n=== 验证 ===")
    # Cross-check against scikit-learn's reference PCA implementation.
    from sklearn.decomposition import PCA
    reference = PCA(n_components=2)
    projected_sklearn = reference.fit_transform(samples)
    print("sklearn PCA结果:")
    model._print_matrix(projected_sklearn)
    print(f"sklearn信息保留率: {sum(reference.explained_variance_ratio_):.4f}")


if __name__ == "__main__":
    main()