import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
import matplotlib

# Configure sans-serif fallbacks so CJK (Chinese) text renders in plots.
plt.rcParams['font.sans-serif'] = ['WenQuanYi Micro Hei', 'SimHei', 'DejaVu Sans Fallback']
# Use ASCII hyphen for minus signs; the Unicode minus is missing from many CJK fonts.
plt.rcParams['axes.unicode_minus'] = False

class PCA:
    """Principal Component Analysis via eigendecomposition of the covariance matrix.

    Projects data onto the directions of maximal variance. Note that ``fit``
    only centers the data (subtracts the per-feature mean); it does NOT scale
    to unit variance — standardize beforehand if features have mixed scales.
    """

    def __init__(self, n_components):
        # Number of components to keep; validated against n_features in fit().
        self.n_components = n_components
        self.components = None                # (n_features, n_components) projection matrix
        self.mean = None                      # per-feature mean used for centering
        self.explained_variance_ratio = None  # variance fraction of each kept component

    def fit(self, X):
        """Learn the top ``n_components`` principal directions of X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        self

        Raises
        ------
        ValueError
            If ``n_components`` is not in ``[1, n_features]``.
        """
        X = np.asarray(X, dtype=float)
        n_features = X.shape[1]
        if not 1 <= self.n_components <= n_features:
            raise ValueError(
                f"n_components must be between 1 and {n_features}, "
                f"got {self.n_components}"
            )

        # 1. Center the data (mean subtraction only — not full standardization).
        self.mean = np.mean(X, axis=0)
        X_centered = X - self.mean

        # 2. Covariance matrix; rowvar=False treats columns as variables.
        cov_matrix = np.cov(X_centered, rowvar=False)

        # 3. Eigendecomposition. eigh is correct here (and more stable than eig)
        #    because a covariance matrix is symmetric.
        eigenvalues, eigenvectors = np.linalg.eigh(cov_matrix)

        # 4. eigh returns ascending eigenvalues; reorder to descending.
        sorted_idx = np.argsort(eigenvalues)[::-1]
        eigenvalues = eigenvalues[sorted_idx]
        eigenvectors = eigenvectors[:, sorted_idx]

        # 5. Keep the leading n_components eigenvectors as the projection basis.
        self.components = eigenvectors[:, :self.n_components]

        # 6. Fraction of total variance captured by each kept component.
        total_variance = np.sum(eigenvalues)
        self.explained_variance_ratio = eigenvalues[:self.n_components] / total_variance

        return self

    def transform(self, X):
        """Project X onto the learned components; returns (n_samples, n_components).

        Raises
        ------
        RuntimeError
            If called before ``fit``.
        """
        if self.components is None:
            raise RuntimeError("This PCA instance is not fitted yet; call fit() first.")
        X_centered = np.asarray(X, dtype=float) - self.mean
        return np.dot(X_centered, self.components)

    def fit_transform(self, X):
        """Fit to X, then return X projected onto the principal components."""
        self.fit(X)
        return self.transform(X)

# 示例使用
def demo_pca():
    """Demonstrate the PCA class on the iris dataset and save the plot to disk."""
    # Load the iris dataset (150 samples, 4 features, 3 classes).
    iris = load_iris()
    X = iris.data
    y = iris.target

    print("Original data shape:", X.shape)
    print("First 5 rows of original data:")
    print(X[:5])

    # Standardize to zero mean / unit variance so no feature dominates by scale.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)

    # Reduce to two dimensions for visualization.
    pca = PCA(n_components=2)
    X_pca = pca.fit_transform(X_scaled)

    print("\nReduced data shape:", X_pca.shape)
    print("First 5 rows of reduced data:")
    print(X_pca[:5])

    print(f"\nExplained variance ratio: {pca.explained_variance_ratio}")
    print(f"Cumulative explained variance: {np.sum(pca.explained_variance_ratio):.4f}")

    # Left panel: 2-D projection, one scatter series per class.
    plt.figure(figsize=(12, 5))

    plt.subplot(1, 2, 1)
    for target in np.unique(y):
        plt.scatter(X_pca[y == target, 0], X_pca[y == target, 1],
                   label=iris.target_names[target], alpha=0.8)
    plt.xlabel('First Principal Component')
    plt.ylabel('Second Principal Component')
    plt.title('PCA Dimensionality Reduction Result')
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Right panel: scree plot of per-component and cumulative explained variance.
    plt.subplot(1, 2, 2)
    # Derive the component count from the data instead of hard-coding 4,
    # so the demo still works if the feature set changes.
    n_features = X_scaled.shape[1]
    pca_full = PCA(n_components=n_features)
    pca_full.fit(X_scaled)
    cumulative_variance = np.cumsum(pca_full.explained_variance_ratio)

    component_range = range(1, n_features + 1)
    plt.bar(component_range, pca_full.explained_variance_ratio, alpha=0.6,
            label='Individual')
    plt.plot(component_range, cumulative_variance, 'ro-', label='Cumulative')
    plt.xlabel('Number of Principal Components')
    plt.ylabel('Explained Variance Ratio')
    plt.title('Variance Explained')
    plt.legend()
    plt.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig('pca_result.png')  # save instead of show (works headless)
    print("Plot saved as 'pca_result.png'")
    plt.close()  # release the figure so repeated calls don't accumulate memory

if __name__ == "__main__":
    demo_pca()