import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

# Load the iris dataset and unpack features, labels, and metadata.
iris_bunch = load_iris()
X = iris_bunch.data
y = iris_bunch.target
feature_names = iris_bunch.feature_names
target_names = iris_bunch.target_names

# Report the shape of the raw feature matrix.
print("原始数据形状:", X.shape)
print("特征数量:", X.shape[1])
print("样本数量:", X.shape[0])

# Standardize to zero mean / unit variance so PCA is not dominated
# by features with larger numeric ranges.
X_scaled = StandardScaler().fit_transform(X)

# Sweep over every possible number of retained components (1..n_features)
# and report how much variance each reduction preserves.
for n_components in range(1, X.shape[1] + 1):
    print(f"\n降维到{n_components}维:")

    # Fit PCA on the standardized data and project it.
    pca = PCA(n_components=n_components)
    X_pca = pca.fit_transform(X_scaled)

    # Per-component and cumulative explained-variance ratios.
    print("降维后数据形状:", X_pca.shape)
    print("主成分方差贡献率:", pca.explained_variance_ratio_)
    print("累计方差贡献率:", np.cumsum(pca.explained_variance_ratio_))

    # Only the 2-D projection lends itself to a scatter plot.
    if n_components == 2:
        plt.figure(figsize=(10, 6))
        class_styles = zip(['navy', 'turquoise', 'darkorange'],
                           [0, 1, 2], target_names)
        for color, class_idx, target_name in class_styles:
            mask = y == class_idx
            plt.scatter(X_pca[mask, 0], X_pca[mask, 1],
                        color=color, alpha=.8, lw=2,
                        label=target_name)
        plt.legend(loc='best', shadow=False, scatterpoints=1)
        plt.title(f'PCA of IRIS dataset (n_components={n_components})')
        plt.xlabel('First Principal Component')
        plt.ylabel('Second Principal Component')
        plt.grid()
        plt.show()

# Save the reduced data from the FINAL loop iteration. Note: after the sweep
# above, X_pca holds the projection with n_components == X.shape[1] (4 for
# iris), so the CSV has that many columns. The original hard-coded header
# 'PC1,PC2' named only two columns and mismatched the data; build the header
# dynamically from the actual column count instead.
_pc_header = ','.join(f'PC{i + 1}' for i in range(X_pca.shape[1]))
np.savetxt('iris_pca.csv', X_pca, delimiter=',', header=_pc_header, comments='')