import numpy as np
import pandas as pd
import matplotlib

matplotlib.use('TkAgg')  # 让 PyCharm 能显示图像
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

# ==========================================
# Matplotlib setup: CJK font and minus-sign rendering
# ==========================================
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei so Chinese axis labels/titles render
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs visible when a CJK font is active


# ==========================================
# Sign correction helper
# ==========================================
def correct_signs(manual_result, sklearn_result):
    """Align the sign of each manually computed component with sklearn's.

    PCA eigenvectors are only defined up to a sign flip, so a hand-rolled
    projection can be the mirror image of sklearn's along any component.
    Each column of ``manual_result`` is compared with the matching column of
    ``sklearn_result`` via Pearson correlation; negatively correlated
    columns are negated.

    Returns a tuple ``(corrected, correction_info)``: a new array with the
    signs fixed, plus a list of human-readable status messages (one per
    component).  The inputs are not modified.
    """
    corrected = manual_result.copy()
    correction_info = []

    for idx in range(corrected.shape[1]):
        # Pearson correlation between the two versions of this component
        r = np.corrcoef(corrected[:, idx], sklearn_result[:, idx])[0, 1]
        flipped = r < 0
        if flipped:
            corrected[:, idx] = -corrected[:, idx]
        status = "符号已翻转" if flipped else "符号一致"
        correction_info.append(f"主成分 {idx + 1}: {status} (相关系数: {r:.6f})")

    return corrected, correction_info


# ==========================================
# Eigenvector orthogonality check
# ==========================================
def verify_orthogonality(eig_vectors):
    """Report how close the columns of *eig_vectors* are to orthogonal.

    Builds the Gram matrix V^T V, zeroes the diagonal, and returns the
    largest absolute off-diagonal entry (0 for perfectly orthogonal
    columns).  Also prints the value for the script's console log.
    """
    gram = eig_vectors.T @ eig_vectors
    # Mask the diagonal; only cross products between distinct columns matter
    off_diagonal = gram * (1.0 - np.eye(gram.shape[0]))
    max_off_diag = np.abs(off_diagonal).max()
    print(f"🔍 特征向量正交性检验 - 最大非对角线值: {max_off_diag:.10f}")
    return max_off_diag


# ==========================================
# 1. Load the raw dataset (Iris)
# ==========================================
data = load_iris()
X = data.data  # feature matrix (150, 4)
y = data.target  # class labels (150,)
feature_names = data.feature_names

# Persist the raw features plus the label column for reference
df_original = pd.DataFrame(X, columns=feature_names)
df_original['target'] = y
df_original.to_csv("iris_original.csv", index=False, encoding='utf-8-sig')
print("✅ 已保存原始数据集为 iris_original.csv")

# ==========================================
# 2. Standardize the data (zero mean, unit variance per feature)
# ==========================================
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Sanity check: after scaling, each feature should have mean ≈ 0, variance ≈ 1
print("\n📊 标准化后每个特征的均值（应接近0）:")
print(X_scaled.mean(axis=0))
print("📏 标准化后每个特征的方差（应接近1）:")
print(X_scaled.var(axis=0))

# ==========================================
# 3. Manual implementation of the PCA math
# ==========================================

# Step 1: covariance matrix of the standardized features (4x4, symmetric)
cov_matrix = np.cov(X_scaled.T)
print("\n📘 协方差矩阵:")
print(cov_matrix)

# Step 2: eigendecomposition.  Use eigh instead of eig: the covariance
# matrix is symmetric, and eigh guarantees real eigenvalues/eigenvectors,
# whereas eig may return complex values with tiny imaginary parts due to
# floating-point asymmetry, which would break the ':.10f' formatting and
# the numeric comparisons later in the script.
eig_values, eig_vectors = np.linalg.eigh(cov_matrix)

# Step 3: eigh returns eigenvalues in ascending order; re-sort descending
sorted_idx = np.argsort(eig_values)[::-1]
eig_values = eig_values[sorted_idx]
eig_vectors = eig_vectors[:, sorted_idx]

print("\n🔢 特征值（解释方差大小）:")
print(eig_values)
print("\n🔢 特征向量（主成分方向）:")
print(eig_vectors)

# Sanity check: eigenvectors of a symmetric matrix must be orthogonal
ortho_error = verify_orthogonality(eig_vectors)

# Step 4: explained variance ratio (eigenvalue / total variance)
explained_variance_ratio = eig_values / np.sum(eig_values)
print("\n📊 解释方差比:")
print(explained_variance_ratio)
print("累计解释方差比:", np.cumsum(explained_variance_ratio))

# Step 5: project onto the two leading principal components
W = eig_vectors[:, :2]
X_reduced_manual = np.dot(X_scaled, W)

# Persist the manual projection alongside the class labels
df_manual_pca = pd.DataFrame(X_reduced_manual, columns=['PC1', 'PC2'])
df_manual_pca['target'] = y
df_manual_pca.to_csv("iris_pca_manual.csv", index=False, encoding='utf-8-sig')
print("✅ 已保存手动计算的 PCA 降维结果为 iris_pca_manual.csv")

# ==========================================
# 4. PCA via sklearn (reference implementation for comparison)
# ==========================================
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X_scaled)

print("\n✅ sklearn PCA 解释方差比:", pca.explained_variance_ratio_)
print("✅ sklearn PCA 累计解释方差:", np.sum(pca.explained_variance_ratio_))

# Persist the sklearn projection alongside the class labels
df_sklearn_pca = pd.DataFrame(X_pca, columns=['PC1', 'PC2'])
df_sklearn_pca['target'] = y
df_sklearn_pca.to_csv("iris_pca_sklearn.csv", index=False, encoding='utf-8-sig')
print("✅ 已保存 sklearn PCA 降维结果为 iris_pca_sklearn.csv")

# ==========================================
# 5. Difference between the two results (before sign correction)
# ==========================================
# NOTE(review): a large value here is expected when components differ only
# by sign; section 6 recomputes this same quantity (as original_diff) and
# then compares again after sign correction.
diff = np.abs(X_reduced_manual - X_pca)
print("\n🔍 手写 PCA 与 sklearn 结果平均差异:")
print(np.mean(diff))

# ==========================================
# 6. Sign correction and detailed comparison
# ==========================================
print("\n" + "=" * 50)
print("符号校正过程")
print("=" * 50)

# Error before correction (same quantity as section 5, reported in detail)
original_diff = np.abs(X_reduced_manual - X_pca)
print("🔍 校正前手写 PCA 与 sklearn 结果平均差异:")
print(f"  平均绝对误差: {np.mean(original_diff):.10f}")
print(f"  最大绝对误差: {np.max(original_diff):.10f}")

# Flip any manually computed component whose sign disagrees with sklearn's
X_reduced_corrected, correction_info = correct_signs(X_reduced_manual, X_pca)

# Report which components were flipped
print("\n📝 符号校正信息:")
for info in correction_info:
    print(f"  {info}")

# Error after correction (should drop to near machine precision)
corrected_diff = np.abs(X_reduced_corrected - X_pca)
print("\n🔍 校正后手写 PCA 与 sklearn 结果平均差异:")
print(f"  平均绝对误差: {np.mean(corrected_diff):.10f}")
print(f"  最大绝对误差: {np.max(corrected_diff):.10f}")

# Persist the sign-corrected projection
df_corrected_pca = pd.DataFrame(X_reduced_corrected, columns=['PC1', 'PC2'])
df_corrected_pca['target'] = y
df_corrected_pca.to_csv("iris_pca_corrected.csv", index=False, encoding='utf-8-sig')
print("✅ 已保存符号校正后的 PCA 降维结果为 iris_pca_corrected.csv")

# ==========================================
# 7. Visualization: 2-D scatter of the sklearn projection
# ==========================================
plt.figure(figsize=(8, 6))
# One scatter series per Iris class
for label, name in zip([0, 1, 2], data.target_names):
    plt.scatter(X_pca[y == label, 0], X_pca[y == label, 1], label=name)

plt.xlabel("主成分1 (PC1)")
plt.ylabel("主成分2 (PC2)")
plt.title("Iris 数据集 PCA 降维结果（sklearn）")
plt.legend()
plt.grid(True)
plt.savefig("pca_sklearn_result.png", dpi=300, bbox_inches='tight')
plt.show()

# ==========================================
# 8. Eigenvalues vs. cumulative explained variance
# ==========================================
plt.figure(figsize=(8, 5))
# Bars: individual eigenvalues; line: cumulative explained variance ratio
plt.bar(range(1, len(eig_values) + 1), eig_values, alpha=0.7, label='特征值')
plt.plot(range(1, len(eig_values) + 1), np.cumsum(explained_variance_ratio), marker='o', color='r', label='累计解释方差')
plt.xlabel("主成分序号")
plt.ylabel("特征值 / 方差比")
plt.title("各主成分贡献度与累计解释方差")
plt.legend()
plt.grid(True)
plt.savefig("pca_variance_analysis.png", dpi=300, bbox_inches='tight')
plt.show()

# ==========================================
# 9. Side-by-side comparison: sklearn vs. manual (raw) vs. manual (corrected)
# ==========================================
fig, axes = plt.subplots(1, 3, figsize=(18, 5))

# Left panel: sklearn PCA projection
for label, name in zip([0, 1, 2], data.target_names):
    axes[0].scatter(X_pca[y == label, 0], X_pca[y == label, 1], label=name, alpha=0.7)
axes[0].set_xlabel("主成分1 (PC1)")
axes[0].set_ylabel("主成分2 (PC2)")
axes[0].set_title("sklearn PCA 结果")
axes[0].legend()
axes[0].grid(True)

# Middle panel: manual PCA before sign correction (may be mirrored)
for label, name in zip([0, 1, 2], data.target_names):
    axes[1].scatter(X_reduced_manual[y == label, 0], X_reduced_manual[y == label, 1], label=name, alpha=0.7)
axes[1].set_xlabel("主成分1 (PC1)")
axes[1].set_ylabel("主成分2 (PC2)")
axes[1].set_title("手动PCA原始结果\n(可能存在符号差异)")
axes[1].legend()
axes[1].grid(True)

# Right panel: manual PCA after sign correction (should match the left panel)
for label, name in zip([0, 1, 2], data.target_names):
    axes[2].scatter(X_reduced_corrected[y == label, 0], X_reduced_corrected[y == label, 1], label=name, alpha=0.7)
axes[2].set_xlabel("主成分1 (PC1)")
axes[2].set_ylabel("主成分2 (PC2)")
axes[2].set_title("手动PCA校正后结果\n(符号已校正)")
axes[2].legend()
axes[2].grid(True)

plt.tight_layout()
plt.savefig("pca_comparison.png", dpi=300, bbox_inches='tight')
plt.show()

# ==========================================
# 10. Reconstruct the data and measure information loss (MSE)
# ==========================================
# Project back to 4-D via W^T.  Sign flips cancel in X·W·W^T, so using the
# uncorrected projection here is mathematically fine.
X_reconstructed = np.dot(X_reduced_manual, W.T)
mse = np.mean((X_scaled - X_reconstructed) ** 2)
print(f"\n💡 使用前两个主成分重构数据的均方误差 (MSE): {mse:.6f}")

# ==========================================
# 11. Save the final report (per-component stats + scalar metrics)
# ==========================================
# (A `report` dict was previously built here but never read anywhere in the
# script; the CSV below is the actual deliverable, so the dead dict is gone.)

# Per-component table: eigenvalue, variance ratio, cumulative ratio
df_detailed_report = pd.DataFrame({
    '主成分': [f'PC{i + 1}' for i in range(len(eig_values))],
    '特征值': eig_values,
    '解释方差比': explained_variance_ratio,
    '累计解释方差比': np.cumsum(explained_variance_ratio)
})

# Scalar metrics appended as text rows; the CSV is meant for human readers,
# so mixed string/number columns are acceptable here
additional_info = pd.DataFrame({
    '主成分': ['其他指标', '', '', ''],
    '特征值': [f'重构误差(MSE): {mse:.6f}',
            f'正交性误差: {ortho_error:.10f}',
            f'校正前差异: {np.mean(original_diff):.10f}',
            f'校正后差异: {np.mean(corrected_diff):.10f}'],
    '解释方差比': ['', '', '', ''],
    '累计解释方差比': ['', '', '', '']
})

df_report = pd.concat([df_detailed_report, additional_info], ignore_index=True)
df_report.to_csv("pca_analysis_report.csv", index=False, encoding='utf-8-sig')
print("✅ 已保存 PCA 分析报告为 pca_analysis_report.csv")

# ==========================================
# 12. Summary output
# ==========================================
print("\n" + "=" * 60)
print("PCA 分析总结")
print("=" * 60)
print(f"📊 数据维度: {X.shape} → {X_reduced_corrected.shape}")
print(f"🎯 累计解释方差: {np.sum(pca.explained_variance_ratio_):.4f} ({np.sum(pca.explained_variance_ratio_) * 100:.2f}%)")
print(f"🔧 特征向量正交性误差: {ortho_error:.10f}")
print(f"✅ 符号校正效果: 平均差异从 {np.mean(original_diff):.6f} 降低到 {np.mean(corrected_diff):.6f}")
print(f"📉 信息损失 (MSE): {mse:.6f}")
print("🎯 所有步骤完成！")
print("=" * 60)
