import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

# Configure Matplotlib with a CJK font (SimHei) so Chinese plot labels render,
# and keep the minus sign displayable with that font.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Fixed dataset: 200 walnut samples, each with 6 features.
n_samples = 200

# Fixed data — walnut size in millimetres (200 hard-coded values, one per sample).
walnut_size = np.array([
    28.5, 32.1, 35.8, 29.2, 33.7, 37.4, 30.8, 34.5, 38.2, 31.6,
    25.9, 29.6, 33.3, 27.7, 31.4, 35.1, 28.5, 32.2, 35.9, 30.3,
    26.8, 30.5, 34.2, 28.6, 32.3, 36.0, 29.4, 33.1, 36.8, 31.2,
    27.5, 31.2, 34.9, 29.3, 33.0, 36.7, 30.1, 33.8, 37.5, 31.9,
    26.2, 29.9, 33.6, 28.0, 31.7, 35.4, 28.8, 32.5, 36.2, 30.6,
    24.8, 28.5, 32.2, 26.6, 30.3, 34.0, 27.4, 31.1, 34.8, 29.2,
    25.5, 29.2, 32.9, 27.3, 31.0, 34.7, 28.1, 31.8, 35.5, 29.9,
    26.9, 30.6, 34.3, 28.7, 32.4, 36.1, 29.5, 33.2, 36.9, 31.3,
    27.8, 31.5, 35.2, 29.6, 33.3, 37.0, 30.4, 34.1, 37.8, 32.2,
    25.1, 28.8, 32.5, 26.9, 30.6, 34.3, 27.7, 31.4, 35.1, 29.5,
    26.4, 30.1, 33.8, 28.2, 31.9, 35.6, 29.0, 32.7, 36.4, 30.8,
    27.1, 30.8, 34.5, 28.9, 32.6, 36.3, 29.7, 33.4, 37.1, 31.5,
    26.5, 30.2, 33.9, 28.3, 32.0, 35.7, 29.1, 32.8, 36.5, 30.9,
    27.3, 31.0, 34.7, 29.1, 32.8, 36.5, 29.9, 33.6, 37.3, 31.7,
    25.7, 29.4, 33.1, 27.5, 31.2, 34.9, 28.3, 32.0, 35.7, 30.1,
    26.7, 30.4, 34.1, 28.5, 32.2, 35.9, 29.3, 33.0, 36.7, 31.1,
    27.6, 31.3, 35.0, 29.4, 33.1, 36.8, 30.2, 33.9, 37.6, 32.0,
    25.3, 29.0, 32.7, 27.1, 30.8, 34.5, 27.9, 31.6, 35.3, 29.7,
    26.3, 30.0, 33.7, 28.1, 31.8, 35.5, 28.9, 32.6, 36.3, 30.7,
    27.4, 31.1, 34.8, 29.2, 32.9, 36.6, 30.0, 33.7, 37.4, 31.8
])

# Fixed data — growth duration in days (200 values, aligned with walnut_size).
growth_days = np.array([
    165, 172, 179, 168, 175, 182, 170, 177, 184, 173,
    160, 167, 174, 163, 170, 177, 165, 172, 179, 168,
    158, 165, 172, 161, 168, 175, 163, 170, 177, 166,
    162, 169, 176, 165, 172, 179, 167, 174, 181, 170,
    155, 162, 169, 158, 165, 172, 160, 167, 174, 163,
    150, 157, 164, 153, 160, 167, 155, 162, 169, 158,
    152, 159, 166, 155, 162, 169, 157, 164, 171, 160,
    161, 168, 175, 164, 171, 178, 166, 173, 180, 169,
    164, 171, 178, 167, 174, 181, 169, 176, 183, 172,
    156, 163, 170, 159, 166, 173, 161, 168, 175, 164,
    157, 164, 171, 160, 167, 174, 162, 169, 176, 165,
    159, 166, 173, 162, 169, 176, 164, 171, 178, 167,
    158, 165, 172, 161, 168, 175, 163, 170, 177, 166,
    160, 167, 174, 163, 170, 177, 165, 172, 179, 168,
    154, 161, 168, 157, 164, 171, 159, 166, 173, 162,
    159, 166, 173, 162, 169, 176, 164, 171, 178, 167,
    163, 170, 177, 166, 173, 180, 168, 175, 182, 171,
    151, 158, 165, 154, 161, 168, 156, 163, 170, 159,
    157, 164, 171, 160, 167, 174, 162, 169, 176, 165,
    161, 168, 175, 164, 171, 178, 166, 173, 180, 169
])

# Fixed data — leaf area in cm² (200 values, aligned with the other features).
leaf_size = np.array([
    45.2, 48.7, 52.3, 46.5, 50.1, 53.8, 47.8, 51.4, 55.1, 49.2,
    42.1, 45.6, 49.2, 43.4, 47.0, 50.7, 44.7, 48.3, 52.0, 46.1,
    40.8, 44.3, 47.9, 42.1, 45.7, 49.4, 43.4, 47.0, 50.7, 44.8,
    41.5, 45.0, 48.6, 42.8, 46.4, 50.1, 44.1, 47.7, 51.4, 45.5,
    39.2, 42.7, 46.3, 40.5, 44.1, 47.8, 41.8, 45.4, 49.1, 43.2,
    37.5, 41.0, 44.6, 38.8, 42.4, 46.1, 40.1, 43.7, 47.4, 41.5,
    38.2, 41.7, 45.3, 39.5, 43.1, 46.8, 40.8, 44.4, 48.1, 42.2,
    41.8, 45.3, 48.9, 43.1, 46.7, 50.4, 44.4, 48.0, 51.7, 45.8,
    43.5, 47.0, 50.6, 44.8, 48.4, 52.1, 46.1, 49.7, 53.4, 47.5,
    38.8, 42.3, 45.9, 40.1, 43.7, 47.4, 41.4, 45.0, 48.7, 42.8,
    39.5, 43.0, 46.6, 40.8, 44.4, 48.1, 42.1, 45.7, 49.4, 43.5,
    40.2, 43.7, 47.3, 41.5, 45.1, 48.8, 42.8, 46.4, 50.1, 44.2,
    39.8, 43.3, 46.9, 41.1, 44.7, 48.4, 42.4, 46.0, 49.7, 43.8,
    40.5, 44.0, 47.6, 41.8, 45.4, 49.1, 43.1, 46.7, 50.4, 44.5,
    38.1, 41.6, 45.2, 39.4, 43.0, 46.7, 40.7, 44.3, 48.0, 42.1,
    40.7, 44.2, 47.8, 42.0, 45.6, 49.3, 43.3, 46.9, 50.6, 44.7,
    42.8, 46.3, 49.9, 44.1, 47.7, 51.4, 45.4, 49.0, 52.7, 46.8,
    37.8, 41.3, 44.9, 39.1, 42.7, 46.4, 40.4, 44.0, 47.7, 41.8,
    39.6, 43.1, 46.7, 40.9, 44.5, 48.2, 42.2, 45.8, 49.5, 43.6,
    41.2, 44.7, 48.3, 42.5, 46.1, 49.8, 43.8, 47.4, 51.1, 45.2
])

# Fixed data — light intensity in kilolux (200 values).
light_intensity = np.array([
    6.2, 7.8, 9.4, 6.7, 8.3, 9.9, 7.1, 8.7, 10.3, 7.5,
    5.1, 6.7, 8.3, 5.6, 7.2, 8.8, 6.0, 7.6, 9.2, 6.4,
    4.8, 6.4, 8.0, 5.3, 6.9, 8.5, 5.7, 7.3, 8.9, 6.1,
    5.3, 6.9, 8.5, 5.8, 7.4, 9.0, 6.2, 7.8, 9.4, 6.6,
    4.5, 6.1, 7.7, 5.0, 6.6, 8.2, 5.4, 7.0, 8.6, 5.8,
    4.0, 5.6, 7.2, 4.5, 6.1, 7.7, 4.9, 6.5, 8.1, 5.3,
    4.2, 5.8, 7.4, 4.7, 6.3, 7.9, 5.1, 6.7, 8.3, 5.5,
    5.5, 7.1, 8.7, 6.0, 7.6, 9.2, 6.4, 8.0, 9.6, 6.8,
    6.0, 7.6, 9.2, 6.5, 8.1, 9.7, 6.9, 8.5, 10.1, 7.3,
    4.7, 6.3, 7.9, 5.2, 6.8, 8.4, 5.6, 7.2, 8.8, 6.0,
    4.9, 6.5, 8.1, 5.4, 7.0, 8.6, 5.8, 7.4, 9.0, 6.2,
    5.0, 6.6, 8.2, 5.5, 7.1, 8.7, 5.9, 7.5, 9.1, 6.3,
    4.8, 6.4, 8.0, 5.3, 6.9, 8.5, 5.7, 7.3, 8.9, 6.1,
    5.1, 6.7, 8.3, 5.6, 7.2, 8.8, 6.0, 7.6, 9.2, 6.4,
    4.3, 5.9, 7.5, 4.8, 6.4, 8.0, 5.2, 6.8, 8.4, 5.6,
    5.2, 6.8, 8.4, 5.7, 7.3, 8.9, 6.1, 7.7, 9.3, 6.5,
    5.8, 7.4, 9.0, 6.3, 7.9, 9.5, 6.7, 8.3, 9.9, 7.1,
    4.1, 5.7, 7.3, 4.6, 6.2, 7.8, 5.0, 6.6, 8.2, 5.4,
    4.9, 6.5, 8.1, 5.4, 7.0, 8.6, 5.8, 7.4, 9.0, 6.2,
    5.4, 7.0, 8.6, 5.9, 7.5, 9.1, 6.3, 7.9, 9.5, 6.7
])

# Fixed data — pest count per tree (200 integer values).
pest_count = np.array([
    15, 8, 3, 12, 6, 1, 10, 5, 0, 9,
    25, 18, 12, 22, 15, 9, 20, 13, 7, 17,
    28, 21, 15, 25, 18, 12, 23, 16, 10, 20,
    26, 19, 13, 23, 16, 10, 21, 14, 8, 18,
    32, 25, 19, 29, 22, 16, 27, 20, 14, 24,
    35, 28, 22, 32, 25, 19, 30, 23, 17, 26,
    33, 26, 20, 30, 23, 17, 28, 21, 15, 25,
    24, 17, 11, 21, 14, 8, 19, 12, 6, 16,
    20, 13, 7, 17, 10, 4, 15, 8, 2, 12,
    30, 23, 17, 27, 20, 14, 25, 18, 12, 22,
    31, 24, 18, 28, 21, 15, 26, 19, 13, 23,
    29, 22, 16, 26, 19, 13, 24, 17, 11, 21,
    32, 25, 19, 29, 22, 16, 27, 20, 14, 24,
    27, 20, 14, 24, 17, 11, 22, 15, 9, 19,
    34, 27, 21, 31, 24, 18, 29, 22, 16, 26,
    28, 21, 15, 25, 18, 12, 23, 16, 10, 20,
    22, 15, 9, 19, 12, 6, 17, 10, 4, 14,
    36, 29, 23, 33, 26, 20, 31, 24, 18, 28,
    31, 24, 18, 28, 21, 15, 26, 19, 13, 23,
    25, 18, 12, 22, 15, 9, 20, 13, 7, 17
])

# Fixed data — water supply in litres per day (200 values).
water_supply = np.array([
    8.5, 9.2, 9.9, 8.7, 9.4, 10.1, 8.9, 9.6, 10.3, 9.1,
    7.2, 7.9, 8.6, 7.4, 8.1, 8.8, 7.6, 8.3, 9.0, 7.8,
    6.8, 7.5, 8.2, 7.0, 7.7, 8.4, 7.2, 7.9, 8.6, 7.4,
    7.0, 7.7, 8.4, 7.2, 7.9, 8.6, 7.4, 8.1, 8.8, 7.6,
    6.5, 7.2, 7.9, 6.7, 7.4, 8.1, 6.9, 7.6, 8.3, 7.1,
    5.8, 6.5, 7.2, 6.0, 6.7, 7.4, 6.2, 6.9, 7.6, 6.4,
    6.0, 6.7, 7.4, 6.2, 6.9, 7.6, 6.4, 7.1, 7.8, 6.6,
    7.4, 8.1, 8.8, 7.6, 8.3, 9.0, 7.8, 8.5, 9.2, 8.0,
    7.8, 8.5, 9.2, 8.0, 8.7, 9.4, 8.2, 8.9, 9.6, 8.4,
    6.7, 7.4, 8.1, 6.9, 7.6, 8.3, 7.1, 7.8, 8.5, 7.3,
    6.9, 7.6, 8.3, 7.1, 7.8, 8.5, 7.3, 8.0, 8.7, 7.5,
    7.1, 7.8, 8.5, 7.3, 8.0, 8.7, 7.5, 8.2, 8.9, 7.7,
    6.8, 7.5, 8.2, 7.0, 7.7, 8.4, 7.2, 7.9, 8.6, 7.4,
    7.2, 7.9, 8.6, 7.4, 8.1, 8.8, 7.6, 8.3, 9.0, 7.8,
    6.3, 7.0, 7.7, 6.5, 7.2, 7.9, 6.7, 7.4, 8.1, 6.9,
    7.3, 8.0, 8.7, 7.5, 8.2, 8.9, 7.7, 8.4, 9.1, 7.9,
    7.6, 8.3, 9.0, 7.8, 8.5, 9.2, 8.0, 8.7, 9.4, 8.2,
    6.2, 6.9, 7.6, 6.4, 7.1, 7.8, 6.6, 7.3, 8.0, 6.8,
    6.9, 7.6, 8.3, 7.1, 7.8, 8.5, 7.3, 8.0, 8.7, 7.5,
    7.5, 8.2, 8.9, 7.7, 8.4, 9.1, 7.9, 8.6, 9.3, 8.1
])

# Assemble the six feature columns into a (200, 6) matrix and print
# per-feature summary statistics (mean / std / min / max).
feature_names = ['核桃大小', '生长日期', '叶片大小', '光照强度', '虫害数量', '水源供应']
data = np.column_stack(
    [walnut_size, growth_days, leaf_size, light_intensity, pest_count, water_supply]
)

print("固定核桃数据集统计:")
print("=" * 60)
print(f"{'特征':<8} {'均值':<8} {'标准差':<8} {'最小值':<8} {'最大值':<8}")
print("-" * 60)
# Walk the columns directly (rows of data.T) instead of indexing by position.
for name, col in zip(feature_names, data.T):
    print(f"{name:<8} {col.mean():<8.2f} {col.std():<8.2f} {col.min():<8.2f} {col.max():<8.2f}")

# --- PCA pipeline: standardize, eigendecompose the covariance, rank components ---
print("\n" + "=" * 80)
print("PCA数学推导过程 (6维 → 2维)")
print("=" * 80)

# Z-score standardization so every feature contributes on a comparable scale.
scaler = StandardScaler()
data_scaled = scaler.fit_transform(data)

# Covariance matrix (6x6) of the standardized features.
cov_matrix = np.cov(data_scaled.T)

# Eigendecomposition: principal directions (eigenvectors) and their variances
# (eigenvalues). NOTE(review): np.linalg.eigh would be the natural call for a
# symmetric matrix; eig is kept to reproduce the original results exactly.
eigenvalues, eigenvectors = np.linalg.eig(cov_matrix)

# Rank the eigenpairs by descending eigenvalue.
order = np.argsort(eigenvalues)[::-1]
sorted_eigenvalues = eigenvalues[order]
sorted_eigenvectors = eigenvectors[:, order]

# Per-component share of total variance, plus its running sum.
explained_variance_ratio = sorted_eigenvalues / sorted_eigenvalues.sum()
cumulative_variance_ratio = np.cumsum(explained_variance_ratio)

# Project onto the top two components with sklearn for the downstream plots.
pca = PCA(n_components=2)
data_pca = pca.fit_transform(data_scaled)

print("\n主成分分析结果:")
for rank, ratio in enumerate(explained_variance_ratio, start=1):
    print(f"PC{rank}: 解释方差 {ratio:.4f} ({ratio*100:.2f}%)")

print(f"\n前2个主成分累计解释方差: {cumulative_variance_ratio[1]*100:.2f}%")

# --- Visualization: 2x3 panel (correlations, scree, variance, projection) ---
plt.figure(figsize=(20, 12))

# Panel 1: correlation heatmap of the six raw features.
plt.subplot(2, 3, 1)
correlation_matrix = np.corrcoef(data.T)
im = plt.imshow(correlation_matrix, cmap='coolwarm', aspect='auto', vmin=-1, vmax=1)
plt.colorbar(im, fraction=0.046, pad=0.04)
plt.xticks(range(6), feature_names, rotation=45)
plt.yticks(range(6), feature_names)
plt.title('6维特征相关性热力图', fontsize=14, fontweight='bold')

# Panel 2: eigenvalue scree plot with the eigenvalue=1 (Kaiser) threshold.
plt.subplot(2, 3, 2)
components = [f'PC{i+1}' for i in range(6)]
plt.bar(components, sorted_eigenvalues, alpha=0.7, 
        color=['red', 'blue', 'green', 'orange', 'purple', 'brown'])
plt.axhline(y=1, color='r', linestyle='--', alpha=0.7, label='特征值=1阈值')
plt.title('特征值 - Scree图', fontsize=14, fontweight='bold')
plt.xlabel('主成分', fontsize=12)
plt.ylabel('特征值', fontsize=12)
plt.legend()
plt.grid(True, alpha=0.3)

# Panel 3: cumulative explained-variance curve with 80% / 90% guide lines.
plt.subplot(2, 3, 3)
plt.plot(range(1, 7), cumulative_variance_ratio, 'o-', linewidth=3, markersize=8)
plt.axhline(y=0.8, color='r', linestyle='--', alpha=0.7, label='80%阈值')
plt.axhline(y=0.9, color='g', linestyle='--', alpha=0.7, label='90%阈值')
plt.title('累计解释方差比', fontsize=14, fontweight='bold')
plt.xlabel('主成分数量', fontsize=12)
plt.ylabel('累计解释方差比', fontsize=12)
plt.legend()
plt.grid(True, alpha=0.3)

# Panel 4: samples projected onto PC1/PC2, colored by pest count.
plt.subplot(2, 3, 4)
scatter = plt.scatter(data_pca[:, 0], data_pca[:, 1], c=pest_count, 
                     cmap='RdYlBu_r', s=60, alpha=0.8, edgecolors='black', linewidth=0.5)
plt.colorbar(scatter, label='虫害数量')
plt.title('PCA降维结果 (6维→2维)', fontsize=14, fontweight='bold')
plt.xlabel(f'PC1 ({explained_variance_ratio[0]*100:.1f}%)', fontsize=12)
plt.ylabel(f'PC2 ({explained_variance_ratio[1]*100:.1f}%)', fontsize=12)
plt.grid(True, alpha=0.3)

# Panel 5: heatmap of the PC1/PC2 loadings for each original feature.
plt.subplot(2, 3, 5)
component_weights = sorted_eigenvectors[:, :2]
im = plt.imshow(component_weights, cmap='RdBu_r', aspect='auto', vmin=-1, vmax=1)
plt.colorbar(im, fraction=0.046, pad=0.04)
plt.xticks(range(2), ['PC1', 'PC2'])
plt.yticks(range(6), feature_names)
plt.title('特征向量热力图', fontsize=14, fontweight='bold')

# Annotate every heatmap cell with its numeric loading.
for i in range(6):
    for j in range(2):
        plt.text(j, i, f'{component_weights[i, j]:.2f}', 
                ha='center', va='center', fontsize=10, fontweight='bold')

# Panel 6: PC1 score versus pest count.
plt.subplot(2, 3, 6)
plt.scatter(data_pca[:, 0], pest_count, alpha=0.7, s=50, edgecolors='black', linewidth=0.5)
plt.title('第一主成分 vs 虫害数量', fontsize=14, fontweight='bold')
plt.xlabel('第一主成分 (PC1)', fontsize=12)
plt.ylabel('虫害数量', fontsize=12)
plt.grid(True, alpha=0.3)

# Annotate the panel with the PC1-vs-pest Pearson correlation.
corr_pc1_pest = np.corrcoef(data_pca[:, 0], pest_count)[0, 1]
plt.text(0.05, 0.95, f'相关系数: {corr_pc1_pest:.3f}', 
         transform=plt.gca().transAxes, fontsize=12, 
         bbox=dict(boxstyle="round,pad=0.3", facecolor="white", alpha=0.8))

plt.tight_layout()
plt.show()

# --- Console report: data overview, PC1 loadings, key raw-feature correlations ---
print("\n" + "="*80)
print("PCA分析报告 - 核桃6维特征降维")
print("="*80)

print(f"\n数据概况:")
print(f"• 样本数量: {n_samples} 个核桃样本")
print(f"• 原始特征维度: 6维")
print(f"• 降维后维度: 2维")
print(f"• 信息保留率: {cumulative_variance_ratio[1]*100:.1f}%")

# List PC1 loadings whose absolute weight exceeds 0.3, with their sign.
print(f"\n主成分生物学解释:")
pc1_weights = sorted_eigenvectors[:, 0]
print(f"• 第一主成分 (PC1):")
for i in range(6):
    if abs(pc1_weights[i]) > 0.3:
        direction = "正向" if pc1_weights[i] > 0 else "负向"
        print(f"  {feature_names[i]}: {pc1_weights[i]:.3f} ({direction}相关)")

# Pairwise Pearson correlations between selected raw features.
print(f"\n关键发现:")
print(f"1. 虫害数量与核桃大小呈负相关: {np.corrcoef(walnut_size, pest_count)[0,1]:.3f}")
print(f"2. 光照强度与虫害数量呈负相关: {np.corrcoef(light_intensity, pest_count)[0,1]:.3f}")
print(f"3. 水源供应与生长日期呈正相关: {np.corrcoef(water_supply, growth_days)[0,1]:.3f}")

# Show the first 10 samples' PC1/PC2 scores alongside their pest counts.
print(f"\n前10个样本的主成分得分:")
print("样本 |      PC1     |      PC2     |  虫害数量  ")
print("-" * 50)
for i in range(10):
    print(f"{i+1:4d} | {data_pca[i, 0]:11.4f} | {data_pca[i, 1]:11.4f} | {pest_count[i]:8d}")

# --- Second script: step-by-step PCA derivation (reuses the data above) ---
# BUGFIX: the `import numpy as np` line below was accidentally indented into
# the loop body above, so it executed on every loop iteration; it is now
# dedented back to module level with the rest of the (duplicate) imports.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

# Configure a CJK-capable font so Chinese plot labels render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Reuses the fixed data defined earlier in this file (the original source
# intentionally omitted the array definitions here).
# ... [data definitions identical to the first section] ...

# Assemble the 6-dimensional feature matrix (duplicate of the first script).
feature_names = ['核桃大小', '生长日期', '叶片大小', '光照强度', '虫害数量', '水源供应']
data = np.column_stack([
    walnut_size, growth_days, leaf_size, light_intensity, pest_count, water_supply
])

print("固定核桃数据集统计:")
print("=" * 60)
print(f"{'特征':<8} {'均值':<8} {'标准差':<8} {'最小值':<8} {'最大值':<8}")
print("-" * 60)
# Per-feature summary statistics (mean / std / min / max).
for i, name in enumerate(feature_names):
    col = data[:, i]
    print(f"{name:<8} {col.mean():<8.2f} {col.std():<8.2f} {col.min():<8.2f} {col.max():<8.2f}")

print("\n" + "="*80)
print("PCA数学推导详细过程 (6维 → 2维)")
print("="*80)

# Step 1: z-score standardization; the printed formulas are part of the output.
print("\n1. 数据标准化 (Z-score标准化)")
print("公式: X_std = (X - μ) / σ")
print("目的: 消除量纲影响，使各特征具有可比性")

# Show the raw data before scaling.
print(f"\n原始数据形状: {data.shape}")
print("前5个样本的原始数据:")
print(data[:5])

# Standardize each column to zero mean and unit (population) std.
scaler = StandardScaler()
data_scaled = scaler.fit_transform(data)

print(f"\n标准化后数据形状: {data_scaled.shape}")
print("标准化后数据均值:", np.round(np.mean(data_scaled, axis=0), 6))
print("标准化后数据标准差:", np.round(np.std(data_scaled, axis=0), 6))
print("前5个样本的标准化数据:")
print(np.round(data_scaled[:5], 4))

# ============================================================================
# Step 2: covariance matrix of the standardized features
# ============================================================================
print("\n\n2. 计算协方差矩阵")
print("公式: Σ = (1/(n-1)) × X_std^T × X_std")
print("目的: 衡量各特征之间的线性相关性")

cov_matrix = np.cov(data_scaled.T)
print(f"\n协方差矩阵形状: {cov_matrix.shape} (6×6)")
print("协方差矩阵:")
for i in range(6):
    print(f"特征{i+1}: {np.round(cov_matrix[i], 4)}")

print("\n协方差矩阵对角线(各特征的方差):")
for i in range(6):
    print(f"{feature_names[i]}: {cov_matrix[i,i]:.6f}")

# ============================================================================
# Step 3: eigenvalues and eigenvectors of the covariance matrix
# ============================================================================
print("\n\n3. 计算特征值和特征向量")
print("方程: Σ × v = λ × v")
print("目的: 找到数据变化的主要方向")

eigenvalues, eigenvectors = np.linalg.eig(cov_matrix)
print(f"\n计算得到的特征值 (λ): {np.round(eigenvalues, 6)}")
print("特征向量矩阵 (v):")
print(np.round(eigenvectors, 4))

# Sanity check: Σ·v_i should equal λ_i·v_i for every eigenpair.
print("\n验证特征值分解:")
for i in range(6):
    left_side = cov_matrix @ eigenvectors[:, i]
    right_side = eigenvalues[i] * eigenvectors[:, i]
    error = np.linalg.norm(left_side - right_side)
    print(f"特征值 {i+1}: 误差 = {error:.10f}")

# ============================================================================
# Step 4: sort the eigenpairs by descending eigenvalue
# ============================================================================
print("\n\n4. 特征值排序 (从大到小)")
print("目的: 按重要性排序主成分")

sorted_indices = np.argsort(eigenvalues)[::-1]
sorted_eigenvalues = eigenvalues[sorted_indices]
sorted_eigenvectors = eigenvectors[:, sorted_indices]

print("排序后特征值:", np.round(sorted_eigenvalues, 6))
print("总方差:", np.sum(sorted_eigenvalues))

print("\n排序后特征向量矩阵 (按特征值从大到小):")
for i in range(6):
    print(f"PC{i+1}: {np.round(sorted_eigenvectors[:, i], 4)}")

# ============================================================================
# Step 5: explained-variance ratios and their cumulative sum
# ============================================================================
print("\n\n5. 计算解释方差比")
print("公式: 解释方差比 = λ_i / Σ(λ_j)")
print("目的: 衡量每个主成分保留的原始信息量")

explained_variance_ratio = sorted_eigenvalues / np.sum(sorted_eigenvalues)
cumulative_variance_ratio = np.cumsum(explained_variance_ratio)

print("各主成分解释方差比:")
for i in range(6):
    print(f"PC{i+1}: {explained_variance_ratio[i]:.6f} ({explained_variance_ratio[i]*100:.2f}%)")

print("\n累计解释方差比:")
for i in range(6):
    print(f"前{i+1}个主成分: {cumulative_variance_ratio[i]:.6f} ({cumulative_variance_ratio[i]*100:.2f}%)")

# ============================================================================
# Step 6: choose the top-2 components and project the data
# ============================================================================
print("\n\n6. 选择主成分和投影")
print("选择前2个主成分进行降维")
print("投影公式: Y = X_std × W")
print("其中 W 是前k个特征向量组成的矩阵")

# Projection matrix: the two eigenvectors with the largest eigenvalues.
W = sorted_eigenvectors[:, :2]
print(f"\n投影矩阵 W 形状: {W.shape}")
print("投影矩阵 W:")
print(np.round(W, 4))

# Manual projection of the standardized data onto the 2-D subspace.
data_pca_manual = data_scaled @ W
print(f"\n手动计算的降维数据形状: {data_pca_manual.shape}")

# ============================================================================
# Step 7: cross-check against sklearn's PCA
# ============================================================================
print("\n\n7. 使用sklearn PCA验证结果")
pca = PCA(n_components=2)
data_pca = pca.fit_transform(data_scaled)

print("sklearn PCA特征值:", pca.explained_variance_)
print("sklearn PCA解释方差比:", pca.explained_variance_ratio_)

# NOTE(review): eigenvector signs are arbitrary, so the manual projection can
# legitimately differ from sklearn's by a per-column sign flip; this norm may
# therefore be large even when both results are correct — confirm intent.
manual_sklearn_diff = np.linalg.norm(data_pca_manual - data_pca)
print(f"手动计算与sklearn结果差异: {manual_sklearn_diff:.10f}")

# ============================================================================
# Step 8: reconstruct the data from the 2-D projection (optional)
# ============================================================================
print("\n\n8. 数据重构")
print("重构公式: X_reconstructed = Y × W^T")

data_reconstructed = data_pca @ W.T
print(f"重构数据形状: {data_reconstructed.shape}")

# Frobenius norm of the residual between standardized and reconstructed data.
reconstruction_error = np.linalg.norm(data_scaled - data_reconstructed, 'fro')
print(f"重构误差 (Frobenius范数): {reconstruction_error:.6f}")

# Fraction of the total variance retained by the 2-component reconstruction.
original_variance = np.sum(np.var(data_scaled, axis=0))
reconstructed_variance = np.sum(np.var(data_reconstructed, axis=0))
reconstruction_accuracy = reconstructed_variance / original_variance
print(f"重构精度: {reconstruction_accuracy:.6f} ({reconstruction_accuracy*100:.2f}%)")

# ============================================================================
# Summary of the derivation; the f-string below is user-facing console output.
# ============================================================================
print("\n" + "="*80)
print("PCA数学推导总结")
print("="*80)

print(f"""
数学公式总结:

1. 标准化: 
   X_std = (X - μ) / σ

2. 协方差矩阵:
   Σ = (1/(n-1)) × X_std^T × X_std

3. 特征分解:
   Σ × v_i = λ_i × v_i

4. 特征值排序:
   λ_1 ≥ λ_2 ≥ ... ≥ λ_6

5. 选择主成分:
   W = [v_1, v_2]  (前k个特征向量)

6. 投影降维:
   Y = X_std × W

7. 数据重构:
   X_reconstructed = Y × W^T

在我们的核桃数据中:
• 原始维度: 6维
• 降维后: 2维  
• 信息保留: {cumulative_variance_ratio[1]*100:.2f}%
• 主要成分: PC1 ({explained_variance_ratio[0]*100:.2f}%) + PC2 ({explained_variance_ratio[1]*100:.2f}%)
""")

# --- Visualization: 2x3 panel for the derivation script ---
plt.figure(figsize=(20, 15))

# Panel 1: eigenvalue scree plot with the Kaiser (eigenvalue = 1) threshold.
plt.subplot(2, 3, 1)
components = [f'PC{i+1}' for i in range(6)]
plt.bar(components, sorted_eigenvalues, alpha=0.7, 
        color=['red', 'blue', 'green', 'orange', 'purple', 'brown'])
plt.axhline(y=1, color='r', linestyle='--', alpha=0.7, label='特征值=1阈值(Kaiser准则)')
plt.title('特征值 - Scree图\n(特征值>1表示重要成分)', fontsize=14, fontweight='bold')
plt.xlabel('主成分', fontsize=12)
plt.ylabel('特征值', fontsize=12)
plt.legend()
plt.grid(True, alpha=0.3)

# Panel 2: cumulative explained-variance curve with 80% / 90% guide lines.
plt.subplot(2, 3, 2)
plt.plot(range(1, 7), cumulative_variance_ratio, 'o-', linewidth=3, markersize=8)
plt.axhline(y=0.8, color='r', linestyle='--', alpha=0.7, label='80%信息阈值')
plt.axhline(y=0.9, color='g', linestyle='--', alpha=0.7, label='90%信息阈值')
plt.title('累计解释方差比', fontsize=14, fontweight='bold')
plt.xlabel('主成分数量', fontsize=12)
plt.ylabel('累计解释方差比', fontsize=12)
plt.legend()
plt.grid(True, alpha=0.3)

# Panel 3: samples projected onto PC1/PC2, colored by pest count.
plt.subplot(2, 3, 3)
scatter = plt.scatter(data_pca[:, 0], data_pca[:, 1], c=pest_count, 
                     cmap='RdYlBu_r', s=60, alpha=0.8, edgecolors='black', linewidth=0.5)
plt.colorbar(scatter, label='虫害数量')
plt.title('PCA降维结果 (6维→2维)', fontsize=14, fontweight='bold')
plt.xlabel(f'PC1 ({explained_variance_ratio[0]*100:.1f}%)', fontsize=12)
plt.ylabel(f'PC2 ({explained_variance_ratio[1]*100:.1f}%)', fontsize=12)
plt.grid(True, alpha=0.3)

# Panel 4: grouped bars of the PC1/PC2 loadings per original feature.
plt.subplot(2, 3, 4)
x_pos = np.arange(len(feature_names))
width = 0.35

plt.bar(x_pos - width/2, sorted_eigenvectors[:, 0], width, label='PC1', alpha=0.7)
plt.bar(x_pos + width/2, sorted_eigenvectors[:, 1], width, label='PC2', alpha=0.7)

plt.xlabel('原始特征', fontsize=12)
plt.ylabel('特征向量权重', fontsize=12)
plt.title('主成分的特征向量权重', fontsize=14, fontweight='bold')
plt.xticks(x_pos, feature_names, rotation=45)
plt.legend()
plt.grid(True, alpha=0.3)

# Panel 5: correlation of each standardized feature with the PC1 scores.
plt.subplot(2, 3, 5)
correlations_with_pc1 = []
for i in range(6):
    corr = np.corrcoef(data_scaled[:, i], data_pca[:, 0])[0, 1]
    correlations_with_pc1.append((feature_names[i], corr))

features, corrs = zip(*correlations_with_pc1)
plt.barh(features, corrs, alpha=0.7, color='steelblue')
plt.title('各特征与第一主成分的相关性', fontsize=14, fontweight='bold')
plt.xlabel('相关系数', fontsize=12)
plt.grid(True, alpha=0.3)

# Panel 6: histogram of per-sample reconstruction errors (Euclidean norm).
plt.subplot(2, 3, 6)
reconstruction_errors_per_sample = np.sqrt(np.sum((data_scaled - data_reconstructed)**2, axis=1))
plt.hist(reconstruction_errors_per_sample, bins=20, alpha=0.7, color='orange', edgecolor='black')
plt.title('样本重构误差分布', fontsize=14, fontweight='bold')
plt.xlabel('重构误差', fontsize=12)
plt.ylabel('样本数量', fontsize=12)
plt.grid(True, alpha=0.3)

plt.tight_layout()
plt.show()

# --- Final console report: dimensionality-reduction summary and PC loadings ---
print("\n" + "="*80)
print("PCA降维分析最终报告")
print("="*80)

print(f"\n降维效果:")
print(f"• 原始维度: 6维 → 降维后: 2维 (减少66.7%的维度)")
print(f"• 信息保留: {cumulative_variance_ratio[1]*100:.2f}%")
print(f"• 信息损失: {(1-cumulative_variance_ratio[1])*100:.2f}%")

# List features whose absolute loading exceeds 0.4 on each of the first two PCs.
print(f"\n主成分解释:")
pc1_weights = sorted_eigenvectors[:, 0]
pc2_weights = sorted_eigenvectors[:, 1]

print("第一主成分 (PC1) 主要代表:")
for i in range(6):
    if abs(pc1_weights[i]) > 0.4:
        direction = "正向影响" if pc1_weights[i] > 0 else "负向影响"
        print(f"  • {feature_names[i]}: {pc1_weights[i]:.3f} ({direction})")

print("\n第二主成分 (PC2) 主要代表:")
for i in range(6):
    if abs(pc2_weights[i]) > 0.4:
        direction = "正向影响" if pc2_weights[i] > 0 else "负向影响"
        print(f"  • {feature_names[i]}: {pc2_weights[i]:.3f} ({direction})")