# step5_comparison.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler

print("=== 步骤5: PCA与LDA对比分析 ===")

# 定义列名
column_names = ['class', 'alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 
                'magnesium', 'total_phenols', 'flavanoids', 'nonflavanoid_phenols',
                'proanthocyanins', 'color_intensity', 'hue', 'od280/od315', 'proline']

try:
    # Load and preprocess the data.
    print("正在加载数据...")
    # wine.data ships without a header row, so the column names are supplied explicitly.
    wine_data = pd.read_csv('wine.data', header=None, names=column_names)
    
    # Keep only classes 1 and 2 — a binary problem for the PCA-vs-LDA comparison.
    filtered_data = wine_data[wine_data['class'].isin([1, 2])].copy()
    X = filtered_data.drop('class', axis=1)
    y = filtered_data['class']
    
    # Standardize each feature to zero mean / unit variance so that PCA's
    # variance maximization is not dominated by large-scale features.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)
    
    print(f"数据加载完成！")
    print(f"数据形状: {X_scaled.shape}")
    print(f"类别分布: 类别1 - {sum(y == 1)}个样本, 类别2 - {sum(y == 2)}个样本")
    # From-scratch PCA: covariance matrix + symmetric eigendecomposition.
    class SimplePCA:
        """Principal component analysis via eigendecomposition of the sample
        covariance matrix.

        Attributes populated by fit():
            components -- (n_features, n_components) projection matrix.
            mean -- per-feature mean used for centering.
            explained_variance -- eigenvalues of the retained components.
            explained_variance_ratio -- those eigenvalues as fractions of total variance.
        """

        def __init__(self, n_components=2):
            self.n_components = n_components
            self.components = None
            self.mean = None

        def fit(self, X):
            """Learn the projection from data matrix X of shape (n_samples, n_features)."""
            self.mean = X.mean(axis=0)
            centered = X - self.mean
            covariance = np.cov(centered, rowvar=False)
            # eigh fits here: the covariance matrix is symmetric, and eigh
            # returns real eigenvalues in ascending order.
            eigvals, eigvecs = np.linalg.eigh(covariance)
            # Re-order descending and keep the leading n_components directions.
            order = np.argsort(eigvals)[::-1][:self.n_components]
            self.components = eigvecs[:, order]
            self.explained_variance = eigvals[order]
            self.explained_variance_ratio = self.explained_variance / eigvals.sum()
            return self

        def transform(self, X):
            """Project X onto the learned principal components."""
            return (X - self.mean) @ self.components

        def fit_transform(self, X):
            """Fit on X, then return its projection."""
            return self.fit(X).transform(X)

    # From-scratch Fisher LDA: eigendecomposition of pinv(S_w) @ S_b.
    class SimpleLDA:
        """Fisher linear discriminant analysis.

        Finds the directions that maximize between-class scatter relative to
        within-class scatter by eigendecomposing pinv(S_w) @ S_b.

        Attributes populated by fit():
            components -- (n_features, n_components) projection matrix.
            eigenvalues -- real parts of the retained eigenvalues.
        """

        def __init__(self, n_components=1):
            self.n_components = n_components
            self.components = None

        def fit(self, X, y):
            """Learn discriminant directions from X (n_samples, n_features) and labels y."""
            grand_mean = np.mean(X, axis=0)
            d = X.shape[1]
            within = np.zeros((d, d))
            between = np.zeros((d, d))

            for label in np.unique(y):
                members = X[y == label]
                count = len(members)
                # Within-class scatter: un-normalized covariance of this class.
                within += np.cov(members, rowvar=False) * (count - 1)
                # Between-class scatter: class-size-weighted outer product of the mean offset.
                offset = (np.mean(members, axis=0) - grand_mean).reshape(-1, 1)
                between += count * (offset @ offset.T)

            # pinv tolerates a singular within-class scatter matrix.
            eigvals, eigvecs = np.linalg.eig(np.linalg.pinv(within) @ between)
            # eig may return complex values with ~0 imaginary parts; keep the real parts.
            order = np.argsort(eigvals.real)[::-1][:self.n_components]
            self.components = eigvecs.real[:, order]
            self.eigenvalues = eigvals.real[order]
            return self

        def transform(self, X):
            """Project X onto the learned discriminant directions."""
            return X @ self.components

        def fit_transform(self, X, y):
            """Fit on (X, y), then return the projection of X."""
            return self.fit(X, y).transform(X)

    # Run both dimensionality reductions on the standardized features.
    print("\n计算PCA...")
    pca = SimplePCA(n_components=2)
    X_pca = pca.fit_transform(X_scaled)
    
    print("计算LDA...")
    # With two classes, LDA yields at most one discriminant direction (rank of S_b).
    lda = SimpleLDA(n_components=1)
    X_lda = lda.fit_transform(X_scaled, y)
    
    print("降维计算完成！")

    # Side-by-side comparison figure: PCA projection vs LDA projection vs raw features.
    print("\n生成综合对比图...")
    plt.figure(figsize=(15, 5))
    
    # Subplot 1: 2-D PCA scatter, colored by class.
    plt.subplot(1, 3, 1)
    colors = ['red', 'blue']
    for i, class_label in enumerate([1, 2]):
        mask = (y == class_label)
        plt.scatter(X_pca[mask, 0], X_pca[mask, 1], 
                   c=colors[i], alpha=0.7, s=50,
                   label=f'Class {class_label}')
    plt.xlabel('Principal Component 1')
    plt.ylabel('Principal Component 2')
    plt.title('PCA Projection\n(无监督降维)')
    plt.legend()
    plt.grid(True, alpha=0.3)
    # Annotate the explained-variance ratios in the corner of the axes.
    plt.text(0.05, 0.95, f'Explained Variance:\nPC1: {pca.explained_variance_ratio[0]:.3f}\nPC2: {pca.explained_variance_ratio[1]:.3f}', 
             transform=plt.gca().transAxes, verticalalignment='top',
             bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5))

    # Subplot 2: 1-D LDA projection; vertical jitter separates the classes visually.
    plt.subplot(1, 3, 2)
    for i, class_label in enumerate([1, 2]):
        mask = (y == class_label)
        # Random jitter around 0 (class 1) / 0.5 (class 2); the RNG is unseeded,
        # so the exact vertical scatter varies between runs.
        y_jitter = np.random.normal(i * 0.5, 0.05, size=len(X_lda[mask]))
        plt.scatter(X_lda[mask, 0], y_jitter, 
                   c=colors[i], alpha=0.7, s=50,
                   label=f'Class {class_label}')
    plt.xlabel('Linear Discriminant')
    plt.ylabel('Class (with jitter)')
    plt.title('LDA Projection\n(有监督降维)')
    plt.yticks([0, 0.5], ['Class 1', 'Class 2'])
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Subplot 3: first two standardized features, for contrast with the projections.
    plt.subplot(1, 3, 3)
    for i, class_label in enumerate([1, 2]):
        mask = (y == class_label)
        plt.scatter(X_scaled[mask, 0], X_scaled[mask, 1], 
                   c=colors[i], alpha=0.7, s=50,
                   label=f'Class {class_label}')
    plt.xlabel('Feature 1 (Alcohol) - Standardized')
    plt.ylabel('Feature 2 (Malic Acid) - Standardized')
    plt.title('Original Data\n(前两个特征)')
    plt.legend()
    plt.grid(True, alpha=0.3)

    plt.tight_layout()
    # Save before show(): show() may clear the current figure in some backends.
    plt.savefig('pca_lda_comparison.png', dpi=300, bbox_inches='tight')
    plt.show()
    
    print("对比图已保存为: pca_lda_comparison.png")

    # 详细对比分析
    print("\n" + "="*50)
    print("PCA与LDA详细对比分析")
    print("="*50)
    
    print("\n方法原理对比:")
    print("PCA (主成分分析):")
    print("  - 无监督学习方法")
    print("  - 目标: 最大化数据方差")
    print("  - 不考虑类别标签")
    print("  - 用于数据压缩和可视化")
    
    print("\nLDA (线性判别分析):")
    print("  - 有监督学习方法") 
    print("  - 目标: 最大化类间距离，最小化类内距离")
    print("  - 利用类别标签信息")
    print("  - 用于分类任务的特征提取")
    
    print("\n性能指标对比:")
    print("PCA性能:")
    print(f"  - 主成分1解释方差: {pca.explained_variance_ratio[0]:.4f}")
    print(f"  - 主成分2解释方差: {pca.explained_variance_ratio[1]:.4f}")
    print(f"  - 累计解释方差: {np.sum(pca.explained_variance_ratio):.4f}")
    
    print("\nLDA性能:")
    print(f"  - 特征值: {lda.eigenvalues[0]:.4f}")
    # 计算LDA的分类准确率
    class_1_mean = np.mean(X_lda[y == 1, 0])
    class_2_mean = np.mean(X_lda[y == 2, 0])
    threshold = (class_1_mean + class_2_mean) / 2
    predictions = (X_lda[:, 0] > threshold).astype(int) + 1
    lda_accuracy = np.mean(predictions == y.values)
    print(f"  - 基于LDA的简单分类准确率: {lda_accuracy:.4f}")
    
    print("\n适用场景总结:")
    print("PCA适合:")
    print("  - 数据探索和可视化")
    print("  - 特征数量很多时的数据压缩")
    print("  - 去除数据中的噪声")
    print("  - 无标签数据的预处理")
    
    print("\nLDA适合:")
    print("  - 分类任务的特征提取")
    print("  - 有监督的降维")
    print("  - 提高分类器的性能")
    print("  - 需要利用类别信息的场景")
    
    print("\n生成的结果文件:")
    print("  - wine_data.csv: 原始数据CSV格式")
    print("  - wine_processed.csv: 预处理后的数据") 
    print("  - wine_pca_results.csv: PCA降维结果")
    print("  - wine_lda_results.csv: LDA降维结果")
    print("  - pca_visualization.png: PCA可视化")
    print("  - lda_visualization.png: LDA可视化")
    print("  - pca_lda_comparison.png: 综合对比图")
    
    print("\n第1题所有任务完成！")
    print("现在你可以开始第2题和第3题的KNN和ID3算法实现了。")

except Exception as e:
    # Top-level catch-all for this teaching script: report the error and the
    # full traceback instead of letting the console window vanish silently.
    print(f"出错: {e}")
    import traceback
    traceback.print_exc()

# Keep the console open until the user presses Enter.
input("\n按 Enter 键结束第1题...")