# step4_lda.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler

print("=== 步骤4: LDA降维实现 ===")

# Column names for the UCI Wine dataset: the first column is the class
# label (1-3), followed by the 13 chemical-analysis features.
column_names = ['class', 'alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 
                'magnesium', 'total_phenols', 'flavanoids', 'nonflavanoid_phenols',
                'proanthocyanins', 'color_intensity', 'hue', 'od280/od315', 'proline']

try:
    # Load and preprocess the data; expects 'wine.data' (no header row)
    # in the current working directory.
    print("正在加载和预处理数据...")
    wine_data = pd.read_csv('wine.data', header=None, names=column_names)
    
    # Keep only classes 1 and 2 so the task is binary; for two classes
    # LDA yields a single useful discriminant direction.
    filtered_data = wine_data[wine_data['class'].isin([1, 2])].copy()
    X = filtered_data.drop('class', axis=1)
    y = filtered_data['class']
    
    # Standardize every feature to zero mean / unit variance so scatter
    # matrices are not dominated by large-scale features (e.g. proline).
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)
    
    print(f"数据预处理完成！")
    print(f"标准化后的数据形状: {X_scaled.shape}")
    print(f"类别分布: 类别1有{sum(y == 1)}个样本, 类别2有{sum(y == 2)}个样本")
    # Hand-rolled LDA (no sklearn) via the classic scatter-matrix recipe.
    class SimpleLDA:
        """Minimal Fisher LDA solved through the S_w^{-1} S_b eigenproblem.

        Keeps the top `n_components` eigenvectors (real parts) as the
        projection matrix; `transform` is a plain matrix product.
        """

        def __init__(self, n_components=1):
            # Number of discriminant axes to retain.
            self.n_components = n_components
            self.components = None

        def fit(self, X, y):
            """Learn the projection from samples X (n, d) and labels y (n,)."""
            print("\nLDA计算中...")
            # Grand mean over every sample, used by the between-class term.
            overall_mean = np.mean(X, axis=0)
            print(f"   总体均值向量形状: {overall_mean.shape}")

            dim = X.shape[1]
            S_w = np.zeros((dim, dim))  # within-class scatter
            S_b = np.zeros((dim, dim))  # between-class scatter

            classes = np.unique(y)
            print(f"   类别标签: {classes}")

            for c in classes:
                members = X[y == c]
                mean_c = np.mean(members, axis=0)
                n_c = len(members)

                print(f"   类别 {c}: {n_c} 个样本")

                # Within-class: un-normalized covariance of this class
                # (np.cov divides by n_c - 1, so multiply it back out).
                S_w += np.cov(members, rowvar=False) * (n_c - 1)

                # Between-class: sample-count-weighted outer product of the
                # class-mean offset from the grand mean.
                offset = (mean_c - overall_mean).reshape(-1, 1)
                S_b += n_c * np.dot(offset, offset.T)

            print(f"   类内散度矩阵 S_w 形状: {S_w.shape}")
            print(f"   类间散度矩阵 S_b 形状: {S_b.shape}")

            # Eigendecompose S_w^+ S_b; pinv guards against a singular S_w.
            evals, evecs = np.linalg.eig(np.dot(np.linalg.pinv(S_w), S_b))

            # Keep the eigenvectors with the largest (real) eigenvalues.
            order = np.argsort(evals.real)[::-1][:self.n_components]
            self.components = evecs.real[:, order]

            self.eigenvalues = evals.real[order]

            print(f"   特征值: {self.eigenvalues}")

            return self

        def transform(self, X):
            """Project X onto the learned discriminant axes."""
            return np.dot(X, self.components)

        def fit_transform(self, X, y):
            """Convenience: fit on (X, y), then return the projection of X."""
            self.fit(X, y)
            return self.transform(X)

    # Apply LDA: project the 13-D standardized features onto one axis.
    print("\n开始LDA降维...")
    lda = SimpleLDA(n_components=1)
    X_lda = lda.fit_transform(X_scaled, y)

    print(f"\nLDA降维完成！")
    print(f"降维后的数据形状: {X_lda.shape}")
    
    # Show the first few projected samples next to their true labels.
    print("\nLDA降维结果（前15个样本）:")
    print("样本编号 | 原始类别 | LDA特征")
    print("-" * 40)
    
    for i in range(min(15, len(X_lda))):
        print(f"{i+1:6} | {y.iloc[i]:8} | {X_lda[i, 0]:8.4f}")

    # Persist the 1-D projection alongside the class labels for later steps.
    lda_results = pd.DataFrame({
        'class': y.values,
        'LDA_feature': X_lda[:, 0]
    })
    lda_results.to_csv('wine_lda_results.csv', index=False)
    print(f"\nLDA结果已保存到: wine_lda_results.csv")
    
    # Visualize the 1-D LDA projection in two complementary views.
    print("\n生成LDA可视化图...")
    plt.figure(figsize=(12, 5))
    
    # Subplot 1: scatter of the projection, one horizontal strip per class.
    plt.subplot(1, 2, 1)
    
    colors = ['red', 'blue']
    labels = ['Class 1', 'Class 2']
    
    for i, class_label in enumerate([1, 2]):
        mask = (y == class_label)
        # Vertical jitter keeps overlapping 1-D points visible; strips are
        # centered at 0 (class 1) and 0.5 (class 2), matching the yticks.
        # NOTE(review): np.random is unseeded, so the jitter (and the saved
        # figure) varies between runs — seed it if reproducibility matters.
        y_jitter = np.random.normal(i * 0.5, 0.05, size=len(X_lda[mask]))
        plt.scatter(X_lda[mask, 0], y_jitter, 
                   c=colors[i], alpha=0.7, s=50,
                   label=labels[i])
    
    plt.xlabel('Linear Discriminant')
    plt.ylabel('Class (with jitter)')
    plt.title('LDA Projection of Wine Data')
    plt.yticks([0, 0.5], ['Class 1', 'Class 2'])
    plt.legend()
    plt.grid(True, alpha=0.3)
    
    # Subplot 2: per-class histograms of the projected values.
    plt.subplot(1, 2, 2)
    
    for i, class_label in enumerate([1, 2]):
        mask = (y == class_label)
        plt.hist(X_lda[mask, 0], bins=20, alpha=0.7, 
                color=colors[i], label=labels[i])
    
    plt.xlabel('Linear Discriminant')
    plt.ylabel('Frequency')
    plt.title('LDA Projection Histogram')
    plt.legend()
    plt.grid(True, alpha=0.3)
    
    plt.tight_layout()
    plt.savefig('lda_visualization.png', dpi=300, bbox_inches='tight')
    plt.show()
    
    print("可视化图已保存为: lda_visualization.png")
    
    # Summarize how well the single LDA axis separates the two classes.
    print("\n📋 LDA分类效果分析:")
    class_1_lda = X_lda[y == 1, 0]
    class_2_lda = X_lda[y == 2, 0]
    
    print(f"类别1的LDA特征均值: {np.mean(class_1_lda):.4f}")
    print(f"类别2的LDA特征均值: {np.mean(class_2_lda):.4f}")
    print(f"类别1的LDA特征标准差: {np.std(class_1_lda):.4f}")
    print(f"类别2的LDA特征标准差: {np.std(class_2_lda):.4f}")
    print(f"类别间均值差异: {abs(np.mean(class_1_lda) - np.mean(class_2_lda)):.4f}")
    
    # Simple threshold classifier at the midpoint between the class means.
    # BUG FIX: eigenvectors from np.linalg.eig are only defined up to sign,
    # so class 2 does NOT necessarily project to larger values than class 1.
    # The old rule `(X_lda > threshold) + 1` assumed it does, and would
    # report ~0 accuracy whenever the axis came out flipped. Orient the
    # decision rule from the class means instead.
    threshold = (np.mean(class_1_lda) + np.mean(class_2_lda)) / 2
    if np.mean(class_2_lda) >= np.mean(class_1_lda):
        predictions = np.where(X_lda[:, 0] > threshold, 2, 1)
    else:
        predictions = np.where(X_lda[:, 0] > threshold, 1, 2)
    accuracy = np.mean(predictions == y.values)
    print(f"基于LDA的简单分类准确率: {accuracy:.4f}")

except Exception as e:
    # Broad catch is deliberate for this step-by-step teaching script:
    # report the error plus the full traceback rather than crashing.
    print(f"出错: {e}")
    import traceback
    traceback.print_exc()

# Pause so console output stays visible before the next step runs.
input("\n按 Enter 键继续下一步...")