import numpy as np
import pandas as pd

# Column layout of the raw Wine dataset: the first column is the class
# label; the remaining 13 columns are physico-chemical features
# (alcohol, malic acid, ash, ...).
col_names = [
    "class", "alcohol", "malic_acid", "ash", "alcalinity_of_ash",
    "magnesium", "total_phenols", "flavanoids", "nonflavanoid_phenols",
    "proanthocyanins", "color_intensity", "hue", "od280/od315_of_diluted_wines",
    "proline"
]
# The raw file ships without a header row, so supply the names explicitly.
raw_wine = pd.read_csv("wine.data", header=None, names=col_names)

# Keep only samples labelled class 1 or class 2 (drop class 3).
filtered_wine = raw_wine[raw_wine["class"].isin([1, 2])]

# Split into feature matrix X and label vector y
# (these names are referenced by the PCA/LDA sections below).
X = filtered_wine.iloc[:, 1:].values
y = filtered_wine.iloc[:, 0].values

# Sanity-check the filtering result.
print(f"筛选后的数据量：{len(filtered_wine)}条（原始178条，仅保留1、2类）")
print(f"特征维度：{X.shape}（{X.shape[0]}条样本，{X.shape[1]}个特征）")


def pca_dimension_reduction(X, n_components=2):
    """Reduce X to `n_components` dimensions with PCA.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input feature matrix.
    n_components : int
        Number of principal components to keep (default 2).

    Returns
    -------
    ndarray, shape (n_samples, n_components)
        The standardized data projected onto the top principal components.
    """
    X = np.asarray(X, dtype=float)

    # Standardize each feature to zero mean / unit variance.
    X_mean = np.mean(X, axis=0)
    X_std = np.std(X, axis=0)
    # Guard: a constant feature has std == 0 and would cause a division
    # by zero (NaN output); leave such columns at zero after centering.
    X_std[X_std == 0] = 1.0
    X_standardized = (X - X_mean) / X_std

    # Covariance matrix of the standardized features (shape: [d, d]).
    cov_matrix = np.cov(X_standardized.T)

    # The covariance matrix is symmetric, so use the symmetric solver:
    # eigh returns guaranteed-real eigenvalues in ascending order, whereas
    # eig may return complex values with tiny imaginary noise in
    # arbitrary order.
    eigenvalues, eigenvectors = np.linalg.eigh(cov_matrix)

    # Select the eigenvectors of the n_components largest eigenvalues.
    top_eigen_indices = np.argsort(eigenvalues)[::-1][:n_components]
    top_eigenvectors = eigenvectors[:, top_eigen_indices]  # shape: [d, n_components]

    # Project onto the principal axes; result shape: [n_samples, n_components].
    return np.dot(X_standardized, top_eigenvectors)

# Run PCA and keep the first two principal components.
X_pca = pca_dimension_reduction(X, n_components=2)

# Show the first ten projected samples.
print("\nPCA降维后的前10条两维特征：")
print("第1维特征\t第2维特征")
print("-" * 30)
for row in X_pca[:10]:
    print(f"{row[0]:.4f}\t\t{row[1]:.4f}")


def lda_dimension_reduction(X, y, n_components=1):
    """Two-class Fisher LDA projection.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input feature matrix.
    y : array-like, shape (n_samples,)
        Class labels; expected values are 1 and 2 (as in the filtered
        Wine data).
    n_components : int
        Number of discriminant axes to keep (default 1; for a 2-class
        problem only the first axis is informative).

    Returns
    -------
    ndarray, shape (n_samples, n_components)
        X projected onto the top discriminant direction(s).
    """
    X = np.asarray(X, dtype=float)
    y = np.asarray(y)

    # Split the samples by class label.
    X_class1 = X[y == 1]
    X_class2 = X[y == 2]

    # Per-class means.
    mean_class1 = np.mean(X_class1, axis=0)
    mean_class2 = np.mean(X_class2, axis=0)

    # Within-class scatter Sw (sum of per-class covariance matrices).
    Sw = np.cov(X_class1.T) + np.cov(X_class2.T)

    # Between-class scatter Sb (rank-1 for the two-class case).
    mean_diff = mean_class1 - mean_class2
    Sb = np.outer(mean_diff, mean_diff)

    # Eigen-decompose Sw^-1 Sb. solve() avoids forming the explicit
    # inverse, which is numerically more stable than inv() @ Sb.
    eigenvalues, eigenvectors = np.linalg.eig(np.linalg.solve(Sw, Sb))
    # eig may return a complex dtype with negligible imaginary parts.
    eigenvalues = eigenvalues.real
    eigenvectors = eigenvectors.real

    # BUG FIX: the original ignored n_components and always returned a
    # single dimension; take the top-n_components eigenvectors instead.
    top_indices = np.argsort(eigenvalues)[::-1][:n_components]
    W = eigenvectors[:, top_indices]  # shape: [n_features, n_components]

    # Project the (unstandardized) samples onto the discriminant axes.
    return np.dot(X, W)

# Run LDA to project onto a single discriminant axis.
X_lda = lda_dimension_reduction(X, y, n_components=1)

# Show the first ten projected samples.
print("\nLDA降维后的前10条特征（1维）：")
print("LDA降维特征")
print("-" * 20)
for value in X_lda[:10, 0]:
    print(f"{value:.4f}")