import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_wine

# Load the wine dataset into a single DataFrame (features + label column).
wine = load_wine()
data = pd.DataFrame(data=np.c_[wine['data'], wine['target']], columns=wine['feature_names'] + ['target'])

# Keep only the samples whose class label is 1 or 2.
mask = data['target'].isin([1, 2])
class_1_2_data = data[mask]

# Split into feature matrix X and label vector y.
y = class_1_2_data['target']
X = class_1_2_data.drop(columns='target')

# Standardize features to zero mean and unit variance before projecting.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Project the standardized features onto the first two principal components.
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X_scaled)

# Report the fraction of total variance captured by each retained component.
explained_variance_ratio = pca.explained_variance_ratio_
for i, ratio in enumerate(explained_variance_ratio, start=1):
    print(f"PCA Component {i} explains {ratio:.2%} of the variance")

# Supervised reduction via LDA: with only two classes there is at most
# one discriminant axis, hence n_components=1.
lda = LinearDiscriminantAnalysis(n_components=1)
X_lda = lda.fit_transform(X_scaled, y)

# Build a discrete colormap with one color per class present in y.
# FIX: plt.cm.get_cmap was deprecated in Matplotlib 3.7 and removed in 3.9;
# plt.get_cmap is the supported equivalent with the same (name, lut) signature.
cmap = plt.get_cmap('viridis', len(np.unique(y)))

# Side-by-side comparison of the unsupervised (PCA) and supervised (LDA) projections.
plt.figure(figsize=(16, 6))

# Left panel: 2-D PCA scatter, colored by class label.
plt.subplot(1, 2, 1)
scatter_pca = plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y, cmap=cmap, edgecolor='k', s=80)
plt.title('PCA of Wine Data (Classes 1 and 2)')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.colorbar(scatter_pca, label='Class')

# Right panel: 1-D LDA projection drawn as a horizontal strip (y fixed at 0,
# since there is only a single discriminant axis).
plt.subplot(1, 2, 2)
scatter_lda = plt.scatter(X_lda, np.zeros_like(X_lda), c=y, cmap=cmap, edgecolor='k', s=80)
plt.title('LDA of Wine Data (Classes 1 and 2)')
plt.xlabel('Linear Discriminant')
plt.yticks([])  # hide y-axis ticks: the vertical position carries no information
plt.colorbar(scatter_lda, label='Class')

plt.tight_layout()
plt.show()