import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import urllib.request

# Configure Matplotlib so Chinese glyphs render (avoids missing-glyph font
# warnings) and the Unicode minus sign displays correctly on axes.
plt.rcParams.update({
    'font.sans-serif': ['SimHei', 'Microsoft YaHei', 'DejaVu Sans'],  # CJK-capable fonts first
    'axes.unicode_minus': False,  # render '-' instead of the Unicode minus glyph
})

import os

# UCI Wine dataset: first column is the class label (1-3), followed by 13
# numeric chemical-analysis features.
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
local_filename = "wine.data"

# Download only when no cached copy exists (the original re-downloaded on
# every run). If the download fails AND no local copy is present, fail fast
# with a clear message instead of crashing later inside pd.read_csv.
if os.path.exists(local_filename):
    print(f"使用本地数据集：{local_filename}")
else:
    try:
        urllib.request.urlretrieve(url, local_filename)
        print(f"数据集已下载至：{local_filename}")
    except Exception as e:
        raise RuntimeError(
            f"下载失败且本地不存在数据集：{local_filename}（{e}）"
        ) from e

# 2. Preprocessing: load the raw CSV and keep only class-1 / class-2 samples,
# turning the problem into a binary one (needed for the 1-D LDA step later).
feature_names = [
    "Alcohol", "Malic acid", "Ash", "Alcalinity of ash", "Magnesium",
    "Total phenols", "Flavanoids", "Nonflavanoid phenols", "Proanthocyanins",
    "Color intensity", "Hue", "OD280/OD315", "Proline",
]
wine_data = pd.read_csv(local_filename, header=None,
                        names=["label"] + feature_names)

# Restrict to labels 1 and 2 and renumber rows from zero.
binary_mask = wine_data["label"].isin([1, 2])
new_wine = wine_data.loc[binary_mask].reset_index(drop=True)
X = new_wine.loc[:, feature_names].to_numpy()
y = new_wine["label"].to_numpy()

# Standardize to zero mean / unit variance so that PCA is not dominated by
# large-scale columns such as Proline.
X_std = StandardScaler().fit_transform(X)

print(f"预处理后数据规模：{X_std.shape[0]}个样本，{X_std.shape[1]}个特征")
print(f"类别1样本数：{sum(y==1)}，类别2样本数：{sum(y==2)}")

# PCA: unsupervised projection of the standardized 13-D feature space onto
# the two directions of maximum variance.
print("\n===== PCA降维结果 =====")
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X_std)

explained_variance = pca.explained_variance_ratio_
print(f"PC1解释方差比例：{explained_variance[0]:.4f}（{explained_variance[0]*100:.2f}%）")
print(f"PC2解释方差比例：{explained_variance[1]:.4f}（{explained_variance[1]*100:.2f}%）")
print(f"累计解释方差：{np.sum(explained_variance):.4f}（{np.sum(explained_variance)*100:.2f}%）")

# Express each principal component as a linear combination of the original
# (standardized) features.
print("\nPCA两维主成分表达式：")
for pc_idx, component in enumerate(pca.components_, start=1):
    terms = (f"{weight:.4f}×{feat}" for weight, feat in zip(component, feature_names))
    print(f"PC{pc_idx} = {' + '.join(terms)}")

# Show the first 10 samples in the new 2-D coordinate system.
pca_df = pd.DataFrame(X_pca, columns=["PC1", "PC2"])
pca_df["label"] = y
print("\n前10个样本的PCA两维特征：")
print(pca_df.head(10))

# Scatter plot of both classes in PC space; axis labels carry the explained
# variance of each component.
plt.figure(figsize=(10, 6))
for cls in (1, 2):
    subset = pca_df[pca_df["label"] == cls]
    plt.scatter(subset["PC1"], subset["PC2"],
                label=f"Class {cls}", alpha=0.7, s=60, edgecolors='k')
plt.xlabel(f"PC1 ({explained_variance[0]*100:.1f}%)")
plt.ylabel(f"PC2 ({explained_variance[1]*100:.1f}%)")
plt.title("PCA降维结果（2D）")
plt.legend()
plt.grid(alpha=0.3)
plt.show()

# LDA: supervised reduction of the binary problem to a single discriminant
# axis that maximizes between-class separation.
print("\n===== LDA降维结果 =====")
lda = LDA(n_components=1)
X_lda = lda.fit_transform(X_std, y)

# Projection direction actually used by transform(): lda.scalings_, i.e.
# X_lda = (X_std - lda.xbar_) @ lda.scalings_.
# FIX: the original printed lda.coef_[0], which holds the decision-function
# coefficients — proportional to, but scaled differently from, the direction
# that produces the 1-D feature shown below.
print("LDA投影方向（特征系数）：")
lda_coef = pd.DataFrame({
    "特征": feature_names,
    "系数": lda.scalings_[:, 0]
}).sort_values(by="系数", key=abs, ascending=False)  # rank by absolute influence
print(lda_coef)

# First 10 samples on the single discriminant axis.
lda_df = pd.DataFrame(X_lda, columns=["LDA_1D"])
lda_df["label"] = y
print("\n前10个样本的LDA一维特征：")
print(lda_df.head(10))

# Overlaid histograms show how well the two classes separate in 1-D.
plt.figure(figsize=(10, 6))
for label in [1, 2]:
    mask = lda_df["label"] == label
    plt.hist(lda_df.loc[mask, "LDA_1D"],
             alpha=0.6, label=f"Class {label}", bins=15, edgecolor='k')
plt.xlabel("LDA 1D Feature")
plt.ylabel("样本数量")
plt.title("LDA降维结果（1D）")
plt.legend()
plt.grid(alpha=0.3)
plt.show()