# Imports
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import matplotlib.pyplot as plt
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split

# Load the wine dataset into a DataFrame: 13 numeric features + integer class target.
wine = load_wine()
df = pd.DataFrame(wine.data, columns=wine.feature_names)
df['target'] = wine.target

# Correlation analysis: full Pearson correlation matrix over all columns.
# rowvar=False -> each DataFrame column is treated as one variable.
corr = np.corrcoef(df, rowvar=False)
# Correlation of each feature with the target (last row/column of the matrix).
# This is what justifies the two-feature selection used by the baseline model
# below (the comment there asks for the two columns most correlated with target).
target_corr = pd.Series(corr[:-1, -1], index=wine.feature_names)

# Train/test split: features = all columns except the last, labels = 'target'.
X_train, X_test, y_train, y_test = train_test_split(
    df.iloc[:, :-1], df.iloc[:, -1], test_size=0.3, random_state=42)
from sklearn.linear_model import LogisticRegression

# Baseline on raw data: logistic regression restricted to the two feature
# columns most correlated with the target (6 = flavanoids,
# 11 = od280/od315_of_diluted_wines).
# max_iter raised from the default 100: the wine features are unscaled, so the
# lbfgs solver does not converge within the default budget (ConvergenceWarning).
logreg = LogisticRegression(max_iter=10000)
logreg.fit(X_train.iloc[:, [6, 11]], y_train)
y_pred = logreg.predict(X_test.iloc[:, [6, 11]])

# Q: repeat the prediction after dimensionality reduction (PCA and LDA).

################################################
# PCA (unsupervised): project onto the 2 directions of largest variance.
# NOTE(review): the features are unscaled; PCA is variance-driven, so
# large-magnitude features (e.g. proline) will dominate the components —
# consider StandardScaler/MinMaxScaler before PCA. Left as-is to keep the
# comparison with the original pipeline.
################################################
pca = PCA(n_components=2)
X_train_pca = pca.fit_transform(X_train)  # fit on the training set only (no leakage)
X_test_pca = pca.transform(X_test)        # reuse the train-fitted projection

# Classifier on the 2-D PCA representation; max_iter raised for the same
# convergence reason as the baseline model.
logreg_pca = LogisticRegression(max_iter=10000)
logreg_pca.fit(X_train_pca, y_train)
y_pred_pca = logreg_pca.predict(X_test_pca)

#######################
# LDA (supervised): unlike PCA, fit_transform needs the labels. With 3 wine
# classes LDA can produce at most n_classes - 1 = 2 discriminant components,
# so n_components=2 is the maximum here.
#######################
lda = LDA(n_components=2)
X_train_lda = lda.fit_transform(X_train, y_train)  # supervised fit
X_test_lda = lda.transform(X_test)                 # reuse the train-fitted projection

# Classifier on the 2-D LDA representation; max_iter raised for the same
# convergence reason as the baseline model.
logreg_lda = LogisticRegression(max_iter=10000)
logreg_lda.fit(X_train_lda, y_train)
y_pred_lda = logreg_lda.predict(X_test_lda)

# Evaluation metric
from sklearn.metrics import accuracy_score

# Report the test-set accuracy of each model variant.
for label, predictions in (('original', y_pred), ('PCA', y_pred_pca), ('LDA', y_pred_lda)):
    pct = accuracy_score(y_test, predictions) * 100
    print(f'Accuracy of {label}: {pct:.2f}%')


#绘图
fig, axs = plt.subplots(3,2, figsize=(10,10))

# logreg = LogisticRegression()
# logreg.fit(X_train.iloc[:,[6,11]], y_train)
# y_pred = logreg.predict(X_test.iloc[:,[6,11]])

#原始数据
axs[0,0].scatter(X_train.iloc[:,0],X_train.iloc[:,1],c=y_train)
axs[0,0].set_title('Original Data')
axs[0,1].scatter(X_test.iloc[:,0],X_test.iloc[:,1],c=y_test)
axs[0,1].set_title('Original Data(Test Set)')

#PCA
axs[1,0].scatter(X_train_pca[:,0],X_train_pca[:,1],c=y_train)
axs[1,0].set_title('PCA Data')
axs[1,1].scatter(X_test_pca[:,0],X_test_pca[:,1],c=y_test)
axs[1,1].set_title('PCA Data(Test Set)')


#LDA
axs[2,0].scatter(X_train_lda[:,0],X_train_lda[:,1],c=y_train)
axs[2,0].set_title('LDA Data')
axs[2,1].scatter(X_test_lda[:,0],X_test_lda[:,1],c=y_test)
axs[2,1].set_title('LDA Data(Test Set)')
plt.show()