# 数据载入和可视化
import pandas as pd
import numpy as np

# Load the preprocessed dataset; expected to contain feature columns x1, x2
# and a label column 'y' (1 = good, 0 = bad per the legend further below).
data = pd.read_csv('data_class_processed.csv')
# print(data.head())  # quick sanity check of the loaded frame
x = data.drop(['y'], axis=1)  # feature matrix: every column except the label
y = data.loc[:, 'y']  # label series

from matplotlib import pyplot as plt

# Figure 1: scatter of the raw samples by class. The scatter/legend calls are
# commented out here; the same points are drawn again in section 4 together
# with the KNN decision regions.
fig1 = plt.figure(figsize=(5, 5))
# bad = plt.scatter(x.loc[:, 'x1'][y == 0], x.loc[:, 'x2'][y == 0])
# good = plt.scatter(x.loc[:, 'x1'][y == 1], x.loc[:, 'x2'][y == 1])
# plt.legend((good, bad), ('good', 'bad'))
plt.title('质量好坏检测', fontproperties='SimHei', fontsize=20)  # "quality good/bad detection"; SimHei renders the CJK title
plt.xlabel('x1')
plt.ylabel('x2')
# plt.show()


# 1. Preprocessing: standardize the features, then PCA down to 2 components
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

x_norm = StandardScaler().fit_transform(x)  # zero-mean / unit-variance per feature
pca = PCA(n_components=2)
x_reduced = pca.fit_transform(x_norm)
# NOTE(review): x_reduced is never used below — the model is trained on the raw
# features x (see the train_test_split call). Confirm whether the PCA output
# was meant to feed the classifier instead.
var_ratio = pca.explained_variance_ratio_  # variance explained by each component
# print(var_ratio)

# 2. Train/test split: 60% train / 40% test, fixed seed for reproducibility.
# Note this splits the ORIGINAL features x, not the PCA output x_reduced.
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=4, test_size=0.4)
# print(x_train.shape)

# 3. Fit a KNN classifier with k=10 and report accuracy on both splits
from sklearn.neighbors import KNeighborsClassifier

knn_10 = KNeighborsClassifier(n_neighbors=10)
knn_10.fit(x_train, y_train)
y_train_predict = knn_10.predict(x_train)
y_test_predict = knn_10.predict(x_test)
# Accuracy on the training split gauges over-/under-fitting; the test split
# estimates generalization.
from sklearn.metrics import accuracy_score

print('n_neighbors=10的训练数据准确率', accuracy_score(y_train, y_train_predict))
print('n_neighbors=10的测试数据准确率', accuracy_score(y_test, y_test_predict))

# 4. Visualize the decision boundary: predict the class of every point on a
#    dense grid (step 0.05) over [0, 10) x [0, 10).
#    NOTE(review): assumes both features lie roughly in [0, 10) — confirm
#    against the actual data range.
xx, yy = np.meshgrid(np.arange(0, 10, 0.05), np.arange(0, 10, 0.05))
# print(xx.shape)
x_range = np.c_[xx.ravel(), yy.ravel()]  # flatten the grid into an (n, 2) feature array
# print(x_range.shape)
y_range_predict = knn_10.predict(x_range)
# Grid points colored by predicted class paint the two decision regions;
# the true samples are scattered on top for comparison.
knn_bad = plt.scatter(x_range[:, 0][y_range_predict == 0], x_range[:, 1][y_range_predict == 0])
knn_good = plt.scatter(x_range[:, 0][y_range_predict == 1], x_range[:, 1][y_range_predict == 1])
bad = plt.scatter(x.loc[:, 'x1'][y == 0], x.loc[:, 'x2'][y == 0])
good = plt.scatter(x.loc[:, 'x1'][y == 1], x.loc[:, 'x2'][y == 1])
plt.legend((good, bad, knn_good, knn_bad), ('good', 'bad', 'knn_good', 'knn_bad'))
# plt.show()

# 5. Confusion matrix on the test split and the recall derived from it
from sklearn.metrics import confusion_matrix

cm = confusion_matrix(y_test, y_test_predict)
# print(cm)
# sklearn convention: rows are true labels, columns are predicted labels,
# labels sorted ascending — so index 1 is the positive ("good") class.
TP = cm[1, 1]  # true positives
TN = cm[0, 0]  # true negatives
FP = cm[0, 1]  # false positives
FN = cm[1, 0]  # false negatives
# Recall (sensitivity): fraction of actual positives correctly identified.
# The original left only a "compute recall" comment with no code; implemented here.
recall = TP / (TP + FN)
print('n_neighbors=10的测试数据召回率', recall)


# 6. Sweep n_neighbors over 1..21 and record train/test accuracy for each k
accuracy_train = []
accuracy_test = []
n = list(range(1, 22))  # candidate k values
for i in n:
    knn_i = KNeighborsClassifier(n_neighbors=i)
    knn_i.fit(x_train, y_train)
    y_train_predict_i = knn_i.predict(x_train)
    y_test_predict_i = knn_i.predict(x_test)
    accuracy_train_i = accuracy_score(y_train, y_train_predict_i)
    accuracy_test_i = accuracy_score(y_test, y_test_predict_i)
    print(f'n_neighbors={i}的训练数据准确率', accuracy_train_i)
    # Fixed: the label below was missing '=' ('n_neighbors{i}'), inconsistent
    # with the training-accuracy line above.
    print(f'n_neighbors={i}的测试数据准确率', accuracy_test_i)
    # keep results for the plot below
    accuracy_train.append(accuracy_train_i)
    accuracy_test.append(accuracy_test_i)

print(accuracy_train)

# 7. Plot accuracy versus k for both splits to help pick n_neighbors.
# The original assigned the plt.plot return values to variables that were
# never used; a labeled legend now distinguishes the two curves instead.
plt.figure(figsize=(12, 5))
plt.plot(n, accuracy_train, marker='o', label='train accuracy')
plt.plot(n, accuracy_test, marker='o', label='test accuracy')
plt.xlabel('n_neighbors')
plt.ylabel('accuracy')
plt.legend()
plt.show()