# -*- coding: utf-8 -*-
"""
Created on 2023/6/13
@author: 张春宇
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from time import sleep
from tqdm import tqdm

# 读入数据
# Load the motor third-party liability (MTPL) claims data set.
data = pd.read_csv('MTPLdata.csv')

# First look: column dtypes / non-null counts, then summary statistics.
print(data.info())
print(data.describe())

# Histograms of every numeric column for a quick visual inspection.
data.hist(bins=50, figsize=(20, 15))
plt.show()

# Recode the fuel indicator to readable labels, then one-hot encode the
# categorical predictors alongside the numeric ones.
data['gas'] = data['gas'].map({1: 'diesel', 0: 'petrol'})
predictors = data[['age', 'ac', 'power', 'gas', 'brand', 'area']]
X = pd.get_dummies(predictors)

# Response variable: claim indicator.
y = data['clm']

# 训练集和测试集
# Hold out 10% of the rows as a test set, stratified on the response so
# both splits keep the same claim rate.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, stratify=y, test_size=0.1, random_state=1)

# Standardize features to zero mean / unit variance.  The scaler is fit
# on the training split only, then applied to both splits so no test
# information leaks into the preprocessing.
scaler = StandardScaler()
X_train_s = scaler.fit_transform(X_train)
X_test_s = scaler.transform(X_test)

# 训练模型
# Fit a 5-nearest-neighbors classifier on the standardized features.
model = KNeighborsClassifier(n_neighbors=5)
model.fit(X_train_s, y_train)

# Accuracy on the held-out test set.
print('Accuracy on test set:', model.score(X_test_s, y_test))

# Confusion matrix of test-set predictions.
# Fix: the crosstab was previously computed twice, with the first
# result discarded; it is now computed once.
pred = model.predict(X_test_s)
confusion_matrix = pd.crosstab(y_test, pred, rownames=['Actual'], colnames=['Predicted'])
print("Confusion Matrix:")
print(confusion_matrix)
# 选择最佳的k值
# Sweep K from 1 to 10 and record the test-set accuracy for each value.
# NOTE(review): selecting K by test-set accuracy leaks the test set into
# model selection; the cross-validated grid search further below is the
# methodologically sound version of this step.
scores = []
ks = range(1, 11)
for k in tqdm(ks):
    model = KNeighborsClassifier(n_neighbors=k)
    model.fit(X_train_s, y_train)
    scores.append(model.score(X_test_s, y_test))
# Report the K with the highest accuracy.
# Fix: removed a discarded `max(scores)` expression and two `sleep()`
# calls that only slowed the loop without affecting any result.
index_max = np.argmax(scores)
print(f'Optimal K: {ks[index_max]}')

# 绘制准确率和误差率与k的关系图
# Accuracy versus K, with a vertical marker at the best K.
plt.plot(ks, scores, 'o-')
plt.axvline(ks[index_max], linewidth=1, linestyle='--', color='k')
plt.xlabel('K')
plt.ylabel('Accuracy')
plt.title('KNN')
plt.tight_layout()
plt.show()

# The same information shown as an error-rate curve.
error_rates = 1 - np.array(scores)
plt.plot(ks, error_rates, 'o-')
plt.axvline(ks[index_max], linewidth=1, linestyle='--', color='k')
plt.xlabel('K')
plt.ylabel('Error Rate')
plt.title('KNN')
plt.tight_layout()
plt.show()

# Error rate against model flexibility, expressed as 1/K.
inverse_ks = 1 / np.array(ks)
plt.plot(inverse_ks, error_rates, 'o-')
plt.xlabel('1/K')
plt.ylabel('Error Rate')
plt.title('KNN')
plt.tight_layout()
plt.show()

# 交叉验证选择最佳的k值
# Tune K by 10-fold stratified cross-validation on the training set —
# the proper selection procedure, since it never touches the test set.
param_grid = {'n_neighbors': range(1, 11)}
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)
model = GridSearchCV(KNeighborsClassifier(), param_grid, cv=kfold)
model.fit(X_train_s, y_train)

# Fix: the section selects the best K but never reported it; print the
# cross-validation winner explicitly.
print('Best parameters (CV):', model.best_params_)

# Predictions on the test set from the refitted best estimator.
pred = model.predict(X_test_s)

# Confusion matrix of the CV-selected model.
confusion_matrix = pd.crosstab(y_test, pred, rownames=['Actual'], colnames=['Predicted'])
print("Confusion Matrix:")
print(confusion_matrix)

# Final test-set accuracy of the CV-selected model.
print('Accuracy on test set:', model.score(X_test_s, y_test))