# Import the required libraries and modules
from sklearn.datasets import load_digits
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pickle
import numpy as np

from tqdm import tqdm
import time 

# Load scikit-learn's handwritten-digits dataset.
digit_bunch = load_digits()
features, labels = digit_bunch.data, digit_bunch.target

# Hold out 20% of the samples as a test set; fix the seed so the
# split (and therefore every accuracy below) is reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    features, labels, test_size=0.2, random_state=42
)

# Best accuracy seen so far, the k that produced it, and the fitted model.
acc_best = 0
k_best = None
knn_model_best = None

# Test-set accuracy for every candidate k, in order, for plotting later.
accs = []

# Sweep k from 1 to 40: fit a KNN classifier per k and remember the best one.
# Strict '>' means ties keep the first (smallest) k that reached the best
# accuracy.  NOTE: the original code slept 0.1 s per iteration purely to slow
# the progress bar; that artificial ~4 s delay has been removed.
for k in tqdm(range(1, 41), desc="program run:"):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    acc = accuracy_score(y_test, knn.predict(X_test))
    accs.append(acc)
    if acc > acc_best:
        acc_best = acc
        k_best = k
        knn_model_best = knn

# Persist the best-performing KNN model to disk for later reuse.
with open('best_knn_model.pkl', 'wb') as model_file:
    pickle.dump(knn_model_best, model_file)

# Report the winning configuration.
print("best accuracy:", acc_best)
print("relevant k:", k_best)

# Accuracy-vs-k curve over the full sweep range.
k_values = np.linspace(1, 40, 40)
plt.plot(k_values, accs, color='blue')
plt.xlabel("k-value")
plt.ylabel("Accuracy")
plt.title("Accuracy of different k-value", fontsize=16)

# Highlight the best k: vertical line, marker on the curve, and a text label.
plt.axvline(k_best, color='r')
plt.plot(k_best, acc_best, color='r', marker='o',
         markerfacecolor='red', markersize=5)
plt.text(k_best, acc_best, f'k={k_best}, Accuracy={acc_best:.2f}',
         color='r', fontsize=12)
plt.savefig("accuracy_plot.pdf")

