# TODO: 导入必要的库和模块
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import pickle
from tqdm import tqdm
import matplotlib.pyplot as plt
# Load the scikit-learn handwritten digits dataset (8x8 grayscale images,
# 10 classes) and unpack the feature matrix and label vector.
digits = datasets.load_digits()
X, y = digits.data, digits.target

# Hold out 20% of the samples as a test set; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)
# Sweep k from 1 to 40: fit a KNN classifier for each k, record its test-set
# accuracy, and keep the best-performing fitted model.
# NOTE(review): selecting k on the test set leaks test information into model
# choice; a validation split or cross-validation would be more rigorous.
k_values = range(1, 41)
accuracies = []          # test accuracy for each k, in sweep order

best_accuracy = 0.0      # highest accuracy seen so far
best_k = 0               # k that achieved best_accuracy
best_knn_model = None    # fitted classifier for best_k

for k in tqdm(k_values, desc="Testing k values"):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    accuracy = accuracy_score(y_test, knn.predict(X_test))
    accuracies.append(accuracy)
    # Strict '>' keeps the FIRST maximum, matching np.argmax semantics.
    if accuracy > best_accuracy:
        best_accuracy = accuracy
        best_k = k
        best_knn_model = knn

# The loop already tracked the first maximum, so derive the plot annotations
# from it instead of recomputing the maximum with np.argmax/max a second time
# (the original duplicated this logic and the two copies could drift apart).
optimal_k = best_k
optimal_accuracy = best_accuracy
# --- Accuracy-vs-k line chart -------------------------------------------
plt.figure(figsize=(10, 6))

# Accuracy curve with one marker per tested k value.
plt.plot(
    k_values, accuracies,
    marker='o', linestyle='-', color='b', label='Accuracy vs K',
)

# Dashed red vertical line at the optimal k.
plt.axvline(x=optimal_k, color='r', linestyle='--', label='Optimal K')

# Annotate the optimum with its exact k and accuracy just above the curve.
plt.text(
    optimal_k, optimal_accuracy,
    f'K={optimal_k}, Accuracy={optimal_accuracy:.2f}',
    ha='center', va='bottom', color='r',
)

# Axis labels, title and legend.
plt.xlabel('K value')
plt.ylabel('Accuracy')
plt.title('KNN Optimal K Value Selection')
plt.legend()
# Persist the best-performing KNN model as a pickle binary.
# NOTE(security): only unpickle this file from trusted sources —
# pickle.load can execute arbitrary code.
with open('best_knn_model.pkl', 'wb') as file:
    pickle.dump(best_knn_model, file)

# Report the best accuracy and the k that achieved it.
# (Fixed: the second message was missing its ':' — now consistent with the first.)
print("Best accuracy:", best_accuracy)
print("Best k value:", best_k)

# Save the chart as a PDF *before* show(): some interactive backends clear
# the figure once the window is closed.
plt.savefig('knn_optimal_k.pdf', bbox_inches='tight')

# Display the figure (optional; not needed in headless environments).
plt.show()

