# 导入必要的库和模块
import numpy as np
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.decomposition import PCA
import joblib
import matplotlib.pyplot as plt

# Load the handwritten-digits dataset and split features/labels.
digits = load_digits()
X, y = digits.data, digits.target

# Hold out 20% of the samples for final evaluation; fixed seed for
# reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Dimensionality reduction: keep enough principal components to explain
# 95% of the variance. Fit on the training split only, then apply the
# same learned projection to the test split (no leakage).
pca = PCA(n_components=0.95)
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)

# --- Select the best k via 5-fold cross-validation ----------------------
# Each candidate k is evaluated exactly once and its mean CV accuracy is
# recorded, so the scores can be reused for the plot at the bottom (the
# original recomputed all 40 cross-validations a second time to draw the
# figure). The original also kept an UNFITTED `best_knn = knn` here that
# was dead code — it was unconditionally overwritten by the grid search
# below — so that assignment is removed.
k_range = range(1, 41)
mean_cv_scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    scores = cross_val_score(knn, X_train_pca, y_train, cv=5, scoring='accuracy')
    mean_cv_scores.append(scores.mean())

# np.argmax returns the FIRST maximum, matching the original strict-`>`
# tie-breaking (smallest k wins on ties).
best_idx = int(np.argmax(mean_cv_scores))
best_k_cv = k_range[best_idx]
best_cv_score = mean_cv_scores[best_idx]
print(f"最佳k值 (CV): {best_k_cv}, 交叉验证平均准确率: {best_cv_score:.4f}")

# --- Refine with a grid search over k AND distance weighting ------------
param_grid = {'n_neighbors': range(1, 41), 'weights': ['uniform', 'distance']}
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5, scoring='accuracy')
grid_search.fit(X_train_pca, y_train)
# With the default refit=True, best_estimator_ is already retrained on the
# whole training set — the original's extra best_knn.fit(...) before
# predicting was redundant and has been dropped.
best_knn = grid_search.best_estimator_
print(f"网格搜索最佳参数: {grid_search.best_params_}")

# --- Evaluate the tuned model on the held-out test set ------------------
y_pred = best_knn.predict(X_test_pca)
accuracy = accuracy_score(y_test, y_pred)
print(f"测试集准确率: {accuracy:.4f}")

# Persist the tuned model to disk for later reuse.
joblib.dump(best_knn, 'optimized_best_knn_model.joblib')

# Optional: plot k against mean CV accuracy, reusing the scores gathered
# above instead of rerunning 40 more cross-validations.
plt.figure(figsize=(10, 6))
plt.plot(k_range, mean_cv_scores, marker='o')
plt.title('K值与交叉验证准确率的关系')
plt.xlabel('K值')
plt.ylabel('平均准确率')
plt.grid(True)
plt.show()