import matplotlib.pyplot as plt
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score

# Dataset: 13 two-dimensional samples with binary class labels.
# The first six rows are class 0, the remaining seven are class 1.
features = np.array([
    [19, 30], [30, 40], [39, 47], [40, 52], [47, 50], [50, 55],
    [60, 60], [62, 65], [73, 70], [75, 82], [77, 85], [90, 95], [92, 90],
])
labels = np.array([0] * 6 + [1] * 7)

# Hold out 30% of the samples for testing; a fixed random_state makes
# the split reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(
    features,
    labels,
    test_size=0.3,
    random_state=0,
)

# Model selection: evaluate candidate k values (2 through 10) with
# 5-fold cross-validation, recording the misclassification rate
# (1 - mean accuracy) for each.  Note the CV runs on the full dataset,
# not only the training split.
k_values = range(2, 11)
error_rates = [
    1 - cross_val_score(
        KNeighborsClassifier(n_neighbors=k),
        features,
        labels,
        cv=5,
        scoring='accuracy',
    ).mean()
    for k in k_values
]

# Plot the relationship between k and the cross-validation error rate.
# Use the canonical 'SimHei' family name so the Chinese labels render,
# and disable axes.unicode_minus because SimHei has no Unicode minus
# glyph (negative tick labels would otherwise show as empty boxes).
plt.rcParams['font.sans-serif'] = 'SimHei'
plt.rcParams['axes.unicode_minus'] = False
plt.plot(k_values, error_rates, 'r-')
plt.xlabel('k的取值')
plt.ylabel('预测误差率')
plt.title('k值与预测误差率关系图')
plt.show()

# Per the CV results above, k=5 and k=7 tie for the lowest error rate,
# so fit one model with each on the training split.
# (KNeighborsClassifier.fit returns the estimator itself.)
model_k5 = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
model_k7 = KNeighborsClassifier(n_neighbors=7).fit(X_train, y_train)

# Classify a previously unseen sample [75, 65] with both fitted models.
new_sample = np.array([[75, 65]])
pred_k5 = model_k5.predict(new_sample)
pred_k7 = model_k7.predict(new_sample)

# Report each model's prediction (printed as a length-1 array).
print(f"k=5时，预测样本的分类结果为：{pred_k5}")
print(f"k=7时，预测样本的分类结果为：{pred_k7}")