# 一、寻找k近邻的最佳参数值
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.ensemble import BaggingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt

# Load the iris dataset and split it 50/50 into train and test sets.
# Fix: the original bound `dataset = load_iris` (the loader function, uncalled)
# and then invoked it twice — `dataset().data` and `dataset().target` — loading
# the dataset twice. Load once with return_X_y=True instead.
x, y = load_iris(return_X_y=True)
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0, test_size=0.5)

# For each candidate k, estimate the misclassification rate of a k-NN
# classifier using 5-fold cross-validation on the full dataset.
k_range = range(1, 15)                         # candidate values of k
k_error = [                                    # prediction error rate per k
    1 - cross_val_score(
        KNeighborsClassifier(n_neighbors=k), x, y, cv=5, scoring='accuracy'
    ).mean()
    for k in k_range
]

# Plot the error rate against k (x-axis: k, y-axis: prediction error rate).
plt.rcParams['font.sans-serif'] = 'Simhei'     # font that can render CJK labels
plt.plot(k_range, k_error, 'r-')
plt.xlabel('k的取值')
plt.ylabel('预测误差率')
plt.show()


# Part two: train a plain k-NN model and a bagging ensemble whose base
# learner is the same k-NN, then compare their held-out test accuracy.
model_knn = KNeighborsClassifier(n_neighbors=6)
model_bagging = BaggingClassifier(
    KNeighborsClassifier(n_neighbors=6),
    n_estimators=130,    # number of base k-NN learners in the ensemble
    max_samples=0.4,     # each learner sees 40% of the training samples
    max_features=4,      # each learner sees all 4 iris features
    random_state=1,
)

# Fit both models on the same training split.
model_knn.fit(x_train, y_train)
model_bagging.fit(x_train, y_train)

# Score each model on the test split and report accuracy.
ac_knn = accuracy_score(y_test, model_knn.predict(x_test))
print(f"k近邻模型预测准确率：{ac_knn}")

ac_bagging = accuracy_score(y_test, model_bagging.predict(x_test))
print(f"基于k近邻算法的Bagging模型的预测准确率：{ac_bagging}")

