# Step 1: search for the optimal K (n_neighbors) for the K-nearest-neighbors model
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.ensemble import BaggingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt

# Load the iris dataset and split it 50/50 into training and test halves.
# random_state=0 makes the split reproducible.
dataset = load_iris()
x = dataset.data
y = dataset.target
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.5, random_state=0
)

# Search for the best K: evaluate each candidate with 5-fold cross-validation
# on the full dataset and record its mean error rate (1 - mean accuracy).
k_range = range(1, 15)          # candidate values of K: 1..14
k_error = [
    1
    - cross_val_score(
        KNeighborsClassifier(n_neighbors=k), x, y, cv=5, scoring='accuracy'
    ).mean()
    for k in k_range
]

# Visualize error rate vs. K so the best K (lowest error) can be read off.
# 'SimHei' is a CJK font needed to render the Chinese axis labels
# (original had the miscapitalized 'Simhei'; rcParams also expects a list).
plt.rcParams['font.sans-serif'] = ['SimHei']
# CJK fonts lack the Unicode minus glyph; without this, negative tick
# labels render as empty boxes.
plt.rcParams['axes.unicode_minus'] = False
plt.plot(k_range, k_error, 'r-')
plt.xlabel('k值')
plt.ylabel('误差率')
plt.show()



# Step 2: train a plain KNN classifier and a Bagging ensemble built on KNN.
# NOTE(review): k=6 is hard-coded here — presumably chosen from the error
# curve plotted above; confirm it matches the minimum of k_error.

# Plain K-nearest-neighbors classifier.
model_Knn = KNeighborsClassifier(n_neighbors=6)

# Bagging ensemble of 130 KNN base estimators, each fitted on a random
# 40% of the training samples using all 4 iris features.
model_bagging = BaggingClassifier(
    KNeighborsClassifier(n_neighbors=6),
    n_estimators=130,
    max_samples=0.4,
    max_features=4,
    random_state=1,
)

# Fit both models on the training half of the data.
model_Knn.fit(x_train, y_train)
model_bagging.fit(x_train, y_train)

# Evaluate both fitted models on the held-out test half and report accuracy.
kNN_pre = model_Knn.predict(x_test)
kNN_ac = accuracy_score(y_test, kNN_pre)
Bagging_pre = model_bagging.predict(x_test)
Bagging_ac = accuracy_score(y_test, Bagging_pre)

for label, acc in (
    ("k近邻模型预测准确率:", kNN_ac),
    ("基于k近邻算法的Bagging模型的预测准确率:", Bagging_ac),
):
    print(label, acc)