# First train a single K-nearest-neighbours model
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.ensemble import BaggingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt

# Load the iris dataset and split it into equal train/test halves
# (fixed random_state for reproducibility).
dataset = load_iris()
x, y = dataset.data, dataset.target
x_train, x_test, y_train, y_test = train_test_split(
    x, y, random_state=0, test_size=0.5
)

# Search for the best K via 5-fold cross-validation: for each candidate K,
# record the mean error rate (1 - mean accuracy) in k_error.
k_range = range(1, 15)
k_error = []
# BUG FIX: the original wrote `for k in range:`, which iterates the builtin
# `range` type itself and raises TypeError — it must iterate k_range.
for k in k_range:
    model = KNeighborsClassifier(k)
    scores = cross_val_score(model, x, y, cv=5, scoring='accuracy')
    k_error.append(1 - scores.mean())

# Plot error rate against K to visualise the best K.
# BUG FIX: the rcParams key is 'font.sans-serif' (was misspelled
# 'front.sans-serif', which raises KeyError), and the x-axis label method is
# plt.xlabel (was misspelled plt.xlable, which raises AttributeError).
# The font family name is also corrected to its canonical form 'SimHei'.
plt.rcParams['font.sans-serif'] = 'SimHei'  # Chinese-capable font for labels
plt.plot(k_range, k_error, 'r-')
plt.xlabel('K值')
plt.ylabel('误差率')
plt.show()

# Train a plain KNN model and a Bagging ensemble of KNN models, then compare
# their accuracy on the held-out test set.
# BUG FIX: the original lines used '-' instead of '=' for assignments and
# keyword arguments, spaces instead of underscores in identifiers
# (`n estimators`, `x train`, ...), and wrong class-name capitalisation
# (`KNeighborsclassifier`, `Baggingclassifier`) — none of it parsed or ran.

# Single KNN model with K=6 (chosen from the cross-validation plot above).
model_knn = KNeighborsClassifier(6)
# Bagging ensemble: 130 KNN base estimators, each fitted on a random 40% of
# the samples using all 4 iris features; random_state fixed for
# reproducibility.
model_bagging = BaggingClassifier(
    KNeighborsClassifier(6),
    n_estimators=130,
    max_samples=0.4,
    max_features=4,
    random_state=1,
)
# Fit both models on the training split.
model_knn.fit(x_train, y_train)
model_bagging.fit(x_train, y_train)
# Evaluate each model on the test split and report accuracy.
pred_knn = model_knn.predict(x_test)
ac_knn = accuracy_score(y_test, pred_knn)
print(f'K近邻模型准确率{ac_knn}')
pred_bagging = model_bagging.predict(x_test)
ac_bagging = accuracy_score(y_test, pred_bagging)
print(f'基于K近邻的Bagging模型准确率:{ac_bagging}')