import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import accuracy_score

# Build a toy two-class dataset: 100 samples of 2 uniform-random features
# with random 0/1 labels (seeded for reproducibility).
np.random.seed(0)
features = np.random.rand(100, 2)
labels = np.random.randint(0, 2, 100)

# Hold out 20% of the samples for evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    features, labels, test_size=0.2, random_state=42
)

# Hyperparameter grid for the bagged KNN ensemble.
# NOTE: scikit-learn renamed BaggingClassifier's `base_estimator` parameter to
# `estimator` in 1.2 and removed the old name in 1.4, so the nested grid keys
# must use the `estimator__` prefix (the old `base_estimator__...` keys and the
# `base_estimator=` keyword raise errors on current scikit-learn).
param_grid = {
    'estimator__n_neighbors': [3, 5, 7, 10],  # k for each inner KNN
    'n_estimators': [10, 20, 30],             # number of bagged KNN models
    'max_samples': [0.5, 0.7, 1.0],           # sample fraction per estimator
    'max_features': [0.5, 0.7, 1.0],          # feature fraction per estimator
}

# Base KNN classifier that the bagging ensemble wraps.
knn = KNeighborsClassifier()

# Bagging ensemble over the KNN base estimator.
bagging_knn = BaggingClassifier(estimator=knn)

# Exhaustive 5-fold cross-validated search over the grid, scored by accuracy.
grid_search = GridSearchCV(estimator=bagging_knn, param_grid=param_grid, cv=5, scoring='accuracy')

# Run the grid search on the training split.
grid_search.fit(X_train, y_train)

# Pull out the winning configuration and the refitted best model.
best_params = grid_search.best_params_
best_model = grid_search.best_estimator_

# Score the best model on the held-out test set.
predictions = best_model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)

print(f'Best parameters: {best_params}')
print(f'Accuracy of the optimized Bagging KNN model: {accuracy:.2f}')