from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import numpy as np

# Configure matplotlib so Chinese labels and the minus sign render correctly.
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],  # a font that ships CJK glyphs
    'axes.unicode_minus': False,    # keep '-' rendering intact with CJK fonts
})

# Build a synthetic binary-classification dataset and hold out 30% for testing.
X, y = make_classification(
    n_samples=1000,
    n_features=20,
    n_informative=15,
    n_redundant=5,
    random_state=42,
)

X_train, X_test, y_train, y_test = train_test_split(
    X, y,
    test_size=0.3,
    random_state=42,
)

# Bagging ensemble of decision trees: each tree sees 80% of the rows
# (with replacement) and 80% of the columns.
bagging_clf = BaggingClassifier(
    estimator=DecisionTreeClassifier(),
    n_estimators=50,
    max_samples=0.8,      # row subsample per base learner
    max_features=0.8,     # column subsample per base learner
    bootstrap=True,       # sample rows with replacement
    random_state=42,
)

# fit() returns the estimator itself, so training and prediction chain cleanly.
y_pred_bagging = bagging_clf.fit(X_train, y_train).predict(X_test)
bagging_accuracy = accuracy_score(y_test, y_pred_bagging)

# Baseline: one unbagged decision tree on the identical split.
single_tree = DecisionTreeClassifier(random_state=42)
y_pred_single = single_tree.fit(X_train, y_train).predict(X_test)
single_accuracy = accuracy_score(y_test, y_pred_single)

# Report both accuracies and the gain from bagging.
print(f"Bagging准确率: {bagging_accuracy:.4f}")
print(f"单棵决策树准确率: {single_accuracy:.4f}")
print(f"性能提升: {(bagging_accuracy - single_accuracy):.4f}")

# Sweep the ensemble size and record train/test accuracy for each setting,
# to visualize how performance depends on the number of base learners.
n_estimators_range = list(range(10, 101, 10))
train_scores, test_scores = [], []

for count in n_estimators_range:
    model = BaggingClassifier(
        estimator=DecisionTreeClassifier(),
        n_estimators=count,
        random_state=42,
    )
    model.fit(X_train, y_train)
    train_scores.append(accuracy_score(y_train, model.predict(X_train)))
    test_scores.append(accuracy_score(y_test, model.predict(X_test)))

# Two-panel figure: (left) accuracy vs. ensemble size, (right) bar chart
# comparing a single tree against the bagged ensemble.
fig, (ax_curve, ax_bar) = plt.subplots(1, 2, figsize=(12, 5))

# Left panel: learning curves over the ensemble-size sweep.
ax_curve.plot(n_estimators_range, train_scores, 'b-', label='训练集准确率', linewidth=2)
ax_curve.plot(n_estimators_range, test_scores, 'r-', label='测试集准确率', linewidth=2)
ax_curve.set_xlabel('基学习器数量')
ax_curve.set_ylabel('准确率')
ax_curve.set_title('Bagging性能随基学习器数量的变化')
ax_curve.legend()
ax_curve.grid(True, alpha=0.3)

# Right panel: head-to-head accuracy bars with value annotations.
methods = ['单决策树', 'Bagging']
accuracies = [single_accuracy, bagging_accuracy]
ax_bar.bar(methods, accuracies, color=['lightblue', 'lightcoral'], alpha=0.7)
ax_bar.set_ylabel('准确率')
ax_bar.set_title('算法性能比较')
for idx, acc in enumerate(accuracies):
    ax_bar.text(idx, acc + 0.01, f'{acc:.4f}', ha='center')

fig.tight_layout()
plt.show()