from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.ensemble import BaggingClassifier

# Configure Matplotlib so Chinese labels and minus signs render correctly.
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],  # font containing CJK glyphs
    'axes.unicode_minus': False,    # draw minus signs with this font too
})

# Build a synthetic binary classification problem (20 features, 15 informative)
# and hold out 30% of the rows as a test set. Fixed seeds keep runs reproducible.
X, y = make_classification(
    n_samples=1000,
    n_features=20,
    n_informative=15,
    n_redundant=5,
    random_state=42,
)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42
)

# Bagging ensemble: 50 decision trees, each trained on a bootstrap sample
# covering 80% of the rows and 80% of the columns.
bagging_clf = BaggingClassifier(
    estimator=DecisionTreeClassifier(),
    n_estimators=50,
    max_samples=0.8,   # row subsampling fraction per base learner
    max_features=0.8,  # column subsampling fraction per base learner
    bootstrap=True,    # sample rows with replacement
    random_state=42,
)

# Fit the ensemble and measure held-out accuracy.
bagging_clf.fit(X_train, y_train)
y_pred_bagging = bagging_clf.predict(X_test)
bagging_accuracy = accuracy_score(y_test, y_pred_bagging)

# Baseline for comparison: a single unconstrained decision tree.
single_tree = DecisionTreeClassifier(random_state=42)
y_pred_single = single_tree.fit(X_train, y_train).predict(X_test)
single_accuracy = accuracy_score(y_test, y_pred_single)

# AdaBoost over decision stumps (depth-1 trees), the classic weak learner.
adaboost_clf = AdaBoostClassifier(
    estimator=DecisionTreeClassifier(max_depth=1),
    n_estimators=50,
    learning_rate=1.0,
    random_state=42,
)

# Fit the booster and measure held-out accuracy.
adaboost_clf.fit(X_train, y_train)
y_pred_adaboost = adaboost_clf.predict(X_test)
adaboost_accuracy = accuracy_score(y_test, y_pred_adaboost)

print(f"AdaBoost准确率: {adaboost_accuracy:.4f}")

# 15x10-inch canvas holding the 2x2 grid of diagnostic plots built below.
plt.figure(figsize=(15, 10))

# Simulate AdaBoost's sample-weight update rule on a toy scenario.
def simulate_adaboost_weights(n_samples=10, n_iterations=3, error_rates=None):
    """Simulate AdaBoost sample-weight updates for a toy misclassification pattern.

    Sample 0 is assumed to be misclassified in every round while all other
    samples are classified correctly, so its weight grows each round and the
    rest shrink.

    Args:
        n_samples: Number of training samples to track.
        n_iterations: Number of boosting rounds to simulate.
        error_rates: Optional sequence of per-round error rates. Defaults to
            0.3 for the first round and 0.2 for every later round (the
            original hard-coded schedule), so existing callers are unaffected.

    Returns:
        A list of ``n_iterations + 1`` weight vectors (each a 1-D np.ndarray
        summing to 1), starting with the uniform initial distribution.
    """
    if error_rates is None:
        error_rates = [0.3] + [0.2] * (n_iterations - 1)

    # Uniform initial weights: w_i = 1/N.
    weights = np.full(n_samples, 1.0 / n_samples)
    weight_history = [weights.copy()]

    # +1 for the (misclassified) first sample, -1 for the correct rest;
    # lets the per-sample update be one vectorized multiply instead of a loop.
    signs = np.where(np.arange(n_samples) == 0, 1.0, -1.0)

    for error_rate in error_rates[:n_iterations]:
        # Learner weight: alpha = 1/2 * ln((1 - eps) / eps).
        alpha = 0.5 * np.log((1 - error_rate) / error_rate)
        # Misclassified samples scale by e^alpha, correct ones by e^-alpha.
        weights = weights * np.exp(signs * alpha)
        # Renormalize to a probability distribution.
        weights /= np.sum(weights)
        weight_history.append(weights.copy())

    return weight_history

# Panel 1: how each sample's weight evolves across boosting rounds.
weight_history = simulate_adaboost_weights()

plt.subplot(2, 2, 1)
# enumerate instead of range(len(...)); x-axis derived from the weight vector
# length rather than the previous hard-coded range(1, 11), so the plot stays
# correct if simulate_adaboost_weights() is called with a different n_samples.
for iteration, weights in enumerate(weight_history):
    plt.plot(range(1, len(weights) + 1), weights, marker='o', label=f'迭代{iteration}')
plt.xlabel('样本索引')
plt.ylabel('样本权重')
plt.title('AdaBoost样本权重变化过程')
plt.legend()
plt.grid(True, alpha=0.3)

# Panel 2: sweep the shrinkage (learning rate) and record test-set accuracy.
learning_rates = [0.1, 0.5, 1.0, 2.0]
lr_scores = []

for lr in learning_rates:
    # fit() returns the estimator, so train-and-bind chains on one line.
    model = AdaBoostClassifier(
        estimator=DecisionTreeClassifier(max_depth=1),
        n_estimators=50,
        learning_rate=lr,
        random_state=42,
    ).fit(X_train, y_train)
    lr_scores.append(accuracy_score(y_test, model.predict(X_test)))

plt.subplot(2, 2, 2)
plt.plot(learning_rates, lr_scores, 'bo-', linewidth=2)
plt.xlabel('学习率')
plt.ylabel('测试集准确率')
plt.title('学习率对AdaBoost性能的影响')
plt.grid(True, alpha=0.3)

# Panel 3: train vs. test accuracy as the ensemble size grows (10..100 stumps).
n_estimators_list = list(range(10, 101, 10))
ada_train_scores = []
ada_test_scores = []

for n in n_estimators_list:
    booster = AdaBoostClassifier(
        estimator=DecisionTreeClassifier(max_depth=1),
        n_estimators=n,
        random_state=42,
    ).fit(X_train, y_train)
    # Scoring on both splits exposes any train/test gap (overfitting).
    ada_train_scores.append(accuracy_score(y_train, booster.predict(X_train)))
    ada_test_scores.append(accuracy_score(y_test, booster.predict(X_test)))

plt.subplot(2, 2, 3)
plt.plot(n_estimators_list, ada_train_scores, 'b-', label='训练集准确率', linewidth=2)
plt.plot(n_estimators_list, ada_test_scores, 'r-', label='测试集准确率', linewidth=2)
plt.xlabel('基学习器数量')
plt.ylabel('准确率')
plt.title('AdaBoost性能随基学习器数量的变化')
plt.legend()
plt.grid(True, alpha=0.3)

# Panel 4: side-by-side accuracy of the three models on the test set.
plt.subplot(2, 2, 4)
algorithms = ['单决策树', 'Bagging', 'AdaBoost']
accuracies = [single_accuracy, bagging_accuracy, adaboost_accuracy]
colors = ['lightblue', 'lightgreen', 'lightcoral']

bars = plt.bar(algorithms, accuracies, color=colors, alpha=0.7)
plt.ylabel('准确率')
plt.title('集成算法性能比较')

# Annotate each bar with its exact accuracy, centered just above the bar top.
for rect, acc in zip(bars, accuracies):
    x_center = rect.get_x() + rect.get_width() / 2
    plt.text(x_center, rect.get_height() + 0.01, f'{acc:.4f}', ha='center')

plt.grid(True, alpha=0.3)
plt.tight_layout()
plt.show()