from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
import numpy as np

# Generate a synthetic binary-classification dataset for the demo
# (20 features: 15 informative, 5 redundant; fixed seed for reproducibility).
X, y = make_classification(n_samples=1000, n_features=20, 
                          n_informative=15, n_redundant=5,
                          random_state=42)

# Hold out 30% of the samples as a test set, with the same fixed seed.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, 
                                                    random_state=42)

# Example: AdaBoost-style boosting implemented via weighted resampling
from sklearn.utils import resample

def boosting_resampling(X, y, base_estimator, n_estimators=50):
    """AdaBoost-style boosting implemented via weighted bootstrap resampling.

    Each round draws a bootstrap sample according to the current sample
    weights, fits an independent copy of ``base_estimator`` on it, and
    re-weights the training samples: misclassified samples are up-weighted,
    correctly classified ones down-weighted.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Training features.
    y : array-like of shape (n_samples,)
        Training labels.
    base_estimator : object
        Any estimator exposing ``fit(X, y)`` and ``predict(X)``.
        It is deep-copied each round, so the caller's instance is untouched.
    n_estimators : int, default=50
        Number of boosting rounds.

    Returns
    -------
    (estimators, alphas) : (list, list)
        The fitted per-round models and their AdaBoost weights
        ``alpha = 0.5 * ln((1 - err) / err)``.
    """
    from copy import deepcopy

    X = np.asarray(X)
    y = np.asarray(y)
    n = len(X)

    estimators = []
    alphas = []

    # Initial uniform sample weights.
    sample_weights = np.ones(n) / n

    # Clipping bound so log()/division below never see 0 or 1 exactly.
    eps = np.finfo(float).eps

    for t in range(n_estimators):
        # Weighted bootstrap draw. NOTE: sklearn.utils.resample has no
        # `weights` parameter (the original call raised TypeError), so the
        # draw is done with numpy's weighted choice instead; seeding with
        # 42 + t keeps each round deterministic but distinct.
        rng = np.random.RandomState(42 + t)
        indices = rng.choice(n, size=n, replace=True, p=sample_weights)
        X_resampled = X[indices]
        y_resampled = y[indices]

        # Fit an independent copy each round. The original re-fit the one
        # shared `base_estimator`, so the returned list held n_estimators
        # references to the *same* final model.
        estimator = deepcopy(base_estimator).fit(X_resampled, y_resampled)
        y_pred = estimator.predict(X)

        # Weighted error rate on the full training set, clipped away from
        # 0 and 1 to keep alpha finite on perfect / fully-wrong rounds.
        error_mask = (y_pred != y)
        error_rate = float(np.sum(sample_weights[error_mask]))
        error_rate = min(max(error_rate, eps), 1.0 - eps)
        alpha = 0.5 * np.log((1.0 - error_rate) / error_rate)

        # Up-weight mistakes, down-weight correct samples, renormalize.
        sample_weights = sample_weights * np.where(
            error_mask, np.exp(alpha), np.exp(-alpha)
        )
        sample_weights /= np.sum(sample_weights)

        estimators.append(estimator)
        alphas.append(alpha)

    return estimators, alphas