import numpy as np
from sklearn.tree import DecisionTreeClassifier

# Load the raw CSV as strings: try the project-relative path first, then
# fall back to a copy in the current working directory.
try:
    raw = np.genfromtxt('Works/第8章提升方法/LearningData.csv', delimiter=',', dtype=str, encoding='utf-8')
except OSError:
    # genfromtxt raises OSError (incl. FileNotFoundError) when the path is
    # missing/unreadable; catching only that avoids masking real bugs.
    raw = np.genfromtxt('LearningData.csv', delimiter=',', dtype=str, encoding='utf-8')


# Rows 1-3 of the CSV hold the three feature values, row 4 the labels;
# column 0 is an index/header column and is dropped. Transposing gives one
# sample per row.
# NOTE(review): the original comment said both "take the last 9 columns" and
# "shape (10, 3)" — these conflict; verify the sample count against the CSV.
X = np.vstack([raw[1,1:], raw[2,1:], raw[3,1:]]).astype(int).T
y = raw[4,1:].astype(int)  # labels, presumably in {-1, +1} — TODO confirm

# AdaBoost configuration and state.
n_samples = X.shape[0]
n_estimators = 5
# D_1: start from uniform sample weights.
sample_weight = np.full(n_samples, 1.0 / n_samples)
estimators = []
alphas = []

for t in range(n_estimators):
    # Weak learner: a depth-1 decision stump fitted on the current weights.
    weak = DecisionTreeClassifier(max_depth=1)
    weak.fit(X, y, sample_weight=sample_weight)
    y_hat = weak.predict(X)

    # Weighted misclassification rate e_t, clamped away from 0 and 1 so the
    # logarithm below stays finite.
    e_t = sample_weight @ (y_hat != y) / sample_weight.sum()
    e_t = np.clip(e_t, 1e-10, 1 - 1e-10)

    # Classifier coefficient: alpha_t = 1/2 * ln((1 - e_t) / e_t).
    a_t = 0.5 * np.log((1 - e_t) / e_t)

    # Re-weight samples (assumes labels are +/-1 — see data loading) and
    # renormalise so the weights sum to one.
    sample_weight = sample_weight * np.exp(-a_t * y * y_hat)
    sample_weight = sample_weight / sample_weight.sum()

    estimators.append(weak)
    alphas.append(a_t)
    print(f"第{t+1}轮: 错误率={e_t:.3f}, alpha={a_t:.3f}, 权重={sample_weight.round(3)}")

def strong_predict(X):
    """Final strong classifier: sign of the alpha-weighted vote of all stumps.

    Returns an array of +/-1 predictions for the rows of ``X``.
    NOTE(review): ``np.sign`` yields 0 on an exact tie of the weighted vote.
    """
    votes = np.zeros(len(X))
    for a_t, clf in zip(alphas, estimators):
        votes = votes + a_t * clf.predict(X)
    return np.sign(votes)

# Evaluate the boosted ensemble on the training set and report accuracy.
y_pred = strong_predict(X)
print("强分类器预测结果：", y_pred)
print("真实标签：", y)
print("准确率：", (y_pred == y).mean())
