import numpy as np
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
from sklearn.model_selection import KFold


class AdaBoost:
    """Binary AdaBoost ensemble built on depth-3 decision-tree weak learners.

    Labels are expected to be 0/1 (anything other than 1 is treated as the
    negative class); internally each weak learner's vote is mapped to
    {-1, +1} and the final weighted vote is mapped back to {0, 1}.
    """

    def fit(self, train_x, train_y, clf_num):
        """Train ``clf_num`` weighted weak classifiers.

        Parameters
        ----------
        train_x : array-like of shape (n_samples, n_features)
            Training features.
        train_y : array-like of shape (n_samples,)
            Training labels, expected in {0, 1}.
        clf_num : int
            Number of weak classifiers (boosting rounds).
        """
        self.weak_clfs = []
        self.clf_alphas = []
        n_train = len(train_x)
        # Start from a uniform weight distribution over the samples.
        w = np.ones(n_train) / n_train

        for i in range(clf_num):
            # Train the i-th weak classifier on the current sample weights.
            clf = DecisionTreeClassifier(max_depth=3)
            clf.fit(train_x, train_y, sample_weight=w)
            self.weak_clfs.append(clf)
            # Evaluate on the training set; miss[j] is 1.0 iff sample j is
            # misclassified, else 0.0.
            pred_train_i = clf.predict(train_x)
            miss = (pred_train_i != np.asarray(train_y)).astype(float)
            print("第%d个弱分类器的准确率: %.4f" % (i + 1, 1 - miss.sum() / n_train))
            # Weighted error rate, clamped away from 0 and 1 so the division
            # and log below cannot blow up when a weak learner classifies the
            # (weighted) training set perfectly — or gets everything wrong.
            err_m = np.clip(np.dot(w, miss), 1e-10, 1 - 1e-10)
            # Ensemble coefficient of the i-th weak classifier.
            alpha_m = 0.5 * np.log((1 - err_m) / err_m)
            self.clf_alphas.append(alpha_m)
            # Re-weight samples: misses get exp(+alpha), hits exp(-alpha),
            # then renormalize back to a probability distribution.
            sign = np.where(miss == 1.0, 1.0, -1.0)
            w = w * np.exp(alpha_m * sign)
            w = w / w.sum()

    def predict(self, test_x):
        """Return 0/1 predictions from the weighted vote of all weak learners.

        Parameters
        ----------
        test_x : array-like of shape (n_samples, n_features)

        Returns
        -------
        ndarray of shape (n_samples,) with entries in {0, 1}.
        """
        pred_test = np.zeros(len(test_x))
        for alpha, clf in zip(self.clf_alphas, self.weak_clfs):
            # Map each weak learner's output to {-1, +1} before voting.
            votes = np.where(clf.predict(test_x) == 1, 1.0, -1.0)
            pred_test = pred_test + alpha * votes
        # A positive aggregate vote means class 1, otherwise class 0.
        return (pred_test > 0) * 1


if __name__ == '__main__':
    # Load the credit-card marketing dataset (GBK-encoded CSV).
    data = pd.read_csv(
        '/Users/liuyuanxi/学习/华为智能基座/huawei-smart-base-learning/附件2.课程资源/华为—重邮智能基座机器学习课程课外培训(高级班)/第6章 集成学习/datasets/信用卡精准营销模型.csv',
        encoding='GBK')
    data = data.values
    # Features are the first four columns; the label is the last column.
    x = data[:, 0:4]
    y = data[:, -1]
    # 10-fold cross-validation (shuffled split).
    kf = KFold(n_splits=10, shuffle=True)
    for train_index, test_index in kf.split(x, y):
        x_train, x_test = x[train_index], x[test_index]
        y_train, y_test = y[train_index], y[test_index]
        ada = AdaBoost()
        ada.fit(x_train, y_train, 10)
        pred_test = ada.predict(x_test)
        print("pred test:", pred_test)
        # Fold accuracy: fraction of test samples predicted correctly.
        # (The original counted matches in a loop and assigned the rate
        # inside the loop body — recomputed every iteration, and undefined
        # for an empty fold; a vectorized mean fixes both.)
        ad_acc_rate = float(np.mean(pred_test == y_test))
        print("准确率：%.2f%%" % (ad_acc_rate * 100))