import numpy as np
import matplotlib.pyplot as plt
from pylab import mpl
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import auc, roc_curve, roc_auc_score


mpl.rcParams['font.sans-serif'] = ['SimHei'] # default font: SimHei, so CJK axis labels/titles render correctly
mpl.rcParams['axes.unicode_minus'] = False # keep the minus sign from rendering as a box in saved figures


def load_simple_data():
    """
    Build the tiny 2-D toy dataset used for the AdaBoost demo.

    :return: tuple of (feature array of shape (5, 2), label array of +1/-1)
    """
    features = np.array([
        [1, 2.1],
        [2, 1.1],
        [1.3, 1],
        [1, 1],
        [2, 1],
    ])
    labels = np.array([1, 1, -1, -1, 1])
    return features, labels


def load_horse_data(file):
    """
    Load the horse-colic dataset from a tab-separated text file.

    Each non-empty line holds the feature values followed by the class
    label in the last column.

    :param file: path to the TSV data file
    :return: tuple of (2-D feature array, 1-D integer label array)
    """
    data_arr = []
    label_arr = []
    with open(file, 'r') as f:
        for line in f:
            # strip() instead of the original `line[: -1]`: slicing off the
            # last character silently truncated a digit when the final line
            # had no trailing newline.
            line = line.strip()
            if not line:
                # skip blank lines instead of crashing on float('')
                continue
            arr = [float(num) for num in line.split('\t')]
            data_arr.append(arr[:-1])
            label_arr.append(int(arr[-1]))

    return np.array(data_arr), np.array(label_arr)


class AdaBoost(object):
    """
    AdaBoost classifier built from one-level decision trees (stumps).

    Labels are expected to be +1 / -1.
    """

    def __init__(self, train_data, train_label):
        self.train_data = train_data
        self.train_label = train_label
        # m = number of samples, n = number of features
        self.m, self.n = train_data.shape
        # trained weak classifiers: dicts with keys dim/thresh/ineq/alpha
        self.weak_class_arr = []

    @staticmethod
    def stump_classify_predict(predict_data, dimen, thresh_val, thresh_ineq):
        """
        Predict with a single decision stump.

        :param predict_data: 2-D feature array
        :param dimen: feature (column) index the stump splits on
        :param thresh_val: split threshold
        :param thresh_ineq: 'lt' marks samples <= threshold as -1;
                            any other value marks samples > threshold as -1
        :return: 1-D array of +1/-1 predictions
        """
        m = predict_data.shape[0]
        ret_array = np.ones(m)
        if thresh_ineq == 'lt':
            ret_array[predict_data[:, dimen] <= thresh_val] = -1.0
        else:
            ret_array[predict_data[:, dimen] > thresh_val] = -1.0

        return ret_array

    def stump_classify(self, D):
        """
        Build the best single-level decision tree (weak classifier) for the
        current sample-weight vector.

        :param D: (m, 1) sample-weight column vector summing to 1
        :return: (best stump dict, minimal weighted error, prediction array)
        """
        num_steps = 10
        # best stump found so far: dim (feature), thresh (min + j * step),
        # ineq ('lt'/'gt') — whichever minimizes the weighted error
        best_stump = {}
        predict_arr = np.zeros((self.m, 1))
        min_error = np.inf
        for i in range(self.n):
            range_min = self.train_data[:, i].min()
            range_max = self.train_data[:, i].max()
            step_size = (range_max - range_min) / num_steps
            # sweep thresholds from just below the min to just above the max
            for j in range(-1, int(num_steps) + 1):
                for inequal in ['lt', 'gt']:
                    thresh_val = (range_min + j * step_size)
                    predict_vals = self.stump_classify_predict(self.train_data, i, thresh_val, inequal)

                    # 0/1 vector: 1 where the stump misclassified the sample
                    err_arr = np.ones(self.m)
                    err_arr[predict_vals == self.train_label] = 0

                    # weighted error = dot product of the miss vector with D
                    weighted_error = np.dot(err_arr, D)[0]

                    if weighted_error < min_error:
                        min_error = weighted_error
                        predict_arr = predict_vals.copy()
                        best_stump['dim'] = i
                        best_stump['thresh'] = thresh_val
                        best_stump['ineq'] = inequal
        return best_stump, min_error, predict_arr

    def adaboost_train_ds(self, num_it=40):
        """
        Train AdaBoost with decision stumps ("ds" = decision stump).

        :param num_it: maximum number of weak classifiers to train
        :return: None; trained stumps are stored in self.weak_class_arr
        """
        # reset so repeated training calls do not accumulate stale stumps
        self.weak_class_arr = []
        # start with equal sample weights, D.sum() == 1
        D = np.ones((self.m, 1)) / self.m
        agg_class_est = np.zeros(self.m)
        for i in range(num_it):
            best_stump, error, class_est = self.stump_classify(D)
            print('D: {}'.format(D))
            # classifier weight: alpha = 0.5 * ln((1 - eps) / eps);
            # eps is clamped away from zero to avoid division by zero
            alpha = 0.5 * np.log((1 - error) / max(error, 1e-16))
            best_stump['alpha'] = alpha
            self.weak_class_arr.append(best_stump)
            print('class_est: {}'.format(class_est))
            # sample-weight update:
            #   correctly classified: D_i <- D_i * exp(-alpha) / sum(D)
            #   misclassified:        D_i <- D_i * exp(+alpha) / sum(D)
            expon = np.multiply(-1 * alpha * self.train_label.reshape((len(self.train_label), 1)),
                                class_est.reshape((len(class_est), 1)))
            D = np.multiply(D, np.exp(expon))
            D = D / D.sum()
            agg_class_est += alpha * class_est
            print('agg_class_est: {}'.format(agg_class_est))
            agg_errors = np.multiply(np.sign(agg_class_est) != self.train_label, np.ones(self.m))

            error_rate = agg_errors.sum() / self.m
            # bug fix: previously printed the single stump's weighted error
            # under the label 'total error'; report the ensemble error rate
            print('total error: {}'.format(error_rate))
            if error_rate == 0:
                break

    def run_stump_sginal(self):
        """
        Demo helper: fit and report a single weak classifier.

        :return: None; prints the best stump, its error and its predictions
        """
        D = np.ones((5, 1)) / 5
        best_stump, min_error, predict_arr = self.stump_classify(D)
        print('-' * 60)
        print('best_stump: {}'.format(best_stump))
        print('*' * 60)
        print('min_error: {}'.format(min_error))
        print('-' * 60)
        print('predict_arr: {}'.format(predict_arr))

    def predict(self, predict_data):
        """
        Predict labels with the trained ensemble.

        Every trained stump votes on the data; votes are weighted by each
        stump's alpha and the sign of the weighted sum is returned.

        np.sign(x): -1 if x < 0, 0 if x == 0, 1 if x > 0

        :param predict_data: one sample (1-D) or several samples (2-D),
                             as list or ndarray
        :return: 1-D array of +1/-1 (or 0) predictions
        """
        # fix: convert to ndarray FIRST, then promote 1-D input to 2-D
        # (the original checked the shape before the type conversion)
        if not isinstance(predict_data, np.ndarray):
            predict_data = np.array(predict_data)

        if predict_data.ndim < 2:
            predict_data = np.array([predict_data])

        m = predict_data.shape[0]
        agg_class_est = np.zeros(m)
        for classifer in self.weak_class_arr:
            class_est = self.stump_classify_predict(predict_data, classifer['dim'], classifer['thresh'], classifer['ineq'])
            agg_class_est += classifer['alpha'] * class_est

        return np.sign(agg_class_est)


def plot_roc(pre_label, test_label):
    """
    Plot the ROC curve and print the AUC as a rectangle (step) sum.

    The cursor starts at (1, 1) and walks down/left through samples sorted
    by ascending prediction score, stepping down for each true positive and
    left for each false positive.

    NOTE(review): callers that pass hard +1/-1 predictions (rather than
    continuous scores) will get a degenerate few-segment curve — presumably
    this expects classifier scores; verify against caller.

    :param pre_label: 1-D ndarray of prediction scores for the test samples
    :param test_label: 1-D ndarray of ground-truth +1/-1 labels
    :return: None; prints the AUC and shows the figure (blocking)
    """
    cur = (1, 1)
    y_sum = 0  # accumulates curve heights for the AUC rectangle sum
    # number of positive examples
    num_true_classes = sum(test_label == 1)
    # number of negative examples
    num_false_classes = len(test_label) - num_true_classes
    y_step = 1 / num_true_classes
    x_step = 1 / num_false_classes

    # indices that would sort the predictions ascending
    sorted_index = pre_label.argsort()

    fig = plt.figure()
    fig.clf()
    ax = plt.subplot(111)
    for index in sorted_index:
        # true positive: step down along the y axis
        if test_label[index] == 1:
            del_x = 0
            del_y = y_step
        # false positive: step left along the x axis, record current height
        else:
            del_x = x_step
            del_y = 0
            y_sum += cur[1]
        ax.plot([cur[0], cur[0] - del_x], [cur[1], cur[1] - del_y], c='b')
        cur = (cur[0] - del_x, cur[1] - del_y)
    # diagonal: random-guessing baseline (p = 0.5)
    ax.plot([0, 1], [0, 1], 'b--')
    plt.xlabel('假阳率')
    plt.ylabel('真阳率')
    plt.title('AdaBoost马疝病检测系统的ROC曲线')
    ax.axis([0, 1, 0, 1])

    print('AUC: {}'.format(y_sum * x_step))
    plt.show()


def run_test():
    """
    Train on the toy dataset and classify two sample points.

    :return: None
    """
    features, labels = load_simple_data()
    samples = [[5, 5], [0, 0]]

    model = AdaBoost(features, labels)
    model.adaboost_train_ds(num_it=9)
    predict = model.predict(samples)
    # print('predict: {}'.format(predict))


def run_horse():
    """
    Train on the horse-colic dataset, compare the hand-written AdaBoost
    against sklearn's AdaBoostClassifier, and draw the ROC curve.

    :return: None
    """
    file_train = './data/chp7/horseColicTraining2.txt'
    file_test = './data/chp7/horseColicTest2.txt'
    train_data, train_label = load_horse_data(file_train)
    test_data, test_label = load_horse_data(file_test)

    model = AdaBoost(train_data, train_label)
    model.adaboost_train_ds(num_it=50)
    predict = model.predict(test_data)
    # print('predict: {}'.format(predict))

    # error rate of the hand-written implementation
    error_count = np.ones(len(predict))[predict != test_label].sum()
    print(error_count)
    print('手写AdaBoost error rate: {}'.format(error_count / len(predict)))

    plot_roc(predict, test_label)

    # sklearn baseline for comparison
    clf = AdaBoostClassifier()
    clf.fit(train_data, train_label)
    predict_rs = clf.predict(test_data)
    error_count = np.ones(len(predict_rs))[predict_rs != test_label].sum()
    print('sklearn AdaBoost error rate: {}'.format(error_count / len(predict_rs)))

    fpr, tpr, thresholds = roc_curve(test_label, predict_rs)
    sk_auc = auc(fpr, tpr)
    print('sklearn auc: {}'.format(sk_auc))


# Script entry point: run the horse-colic demo (toy demo left commented out).
if __name__ == '__main__':
    # run_test()
    run_horse()
