import numpy as np
import matplotlib.pyplot as plt

# Detailed walkthrough of this chapter's code: https://cuijiahua.com/blog/2017/11/ml_10_adaboost.html
# Differences between matrix, array and list: https://www.imooc.com/article/31107
# Key reading: https://blog.csdn.net/qq_30622831/article/details/81713100
# https://blog.csdn.net/lfj742346066/article/details/77880668

def loadSimpData():
    """Return a tiny hard-coded 2-D dataset for demonstrating AdaBoost.

    Returns:
        datMat - 5x2 np.matrix of feature vectors
        classLabels - list of class labels (+1.0 / -1.0), one per row
    """
    rows = [
        [1.0, 2.1],
        [1.5, 1.6],
        [1.3, 1.0],
        [1.0, 1.0],
        [2.0, 1.0],
    ]
    datMat = np.matrix(rows)
    classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]
    return datMat, classLabels


def stumpClassify(dataMatrix, dimen, threshVal, threshIneq):
    """Classify samples with a single decision stump (threshold on one feature).

    Parameters:
        dataMatrix - data matrix, one sample per row
        dimen - index of the feature (column) to threshold on
        threshVal - threshold value
        threshIneq - "lt" flags values <= threshold as -1; anything else
                     flags values > threshold as -1
    Returns:
        (m, 1) array of predicted labels, each +1.0 or -1.0
    """
    m = np.shape(dataMatrix)[0]
    column = dataMatrix[:, dimen]
    # Boolean mask selecting the samples that fall on the -1 side of the stump.
    mask = column <= threshVal if threshIneq == "lt" else column > threshVal
    predictions = np.ones((m, 1))  # everything defaults to +1
    predictions[mask] = -1.0
    return predictions


def buildStump(dataArr, classLabels, D):
    """Find the single decision stump with the lowest weighted error.

    Scans every feature, a grid of candidate thresholds per feature, and both
    inequality directions, keeping the combination whose D-weighted
    misclassification rate is smallest.

    Parameters:
        dataArr - data matrix, one sample per row
        classLabels - class labels (+1.0 / -1.0), one per sample
        D - (m, 1) matrix of sample weights (should sum to 1)
    Returns:
        bestStump - dict with keys "dim", "thresh", "ineq" describing the stump
        minError - smallest weighted error found (1x1 matrix)
        bestClasEst - (m, 1) predictions of the best stump
    """
    dataMatrix = np.mat(dataArr)
    labelMat = np.mat(classLabels).T  # column vector of labels
    m, n = np.shape(dataMatrix)
    numSteps = 10.0  # number of threshold intervals per feature
    bestStump = {}
    bestClasEst = np.mat(np.zeros((m, 1)))
    minError = float("inf")  # best (lowest) weighted error seen so far
    for dim in range(n):
        column = dataMatrix[:, dim]
        lo = column.min()
        hi = column.max()
        stepSize = (hi - lo) / numSteps
        # Sweep thresholds from just below the min to just above the max.
        for step in range(-1, int(numSteps) + 1):
            thresh = lo + float(step) * stepSize
            for rule in ["lt", "gt"]:  # try both inequality directions
                # Stump prediction: samples on the chosen side get -1.
                predicted = np.ones((m, 1))
                if rule == "lt":
                    predicted[column <= thresh] = -1.0
                else:
                    predicted[column > thresh] = -1.0
                # 1 where misclassified, 0 where correct.
                misclassified = np.mat(np.ones((m, 1)))
                misclassified[predicted == labelMat] = 0
                # Weighted error = sum of weights of misclassified samples.
                weightedError = D.T * misclassified
                if weightedError < minError:
                    minError = weightedError
                    bestClasEst = predicted.copy()
                    bestStump = {"dim": dim, "thresh": thresh, "ineq": rule}
    return bestStump, minError, bestClasEst


def adaBoostTrainDS(dataArr, classLabels, numIt=40):
    """Train an AdaBoost classifier built from decision stumps.

    :param dataArr: data matrix, one sample per row
    :param classLabels: class labels (+1.0 / -1.0), one per sample
    :param numIt: maximum number of boosting rounds
    :return: (list of weak-classifier dicts, each with dim/thresh/ineq/alpha;
              accumulated class-estimate column vector)
    """
    weakClassifiers = []  # one dict per boosting round
    m = np.shape(dataArr)[0]
    sampleWeights = np.mat(np.ones((m, 1)) / m)  # uniform initial weights: 1/N each
    aggClassEst = np.mat(np.zeros((m, 1)))  # running weighted vote per sample
    labelCol = np.mat(classLabels).T  # labels as a column vector (loop-invariant)
    for _ in range(numIt):
        stump, error, classEst = buildStump(dataArr, classLabels, sampleWeights)
        print("D:", sampleWeights.T)
        # Classifier weight; max(error, 1e-16) guards against division by zero.
        alpha = float(0.5 * np.log((1.0 - error) / max(error, 1e-16)))
        stump["alpha"] = alpha
        weakClassifiers.append(stump)
        print("classEst:", classEst.T)
        # Re-weight samples: raise the weight of misclassified ones, then normalize.
        exponent = np.multiply(-1 * alpha * labelCol, classEst)
        sampleWeights = np.multiply(sampleWeights, np.exp(exponent))
        sampleWeights = sampleWeights / sampleWeights.sum()
        # Accumulate this stump's weighted vote into the ensemble estimate.
        aggClassEst += alpha * classEst
        print("aggClassEst:", aggClassEst.T)
        # Ensemble training error rate = fraction of samples the sign gets wrong.
        wrong = np.multiply(np.sign(aggClassEst) != labelCol, np.ones((m, 1)))
        errorRate = wrong.sum() / m
        print("total error: ", errorRate)
        if errorRate == 0.0:  # perfect fit on training data: stop early
            break
    return weakClassifiers, aggClassEst


if __name__ == '__main__':
    # Train the AdaBoost ensemble on the toy dataset and show the result.
    dataArr, classLabels = loadSimpData()
    weakClassArr, aggClassEst = adaBoostTrainDS(dataArr, classLabels)
    print("-" * 20)
    print(weakClassArr)
    print(aggClassEst)