import numpy as np

def loadSimpData():
    """Return a tiny hand-crafted demo dataset.

    Returns:
        (dataMat, classLabels): a 5x2 np.matrix of feature values and a
        list of +1.0 / -1.0 class labels, one per sample row.
    """
    features = [[1., 2.1],
                [2., 1.1],
                [1.3, 1.],
                [1., 1.],
                [2., 1.]]
    labels = [1.0, 1.0, -1.0, -1.0, 1.0]
    return np.matrix(features), labels

def loadDataSet(fileName):
    """Load a tab-delimited data file.

    Every column except the last is treated as a feature value; the last
    column is the class label.

    Args:
        fileName: path to a tab-separated text file, one sample per line.

    Returns:
        (dataMat, labelMat): list of per-sample feature lists, and the
        list of float class labels.
    """
    dataMat = []
    labelMat = []
    # Open the file once via a context manager so the handle is always
    # closed (the original opened the file twice and closed neither).
    with open(fileName) as fr:
        for line in fr:
            curLine = line.strip().split('\t')
            if curLine == ['']:
                continue  # tolerate blank lines instead of crashing on float('')
            # All columns but the last are features; the last is the label.
            dataMat.append([float(v) for v in curLine[:-1]])
            labelMat.append(float(curLine[-1]))
    return dataMat, labelMat

def stumpClassify(dataMatrix, dimen, threshVal, threshIneq):
    """Classify samples by thresholding a single feature column.

    Samples on one side of the threshold are assigned class -1.0, the
    rest keep class +1.0. `dimen` selects which column (feature) is
    compared, so this is per-feature threshold classification.

    Args:
        dataMatrix: (m, n) matrix of feature values.
        dimen: index of the feature column to compare.
        threshVal: threshold value.
        threshIneq: 'lt' marks values <= threshold as -1; any other
            value marks values > threshold as -1.

    Returns:
        (m, 1) array of +1.0 / -1.0 predictions.
    """
    numSamples = np.shape(dataMatrix)[0]
    predictions = np.ones((numSamples, 1))  # start with everything at +1
    column = dataMatrix[:, dimen]
    # Boolean mask selecting the side of the threshold mapped to class -1.
    if threshIneq == 'lt':
        mask = column <= threshVal
    else:
        mask = column > threshVal
    predictions[mask] = -1.0
    return predictions

# D is the sample-weight column vector; the initial weight is an equal 1/m
# per sample. This function builds one weak classifier based on D: the weak
# classifier is a combination of one feature, one threshold on it, and one
# comparison direction.
def buildStump(dataArr, classLabels, D):
    """Search all (feature, threshold, inequality) decision stumps and
    return the one with the minimum D-weighted classification error.

    Args:
        dataArr: (m, n) array-like of feature values.
        classLabels: length-m sequence of +1.0 / -1.0 labels.
        D: (m, 1) np.matrix of sample weights.

    Returns:
        bestStump: dict with keys 'dim', 'thresh', 'ineq'.
        minError: the minimum weighted error found (1x1 matrix).
        bestClasEst: (m, 1) predictions of the best stump.
    """
    dataMatrix = np.matrix(dataArr)
    labelMat = np.matrix(classLabels).T # a matrix built from a 1-D list is a 1 x n row vector; transposing gives an m x 1 column vector
    m, n = np.shape(dataMatrix)
    numSteps = 10.0 # number of threshold steps to sweep across each feature's range
    bestStump = {} # stores the best single-level decision tree found for this weight vector D
    bestClasEst = np.matrix(np.zeros((m, 1)))
    minError = np.inf #init error sum, to +infinity

    # Outer loop: iterate over every feature of the dataset
    for i in range(n): #loop over all dimensions
        # Find the min and max of the current feature across all samples
        rangeMin = dataMatrix[:, i].min()
        rangeMax = dataMatrix[:, i].max()
        # Compute the step size between candidate thresholds
        stepSize = (rangeMax - rangeMin)/numSteps
        # Middle loop: sweep candidate thresholds; thresholds just outside the value range are allowed too
        for j in range(-1, int(numSteps)+1): #loop over all range in current dimension
            # Inner loop: toggle the inequality between less-than and greater-than
            for inequal in ['lt', 'gt']: #go over less than and greater than
                threshVal = (rangeMin + float(j)*stepSize) # candidate threshold
                predictedVals = stumpClassify(dataMatrix, i, threshVal, inequal) #class stump classify with i,j,lessThan
                errArr = np.matrix(np.ones((m, 1))) # column vector errArr: entry is 1 where prediction != label
                errArr[predictedVals == labelMat] = 0 # and 0 where prediction == label
                # Multiply the error vector errArr elementwise with the weight
                # vector D and sum (a dot product), giving scalar weightedError.
                # This is the point where AdaBoost interacts with the classifier.
                weightedError = D.T*errArr #calc total error multiplied by D
                print("split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.3f" %(i, threshVal, inequal, weightedError))
                if weightedError < minError:
                    minError = weightedError
                    bestClasEst = predictedVals.copy()
                    bestStump['dim'] = i
                    bestStump['thresh'] = threshVal
                    bestStump['ineq'] = inequal
    return bestStump, minError, bestClasEst


# Train with the AdaBoost algorithm; DS stands for decision stump.
def adaBoostTrainDS(dataArr, classLabels, numIt=40):
    """Train an AdaBoost ensemble of decision stumps.

    Args:
        dataArr: (m, n) array-like of training features.
        classLabels: length-m sequence of +1.0 / -1.0 labels.
        numIt: maximum number of boosting iterations (stops early when
            the training error reaches zero).

    Returns:
        weakClassArr: list of stump dicts ('dim', 'thresh', 'ineq', 'alpha').
        aggClassEst: (m, 1) matrix of aggregated class estimates.
    """
    weakClassArr = [] # list of the trained weak classifiers
    m = np.shape(dataArr)[0]
    D = np.matrix(np.ones((m,1))/m)  #init D to all equal, an m x 1 column vector
    aggClassEst = np.matrix(np.zeros((m, 1))) # column vector: running aggregated class estimate for each sample
    for i in range(numIt):
        bestStump, error, classEst = buildStump(dataArr, classLabels, D) #build Stump
        print("D:", D.T)
        alpha = float( 0.5*np.log( (1.0-error)/max(error, 1e-16) ) ) # classifier weight alpha; the 1e-16 floor guards against division by zero
        print('error=%f, alpha=%f' %(error, alpha))
        bestStump['alpha'] = alpha # attach the alpha weight to this classifier
        weakClassArr.append(bestStump)
        print("classEst: ", classEst.T)
        # Recompute D. First the exponent: the elementwise product of two column
        # vectors is again a column vector — one exponent per sample.
        # When label and prediction agree (both +1 or both -1), the product is 1, exponent -alpha.
        # When they disagree, the product is -1, exponent +alpha.
        expon = np.multiply(-1 * alpha * np.matrix(classLabels).T, classEst) #exponent for D calc, getting messy                
        print('expon=',expon.T)

        # Compute the new D, formula: D_i+1 = D_i * exp(-alpha|alpha) / sum(D_i+1)
        D = np.multiply(D, np.exp(expon))        
        D = D/D.sum()
        print('D_new=',D.T)

        # Accumulate the error rate of the whole ensemble so far.
        # calc training error of all classifiers, if this is 0 quit for loop early(use break)
        aggClassEst += alpha * classEst # add this classifier's weighted result into the aggregate
        print("aggClassEst: ", aggClassEst.T)
        aggErrors = np.multiply(np.sign(aggClassEst) != np.matrix(classLabels).T, np.ones((m,1))) # 1 wherever the sign of the aggregate disagrees with the true label
        errorRate = aggErrors.sum() / m # total error rate = number misclassified / sample count
        print("total error: ", errorRate)
        if errorRate == 0.0: break
    return weakClassArr, aggClassEst

def adaClassify(datToClass, classifierArr):
    """Classify samples with a trained ensemble of decision stumps.

    Args:
        datToClass: array-like of samples to classify.
        classifierArr: list of stump dicts with 'dim', 'thresh', 'ineq'
            and 'alpha' keys, as produced by adaBoostTrainDS.

    Returns:
        (m, 1) matrix of +1 / -1 class predictions (sign of the
        aggregated weighted votes).
    """
    dataMatrix = np.matrix(datToClass)
    numSamples = np.shape(dataMatrix)[0]
    aggClassEst = np.matrix(np.zeros((numSamples, 1)))
    for stump in classifierArr:
        # Each weak learner casts a vote, weighted by its alpha.
        estimate = stumpClassify(dataMatrix, stump['dim'],
                                 stump['thresh'], stump['ineq'])
        aggClassEst += stump['alpha'] * estimate
        print(aggClassEst)
    return np.sign(aggClassEst)

def plotROC(predStrengths, classLabels):
    """Plot the ROC curve for a set of classifier scores and save it.

    The cursor starts at (1.0, 1.0) and walks back toward the origin as
    the threshold sweeps upward through the sorted scores: every positive
    sample steps down along the true-positive (y) axis, every negative
    sample steps left along the false-positive (x) axis.

    Args:
        predStrengths: 1 x m matrix/array of classifier strengths
            (e.g. the transposed aggregated estimates from training).
        classLabels: list of true +1.0 / -1.0 labels aligned with the scores.

    Side effects: saves the figure to "roc_adaboost_horsecolic" and
    prints the computed AUC.
    """
    import matplotlib.pyplot as plt
    numPositive = sum(np.array(classLabels) == 1.0)  # total positives = TP + FN
    # x axis: false positive rate FP/(FP+TN); y axis: true positive rate
    # TP/(TP+FN), i.e. recall. Step sizes split each axis evenly.
    yStep = 1/float(numPositive)
    xStep = 1/float(len(classLabels) - numPositive)
    order = predStrengths.argsort()  # indices in ascending score order
    fig = plt.figure()
    fig.clf()
    ax = plt.subplot(111)
    cursor = (1.0, 1.0)
    heightSum = 0.0  # accumulates rectangle heights for the AUC estimate
    # Walk the sorted scores, drawing one segment per sample.
    for idx in order.tolist()[0]:
        if classLabels[idx] == 1.0:
            # True positive: move one y-step down, x unchanged.
            deltaX, deltaY = 0, yStep
        else:
            # False positive: move one x-step left, y unchanged, and bank
            # the current height toward the AUC integral.
            deltaX, deltaY = xStep, 0
            heightSum += cursor[1]
        nextPoint = (cursor[0] - deltaX, cursor[1] - deltaY)
        ax.plot([cursor[0], nextPoint[0]], [cursor[1], nextPoint[1]], c='b')
        cursor = nextPoint
    ax.plot([0, 1], [0, 1], 'b--')
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    auc = heightSum * xStep
    plt.title('ROC curve for AdaBoost horse colic detection system, AUC=%f' %(auc))
    ax.axis([0, 1, 0, 1])
    plt.savefig("roc_adaboost_horsecolic")
    print("the Area Under the Curve is: ", auc)