#-*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt


# Create a small hand-crafted toy data set
def loadSimpData():
    """Return a tiny hand-crafted data set for experimenting with AdaBoost.

    Returns:
        (datMat, classLabels): a 5x2 ``np.matrix`` of feature vectors and a
        list of matching labels in {+1.0, -1.0}.
    """
    features = np.matrix([[1.0, 2.1],
                          [1.5, 1.6],
                          [1.3, 1.0],
                          [1.0, 1.0],
                          [2.0, 1.0]])
    labels = [1.0, 1.0, -1.0, -1.0, 1.0]
    return features, labels



# Visualise the data set
def showDataSet(dataMat, labelMat):
    """Scatter-plot the samples, positives and negatives in separate colours.

    Args:
        dataMat: sample matrix, one row per sample (two feature columns).
        labelMat: sequence of labels; a value > 0 marks a positive sample.
    """
    positives = np.array([dataMat[i] for i in range(len(labelMat)) if labelMat[i] > 0])
    negatives = np.array([dataMat[i] for i in range(len(labelMat)) if labelMat[i] <= 0])
    # scatter(xs, ys): the first argument is the x coordinates, the second the y
    for group in (positives, negatives):
        plt.scatter(np.transpose(group)[0], np.transpose(group)[1])
    plt.show()

# Predict labels with a single decision stump
def stumpClassify(dataMat,dimen,thresh,LessOrGreager):
    '''
        dataMat :数据矩阵
        dimen: 哪一维度的数据从0开始
        thresh: 阈值
        LessOrGreater:是大于(等于)是1 还是小于是1
    '''
    row,col = dataMat.shape
    predLabels = np.ones((row,1))
    if LessOrGreager =='LessThan':
        #把比thresh大的全部设置为-1
        predLabels[dataMat[:,dimen]>thresh]=-1.0
    else:
        predLabels[dataMat[:,dimen]<thresh]=-1.0
    return predLabels

# Build a one-level decision tree: pick one feature, one threshold, and one
# inequality direction that minimise the weighted classification error
def buildStump(dataMat, classLabels, D):
    """Find the best decision stump for the current sample weights.

    Args:
        dataMat: sample matrix (one row per sample).
        classLabels: sequence of labels in {+1.0, -1.0}.
        D: (row, 1) ``np.mat`` of per-sample weights.

    Returns:
        (bestStump, minError, bestClasEst): a dict describing the stump
        ('dim', 'thresh', 'LessOrGreater'), its weighted error, and its
        predictions on the training samples.
    """
    X = np.mat(dataMat)
    y = np.mat(classLabels).T
    nSamples, nFeatures = X.shape
    # number of threshold candidates tried per feature
    numSteps = 10.0
    bestStump = {}
    bestClasEst = np.mat(np.zeros((nSamples, 1)))
    # lowest weighted error seen so far
    minError = float('inf')
    for feature in range(nFeatures):
        lo = X[:, feature].min()
        hi = X[:, feature].max()
        step = (hi - lo) / numSteps
        # sweep thresholds from just below the column minimum (everything
        # on one side) to just above the maximum (everything on the other)
        for k in range(-1, int(numSteps) + 1):
            thresh = lo + k * step
            for inequality in ('LessThan', 'GreaterThan'):
                predicted = np.mat(stumpClassify(X, feature, thresh, inequality))
                # 1 where misclassified, 0 where correct
                misclassified = np.mat(np.ones((nSamples, 1)))
                misclassified[predicted == y] = 0
                weightedError = D.T * misclassified
                if weightedError < minError:
                    minError = weightedError
                    bestClasEst = predicted.copy()
                    bestStump['dim'] = feature
                    bestStump['thresh'] = thresh
                    bestStump['LessOrGreater'] = inequality

    # best stump, its weighted error, and its predictions
    return bestStump, minError, bestClasEst

# Train AdaBoost: repeatedly fit stumps and compute their alpha weights
def adaBoostTrain(dataMat, classLabels, numIts=40):
    """Train an AdaBoost ensemble of decision stumps.

    Args:
        dataMat: training sample matrix (one row per sample).
        classLabels: sequence of labels in {+1.0, -1.0}.
        numIts: maximum number of boosting rounds (weak classifiers).

    Returns:
        (weakClassfiers, predLabels): the list of stump dicts — each
        augmented with its 'alpha' vote weight — and the aggregated
        (pre-sign) ensemble scores on the training samples.
    """
    weakClassfiers = []
    row = np.shape(dataMat)[0]
    # sample weight vector, initialised uniformly
    W = np.mat(np.ones((row, 1)) / row)
    # running weighted vote of all stumps trained so far
    predLabels = np.mat(np.zeros((row, 1)))
    for i in range(numIts):
        # Build the best stump for the current weights.
        # BUG FIX: the original referenced the global `dataArr` instead of
        # the `dataMat` parameter, so the function only worked by accident
        # when a global of that name happened to exist (as in __main__).
        bestStump, error, classEst = buildStump(dataMat, classLabels, W)
        # alpha = 0.5 * ln((1 - err) / err); the max() guards against a
        # zero-error stump causing a division by zero
        alpha = float(0.5 * np.log((1.0 - error) / max(error, 1e-16)))
        bestStump['alpha'] = alpha
        weakClassfiers.append(bestStump)
        # reweight: shrink weights of correct samples, grow the wrong ones
        expon = np.multiply(-1 * alpha * np.mat(classLabels).T, classEst)
        W = np.multiply(W, np.exp(expon))
        # renormalise so the weights sum to 1
        W = W / W.sum()
        # accumulate this stump's weighted vote into the ensemble score
        predLabels += alpha * classEst
        # np.multiply is element-wise for both array and mat;
        # `*` is element-wise on array but matrix product on mat
        aggErrors = np.multiply(np.sign(predLabels) != np.mat(classLabels).T,
                                np.ones((row, 1)))
        errorRate = aggErrors.sum() / row
        if errorRate == 0.0:
            break  # training error reached 0 — stop early
    return weakClassfiers, predLabels

# Classify new samples with a trained AdaBoost ensemble
def adaClassify(dataArray, weakClassfiers):
    """Classify samples with a trained AdaBoost stump ensemble.

    Args:
        dataArray: samples to classify, one row per sample.
        weakClassfiers: list of stump dicts as produced by adaBoostTrain
            (keys 'dim', 'thresh', 'LessOrGreater', 'alpha').

    Returns:
        (row, 1) ``np.mat`` of predicted labels in {+1.0, -1.0} (0.0 on a tie).
    """
    samples = np.mat(dataArray)
    scores = np.mat(np.zeros((samples.shape[0], 1)))
    # sum each stump's prediction weighted by its alpha, then take the sign
    for stump in weakClassfiers:
        scores += stump['alpha'] * stumpClassify(
            samples, stump['dim'], stump['thresh'], stump['LessOrGreater'])
    return np.sign(scores)

def loadDataSetFromFile(fileName):
    """Load a tab-separated data file; the last column is the class label.

    Args:
        fileName: path to a file whose lines are tab-separated floats.

    Returns:
        (dataMat, labelMat): list of feature-value lists and list of labels.
    """
    dataMat = []
    labelMat = []
    # BUG FIX: the original opened the file twice and never closed either
    # handle; `with` guarantees the single handle is closed.
    with open(fileName) as fr:
        lines = fr.readlines()
    # the first line determines how many columns are features (+1 label)
    numFeat = len(lines[0].split('\t'))
    for line in lines:
        curLine = line.strip().split('\t')
        dataMat.append([float(v) for v in curLine[:numFeat - 1]])
        labelMat.append(float(curLine[-1]))
    return dataMat, labelMat




if __name__ =='__main__':
    '''
    dataArr, classLabels = loadSimpData()
    weakClassArr, aggClassEst = adaBoostTrain(dataArr, classLabels)
    print(weakClassArr)
    print(adaClassify([[0, 0], [5, 5]], weakClassArr))
    '''

    # Evaluate the hand-written AdaBoost model on the horse-colic data set.
    # NOTE(review): adaBoostTrain's body references the name `dataArr`, not
    # its own parameter, so the global `dataArr` bound below is what actually
    # feeds training — do not rename this variable without fixing that.
   
    dataArr, LabelArr = loadDataSetFromFile('./data/horseColicTraining2.txt')
    weakClassArr, aggClassEst = adaBoostTrain(dataArr, LabelArr)
    testArr, testLabelArr = loadDataSetFromFile('./data/horseColicTest2.txt')
    print(weakClassArr)
    # error rate = (#misclassified) / (#samples), reported as a percentage
    predictions = adaClassify(dataArr, weakClassArr)
    errArr = np.mat(np.ones((len(dataArr), 1)))
    print('训练集的错误率:%.3f%%' % float(errArr[predictions != np.mat(LabelArr).T].sum() / len(dataArr) * 100))
    predictions = adaClassify(testArr, weakClassArr)
    errArr = np.mat(np.ones((len(testArr), 1)))
    print('测试集的错误率:%.3f%%' % float(errArr[predictions != np.mat(testLabelArr).T].sum() / len(testArr) * 100))
   
