import numpy
from matplotlib import pyplot
# Load the sample set: tab-separated rows of feature columns plus a label column.
data = numpy.loadtxt('./testSet.txt', delimiter='\t')
# AdaBoost expects labels in {-1, 1}, so remap label 0 to -1.
data[:, -1] = numpy.where(data[:, -1] == 0, -1, data[:, -1])
# Quick visual check of the two classes before training.
pyplot.scatter(data[:, 0], data[:, 1], c=data[:, -1])
pyplot.show()
# Build a decision-stump (one-level decision tree) weak classifier.
def create_stump(weight, data: numpy.ndarray, num_steps: int = 10):
    """Find the best decision stump for the weighted sample set.

    A stump splits on a single feature at a single threshold, predicting
    +1 on one side (named by the criterion) and -1 on the other. All
    features and ``num_steps + 1`` evenly spaced thresholds per feature
    are scanned; the stump with the lowest weighted error wins.

    Parameters
    ----------
    weight : numpy.ndarray, shape (m,)
        Per-sample weights; the stump minimizes the weighted
        misclassification, so the weights steer which mistakes matter.
    data : numpy.ndarray, shape (m, n+1)
        Feature columns followed by a label column with values in {-1, 1}.
    num_steps : int, optional
        Number of threshold steps between each feature's min and max
        (default 10, matching the original hard-coded value).

    Returns
    -------
    bestStump : dict
        'dim' (feature index), 'criterion' ('>=' or '<') and 'threshold'
        of the best split found.
    minError : float
        Weighted error of that stump.
    best_predict_label : numpy.ndarray, shape (m,)
        The best stump's +1/-1 predictions on the training samples.
    """
    feature = data[:, 0:-1]
    label = data[:, -1]
    m, n = feature.shape
    predict_label = numpy.empty(m)  # scratch buffer, fully overwritten each candidate
    best_predict_label = numpy.zeros(m)
    minError = numpy.inf
    bestStump = {}
    for i in range(n):
        # Scan evenly spaced thresholds over this feature's observed range.
        minimum = feature[:, i].min()
        maximum = feature[:, i].max()
        stride = (maximum - minimum) / num_steps
        for j in range(num_steps + 1):
            value = minimum + j * stride
            for criterion in ['>=', '<']:
                # Predict +1 on the side named by `criterion`, -1 on the other.
                if criterion == '>=':
                    predict_label[feature[:, i] >= value] = 1
                    predict_label[feature[:, i] < value] = -1
                else:
                    predict_label[feature[:, i] < value] = 1
                    predict_label[feature[:, i] >= value] = -1
                # Weighted error: sample weights decide how costly each miss is.
                error = numpy.dot(weight, predict_label != label)
                if error < minError:
                    minError = error
                    bestStump['dim'] = i
                    bestStump['criterion'] = criterion
                    bestStump['threshold'] = value
                    best_predict_label = predict_label.copy()
    return bestStump, minError, best_predict_label
# AdaBoost minimizes the exponential loss e^{-y_i * G(x_i)}.
def adaboostTrain(data, epoch):
    """Train an AdaBoost ensemble of decision stumps.

    Each round fits the best stump under the current sample weights, then
    re-weights the samples so the next stump focuses on the current
    mistakes; the stumps vote with weights derived from their errors.

    Parameters
    ----------
    data : numpy.ndarray, shape (m, n+1)
        Feature columns followed by a {-1, 1} label column.
    epoch : int
        Maximum number of boosting rounds; stops early once the
        ensemble's training error reaches zero.

    Returns
    -------
    list of dict
        One stump per round; each dict holds 'dim', 'criterion',
        'threshold' and the stump's vote weight 'alpha'.
    """
    m = data.shape[0]
    label = data[:, -1]  # hoisted: loop-invariant
    weakClassifer = []
    weight = numpy.ones(m) / m  # start with uniform sample weights
    aggregate = numpy.zeros(m)  # running weighted vote of all stumps so far
    for i in range(epoch):
        bestStump, minError, best_predict_label = create_stump(weight, data)
        print(minError)
        # Stump vote weight; max(minError, 1e-16) guards against a zero
        # error causing division by zero in the ratio.
        alpha = 0.5 * numpy.log((1 - minError) / max(minError, 1e-16))
        bestStump['alpha'] = alpha
        weakClassifer.append(bestStump)
        # Re-weight samples: correct predictions get D*e^-alpha,
        # wrong ones get D*e^alpha, then renormalize to sum to 1.
        weight = weight * numpy.exp(-1 * label * best_predict_label * alpha)
        weight = weight / weight.sum()
        # Accumulate this stump's weighted vote and measure the
        # ensemble's training error rate.
        aggregate += best_predict_label * alpha
        rate = numpy.sum(numpy.sign(aggregate) != label) / m
        print(rate, alpha)
        if rate == 0:
            break
    return weakClassifer


# Train for up to 100 boosting rounds on the loaded sample set.
weak = adaboostTrain(data, epoch=100)