import numpy
from matplotlib import pyplot
import pandas
# Data preparation: load the dating data set (tab-separated, no header row).
data=pandas.read_table("./datingTestSet.txt",sep="\t",header=None,na_filter=False)
# Map the text class labels in the last column to integer codes.
dic={'largeDoses':0, 'smallDoses':1, 'didntLike':2}
data.iloc[:,-1]=data.iloc[:,-1].map(dic)
# Keep feature columns 0 and 1 plus the label column 3 (feature column 2 is dropped).
data=data.values[:,[0,1,3]]
feature=data[:,0:-1]
label=data[:,-1]
m,n=feature.shape
# Initial sample weights: uniform distribution over all samples.
weight=numpy.ones(data.shape[0])/data.shape[0]
# Visualization: scatter plot of the two features, colored by class label.
pyplot.scatter(feature[:,0],feature[:,1],c=label)
pyplot.show()
def _entropy(p):
    """Shannon entropy (base 2) of a probability vector, with 0*log(0) := 0."""
    with numpy.errstate(divide="ignore"):  # silence log2(0) warnings; masked below
        log_p = numpy.log2(p)
    log_p[log_p == -numpy.inf] = 0
    return numpy.sum(-p * log_p)

def get_best_split(feature, label, weight):
    """Find the (feature index, threshold) pair maximizing information gain.

    Parameters:
        feature -- (m, n) array of sample features.
        label   -- (m,) array of class labels.
        weight  -- (m,) sample weights, interpreted as a data distribution.

    Returns:
        (split_index, split_value): best feature column and threshold; samples
        with feature[:, split_index] >= split_value belong to the right branch.
    """
    m, n = feature.shape
    unique_label = numpy.unique(label)  # distinct classes
    # Entropy of the full weighted label distribution.
    class_num = unique_label.shape[0]
    p = numpy.empty(class_num)
    for i in range(class_num):
        p[i] = numpy.sum(weight[label == unique_label[i]])
    HX = _entropy(p)
    step_num = 100  # number of candidate thresholds per feature
    HX_Y = numpy.empty(n)
    best_values = numpy.empty(n)
    for i in range(n):  # evaluate each feature column
        best_HX_y = numpy.inf
        maximum = numpy.max(feature[:, i])
        minimum = numpy.min(feature[:, i])
        stride = (maximum - minimum) / step_num
        best_values[i] = minimum  # fallback when no valid split improves
        for j in range(1, step_num):
            threshold = minimum + j * stride
            right = feature[:, i] >= threshold
            left = ~right
            # Skip degenerate splits (one side empty): they carry no
            # information and would divide by a zero weight sum.
            if not right.any() or not left.any():
                continue
            py_right = numpy.sum(weight[right])
            py_left = numpy.sum(weight[left])
            # Renormalize weights on each side into conditional distributions.
            right_weight = weight[right] / py_right
            left_weight = weight[left] / py_left
            right_labels = label[right]
            left_labels = label[left]
            # Posterior class probabilities on each side.
            pp_right = numpy.array([numpy.sum(right_weight[right_labels == c])
                                    for c in numpy.unique(right_labels)])
            pp_left = numpy.array([numpy.sum(left_weight[left_labels == c])
                                   for c in numpy.unique(left_labels)])
            # Conditional entropy H(X|Y) for this threshold.
            new_HX_y = py_right * _entropy(pp_right) + py_left * _entropy(pp_left)
            if new_HX_y < best_HX_y:
                best_HX_y = new_HX_y
                best_values[i] = threshold
        HX_Y[i] = best_HX_y
    # Information gain per feature; pick the feature with the largest gain.
    delta = HX - HX_Y
    split_index = numpy.argmax(delta)
    return split_index, best_values[split_index]
#构建决策树,#限制高度
def create_tree(feature, label, weight, height):
    """Recursively build a binary decision tree limited to `height` levels.

    Parameters:
        feature -- (m, n) array of sample features.
        label   -- (m,) array of class labels.
        weight  -- (m,) sample weights (normalized per node).
        height  -- remaining tree depth; 0 forces a leaf.

    Returns:
        A nested dict {"split_index", "split_value", "right", "left"} for an
        internal node, or a bare label value for a leaf.
    """
    # Pure node: every sample has the same label.
    if numpy.unique(label).shape[0] == 1:
        return label[0]
    # Depth exhausted: return the weighted-majority label.
    if height == 0:
        unique_label = numpy.unique(label)
        p_label = numpy.empty((unique_label.shape[0],))
        for i in range(unique_label.shape[0]):
            p_label[i] = numpy.sum(weight[label == unique_label[i]])
        return unique_label[numpy.argmax(p_label)]
    split_index, split_value = get_best_split(feature, label, weight)
    tree = {"split_index": split_index, "split_value": split_value}
    # Compute the partition mask once instead of re-evaluating the same
    # comparison six times for the two branches.
    right_mask = feature[:, split_index] >= split_value
    left_mask = ~right_mask
    # Renormalize the child weights so each branch sees a distribution.
    right_weight = weight[right_mask] / numpy.sum(weight[right_mask])
    tree["right"] = create_tree(feature[right_mask], label[right_mask],
                                right_weight, height - 1)
    left_weight = weight[left_mask] / numpy.sum(weight[left_mask])
    tree["left"] = create_tree(feature[left_mask], label[left_mask],
                               left_weight, height - 1)
    return tree
from pprint import pprint
#tree=create_tree(feature,label,weight,height)
def predict(x, tree):
    """Walk the decision tree for sample x and return the leaf label.

    Internal nodes are dicts with "split_index"/"split_value"; anything that
    is not a dict is a leaf label.
    """
    node = tree
    while isinstance(node, dict):
        goes_right = x[node['split_index']] >= node['split_value']
        node = node['right'] if goes_right else node["left"]
    return node
# res=numpy.empty((feature.shape[0],))
# for i in range(feature.shape[0]):
#     res[i]=predict(feature[i,:],tree)
# print(numpy.sum(res==label)/feature.shape[0])
def get_Error(tree, feature, label, weight):
    """Predict every row of `feature` with `tree` and return the results.

    Returns:
        (res, error) -- res is the float array of per-sample predictions;
        error is the weight-weighted misclassification rate.
    """
    res = numpy.array([predict(row, tree) for row in feature], dtype=float)
    weighted_error = numpy.dot(weight, res != label)  # 计算错误率 -> weighted error rate
    return res, weighted_error
epoch=100#maximum number of weak classifiers
aggregate_predict=numpy.empty((epoch,m))
aggregate_beta=numpy.empty(epoch)
weak_learners=[]
height=1#limit tree height (each weak learner is a depth-1 stump)
# AdaBoost-style training: each round fits a height-limited tree on the
# current weight distribution, then re-weights samples based on its errors.
for i in range(epoch):
    p=weight/numpy.sum(weight)#normalize weights into a probability distribution
    #build a weak classifier on the current distribution
    tree = create_tree(feature, label, p, height)
    #compute beta from the weighted error
    res,minError=get_Error(tree,feature,label,p)
    if minError>0.5 or minError==0:#stop when error rate > 0.5 or exactly 0
        break
    aggregate_predict[i,:]=res#record each weak learner's predictions
    beta=minError/(1-minError)#beta < 1 whenever error < 0.5
    aggregate_beta[i]=numpy.log(1/beta)
    #update weights using beta and the predictions
    weight=weight*beta**(1-(res != label))#note: correctly classified samples get down-weighted
    tree["confidence"]=aggregate_beta[i]
    #compute the combined ensemble prediction so far
    f_res=numpy.empty((numpy.unique(label).shape[0],m))
    for k in range(numpy.unique(label).shape[0]):
        f_res[k,:]=numpy.sum((aggregate_predict[0:i+1,:]==k)*aggregate_beta[0:i+1].reshape(-1,1),axis=0)
    accuracy=numpy.sum(numpy.argmax(f_res,axis=0)==label)/m
    print(accuracy,minError)
    weak_learners.append(tree)
    if accuracy==1:
        break