import numpy
from matplotlib import pyplot
import pandas
# --- Data preparation ---
# Map the text class labels to integer codes.
dic = {'largeDoses': 1, 'smallDoses': 2, 'didntLike': 0}
data = pandas.read_table("./datingTestSet.txt", sep="\t", header=None, na_filter=False)
data.iloc[:, -1] = data.iloc[:, -1].map(dic)
# Keep the first two features plus the label column (index 3), then drop the
# 'smallDoses' class (code 2) so the task becomes binary: 0 vs 1.
data = data.values[:, [0, 1, 3]]
data = data[data[:, -1] != 2]
feature, label = data[:, 0:-1], data[:, -1]
# --- Visualization ---
pyplot.scatter(feature[:, 0], feature[:, 1], c=label)
pyplot.show()
# from sklearn.datasets import make_moons
# feature, label = make_moons(n_samples=500, noise=0.05, random_state=42)
# import matplotlib.pyplot as plt
#
# # 可视化数据
# plt.scatter(feature[:, 0], feature[:, 1],c=label)
# plt.title("Moon-shaped Dataset")
# plt.xlabel("Feature 1")
# plt.ylabel("Feature 2")
# plt.show()
#data=numpy.loadtxt('./testSet.txt',delimiter='\t',encoding='utf-8')
#data=numpy.hstack((data,y.reshape(-1,1)))
def create_stump(weight, feature, label, numStep=100):#决策树桩
    """Train a decision stump (one-level decision tree) on weighted samples.

    Scans every feature dimension with ``numStep`` evenly spaced thresholds
    between that feature's min and max, tries both comparison directions, and
    keeps the split with the lowest weighted 0/1 error.

    Parameters
    ----------
    weight : (m,) array
        Per-sample weights (expected to form a probability distribution).
    feature : (m, n) array
        Sample features.
    label : (m,) array
        Binary labels in {0, 1}.
    numStep : int, optional
        Number of threshold steps per feature (default 100, matching the
        original hard-coded behavior).

    Returns
    -------
    bestStump : dict
        Keys 'dim' (feature index), 'criterion' ('>=' or '<'), 'threshold'.
    minError : float
        Weighted error of the best stump.
    best_predict_label : (m,) array
        Predictions of the best stump on ``feature``.
    """
    m, n = feature.shape
    predict_label = numpy.empty(m)
    best_predict_label = numpy.zeros(m)
    minError = numpy.inf
    bestStump = {}
    for i in range(n):
        # Candidate thresholds: numStep+1 evenly spaced values over the
        # feature's range (an alternative would be midpoints between
        # consecutive sorted values).
        maximum = feature[:, i].max()
        minimum = feature[:, i].min()
        stride = (maximum - minimum) / numStep
        for j in range(numStep + 1):
            # Hoisted out of the criterion loop: the threshold does not
            # depend on the comparison direction.
            value = minimum + j * stride
            for criterion in ['>=', '<']:
                if criterion == '>=':
                    predict_label[feature[:, i] >= value] = 1
                    predict_label[feature[:, i] < value] = 0
                else:
                    predict_label[feature[:, i] < value] = 1
                    predict_label[feature[:, i] >= value] = 0
                # Weighted 0/1 error: the sample weights shape which weak
                # classifier is selected.
                error = numpy.dot(weight, predict_label != label)
                if error < minError:
                    minError = error
                    bestStump['dim'] = i
                    bestStump['criterion'] = criterion
                    bestStump['threshold'] = value
                    best_predict_label = predict_label.copy()
    return bestStump, minError, best_predict_label
# --- AdaBoost training (beta-weighted weak learners, AdaBoost.M1 style) ---
m, n = feature.shape
weight = numpy.ones((m)) / m  # initial uniform sample weights
epoch = 200  # number of weak classifiers to train
aggregate_predict = numpy.empty((epoch, m))
aggregate_beta = numpy.empty(epoch)
weak_learners = []
for i in range(epoch):
    # Normalize the weights into a probability distribution.
    p = weight / numpy.sum(weight)
    # Train a weak classifier (decision stump) on the current distribution.
    bestStump, minError, best_predict_label = create_stump(p, feature, label)
    aggregate_predict[i, :] = best_predict_label  # record this learner's predictions
    # beta = err / (1 - err). Clip the error away from 0 and 1 so that
    # log(1/beta) and the weight update stay finite: a perfect stump
    # (minError == 0) would otherwise zero out every correctly classified
    # weight and make the next round's normalization divide by zero.
    clipped_error = numpy.clip(minError, 1e-10, 1 - 1e-10)
    beta = clipped_error / (1 - clipped_error)
    learner_weight = numpy.log(1 / beta)
    aggregate_beta[i] = learner_weight
    # Weight update: correctly classified samples are multiplied by beta
    # (< 1 for a better-than-chance stump); misclassified samples keep
    # their weight (beta**0 == 1).
    weight = weight * beta ** (1 - (best_predict_label != label))
    bestStump["log(1/beta)"] = learner_weight
    # Weighted-majority vote of all learners trained so far.
    res = numpy.where(numpy.sum(aggregate_predict[0:i + 1, :] * aggregate_beta[0:i + 1].reshape(-1, 1), axis=0) >= (0.5 * numpy.sum(aggregate_beta[0:i + 1])), 1, 0)
    weak_learners.append(bestStump)
    print(numpy.sum(res == label) / m, minError)
