import threading
import numpy
from matplotlib import pyplot

# XGBoost-style regression with the squared-error loss function.
# Load the sample dataset; the first column of the raw file is dropped,
# leaving the feature in column 0 and the target in column 1.
data=numpy.loadtxt("./ex0.txt",delimiter="\t")[:,1:]
# Visualize the raw data
# pyplot.scatter(data[:,0],data[:,1],s=3)
# pyplot.show()
def get_best_split(data,slimit,nlimit,lamb,gamma,y_k):
    """Find the best axis-aligned split for one node of an XGBoost-style tree.

    For squared loss the per-row gradient is g_i = y_k_i - y_i and the
    hessian is 1, so G = sum(y_k - y), H = n, and the optimal leaf weight
    is -G / (H + lamb).

    Parameters:
        data   -- ndarray (n, d+1): feature columns [:-1], target column -1
        slimit -- minimum loss reduction (gain) required to keep a split
        nlimit -- reserved: minimum samples per child (currently unused)
        lamb   -- L2 regularization strength on leaf weights (lambda)
        gamma  -- per-leaf complexity penalty
        y_k    -- ndarray (n,): current ensemble prediction for each row

    Returns:
        (feature_index, threshold) for a worthwhile split, or
        (None, leaf_weight) when the node should become a leaf.
    """
    n = data.shape[0]
    G = numpy.sum(y_k - data[:, -1])  # sum of gradients over the node
    H = n                             # sum of hessians (all 1 for squared loss)
    if numpy.unique(data[:, -1]).shape[0] == 1:
        # All targets identical: no split needed, return the scalar leaf
        # weight. (Bug fix: the original returned the *vector*
        # -(y_k-data[0,-1])/(1+lamb) with the wrong denominator.)
        return None, -G / (H + lamb)
    # Structure score of the node left unsplit (one leaf -> one gamma).
    S = -0.5 * G**2 / (H + lamb) + gamma
    bestS = numpy.inf
    spIndex = 0
    spValue = 0
    for i in range(data.shape[1] - 1):      # every feature column
        for j in range(n):                  # every observed value as threshold
            mask = data[:, i] <= data[j, i]
            left = data[mask]
            right = data[~mask]
            if right.shape[0] == 0:         # degenerate split: skip
                continue
            Gl = numpy.sum(y_k[mask] - left[:, -1])
            Gr = numpy.sum(y_k[~mask] - right[:, -1])
            Hl = left.shape[0]
            Hr = right.shape[0]
            # Combined structure score of the two children (two leaves -> 2*gamma).
            newS = -0.5 * Gr**2 / (Hr + lamb) - 0.5 * Gl**2 / (Hl + lamb) + 2 * gamma
            if newS < bestS:
                bestS = newS
                spIndex = i
                spValue = data[j, i]
    # Abandon the split when the gain (S - bestS) is too small.
    if S < bestS or S - bestS < slimit:
        return None, -G / (H + lamb)
    return spIndex, spValue
# To grow the most complex possible tree, pass nlimit=1 and slimit=0.
def regression_tree(data,slimit,nlimit,height,lamb,gamma,y_k):
    """Recursively grow an XGBoost-style regression tree.

    Returns either a leaf weight (scalar) or a dict node with keys
    'spIndex', 'spValue', 'left', 'right'.
    """
    spIndex, spValue = get_best_split(data,slimit,nlimit,lamb,gamma,y_k)
    if spIndex is None:
        # No worthwhile split: spValue already holds the leaf weight.
        return spValue
    if height == 0:
        # Depth limit reached: emit a leaf with the optimal weight -G/(H+lamb).
        G = numpy.sum(y_k - data[:, -1])
        H = data.shape[0]
        return -G / (H + lamb)
    # Partition rows (and their current predictions) once with a boolean mask.
    mask = data[:, spIndex] <= spValue
    node = {'spIndex': spIndex, 'spValue': spValue}
    node['left'] = regression_tree(data[mask],slimit,nlimit,height-1,lamb,gamma,y_k[mask])
    node['right'] = regression_tree(data[~mask],slimit,nlimit,height-1,lamb,gamma,y_k[~mask])
    return node
# Predict a single sample by walking the tree.
def predict(tree,X):
    """Walk the tree for feature vector X; any non-dict node is a leaf weight."""
    node = tree
    while isinstance(node, dict):
        branch = 'left' if X[node['spIndex']] <= node['spValue'] else 'right'
        node = node[branch]
    return node
def get_res(tree,data):
    """Predict every row of `data` (last column is the target and is dropped)."""
    return numpy.array([predict(tree, row[:-1]) for row in data])
# Boosting driver. NOTE(review): despite the original "AdaBoost.R2" label,
# this loop is additive gradient boosting — each tree is fit against the
# current ensemble prediction y0 and its output is added to y0.
height=3
epoch=50
m,n=data.shape
# NOTE(review): aggregate_predict is allocated but never written — dead variable?
aggregate_predict=numpy.empty((epoch,m))
weak_learners=[]
# Initial prediction: the mean of the targets (also stored as learner 0).
y0=numpy.ones(m)*numpy.sum(data[:,-1])/m
weak_learners.append(numpy.sum(data[:,-1])/m)
lam=1
gamma=1
for i in range(epoch):
    # Fit a depth-limited tree given the current ensemble prediction y0.
    tree=regression_tree(data,0.001,1,height,lam,gamma,y0)
    res=get_res(tree,data)
    # Additive update: fold the new tree's output into the ensemble prediction.
    y0=y0+res
    weak_learners.append(tree)
# Plot the raw targets and the final ensemble fit.
pyplot.scatter(data[:,0],data[:,1],s=3)
pyplot.scatter(data[:,0],y0,s=3)
pyplot.show()
# Unlike GBDT, which fits each tree directly to the negative gradient, xgboost
# does not fit the gradient itself — it optimizes a second-order (gradient +
# hessian) approximation of the loss when choosing splits and leaf weights.