import numpy
import pprint
from matplotlib import pyplot

# dataset = numpy.loadtxt('./ex0.txt', delimiter='\t', encoding='utf-8')[:,1:]
#
# # 画出点的分布
# pyplot.scatter(dataset[:, 0], dataset[:, 1])
# pyplot.show()


# Regression tree (CART) implementation
# 1. Splits are chosen by variance (total squared error) reduction.
# 2. Stopping rules: all target values equal -> no split; variance reduction too
#    small -> cancel the split; a resulting branch too small -> cancel the split.
# 3. A leaf's value is the mean of the targets of the samples it holds.
# Two tunable parameters: 1. the variance-reduction threshold  2. the minimum
# samples per branch. Good results depend on choosing both well.

# Why pre-pruning and post-pruning matter:
# pre-pruning bounds the branch sizes and the variance reduction of each split.

def get_best_split(data, slimit, nlimit):
    """Find the best (feature, value) split of ``data`` by total variance reduction.

    data: 2-D numpy array; the last column is the target value.
    slimit: minimum reduction in total squared error required to keep a split.
    nlimit: minimum number of rows allowed in each branch after a split.

    Returns (feature_index, split_value), or (None, leaf_value) when no
    acceptable split exists (pre-pruning: the node becomes a leaf).
    """
    # All targets identical: splitting cannot help, return the shared value.
    if numpy.unique(data[:, -1]).shape[0] == 1:
        return None, data[0, -1]
    n_rows, n_cols = data.shape
    # Total squared error of the unsplit node (var * n == sum of squared deviations).
    S = numpy.var(data[:, -1]) * n_rows
    bestS = numpy.inf
    spIndex = 0
    spValue = 0
    for i in range(n_cols - 1):
        # Evaluate each distinct value once instead of once per row:
        # duplicate values produce identical partitions, so re-testing
        # them was pure redundant work.
        for value in numpy.unique(data[:, i]):
            right = data[data[:, i] > value]
            left = data[data[:, i] <= value]
            if right.shape[0] == 0:
                continue  # the largest value sends every row to the left
            newS = numpy.var(right[:, -1]) * right.shape[0] + numpy.var(left[:, -1]) * left.shape[0]
            if newS < bestS:
                bestS = newS
                spIndex = i
                spValue = value
    # Cancel the split when the error reduction is too small.
    if S - bestS < slimit:
        return None, numpy.mean(data[:, -1])
    # Cancel the split when either branch would hold too few rows.
    right = data[data[:, spIndex] > spValue]
    left = data[data[:, spIndex] <= spValue]
    if right.shape[0] < nlimit or left.shape[0] < nlimit:
        return None, numpy.mean(data[:, -1])
    return spIndex, spValue
#构建最复杂的回归树，则每组数据的最小数据量为1，方差变化界限为0
def regression_tree(data, slimit, nlimit):
    """Recursively grow a CART regression tree over ``data``.

    Returns a leaf value (float) when no worthwhile split exists,
    otherwise a dict with keys 'spIndex', 'spValue', 'right', 'left'.
    """
    feature, value = get_best_split(data, slimit, nlimit)
    # A None feature means get_best_split decided this node is a leaf.
    if feature is None:
        return value
    in_left = data[:, feature] <= value
    return {
        'spIndex': feature,
        'spValue': value,
        'right': regression_tree(data[~in_left], slimit, nlimit),
        'left': regression_tree(data[in_left], slimit, nlimit),
    }
#对数据进行预测
def predict(tree, X):
    """Descend the regression tree for sample X and return the leaf value reached."""
    # Walk downward until we hit a non-dict node, i.e. a leaf value.
    while isinstance(tree, dict):
        branch = 'left' if X[tree['spIndex']] <= tree['spValue'] else 'right'
        tree = tree[branch]
    return tree
# tree=regression_tree(dataset,1,4)
# print(predict(tree,[0.2]))
#pprint.pprint(regression_tree(dataset))

# Tree pruning, to prevent overfitting.
# Post-pruning:
# prune the fully-grown tree back to obtain a better regression tree.
def get_mean(tree):
    """Collapse a subtree into a single leaf value by recursively averaging.

    NOTE: replaces any dict child with its collapsed value *in place*
    before averaging, mirroring the original side effect.
    """
    for side in ('left', 'right'):
        if isinstance(tree[side], dict):
            tree[side] = get_mean(tree[side])
    return (tree['right'] + tree['left']) / 2
#递归合并叶子节点
def prune(tree,datatest):
    """Post-prune ``tree`` against a held-out test set ``datatest``.

    Recursively merges sibling leaves whenever the merged leaf yields a
    lower squared error on the test rows reaching this node.  Returns the
    pruned subtree (dict) or a collapsed leaf value (float).  Mutates
    ``tree`` in place as children are pruned.
    """
    if datatest.shape[0]==0:#no test rows reach this node: collapse the whole subtree to its mean
        return get_mean(tree)#get_mean is only ever called on a dict (an internal node)
    if isinstance(tree['left'],dict) or isinstance(tree['right'],dict):#at least one child is a subtree: split the test rows and recurse
        right = datatest[datatest[:, tree['spIndex']] > tree['spValue']]
        left = datatest[datatest[:, tree['spIndex']] <= tree['spValue']]
    if isinstance(tree['left'], dict):
        tree['left']=prune(tree['left'],left)
    if isinstance(tree['right'], dict):
        tree['right']=prune(tree['right'],right)
    if not isinstance(tree['left'], dict) and not isinstance(tree['right'], dict):#both children are leaves now: try to merge them
        right = datatest[datatest[:, tree['spIndex']] > tree['spValue']]
        left = datatest[datatest[:, tree['spIndex']] <= tree['spValue']]
        #compare the test error of the merged leaf against that of keeping the split
        tree_mean=(tree['right']+tree['left'])/2
        if numpy.sum((datatest[:,-1]-tree_mean)**2)<numpy.sum((left[:,-1]-tree['left'])**2)+numpy.sum((right[:,-1]-tree['right'])**2):
            print('合并')
            return tree_mean
        else:
            return tree
    #one child is still a subtree after recursion: nothing to merge at this node
    return tree
def classify(tree, data_test):
    """Partition ``data_test`` along the tree's splits.

    Returns a tree mirroring ``tree``'s structure whose leaves are the
    subsets of ``data_test`` rows that reach them.
    """
    if not isinstance(tree, dict):
        return data_test  # leaf: every remaining row belongs here
    column = data_test[:, tree['spIndex']]
    above = data_test[column > tree['spValue']]
    below = data_test[column <= tree['spValue']]
    return {
        'spIndex': tree['spIndex'],
        'spValue': tree['spValue'],
        'right': classify(tree['right'], above),
        'left': classify(tree['left'], below),
    }


#data_test=numpy.loadtxt('./ex2test.txt', delimiter='\t', encoding='utf-8')
# Load the training set: tab-separated text, feature column(s) plus a target column.
data_train=numpy.loadtxt('./exp2.txt', delimiter='\t', encoding='utf-8')
# Grow a nearly fully-grown tree: tiny variance threshold, minimum branch size 1.
tree=regression_tree(data_train,0.0001,1)
#pprint.pprint(prune(tree,data_test))# post-pruning alone does not give good results; combine pre-pruning and post-pruning for a better tree
pprint.pprint(tree)
# Evaluate the tree on a dense grid over [0, 1] and plot it against the data.
li=[]
x=numpy.arange(0,1.005,0.005)
for i in x:
    li.append(predict(tree,numpy.array([i])))
pyplot.scatter(data_train[:, 0], data_train[:, 1],s=1)
pyplot.plot(x,li)
pyplot.show()
