
def cal_gini_index(data):
    '''
    Compute the Gini index of a dataset.
    :param data: dataset; the last element of each row is the class label
    :return: Gini index (0 for an empty dataset, 0.0 for a pure one)
    '''
    n = len(data)
    if n == 0:
        return 0

    # Tally how many samples carry each class label (last column of every row).
    counts = {}
    for row in data:
        counts[row[-1]] = counts.get(row[-1], 0) + 1

    # Gini = 1 - sum_i (count_i / n)^2
    squared_sum = sum(c * c for c in counts.values())
    return 1 - float(squared_sum) / (n * n)

def label_uniq_cnt(data):
    '''
    Count the occurrences of each distinct class label in a dataset.
    :param data: dataset; the last element of each row is the class label
    :return: dict mapping each label to its sample count
    '''
    counts = {}
    for sample in data:
        # The class label is the last field of the sample.
        label = sample[-1]
        counts[label] = counts.get(label, 0) + 1
    return counts

class node(object):
    '''
    A single node of the classification tree.

    A leaf stores its label counts in `results`; an internal node stores the
    split criterion (`fea`, `value`) and its two subtrees.
    '''
    def __init__(self, fea=-1, value=None, results=None, right=None, left=None):
        '''
        :param fea: column index of the feature used to split at this node (-1 when unused)
        :param value: threshold the feature is compared against
        :param results: class-label counts held by a leaf; None for internal nodes
        :param right: right subtree (samples whose feature value >= threshold)
        :param left: left subtree (samples whose feature value < threshold)
        '''
        self.fea = fea
        self.value = value
        self.results = results
        self.right = right
        self.left = left

def build_tree(data):
    '''
    Recursively build a CART classification tree.
    :param data: training samples; each row is feature values followed by the class label
    :return: root `node` of the built (sub)tree
    '''
    if len(data) == 0:
        return node()

    # Gini impurity of this node before any split.
    currentGini = cal_gini_index(data)

    bestGain = 0.0
    bestCriteria = None  # (feature index, split value) of the best split found
    bestSets = None      # (right subset, left subset) of the best split found

    feature_num = len(data[0]) - 1  # last column is the label, not a feature
    # Exhaustively try every (feature, value) pair and keep the split with the
    # largest reduction in Gini impurity.
    for fea in range(feature_num):
        # Distinct values of this feature, in first-appearance order
        # (dict.fromkeys preserves insertion order, so tie-breaking among
        # equal-gain splits matches a first-seen-wins scan of the data).
        for value in dict.fromkeys(sample[fea] for sample in data):
            set_1, set_2 = split_tree(data, fea, value)

            # Size-weighted Gini impurity after the split.
            nowGini = float(len(set_1) * cal_gini_index(set_1) +
                            len(set_2) * cal_gini_index(set_2)) / len(data)
            gain = currentGini - nowGini

            # Accept the split only if it improves purity and both sides are non-empty.
            if gain > bestGain and set_1 and set_2:
                bestGain = gain
                bestCriteria = (fea, value)
                bestSets = (set_1, set_2)

    if bestGain > 0:
        # Internal node: recurse on the two subsets of the best split.
        right = build_tree(bestSets[0])
        left = build_tree(bestSets[1])
        return node(fea=bestCriteria[0], value=bestCriteria[1], right=right, left=left)
    # No useful split remains: emit a leaf holding the label counts.
    return node(results=label_uniq_cnt(data))

def split_tree(data, fea, value):
    '''
    Partition a dataset in two by comparing one feature against a threshold.
    :param data: dataset (list of rows)
    :param fea: index of the feature to split on
    :param value: threshold value for the split
    :return: (rows whose feature >= value, the remaining rows)
    '''
    greater, lesser = [], []
    for row in data:
        # Route each row to exactly one side of the split.
        target = greater if row[fea] >= value else lesser
        target.append(row)
    return greater, lesser

def predict(sample, tree):
    '''
    Classify a single sample by walking the tree down to a leaf.
    :param sample: feature vector of the sample to classify
    :param tree: root node of a built classification tree
    :return: the label-count dict stored at the reached leaf
    '''
    # Leaf node: `results` holds the class-label counts for this leaf.
    if tree.results is not None:
        return tree.results
    # Internal node: go right when the sample's feature value reaches the
    # threshold, mirroring the >= rule used by split_tree at build time.
    branch = tree.right if sample[tree.fea] >= tree.value else tree.left
    return predict(sample, branch)