# -*- coding:utf-8 -*-
# Decision tree 决策树，ID3\C4.5算法

import pandas as pd
import math

# 测试数据集，《统计学习方法》P59，贷款申请样本数据集
def createDataSet():
    """Build the toy loan-application data set (《统计学习方法》 p.59).

    Each row is [age, income, student, credit_rating, class-label],
    where the first four columns are integer-coded features and the
    last column is the 'yes'/'no' class.

    Returns:
        (rows, names): the list of sample rows and the column names.
    """
    rows = [
        [0, 2, 0, 0, 'no'],
        [0, 2, 0, 1, 'no'],
        [1, 2, 0, 0, 'yes'],
        [2, 1, 0, 0, 'yes'],
        [2, 0, 1, 0, 'yes'],
        [2, 0, 1, 1, 'no'],
        [1, 0, 1, 1, 'yes'],
        [0, 1, 0, 0, 'no'],
        [0, 0, 1, 0, 'yes'],
        [2, 1, 1, 0, 'yes'],
        [0, 1, 1, 1, 'yes'],
        [1, 1, 0, 1, 'yes'],
        [1, 2, 1, 0, 'yes'],
        [2, 1, 0, 1, 'no'],
    ]
    names = ['age', 'income', 'student', 'credit_rating', 'buy_computer']
    return rows, names


# 计算经验熵，《统计学习方法》P62，公式5.7
# 计算经验熵，《统计学习方法》P62，公式5.7
def cal_empirical_entropy(data_vector):
    """Compute per-feature entropies for a labelled DataFrame.

    The last column of *data_vector* is the class label; every other
    column is a feature. For each feature A this returns:
      'p*entr' : H(D|A), the empirical conditional entropy of the class
                 given A (《统计学习方法》 formula 5.8),
      'entr'   : H_A(D), the entropy of the feature's own value
                 distribution (used as the gain-ratio denominator).

    Returns:
        dict mapping feature name -> {'p*entr': float, 'entr': float}.

    Fixes over the original: drops the dead `counts_by_labels`
    bookkeeping dict, removes the shadowed `clss` comprehension
    variable and the misspelled `entroy`, and filters rows with a
    boolean mask instead of repeated `.isin`/`.loc` round-trips.
    """
    def _entropy(series):
        # -sum(p * log2 p) over the value distribution of the series.
        return -sum(p * math.log2(p)
                    for p in series.value_counts(normalize=True))

    labels = data_vector.iloc[:, -1]
    entropy = {}
    for feature in data_vector.columns[:-1]:
        col = data_vector[feature]
        # H(D|A) = sum_v p(A=v) * H(labels restricted to A=v)
        conditional = sum(
            weight * _entropy(labels[col == value])
            for value, weight in col.value_counts(normalize=True).items()
        )
        entropy[feature] = {'p*entr': conditional, 'entr': _entropy(col)}
    return entropy

"""
根据每个特征划分数据集
data_vector
f_name：特征的索引
f_value：用来划分的特征取值
返回划分后的子数据及样本数，和子数据集（子数据集剔除了第i列特征）
"""
def split_datatset(data_vector,f_name, f_value):
    return data_vector.loc[data_vector[f_name].isin([f_value])]

# 选择最优分类特征
def choose_best_feature(data_vector, mode):
    """Pick the best splitting feature by information gain or gain ratio.

    data_vector: DataFrame whose last column is the class label.
    mode:        'ID3' (maximize information gain) or 'C45'
                 (maximize gain ratio).

    Returns the name of the chosen feature. In 'C45' mode a feature
    whose own entropy is zero (a single-valued column) is returned
    immediately to avoid a zero division in the gain-ratio formula.

    Raises:
        ValueError: if *mode* is neither 'ID3' nor 'C45' (the original
        fell through to a confusing NameError on `result`).
    """
    empirical_entropy = cal_empirical_entropy(data_vector)  # per-feature H(D|A), H_A(D)
    # H(D): entropy of the class-label column.
    father_entropy = -sum(
        p * math.log2(p)
        for p in data_vector.iloc[:, -1].value_counts(normalize=True)
    )
    if mode == 'ID3':
        # Information gain g(D, A) = H(D) - H(D|A).
        result = {fea: father_entropy - entr['p*entr']
                  for fea, entr in empirical_entropy.items()}
    elif mode == 'C45':
        for fea, entr in empirical_entropy.items():
            if entr['entr'] == 0:
                return fea
        # Gain ratio g_R(D, A) = g(D, A) / H_A(D).
        result = {fea: (father_entropy - entr['p*entr']) / entr['entr']
                  for fea, entr in empirical_entropy.items()}
    else:
        raise ValueError("mode must be 'ID3' or 'C45', got %r" % (mode,))
    # First feature reaching the maximum score (same tie-break as before).
    return max(result, key=result.get)

# 返回类列表中出现次数最多的类标签
def max_class(label_list):
    """Return the most frequent label in *label_list*.

    Ties are broken in favour of the label that appears first.
    """
    tally = {}
    for lab in label_list:
        tally[lab] = tally.get(lab, 0) + 1
    best_label, best_count = None, -1
    for lab, cnt in tally.items():
        if cnt > best_count:          # strict '>' keeps the first-seen winner
            best_label, best_count = lab, cnt
    return best_label

# 决策树的生成
# 决策树的生成
class Decision_tree(object):
    """ID3 / C4.5 decision-tree classifier (《统计学习方法》 ch. 5).

    The learned model is a nested dict: {feature: {value: subtree-or-leaf}},
    where a leaf is a class label.
    """

    def __init__(self, mode='ID3'):
        # Tree-building criterion: 'ID3' (information gain) or 'C45' (gain ratio).
        self._mode = mode

    def fit(self, dataSet, labels):
        """Build the tree from a list of sample rows.

        dataSet: list of rows; the last element of each row is the class label.
        labels:  column names for the rows (features + class column).
        """
        self.data = dataSet
        self.labels = labels
        self.tree = self.creat_decision_tree(self.data, self.labels)
        # Distinct class labels seen in training.
        # BUG FIX: the original `set(self.data[:][-1])` built the set of
        # values in the LAST ROW, not the set of class labels.
        self.results = {row[-1] for row in self.data}

    def model(self):
        """Return the learned tree (nested dicts; leaves are class labels)."""
        return self.tree

    def predict(self, data):
        """Classify each row of DataFrame *data*; return a list of labels.

        Raises KeyError for a feature value never seen during training.
        """
        # (the original loop variable shadowed the builtin `len`)
        return [self.check_node(self.tree, data.iloc[i, :])
                for i in range(data.shape[0])]

    def check_node(self, tr, data):
        """Recursively descend *tr* following the row's feature values
        until a leaf (anything that is not a dict) is reached."""
        fea = list(tr.keys())[0]
        next_tr = tr[fea][data[fea]]
        if not isinstance(next_tr, dict):
            return next_tr
        return self.check_node(next_tr, data)

    def creat_decision_tree(self, data, labels):
        """Recursive subtree builder (《统计学习方法》 ID3/C4.5 algorithm).

        data:   rows (or DataFrame) of the current partition
        labels: remaining column names; the class column stays last
        Returns a leaf label or a nested-dict subtree.
        """
        # Re-framing with the shrunken label list also drops the columns
        # of features already used higher up the tree.
        data = pd.DataFrame(data, columns=labels)
        class_values = data.iloc[:, -1].values
        # All samples share one class: return it as a leaf.
        if len(set(class_values)) == 1:
            return class_values[0]
        # Only the class column remains: fall back to a majority vote.
        if data.shape[1] == 1:
            return max_class(class_values)
        best_feature = choose_best_feature(data, mode=self._mode)
        # Child calls get the label list without the chosen feature.
        next_labels = [lab for lab in labels if lab != str(best_feature)]
        my_tree = {best_feature: {}}
        for f_value in set(data[best_feature]):
            subset = split_datatset(data, best_feature, f_value)
            my_tree[best_feature][f_value] = self.creat_decision_tree(subset, next_labels)
        return my_tree

    def cart(self):
        # CART algorithm: see the follow-up post (not implemented here).
        pass


if __name__ == '__main__':
    dataSet, labels = createDataSet()

    # mode should be 'ID3' or 'C45'
    tree = Decision_tree(mode="C45")
    tree.fit(dataSet, labels)
    test_data = pd.DataFrame(
        [[0, 1, 1, 1, 'yes'],
         [1, 1, 0, 1, 'yes'],
         [1, 2, 1, 0, 'yes'],
         [2, 1, 0, 1, 'no']],
        columns=['age', 'income', 'student', 'credit_rating', 'buy_computer'])
    # BUG FIX: predict() was previously called twice and the first
    # result thrown away — predict once and print it.
    print(tree.predict(test_data))
    # Ground-truth labels for comparison.
    print(list(test_data.iloc[:, -1]))