# -*- coding: UTF-8 -*-
from math import log
import draw_tree_util

class DecisionTree:
    """ID3 decision-tree classifier built from a tab-separated training file.

    The file's first row holds the feature labels; every following row is one
    sample whose last column is the class label.
    """

    def __init__(self, file):
        """Load the TSV training file into ``self.labels`` and ``self.data_set``.

        :param file: path to a tab-separated file (header row = feature labels).
        """
        self.data_set = []
        self.labels = []
        # Context manager guarantees the file handle is closed (the original
        # left it open).
        with open(file) as fh:
            is_header = True
            for line in fh:
                fields = line.strip('\n\r').split('\t')
                if is_header:
                    self.labels = fields
                    is_header = False
                else:
                    self.data_set.append(fields)

    def des(self):
        """Print the loaded labels and data set (debug helper)."""
        print("labels:")
        print(self.labels)
        print("data_set:")
        print(self.data_set)

    def calc_shannon_ent(self, data_set):
        """Return the Shannon entropy of the class column of *data_set*.

        :param data_set: list of rows; the last element of each row is the class.
        :return: entropy in bits (log base 2) as a float.
        """
        numEntries = len(data_set)
        # Count how many rows fall in each class.
        labelCounts = {}
        for featVec in data_set:
            currentLabel = featVec[-1]
            labelCounts[currentLabel] = labelCounts.get(currentLabel, 0) + 1
        shannon_ent = 0.0
        for count in labelCounts.values():
            prob = float(count) / numEntries
            # log base 2 -> entropy measured in bits
            shannon_ent -= prob * log(prob, 2)
        return shannon_ent

    def split_data_set(self, data_set, axis, value):
        """Return the rows whose feature *axis* equals *value*, with that
        feature column removed.

        :param data_set: list of rows.
        :param axis: index of the feature column to test and drop.
        :param value: value the feature must equal for a row to be kept.
        """
        return [featVec[:axis] + featVec[axis + 1:]
                for featVec in data_set
                if featVec[axis] == value]

    def choose_best_feature_to_split(self, data_set):
        """Return the index of the feature with the highest information gain,
        or -1 if no split improves on the base entropy."""
        # Last column is the class, so it is not a candidate feature.
        numFeatures = len(data_set[0]) - 1
        # Entropy of the class column before any split.
        baseEntropy = self.calc_shannon_ent(data_set)
        bestInfoGain = 0.0
        best_feature = -1
        for i in range(numFeatures):
            # All distinct values the i-th feature takes in this data set.
            uniqueVals = set(example[i] for example in data_set)
            # Weighted entropy of the partitions induced by feature i.
            newEntropy = 0.0
            for value in uniqueVals:
                subDataSet = self.split_data_set(data_set, i, value)
                prob = len(subDataSet) / float(len(data_set))
                newEntropy += prob * self.calc_shannon_ent(subDataSet)
            infoGain = baseEntropy - newEntropy
            if infoGain > bestInfoGain:
                bestInfoGain = infoGain
                best_feature = i
        return best_feature

    def majority_cnt(self, class_list):
        """Return the most frequent class in *class_list* (majority vote).

        Fixed: the original called ``dict.iteritems`` (Python 2 only) and used
        ``operator`` without importing it — both crash under Python 3.
        """
        class_count = {}
        for vote in class_list:
            class_count[vote] = class_count.get(vote, 0) + 1
        sortedClassCount = sorted(class_count.items(),
                                  key=lambda item: item[1], reverse=True)
        return sortedClassCount[0][0]

    def create_tree(self, data_set, labels):
        """Recursively build an ID3 tree as nested dicts.

        :param data_set: list of rows (last column = class).
        :param labels: feature names aligned with the data columns; the caller's
            list is NOT modified (the original mutated it in place).
        :return: a class label (leaf) or ``{feature_label: {value: subtree}}``.
        """
        # Work on a copy so the caller's label list survives intact.
        labels = list(labels)
        classList = [example[-1] for example in data_set]
        # All rows share one class: this is a leaf.
        if classList.count(classList[0]) == len(classList):
            return classList[0]
        # Only the class column remains: fall back to majority vote.
        if len(data_set[0]) == 1:
            return self.majority_cnt(classList)
        # Split on the feature with the highest information gain.
        bestFeat = self.choose_best_feature_to_split(data_set)
        bestFeatLabel = labels[bestFeat]
        myTree = {bestFeatLabel: {}}
        # Remove the used label so sub-trees index the reduced rows correctly.
        del labels[bestFeat]
        # Recurse once per distinct value of the chosen feature.
        featValues = [example[bestFeat] for example in data_set]
        uniqueVals = set(featValues)
        for value in uniqueVals:
            subLabels = labels[:]
            myTree[bestFeatLabel][value] = self.create_tree(
                self.split_data_set(data_set, bestFeat, value), subLabels)
        return myTree

    def generator_tree(self):
        """Build and return the decision tree for the loaded data set."""
        tree = self.create_tree(self.data_set, self.labels)
        return tree


if __name__ == '__main__':
    # Demo: load the sample data, build the tree, print it, then plot it.
    decision_tree = DecisionTree('./test/test1.txt')
    decision_tree.des()
    result_tree = decision_tree.generator_tree()
    print(result_tree)
    plotter = draw_tree_util.DrawTreeUtil()
    plotter.createPlot(result_tree)

