# -*- coding: UTF-8 -*-
from math import log
import matplotlib.pyplot as plt
import draw_tree_util


def create_data_set():
    """Build the toy "is it a fish?" dataset.

    Each sample is [can survive without surfacing, has flippers, class label].
    Returns:
        (samples, feature_names): the 5-row dataset and the names of the
        two boolean features, in column order.
    """
    samples = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    feature_names = ['no surfacing', 'flippers']
    return samples, feature_names


def calc_shannon_ent(data_set):
    """Compute the Shannon entropy of the class labels in *data_set*.

    The class label is taken from the last column of every row.

    Args:
        data_set: list of rows; each row's final element is its class label.
    Returns:
        Entropy in bits (log base 2) of the label distribution; 0.0 for a
        pure (single-class) or empty dataset.
    """
    total = len(data_set)
    # Tally how often each class label occurs.
    label_counts = {}
    for row in data_set:
        label = row[-1]
        label_counts[label] = label_counts.get(label, 0) + 1
    # H = -sum(p * log2(p)) over the label probabilities.
    entropy = 0.0
    for count in label_counts.values():
        prob = float(count) / total
        entropy -= prob * log(prob, 2)
    return entropy


def split_data_set(data_set, axis, value):
    """Select the rows whose feature at *axis* equals *value*.

    The matched feature column itself is removed from every returned row,
    so the result is ready for the next level of tree construction.

    Args:
        data_set: list of feature rows.
        axis: index of the feature column to filter on.
        value: feature value a row must have to be kept.
    Returns:
        New list of rows (originals are not modified) with column *axis*
        stripped out.
    """
    return [
        row[:axis] + row[axis + 1:]
        for row in data_set
        if row[axis] == value
    ]


def choose_best_feature_to_split(data_set):
    """Pick the feature with the largest information gain (ID3 criterion).

    Requirements on *data_set*: every row is a list of equal length, and
    the last element of each row is the class label.  For each candidate
    feature, the dataset is partitioned by that feature's distinct values,
    the weighted entropy of the partitions is summed, and the feature whose
    split reduces entropy the most is chosen.

    Returns:
        Index of the best feature, or -1 when no split improves on the
        base entropy.
    """
    feature_count = len(data_set[0]) - 1      # last column is the label
    base_entropy = calc_shannon_ent(data_set)
    total = float(len(data_set))
    best_gain, best_feature = 0.0, -1
    for feat_idx in range(feature_count):
        # Distinct values this feature takes across the dataset.
        distinct_values = {row[feat_idx] for row in data_set}
        # Weighted entropy after splitting on this feature.
        split_entropy = 0.0
        for val in distinct_values:
            subset = split_data_set(data_set, feat_idx, val)
            weight = len(subset) / total
            split_entropy += weight * calc_shannon_ent(subset)
        gain = base_entropy - split_entropy
        if gain > best_gain:
            best_gain, best_feature = gain, feat_idx
    return best_feature


def majority_cnt(class_list):
    """Return the most frequent class label in *class_list*.

    Used as the tie-breaker when all features are exhausted but the
    remaining samples still belong to more than one class.

    Args:
        class_list: non-empty list of class labels.
    Returns:
        The label with the highest occurrence count.
    """
    class_count = {}
    for vote in class_list:
        class_count[vote] = class_count.get(vote, 0) + 1
    # Bug fix: the original used dict.iteritems() (Python 2 only) and the
    # never-imported `operator` module, raising at runtime under Python 3.
    # max() with a key gives the same "highest count wins" result without
    # sorting the whole dict.
    return max(class_count.items(), key=lambda item: item[1])[0]


def create_tree(data_set, labels):
    """Recursively build an ID3 decision tree as nested dicts.

    Recursion stops when (a) every remaining sample shares one class label
    (leaf node), or (b) all features are used up, in which case the
    majority class decides the leaf.

    Args:
        data_set: list of rows; last column of each row is the class label.
        labels: feature names aligned with the columns of *data_set*.
            NOTE: this list is mutated in place (the chosen feature's name
            is deleted), matching the original behavior callers rely on.
    Returns:
        Either a class label (leaf) or a dict of the form
        {feature_name: {feature_value: subtree, ...}}.
    """
    class_labels = [row[-1] for row in data_set]
    # Stop: all samples agree on one class -> leaf node.
    if class_labels.count(class_labels[0]) == len(class_labels):
        return class_labels[0]
    # Stop: only the label column remains -> majority vote decides.
    if len(data_set[0]) == 1:
        return majority_cnt(class_labels)
    # Split on the feature with the greatest information gain.
    best_index = choose_best_feature_to_split(data_set)
    best_name = labels[best_index]
    tree = {best_name: {}}
    # Consume the feature name so deeper levels don't reuse it.
    del labels[best_index]
    for feat_value in {row[best_index] for row in data_set}:
        # Pass a copy of the remaining labels into each branch.
        branch_labels = labels[:]
        subtree = create_tree(split_data_set(data_set, best_index, feat_value), branch_labels)
        tree[best_name][feat_value] = subtree
    return tree


def getNumLeafs(myTree):
    """Recursively count the leaf nodes of a nested-dict decision tree.

    Args:
        myTree: tree of the form {feature_name: {value: subtree_or_label}}.
    Returns:
        Total number of non-dict (leaf) entries in the tree.
    """
    root_label = next(iter(myTree))
    leaf_total = 0
    for child in myTree[root_label].values():
        # A dict child is an internal node; anything else is a leaf.
        if isinstance(child, dict):
            leaf_total += getNumLeafs(child)
        else:
            leaf_total += 1
    return leaf_total


def getTreeDepth(myTree):
    """Recursively compute the depth of a nested-dict decision tree.

    A tree whose children are all leaves has depth 1.

    Args:
        myTree: tree of the form {feature_name: {value: subtree_or_label}}.
    Returns:
        Number of decision levels on the deepest path.
    """
    root_label = next(iter(myTree))
    branch_depths = [
        1 + getTreeDepth(child) if isinstance(child, dict) else 1
        for child in myTree[root_label].values()
    ]
    return max(branch_depths) if branch_depths else 0


# Text-box and arrow styles for the matplotlib tree annotations.
decisionNode = dict(boxstyle="sawtooth", fc="0.8")  # internal decision node box
leafNode = dict(boxstyle="round4", fc="0.8")        # leaf (class label) box
arrow_args = dict(arrowstyle="<-")                  # parent-to-child arrow

'''
Draw one annotated node box with an arrow pointing from parentPt to centerPt.
'''
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
    # createPlot.ax1 is the axes attached as a function attribute by
    # createPlot(); both points are in axes-fraction coordinates.
    createPlot.ax1.annotate(nodeTxt, xy=parentPt,  xycoords='axes fraction',
             xytext=centerPt, textcoords='axes fraction',
             va="center", ha="center", bbox=nodeType, arrowprops=arrow_args )


def plotMidText(cntrPt, parentPt, txtString):
    """Write *txtString* at the midpoint of the edge from parentPt to cntrPt.

    Used to label tree branches with the feature value that selects them.
    """
    mid_x = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0]
    mid_y = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1]
    # Drawn on the shared axes created by createPlot().
    createPlot.ax1.text(mid_x, mid_y, txtString, va="center", ha="center", rotation=30)


def plotTree(myTree, parentPt, nodeTxt):
    """Recursively draw *myTree* below *parentPt*, labeling the incoming
    edge with *nodeTxt*.

    Relies on function attributes set up by createPlot():
    plotTree.totalW/totalD (total leaves/depth, used as scale factors) and
    plotTree.xOff/yOff (running cursor position, mutated during traversal).
    """
    numLeafs = getNumLeafs(myTree)
    depth = getTreeDepth(myTree)
    firstStr = list(myTree.keys())[0]
    # Center this subtree's root horizontally over the leaves it will span.
    cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW, plotTree.yOff)
    plotMidText(cntrPt, parentPt, nodeTxt)
    plotNode(firstStr, cntrPt, parentPt, decisionNode)
    secondDict = myTree[firstStr]
    # Descend one layer before drawing the children.
    plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            # Internal node: recurse; the child updates the cursors itself.
            plotTree(secondDict[key], cntrPt, str(key))
        else:
            # Leaf: advance the x cursor one slot and draw the leaf box.
            plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
    # Restore the y cursor for this subtree's siblings.
    plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD


'''
Render the decision tree *inTree* in a matplotlib figure and show it.
'''
def createPlot(inTree):
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    axprops = dict(xticks=[], yticks=[])
    # Shared axes used by plotNode/plotMidText via this function attribute.
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)    #no ticks
    # Seed plotTree's layout state: scale by total leaves/depth, start the
    # x cursor half a slot left of the axes and the y cursor at the top.
    plotTree.totalW = float(getNumLeafs(inTree))
    plotTree.totalD = float(getTreeDepth(inTree))
    plotTree.xOff = -0.5/plotTree.totalW
    plotTree.yOff = 1.0
    plotTree(inTree, (0.5, 1.0), '')
    plt.show()


if __name__ == '__main__':
    # Demo: build the toy dataset, train the ID3 tree, and draw it.
    dataSet, labels = create_data_set()
    print(dataSet)
    # shannonEnt = calc_shannon_ent(dataSet)
    # print(shannonEnt)
    best_feature = choose_best_feature_to_split(dataSet)
    print(best_feature)
    # NOTE: create_tree mutates `labels` in place.
    tree = create_tree(dataSet, labels)
    print(tree)
    # createPlot(tree)
    # Delegate drawing to the project-local helper module.
    drawTreeUtil = draw_tree_util.DrawTreeUtil()
    drawTreeUtil.createPlot(tree)
