import operator
from collections import Counter
from math import log


def createDataSet():
    """Return a tiny toy data set and the matching feature labels.

    Each row is [no-surfacing?, flippers?, class]; the last column is the
    class label ('yes' / 'no').
    """
    dataSet = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    labels = ['no surfacing', 'flippers']
    return dataSet, labels

def calcShannonEnt(dataSet):
    """Compute the Shannon entropy of a data set.

    dataSet: list of rows; the last element of each row is the class label.

    Returns H = -sum(p * log2(p)) over the class-label distribution,
    or 0.0 for an empty data set (previously a ZeroDivisionError).
    """
    numEntries = len(dataSet)
    if numEntries == 0:
        # An empty data set carries no information; avoid dividing by zero.
        return 0.0

    # Tally each class label (the last column of every row).
    labelCounts = Counter(featVec[-1] for featVec in dataSet)

    shannonEnt = 0.0
    for count in labelCounts.values():
        prob = count / float(numEntries)
        shannonEnt -= prob * log(prob, 2)

    return shannonEnt
'''
    Split the data set on a given feature.

    dataSet:    the data set to split
    axis:       index of the feature (column) to split on
    value:      the feature value to select

    Returns the matching rows with the split feature removed (a list).
'''
def splitDataSet(dataSet, axis, value):
    """Select the rows whose feature at index `axis` equals `value`,
    returning each matching row with that feature column removed.
    """
    # Concatenating the two slices yields a new row without column `axis`
    # (equivalent to copy + extend in the original).
    return [
        featVec[:axis] + featVec[axis + 1:]
        for featVec in dataSet
        if featVec[axis] == value
    ]

def chooseBestFeatureToSplit(dataSet):
    """Pick the feature index whose split gives the largest information gain.

    dataSet: rows whose last column is the class label.
    Returns the best feature's column index, or -1 when no split improves
    on the base entropy.
    """
    numFeatures = len(dataSet[0]) - 1  # last column is the label, not a feature
    baseEntropy = calcShannonEnt(dataSet)
    bestFeature, bestInfoGain = -1, 0.0

    for axis in range(numFeatures):
        # Distinct values this feature takes across all rows (one column).
        values = set(example[axis] for example in dataSet)

        # Entropy of the partition induced by this feature, weighted by
        # the relative size of each subset.
        newEntropy = 0.0
        for value in values:
            subset = splitDataSet(dataSet, axis, value)
            weight = len(subset) / float(len(dataSet))
            newEntropy += weight * calcShannonEnt(subset)

        gain = baseEntropy - newEntropy
        if gain > bestInfoGain:
            bestInfoGain, bestFeature = gain, axis

    return bestFeature

def majorityCnt(classList):
    """Return the most frequent class label in classList.

    Ties are broken by first occurrence (sorted() is stable).

    Fixes from review: the loop iterated a misspelled name (`clasList`,
    a NameError), called the dict as a function (`classCount()`, a
    TypeError), and the sort/return sat inside the loop so only the
    first vote was ever counted.
    """
    classCount = {}
    for vote in classList:
        # dict.get avoids the explicit membership test before incrementing.
        classCount[vote] = classCount.get(vote, 0) + 1

    # (label, count) pairs sorted by count, descending; take the top label.
    sortedClassCount = sorted(classCount.items(),
                              key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]

def createTree(dataSet, labels):
    """Recursively build an ID3 decision tree as nested dicts.

    dataSet: rows whose last column is the class label.
    labels:  feature names parallel to the feature columns. NOTE: this
             list is mutated (the chosen feature's label is deleted),
             matching the original behavior.
    Returns a class label (leaf) or {featureLabel: {value: subtree}}.
    """
    classList = [example[-1] for example in dataSet]

    # Leaf: every remaining row already shares a single class.
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # Leaf: no features left to split on — fall back to a majority vote.
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)

    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    tree = {bestFeatLabel: {}}
    del labels[bestFeat]

    for value in set(example[bestFeat] for example in dataSet):
        # Pass a copy so sibling branches don't see each other's deletions.
        tree[bestFeatLabel][value] = createTree(
            splitDataSet(dataSet, bestFeat, value), labels[:])

    return tree
    
