import numpy as np
import math
import operator
'''
Shannon entropy
Information gain
'''

class DecisionTree:
    """ID3 decision tree built with Shannon entropy / information gain.

    The tree is represented as nested dicts:
    ``{feature_label: {feature_value: subtree_or_class, ...}}``.
    """

    def __init__(self):
        pass

    def createDataSet(self):
        """Return a tiny demo dataset and its feature labels.

        Each row is ``[feature0, feature1, class]``; the last column is the
        class label ('yes' / 'no').
        """
        dataSet = [
            [1, 1, 'yes'],
            [1, 1, 'yes'],
            [1, 0, 'no'],
            [0, 1, 'no'],
            [0, 1, 'no']]
        labels = ['no surfacing', 'flippers']
        return dataSet, labels

    def calcShannonEnt(self, dataSet):
        """Return the Shannon entropy of the class column (last element of each row)."""
        numEntries = len(dataSet)
        labelCounts = {}
        for featVec in dataSet:
            currentLabel = featVec[-1]
            # dict.get with default replaces the manual "if key not in" dance
            labelCounts[currentLabel] = labelCounts.get(currentLabel, 0) + 1
        shannonEnt = 0.0
        for count in labelCounts.values():
            prob = count / numEntries  # true division in Python 3
            shannonEnt -= prob * math.log(prob, 2)
        return shannonEnt

    def splitDataSet(self, dataSet, axis, value):
        """Return the rows whose feature at index ``axis`` equals ``value``,
        with that feature column removed from each returned row."""
        retDataSet = []
        for featVec in dataSet:
            if featVec[axis] == value:
                # concatenation of the two slices drops column `axis`
                retDataSet.append(featVec[:axis] + featVec[axis + 1:])
        return retDataSet

    def chooseBestFeaturetoSplit(self, dataSet):
        """Return the index of the feature with the highest information gain.

        Returns -1 when no split yields a positive gain.
        """
        numFeatures = len(dataSet[0]) - 1  # last column is the class label
        baseShannonEnt = self.calcShannonEnt(dataSet)
        bestInfoGain = 0.0
        bestFeature = -1
        for i in range(numFeatures):
            uniqueVals = {example[i] for example in dataSet}
            newShannonEnt = 0.0
            for value in uniqueVals:
                subDataSet = self.splitDataSet(dataSet, i, value)
                prob = len(subDataSet) / len(dataSet)
                newShannonEnt += prob * self.calcShannonEnt(subDataSet)
            infoGain = baseShannonEnt - newShannonEnt
            if infoGain > bestInfoGain:
                bestInfoGain = infoGain
                bestFeature = i
        return bestFeature

    def majorityCnt(self, classList):
        """Return the most common class label in ``classList``."""
        classCount = {}
        for vote in classList:
            # BUG FIX: original tested `vote not in classCount.keys`
            # (missing call parentheses) — TypeError at runtime.
            classCount[vote] = classCount.get(vote, 0) + 1
        # BUG FIX: `iteritems()` is Python 2 only; use items().
        sortedClassCount = sorted(classCount.items(),
                                  key=operator.itemgetter(1), reverse=True)
        return sortedClassCount[0][0]

    def createTree(self, dataSet, labels):
        """Recursively build the decision tree as nested dicts.

        ``labels`` is not mutated; a pruned copy is passed to each recursion.
        """
        classList = [example[-1] for example in dataSet]
        # BUG FIX: original had `count(classList[0] == len(classList))`
        # (misplaced paren) — it counted booleans instead of comparing.
        if classList.count(classList[0]) == len(classList):
            return classList[0]  # pure node -> leaf
        if len(dataSet[0]) == 1:
            # no features left -> majority vote
            # BUG FIX: original called majorityCnt without `self.`
            return self.majorityCnt(classList)
        bestFeat = self.chooseBestFeaturetoSplit(dataSet)
        bestFeatLabel = labels[bestFeat]
        myTree = {bestFeatLabel: {}}
        # Build a pruned copy instead of `del labels[bestFeat]`, so the
        # caller's label list is left untouched.
        remainingLabels = labels[:bestFeat] + labels[bestFeat + 1:]
        featValues = {example[bestFeat] for example in dataSet}
        for value in featValues:
            subLabels = remainingLabels[:]
            myTree[bestFeatLabel][value] = self.createTree(
                self.splitDataSet(dataSet, bestFeat, value), subLabels)
        return myTree

    def outputTest(self):
        """Build and print the tree for the demo dataset."""
        myMat, labels = self.createDataSet()
        print(self.createTree(myMat, labels))
if __name__ == '__main__':
    # Demo entry point: build the sample tree and print it.
    tree_builder = DecisionTree()
    tree_builder.outputTest()