import math
import operator
import treePlotter

def getlog2(x):
    """Return the base-2 logarithm of x."""
    base2 = math.log(x, 2)
    return base2

def calcShannonEnt(dataSet):
    """Compute the Shannon entropy of a dataset's class labels.

    The class label is taken to be the LAST element of each row.
    Returns 0.0 for a dataset whose rows all share one label.
    """
    total = len(dataSet)

    # Tally how many rows carry each class label.
    labelCounts = {}
    for row in dataSet:
        label = row[-1]
        labelCounts[label] = labelCounts.get(label, 0) + 1

    # H = -sum(p * log2(p)) over the label distribution.
    entropy = 0.0
    for count in labelCounts.values():
        p = float(count) / total
        entropy -= p * math.log(p, 2)
    return entropy

def splitDataSet(dataSet, feat, value):
    """Select the rows whose feature `feat` equals `value`, dropping that column.

    Returns a new list of new rows; the input dataset is left untouched.
    """
    return [row[:feat] + row[feat + 1:] for row in dataSet if row[feat] == value]

def chooseBestFeatureToSplit(dataSet):
    """Pick the feature index with the highest ID3 information gain.

    Returns -1 when no feature yields a strictly positive gain.
    """
    featureCount = len(dataSet[0]) - 1      # last column is the class label, not a feature
    baseEntropy = calcShannonEnt(dataSet)   # entropy before any split
    bestGain, bestIndex = 0.0, -1
    for idx in range(featureCount):
        uniqueVals = {row[idx] for row in dataSet}
        # Weighted entropy of the partition induced by feature `idx`.
        splitEntropy = 0.0
        for val in uniqueVals:
            subset = splitDataSet(dataSet, idx, val)
            weight = len(subset) / float(len(dataSet))
            splitEntropy += weight * calcShannonEnt(subset)
        gain = baseEntropy - splitEntropy
        if gain > bestGain:
            bestGain, bestIndex = gain, idx
    return bestIndex

def majorityCnt(classlist):
    """Majority vote: return the most frequent label in classlist.

    Used when the features are exhausted but the class labels still
    disagree. Ties break toward the label encountered first.
    """
    counts = {}
    for label in classlist:
        counts[label] = counts.get(label, 0) + 1
    # max() returns the first-encountered entry among equal counts,
    # matching the original stable sort-descending-and-take-first.
    return max(counts.items(), key=operator.itemgetter(1))[0]

def createTree(dataSet, labels):
    """Recursively build an ID3 decision tree.

    dataSet -- 2-D list; each row is feature values plus a trailing class label.
    labels  -- feature names, parallel to the feature columns.
    Returns a nested dict {featureName: {featureValue: subtree-or-label}},
    or a bare class label at a leaf.

    Fix vs. original: the caller's `labels` list is no longer mutated
    (the original did `del labels[bestFeatIndex]` on the shared list);
    also removed a redundant double `set()` conversion.
    """
    classList = [row[-1] for row in dataSet]

    # Stop condition 1: every row carries the same class label — pure leaf.
    if len(set(classList)) == 1:
        return classList[0]

    # Stop condition 2: only the label column remains; vote for the majority.
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)

    bestFeatIndex = chooseBestFeatureToSplit(dataSet)
    bestFeatName = labels[bestFeatIndex]
    myTree = {bestFeatName: {}}

    # Remaining feature names for the recursion, WITHOUT touching the
    # caller's list (copy-and-drop instead of del on the argument).
    subLabels = labels[:bestFeatIndex] + labels[bestFeatIndex + 1:]

    # One branch per observed value of the chosen feature.
    for value in {row[bestFeatIndex] for row in dataSet}:
        branchData = splitDataSet(dataSet, bestFeatIndex, value)
        myTree[bestFeatName][value] = createTree(branchData, subLabels[:])
    return myTree

def createDataset():
    """Return the toy 'is it a fish?' dataset and its feature names.

    Columns: 'no surfacing', 'flippers', then the class label.
    """
    samples = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
        [0, 0, 'no'],
    ]
    featureNames = ['no surfacing', 'flippers']
    return samples, featureNames


def storeTree(inputTree, fileName):
    """Serialize a decision tree to `fileName` with pickle.

    Fix vs. original: open the file with a context manager so the
    handle is closed even if pickle.dump raises.
    """
    import pickle
    with open(fileName, 'wb') as fw:
        pickle.dump(inputTree, fw)


def loadTree(fileName):
    """Deserialize a decision tree previously saved by storeTree.

    Fix vs. original: the file handle was never closed; use a context
    manager. NOTE(review): pickle.load on untrusted files can execute
    arbitrary code — only load trees this program wrote itself.
    """
    import pickle
    with open(fileName, 'rb') as fr:
        return pickle.load(fr)


def classify(inputTree, featLabels, testVec):
    """Classify `testVec` by walking the decision tree.

    featLabels maps feature names to positions in testVec (by index).
    Returns the class label stored at the reached leaf.
    """
    (rootName, branches), = inputTree.items()   # tree root has exactly one key
    featValue = testVec[featLabels.index(rootName)]
    branch = branches[featValue]
    # A dict branch is a subtree; anything else is a leaf label.
    if type(branch) is dict:
        return classify(branch, featLabels, testVec)
    return branch

def _main():
    """Demo driver: load a stored tree, plot it, and classify one sample.

    To regenerate ./tree.txt first, uncomment the build steps below.
    """
    # data, label = createDataset()
    # tree = createTree(data, label)
    # storeTree(tree, './tree.txt')
    # print(treePlotter.getLeafCount(tree))
    # print(treePlotter.getTreeDepth(tree))
    tree = loadTree('./tree.txt')
    treePlotter.createPlot(tree)
    # NOTE(review): the feature-name order here is reversed relative to
    # createDataset()'s labels ['no surfacing', 'flippers'] — confirm
    # this reordering of testVec semantics is intentional.
    classifyRet = classify(tree, ["flippers", "no surfacing"], [0, 1])
    print(classifyRet)


# Fix vs. original: guard the demo so importing this module no longer
# triggers file I/O and plotting as a side effect.
if __name__ == "__main__":
    _main()
