import operator
import numpy as np
from math import log
from operator import *

def createDataSet():
    """Build the toy weather dataset.

    Returns:
        (dataSet, features): dataSet is a list of sample rows, each row being
        four string feature values (outlook, temperature, humidity, windy)
        followed by the class label ('yes'/'no'); features is the list of the
        four feature names in column order.
    """
    # Each row encoded as one whitespace-separated string, split into fields.
    rows = ["sunny hot high False no",
            "sunny hot high True no",
            "overcast hot high False yes",
            "rain mild high False yes",
            "rain cool normal False yes",
            "rain cool normal True no",
            "overcast cool normal True yes",
            "sunny mild high False no",
            "sunny cool normal False yes",
            "rain mild normal False yes",
            "sunny mild normal True yes",
            "overcast mild high True yes",
            "overcast hot normal False yes",
            "rain mild high True no"]
    samples = [row.split() for row in rows]
    featureNames = ['outlook', 'temperature', 'humidity', 'windy']
    return samples, featureNames

def calcShannonEnt(dataSet):
    """Compute the Shannon entropy of the class labels in dataSet.

    The class label is assumed to be the last element of each sample row.
    Returns a float: -sum(p * log2(p)) over the label distribution.
    """
    total = len(dataSet)
    # Tally how many times each class label occurs.
    counts = {}
    for row in dataSet:
        counts[row[-1]] = counts.get(row[-1], 0) + 1
    entropy = 0.0
    for count in counts.values():
        p = float(count) / total
        entropy -= p * log(p, 2)
    return entropy

def splitDataSet(dataSet, feature, featureValue):
    """Select the rows whose column `feature` equals `featureValue`,
    returning them with that column removed (the feature is consumed).
    """
    return [row[:feature] + row[feature + 1:]
            for row in dataSet
            if row[feature] == featureValue]

def chooseBestFeatureToSplit(dataSet):
    """Pick the feature column with the highest information gain.

    Returns the index of the best feature, or -1 if no split improves on
    the base entropy.
    """
    featureCount = len(dataSet[0]) - 1  # last column is the class label
    baseEntropy = calcShannonEnt(dataSet)
    bestGain, bestIdx = 0, -1
    for idx in range(featureCount):
        # Conditional entropy of the labels given this feature's value.
        condEntropy = 0.0
        for value in {row[idx] for row in dataSet}:
            subset = splitDataSet(dataSet, idx, value)
            weight = len(subset) / float(len(dataSet))
            condEntropy += weight * calcShannonEnt(subset)
        gain = baseEntropy - condEntropy
        if gain > bestGain:
            bestGain, bestIdx = gain, idx
    return bestIdx

def majorityCnt(classList):
    """Return the class label that occurs most often in classList.

    Used as the leaf decision when no features remain to split on.

    BUG FIXES vs. the original:
    - `classCount.items` was missing call parentheses, so the bound method
      object (not the item pairs) was passed to sorted() -> TypeError.
    - sorted() was called with `reversed=True`; the keyword is `reverse`.
    """
    classCount = {}
    for vote in classList:  # tally occurrences of each class label
        if vote not in classCount:
            classCount[vote] = 0
        classCount[vote] += 1
    # Sort (label, count) pairs by count, largest first.
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]

def createTree(dataSet, features):
    """Recursively build an ID3 decision tree.

    Args:
        dataSet: list of sample rows, feature values followed by a class label.
        features: feature names for the columns of dataSet; MUTATED in place
            (the chosen feature name is deleted at each node).

    Returns:
        A nested dict {featureName: {featureValue: subtree_or_label}}, or a
        bare class label string at a leaf.
    """
    labelList = [sampleVec[-1] for sampleVec in dataSet]
    # Base case 1: every sample carries the same class label -> pure leaf.
    if labelList.count(labelList[0]) == len(labelList):
        return labelList[0]
    # Base case 2: only the label column remains (all features consumed);
    # return the majority label.
    # BUG FIX: the original tested len(labelList[0]) == 1, i.e. the character
    # length of a label string ('no' -> 2, 'yes' -> 3), so this stop condition
    # never fired. The correct test is the row width of the dataset.
    if len(dataSet[0]) == 1:
        return majorityCnt(labelList)
    bestFeature = chooseBestFeatureToSplit(dataSet)
    bestFeatureName = features[bestFeature]
    myTree = {bestFeatureName: {}}
    del features[bestFeature]  # this feature is consumed at this node
    featureValues = [sampleVec[bestFeature] for sampleVec in dataSet]
    uniqueVals = set(featureValues)
    for value in uniqueVals:
        # Copy the remaining feature names so each branch recurses on its
        # own list (siblings must not see each other's deletions).
        subFeatures = features[:]
        subData = splitDataSet(dataSet, bestFeature, value)
        myTree[bestFeatureName][value] = createTree(subData, subFeatures)

    return myTree

if __name__ == "__main__":
    # Build the tree from the toy weather dataset and display it.
    trainingData, featureNames = createDataSet()
    decisionTree = createTree(trainingData, featureNames)
    print("myTree: ", decisionTree)
