import math
# operator: provides itemgetter, used as a sort/max key when counting class votes
import operator
def createDataSet():
    """Build the toy data set for the decision tree demo.

    Each record is [feature0, feature1, class label]; `labels` names the two
    feature columns (kept in the original Chinese: 'surface smooth', 'has fins').
    Returns (dataSet, labels).
    """
    dataSet = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    labels = ['表面光滑', '有鳍']
    return dataSet, labels

# Shannon entropy of a data set: H = -sum(p(x_i) * log2(p(x_i))) over every
# class label, where the class label is the last column of each record.
def calcShannonEnt(dataSet):
    """Return the Shannon entropy of the class-label column of dataSet."""
    total = len(dataSet)
    # Tally how many records carry each class label.
    labelCounts = {}
    for record in dataSet:
        label = record[-1]
        labelCounts[label] = labelCounts.get(label, 0) + 1
    # Accumulate -p * log2(p) for each class probability p.
    shannonEnt = 0
    for count in labelCounts.values():
        prob = count / total
        shannonEnt -= prob * math.log(prob, 2)
    return shannonEnt

# Split the data set on one feature value.
def splitDataSet(dataSet, axis, value):
    """Return the records whose column `axis` equals `value`, with that
    column removed from each returned record."""
    return [row[:axis] + row[axis + 1:] for row in dataSet if row[axis] == value]

# Choose the best feature to split the data set on (ID3).
def chooseBestFeatureTpSplit(dataSet):
    """Return the index of the feature whose split minimizes the weighted
    post-split entropy (i.e. maximizes information gain), or -1 when no
    split strictly reduces entropy.

    NOTE: the misspelling "Tp" (should be "To") is kept because callers
    use this name.
    """
    numRecords = len(dataSet)
    numFeatures = len(dataSet[0]) - 1  # last column is the class label
    # Baseline: entropy of the unsplit set; a feature wins only if it
    # strictly lowers this.
    bestEntropy = calcShannonEnt(dataSet)
    bestFeature = -1
    for i in range(numFeatures):
        # Distinct values taken by feature i.
        featureValues = {record[i] for record in dataSet}
        # Conditional entropy after splitting on feature i: each subset's
        # entropy must be weighted by the fraction of records in it
        # (the original code summed the subset entropies unweighted,
        # which is not the ID3 information-gain formula).
        newEntropy = 0
        for value in featureValues:
            subset = splitDataSet(dataSet, i, value)
            prob = len(subset) / numRecords
            newEntropy += prob * calcShannonEnt(subset)
        if newEntropy < bestEntropy:
            bestEntropy = newEntropy
            bestFeature = i
    return bestFeature

# Majority vote over a list of class labels.
def majorityCnt(classList):
    """Return the most frequent label in classList; ties are broken by
    first occurrence (insertion order), matching a stable reverse sort."""
    tally = {}
    for label in classList:
        tally[label] = tally.get(label, 0) + 1
    return max(tally.items(), key=operator.itemgetter(1))[0]
# Build the decision tree (ID3), recursively.
def createTree(dataSet, labels):
    """Return a decision tree for dataSet as nested dicts.

    The tree is keyed first by feature index (relative to the columns of the
    current sub-dataset) and then by feature value; leaves are class labels.
    `labels` names the feature columns of dataSet and is not mutated.
    """
    # Bug fix: read class labels from the dataSet ARGUMENT's last column.
    # The original read `[v[2] for v in dataset]` — a global-name typo with a
    # hard-coded column, so every recursive call saw the root's class list
    # and pure subsets were never detected.
    classList = [record[-1] for record in dataSet]
    # All records share one class: this branch is a pure leaf.
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # No feature columns left (each record is just its class label):
    # fall back to a majority vote.  (The original stopped at
    # len(labels) == 1, giving up while one usable feature remained.)
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    bestFeature = chooseBestFeatureTpSplit(dataSet)
    # Work on a copy: the original `del labels[bestFeature]` mutated the
    # caller's list and was shared across sibling recursive branches.
    subLabels = labels[:bestFeature] + labels[bestFeature + 1:]
    myTree = {bestFeature: {}}
    for value in {record[bestFeature] for record in dataSet}:
        subDataSet = splitDataSet(dataSet, bestFeature, value)
        myTree[bestFeature][value] = createTree(subDataSet, subLabels)
    return myTree

if __name__ == '__main__':
    # Demo driver: build the toy data set and exercise each function.
    # NOTE(review): the names `dataset`/`labels` are module-level globals and
    # `createTree` (above) reads the global `dataset` directly, so renaming
    # these variables would change behavior — keep them as-is.
    dataset,labels = createDataSet()
    print(dataset)
    # Entropy of the full data set (2 'yes' / 3 'no' -> ~0.971 bits).
    print(calcShannonEnt(dataset))
    # Records with feature 0 == 1, feature column removed.
    print(splitDataSet(dataset,0,1))
    # Prints its internal diagnostics; return value ignored here.
    chooseBestFeatureTpSplit(dataset)
    print(createTree(dataset,labels))