#!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plot
import math as mathUtil


"""
输入：训练集D，特征集A，阈值e
输出：决策树

1. 若D中所有实例属于同一类Ck，则T为单节点，并将Ck作为该节点的类标记，返回T
2. 若A为空，则T为单节点树，并将D中实例数最大的类Ck作为该节点的类标记，返回T
3. 否则，计算A中各个特征值对于D的信息增益，选择信息增益最大的特征值Ag
4. 如果Ag的信息增益小于阈值e，则置T为单节点树，并将D中实例数最大的类Ck作为该节点的类标记，返回T
5. 否则对Ag的每一可能值ai，依Ag=ai将D分割为若干非空子集Di，将Di中实例数最大的类作为标记，构建子节点，
   由结点及其子节点构成树T，返回T
6. 对第i个子节点，以Di为训练集，以A-{Ag}为特征集，递归地调用步骤1-5，得到子树Ti，返回Ti
"""

def createDataSet():
    """Return the toy fish dataset and its feature names.

    Each row is [no-surfacing, flippers, class-label].
    """
    samples = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    featureNames = ['no sufacing', 'flippers']
    return samples, featureNames


# Compute the Shannon entropy of the class labels in dataSet.
def calEntropy(dataSet):
    """Return the entropy (log base 2) of the labels in dataSet.

    The last element of each row is treated as that row's class label.
    """
    total = len(dataSet)
    # Tally how many rows carry each class label.
    counts = {}
    for row in dataSet:
        label = row[-1]
        counts[label] = counts.get(label, 0) + 1
    # H(D) = -sum(p * log2(p)) over all label probabilities.
    entropy = 0.0
    for freq in counts.values():
        p = freq / float(total)
        entropy -= p * mathUtil.log2(p)
    return entropy


# Select the rows whose feature at `axis` equals `value`, dropping that column.
def splitDataSetByFeature(dataSet, axis, value):
    """Return the subset of dataSet where column `axis` == value.

    The matched column itself is removed from every returned row.
    """
    return [row[:axis] + row[axis + 1:] for row in dataSet if row[axis] == value]

# Compute each feature's information gain and return the best feature index.
def calBestInformatationGainFeature(dataSet):
    """Return the index of the feature with the largest information gain.

    dataSet rows are feature values followed by the class label in the
    last column.  Returns -1 when no feature yields a positive gain.

    Fix: removed a stray debug ``print(infoGain)`` left inside the loop.
    """
    featureNum = len(dataSet[0]) - 1  # last column is the label
    baseEntropy = calEntropy(dataSet)

    bestInfoGain = 0
    bestFeature = -1

    for i in range(featureNum):
        # Distinct values taken by feature i.
        uniqueValues = {row[i] for row in dataSet}

        # Conditional entropy H(D|A_i) = sum over values v of p(v) * H(D_v).
        conditionalEntropy = 0.0
        for value in uniqueValues:
            subDataset = splitDataSetByFeature(dataSet, i, value)
            prob = len(subDataset) / float(len(dataSet))
            conditionalEntropy += prob * calEntropy(subDataset)

        # Information gain g(D, A_i) = H(D) - H(D|A_i).
        infoGain = baseEntropy - conditionalEntropy

        # Track the feature with the maximum gain so far.
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i

    return bestFeature

# Majority vote: pick the most frequent class label.
def majoritySelector(classLabelList):
    """Return the class label that occurs most often in classLabelList.

    Fix: the original passed ``reversed=True`` to ``sorted``, which raises
    ``TypeError`` — the keyword argument is ``reverse``.
    """
    classCount = {}
    for classLabel in classLabelList:
        classCount[classLabel] = classCount.get(classLabel, 0) + 1
    # Sort (label, count) pairs by count, most common first.
    sortedClassCount = sorted(classCount.items(), key=lambda item: item[1], reverse=True)
    return sortedClassCount[0][0]


def createTree(dataSet, labels):
    """Recursively build an ID3 decision tree.

    dataSet -- rows of feature values with the class label in the last column.
    labels  -- feature names matching the feature columns.  NOTE: this list
               is mutated (the chosen feature's name is deleted), matching
               the original behaviour.
    Returns a class label (leaf) or a nested dict
    {featureName: {featureValue: subtree, ...}}.

    Fixes: the original referenced undefined names ``bestFeat`` and
    ``splitDataSet`` (NameError), and its "no features left" stop condition
    tested ``len(dataSet) == 1`` (one sample left) instead of
    ``len(dataSet[0]) == 1`` (only the label column remains).
    """
    classLabelList = [x[-1] for x in dataSet]
    # If every sample in D belongs to the same class Ck, return a leaf Ck.
    if classLabelList.count(classLabelList[0]) == len(classLabelList):
        return classLabelList[0]

    # If the feature set A is empty (only the label column remains),
    # return the majority class of D.
    if len(dataSet[0]) == 1:
        return majoritySelector(classLabelList)

    # Pick the feature with maximum information gain.
    bestFeature = calBestInformatationGainFeature(dataSet)
    bestFeatLabel = labels[bestFeature]  # name of the chosen feature
    myTree = {bestFeatLabel: {}}

    del labels[bestFeature]  # remove the chosen feature's name

    featValues = [x[bestFeature] for x in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]  # independent copy for each branch
        # Recurse on the subset where bestFeature == value.
        myTree[bestFeatLabel][value] = createTree(
            splitDataSetByFeature(dataSet, bestFeature, value), subLabels)
    return myTree


if __name__ == '__main__':
    # Build the toy dataset, report the best split feature, then grow the tree.
    data, featureNames = createDataSet()
    best = calBestInformatationGainFeature(data)
    print(best)
    tree = createTree(data, featureNames)
    print(tree)