'''
    A simple implementation of the ID3 decision-tree algorithm.
'''

from math import log
import operator
import pandas as pd
import json

def calShannonEnt(data):
    '''
    Compute the Shannon entropy of a dataset.
    :param data: list of samples; the last element of each sample is the class label
    :return: entropy value (sum over classes of -p * log2(p))
    '''
    total = len(data)
    # Tally how many samples belong to each class.
    counts = {}
    for sample in data:
        cls = sample[-1]
        counts[cls] = counts.get(cls, 0) + 1

    entropy = 0.0
    for cnt in counts.values():
        # Fraction of samples in this class.
        p = float(cnt) / total
        entropy -= p * log(p, 2)
    return entropy

def splitData(data, axis, value):
    '''
    Select the samples whose feature at position `axis` equals `value`,
    and drop that feature column from each selected sample.
    :param data: list of samples
    :param axis: index of the feature to split on
    :param value: feature value that a sample must match to be kept
    :return: new list of reduced samples
    '''
    return [sample[:axis] + sample[axis + 1:]
            for sample in data
            if sample[axis] == value]


def chooseBestFeature(data):
    '''
    Compute the information gain of every feature and return the index
    of the best (highest-gain) feature to split on.
    :param data: list of samples; the last element of each sample is the class label
    :return: index of the feature with the largest information gain,
             or -1 if no feature yields a positive gain
    '''
    # Number of feature columns (the last column is the class label).
    featureNum = len(data[0]) - 1
    baseEnt = calShannonEnt(data)
    bestInfoGain = 0
    bestFeature = -1
    for i in range(featureNum):
        # Distinct values this feature takes across the dataset.
        uniqueVals = set(sample[i] for sample in data)
        # Weighted average entropy of the partitions induced by feature i.
        newEnt = 0
        for val in uniqueVals:
            subData = splitData(data, i, val)
            prob = len(subData) / float(len(data))
            newEnt += prob * calShannonEnt(subData)
        infoGain = baseEnt - newEnt
        # BUG FIX: the original compared `infoGain > bestFeature` (a feature
        # index) instead of `bestInfoGain`, so the selected feature was
        # essentially arbitrary rather than the highest-gain one.
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature

def majorityCnt(classList):
    '''
    Majority vote for a leaf node: return the most frequent class label.
    :param classList: non-empty list of class labels
    :return: the label with the highest count
    '''
    classCnt = {}
    for vote in classList:
        classCnt[vote] = classCnt.get(vote, 0) + 1
    # BUG FIX: the original sorted counts in ascending order and returned
    # element [0], i.e. the LEAST frequent class. Sort descending so the
    # majority class is returned, as the docstring promises.
    sortedClassCnt = sorted(classCnt.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCnt[0][0]

def createTree(data, label):
    '''
    Recursively build an ID3 decision tree.
    :param data: list of samples; the last element of each sample is the class label
    :param label: feature names aligned with the feature columns of data
    :return: a nested dict {featureName: {featureValue: subtree_or_label}},
             or a bare class label for a leaf node
    '''
    classList = [sample[-1] for sample in data]
    # All samples share a single class: pure leaf.
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # No feature columns left to split on: fall back to majority vote.
    if len(data[0]) == 1:
        return majorityCnt(classList)
    bestFeature = chooseBestFeature(data)
    bestLabel = label[bestFeature]
    decisionTree = {bestLabel: {}}
    # BUG FIX: the original del()'d from the caller's `label` argument in
    # place, mutating the caller's list; build a reduced copy instead.
    restLabel = label[:bestFeature] + label[bestFeature + 1:]
    featureVal = set(sample[bestFeature] for sample in data)
    for val in featureVal:
        # Pass a fresh copy of the remaining feature names to each branch.
        decisionTree[bestLabel][val] = createTree(
            splitData(data, bestFeature, val), restLabel[:])
    return decisionTree

def getData():
    '''
    Load the watermelon dataset from watermelon_dataset.csv.
    :return: (data, label) — data is a list of samples, each
             [色泽, 根蒂, 敲声, 纹理, 脐部, 触感, 好瓜] with the class
             column (好瓜) last; label is the list of feature names
             (class column excluded)
    '''
    csv_data = pd.read_csv("watermelon_dataset.csv")
    columns = ['色泽', '根蒂', '敲声', '纹理', '脐部', '触感', '好瓜']
    # BUG FIX: the original unpacking `for c,r,k,t,u,t,g in zip(...)` bound
    # the variable `t` twice, so the 纹理 column was silently overwritten by
    # the 触感 values in every sample. Zip the columns generically instead.
    data = [list(row) for row in zip(*(csv_data[col] for col in columns))]
    label = columns[:-1]
    return data, label

if __name__ == '__main__':
    # Load the dataset, build the ID3 tree, and pretty-print it as JSON.
    trainData, featureNames = getData()
    tree = createTree(trainData, featureNames)
    print(json.dumps(tree, ensure_ascii=False, indent=2))