from collections import Counter
from math import log
# Create the sample data set
def createdataset():
    """Return the toy loan-approval samples and their feature names.

    Each row is [age, income, job, credit, class] where the class
    label is 'Y' (approve) or 'N' (reject).
    """
    samples = [
        [0, 2, 0, 0, 'N'],
        [0, 2, 0, 1, 'N'],
        [1, 2, 0, 0, 'Y'],
        [2, 1, 0, 0, 'Y'],
        [2, 0, 1, 0, 'Y'],
        [2, 0, 1, 1, 'N'],
        [1, 0, 1, 1, 'Y'],
        [0, 1, 0, 0, 'N'],
        [0, 0, 1, 0, 'Y'],
        [2, 1, 1, 0, 'Y'],
        [0, 1, 1, 1, 'Y'],
        [1, 1, 0, 1, 'Y'],
        [1, 2, 1, 0, 'Y'],
        [2, 1, 0, 1, 'N'],
    ]
    feature_names = ['age', 'income', 'job', 'credit']
    return samples, feature_names


# Build the sample data set and echo it for inspection.
ds1, lab = createdataset()
for item in (ds1, lab):
    print(item)


def splitDataSet(dataSet, axis, value):
    """Select the rows whose feature at column ``axis`` equals ``value``.

    Parameters:
        dataSet: the data set to partition (list of list rows)
        axis: column index of the feature to test
        value: the feature value to keep
    Returns a new list of rows with the tested column removed.
    """
    return [row[:axis] + row[axis + 1:]
            for row in dataSet
            if row[axis] == value]


# Shannon entropy of a data set

def calcShannonEnt(dataSet):
    """Compute the Shannon entropy of a data set.

    The last element of each row is taken as its class label;
    entropy = -sum(p * log2(p)) over the class distribution.

    Parameters:
        dataSet: list of rows; row[-1] is the class label
    Returns the entropy as a float (0.0 for an empty data set).
    """
    numEntries = len(dataSet)
    if numEntries == 0:
        # Guard: avoid division by zero; an empty set has zero entropy.
        return 0.0
    # Counter replaces the hand-rolled dict-counting loop.
    labelCounts = Counter(row[-1] for row in dataSet)
    shannonEnt = 0.0
    for count in labelCounts.values():
        prob = count / numEntries
        shannonEnt -= prob * log(prob, 2)
    return shannonEnt


# Entropy of the sample data: information entropy (Shannon entropy) is the expectation over a random variable and measures how uncertain the information is. The larger the entropy, the harder the information is to pin down; processing information is the process of reducing entropy.
# Entropy of the full sample set.
shan = calcShannonEnt(ds1)
print(shan)


# Information gain: measures how much attribute A reduces the entropy of sample set X. The larger the gain, the better A is for classifying X.

def chooseBestFeatureToSplit(dataSet):
    """Return the column index of the feature with the largest
    information gain, or -1 if no split improves on zero gain."""
    baseEntropy = calcShannonEnt(dataSet)
    bestGain = 0.0
    bestFeature = -1
    numFeatures = len(dataSet[0]) - 1  # last column is the class label
    for col in range(numFeatures):
        # Conditional entropy after splitting on this column.
        values = {row[col] for row in dataSet}
        condEntropy = 0.0
        for v in values:
            subset = splitDataSet(dataSet, col, v)
            weight = len(subset) / float(len(dataSet))
            condEntropy += weight * calcShannonEnt(subset)
        gain = baseEntropy - condEntropy
        if gain > bestGain:
            bestGain = gain
            bestFeature = col
    return bestFeature


# Column index of the best feature to split the sample data on.
col = chooseBestFeatureToSplit(ds1)
# Fixed: a bare `col` expression is a no-op in a script (it only
# displays in a REPL/notebook); print the result explicitly.
print(col)


# Build the decision tree

def majorityCnt(classList):
    """Return the class label that occurs most often in classList.

    Fixes the original, which called ``classList.iteritems()`` —
    Python-2-only AND the wrong variable (a list has no iteritems;
    the counts live in ``classCount``) — and referenced ``operator``
    without importing it, so any call raised at runtime.

    Parameters:
        classList: non-empty list of class labels
    Returns the most frequent label.
    """
    classCount = {}
    for vote in classList:
        classCount[vote] = classCount.get(vote, 0) + 1
    # max over the counts replaces the sort-then-take-first idiom.
    return max(classCount, key=classCount.get)


# Tree-building function
def createTree(dataSet, labels):
    """Recursively build an ID3 decision tree.

    Parameters:
        dataSet: list of rows; row[-1] is the class label
        labels: feature names aligned with the data columns
                (no longer mutated — see note below)
    Returns either a class label (leaf) or a nested dict
    {feature_name: {feature_value: subtree, ...}}.
    """
    classList = [example[-1] for example in dataSet]
    # All samples share one class: return a leaf.
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # No features left to split on: majority-vote leaf.
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}
    # Fixed: the original did `del labels[bestFeat]`, destructively
    # mutating the caller's list (which forced the script to redefine
    # `labels` later). Build a reduced copy instead.
    subLabels = labels[:bestFeat] + labels[bestFeat + 1:]
    featValues = {example[bestFeat] for example in dataSet}
    for value in featValues:
        myTree[bestFeatLabel][value] = createTree(
            splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree


# Build the decision tree from the sample data and display it.
Tree = createTree(ds1, lab)
print("样本数据决策树：")
print(Tree)



# Classify test samples

def classify(inputtree, featlabels, testvec):
    """Classify a feature vector by walking the decision tree.

    Parameters:
        inputtree: nested dict {feature_name: {value: subtree_or_label}}
        featlabels: feature names aligned with testvec's columns
        testvec: feature values for the sample to classify
    Returns the predicted class label, or None when the sample's
    feature value matches no branch in the tree.
    """
    firststr = list(inputtree.keys())[0]
    seconddict = inputtree[firststr]
    featindex = featlabels.index(firststr)
    # Fixed: classlabel was unbound (UnboundLocalError) when testvec's
    # value matched no branch; default to None instead of crashing.
    classlabel = None
    for key in seconddict:
        if testvec[featindex] == key:
            subtree = seconddict[key]
            if isinstance(subtree, dict):
                # Internal node: recurse into the matching branch.
                classlabel = classify(subtree, featlabels, testvec)
            else:
                # Leaf node: the branch value is the class label.
                classlabel = subtree
    return classlabel


# Classify two test vectors against the learned tree.
labels = ['age', 'income', 'job', 'credit']
for tag, vec in (('result:', [0, 0, 1, 1]), ('result1:', [0, 2, 0, 1])):
    print(tag, classify(Tree, labels, vec))
