#!/usr/bin/python
# -*- coding:utf-8 -*-
__author__ = 'Linson'

from math import log

# Shannon entropy of a given data set
def calcShannonEnt(dataSet):
    """Return the Shannon entropy (in bits) of dataSet's class labels.

    Each record in dataSet is a sequence whose LAST element is the class
    label; entropy is computed over the label distribution.

    :param dataSet: list of records, e.g. [[1, 1, 'yes'], ...]
    :return: entropy in bits; 0.0 for an empty or single-class data set
    """
    numEntries = len(dataSet)

    # Count occurrences of every class label (dict.get avoids the
    # `key not in d.keys()` pre-check of the original).
    labelCounts = {}
    for featVec in dataSet:
        currentLabel = featVec[-1]
        labelCounts[currentLabel] = labelCounts.get(currentLabel, 0) + 1

    # H = -sum(p * log2(p)) over all label probabilities.
    shannonEnt = 0.0
    for count in labelCounts.values():
        prob = count / float(numEntries)  # explicit float keeps Py2 semantics too
        shannonEnt -= prob * log(prob, 2)  # log base 2 -> information in bits
    return shannonEnt

def createDataSet():
    """Build the toy classification data set.

    :return: (dataSet, labels) — dataSet rows carry two feature values
             followed by a class label; labels names the feature columns.
    """
    samples = [
        [1, 1, 'yes'],
        [2, 1, 'yes'],
        [1, 2, 'no'],
        [0, 1, 'no'],
        [1, 2, 'yes'],
        [0, 1, 'no'],
    ]
    featureNames = ['no surfacing', 'flippers']
    return samples, featureNames


def splitDataSet(dataSet,axis,value):
    recDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            reducedFeatVec=featVec[:axis]
            reducedFeatVec.extend(featVec[axis+1:])
            recDataSet.append(reducedFeatVec)
    return recDataSet

def chooseBestFeatureToSplit(dataSet):
    """Pick the feature index with the highest information gain (ID3).

    :param dataSet: list of records; every column but the last is a
                    feature, the last column is the class label.
    :return: index of the best feature, or -1 when no split yields a
             positive information gain
    """
    numFeatures = len(dataSet[0]) - 1  # last column is the label
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1  # fixed misspelled local 'besetFeature'

    for i in range(numFeatures):
        # Distinct values taken by feature i (was misspelled 'unqiVals').
        uniqueVals = {record[i] for record in dataSet}

        # Weighted average entropy of the partition induced by feature i.
        newEntropy = 0.0
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)

        infoGain = baseEntropy - newEntropy  # entropy reduction from this split
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i

    return bestFeature

# Demo / smoke test. Guarded so that importing this module no longer
# executes the prints as a side effect (original ran at import time).
if __name__ == "__main__":
    myDat = createDataSet()[0]

    # Best feature to split on for the toy data set.
    print(chooseBestFeatureToSplit(myDat))

    # Shannon entropy test
    print(myDat)
    print(calcShannonEnt(myDat))

    # Change a class label in the data set and watch the entropy rise.
    myDat[0][-1] = 'maybe'
    print(calcShannonEnt(myDat))

    # Split test
    print(splitDataSet(myDat, 1, 1))

