import  operator
import math
import decisionTreePlot as dtPlot

def read(filePath):
    """Load the dataset: one sample per line, space-separated attribute values.

    :param filePath: path to a UTF-8 text file
    :return: list of rows, each row a list of attribute strings
             (the last element of each row is the class label)
    """
    # Bug fix: the original opened the file and never closed it;
    # `with` guarantees the handle is released even on error.
    with open(filePath, "r", encoding="utf8") as f:
        rows = [line.strip().split(" ") for line in f]

    print(rows)

    return rows


def tree_generate(propertys, label):
    """Recursively build an ID3 decision tree.

    :param label: attribute names for the feature columns of ``propertys``;
                  NOTE: mutated in place (the chosen attribute is deleted),
                  preserved from the original implementation.
    :param propertys: dataset rows; last column of each row is the class label
    :return: a class-label string (leaf) or a nested dict
             ``{attribute_name: {attribute_value: subtree}}``
    """
    # Class label of every sample (last column of the data).
    res = [row[-1] for row in propertys]

    # Bug fix: the original tested uniformity of `label` (attribute NAMES),
    # not of the class column. ID3 stops when all samples share one class.
    if res.count(res[0]) == len(res):
        return res[0]
    # Only the class column remains: no attributes left to split on,
    # so return the majority class among the remaining samples.
    if len(propertys[0]) == 1:
        return max_of_type(res)

    bestProperty = choose_best_property(propertys)
    bestLabel = label[bestProperty]
    del label[bestProperty]
    decisionTree = {bestLabel: {}}

    # Every distinct value of the chosen attribute gets its own subtree,
    # built from the rows matching that value (with the column removed).
    values = {row[bestProperty] for row in propertys}
    for value in values:
        subLabels = label[:]  # copy so sibling branches see the same labels
        decisionTree[bestLabel][value] = tree_generate(
            split_property(propertys, value, bestProperty), subLabels)

    # Bug fix: original passed the tree as a second print() argument,
    # so "%s" was never substituted.
    print("decisionTree:%s" % decisionTree)
    return decisionTree



def choose_best_property(propertys):
    """Return the index of the column with the largest information gain (ID3).

    :param propertys: dataset rows; the last column is the class label and
                      is excluded from the candidate features
    :return: index of the best splitting attribute
    """
    # Bug fix: the original initialized bestProperty to 1, so when no
    # feature achieved positive gain it arbitrarily reported column 1.
    bestGain, bestProperty = 0.0, 0
    numFeatures = len(propertys[0]) - 1  # last column is the class, not a feature
    baseEnt = calc_base_ent(propertys)

    for i in range(numFeatures):
        uniquePro = {row[i] for row in propertys}
        # Conditional entropy of the class given feature i: weighted sum of
        # the entropy of each subset where feature i takes one value.
        newEntropy = 0.0
        for pro in uniquePro:
            dataSet = split_property(propertys, pro, i)
            prob = len(dataSet) / float(len(propertys))
            newEntropy += prob * calc_base_ent(dataSet)
        infoGain = baseEnt - newEntropy

        if infoGain > bestGain:
            bestGain = infoGain
            bestProperty = i
        print('infoGain=', infoGain, 'bestFeature=', i, baseEnt, newEntropy)
    return bestProperty

def split_property(propertys, value, index):
    """Select the rows whose column ``index`` equals ``value`` and drop that column.

    The returned subset is what the caller feeds to the entropy computation
    for this attribute value, and to the recursive tree construction.

    :param propertys: dataset rows
    :param value: attribute value to match
    :param index: column to match on (removed from the returned rows)
    :return: new list of reduced rows; input rows are not modified
    """
    return [row[:index] + row[index + 1:]
            for row in propertys
            if row[index] == value]

def calc_base_ent(propertys):
    """Shannon entropy of the class column (the last element of each row).

    :param propertys: dataset rows; must be non-empty
    :return: entropy in bits, 0.0 when all rows share one class
    """
    total = len(propertys)

    # Frequency of each class label.
    counts = {}
    for row in propertys:
        counts[row[-1]] = counts.get(row[-1], 0) + 1

    # H = -sum(p * log2(p)) over the class distribution.
    entropy = 0.0
    for n in counts.values():
        p = n / float(total)
        entropy -= p * math.log(p, 2)
    return entropy

def max_of_type(typeLables):
    """Return the most frequent label in ``typeLables`` (majority vote).

    Used by tree_generate() as the leaf value when no attributes remain.

    :param typeLables: non-empty list of (hashable) class labels
    :return: the label with the highest count; ties go to the label that
             appears first in the list (stable sort)
    """
    countMap = {}
    for t in typeLables:
        countMap[t] = countMap.get(t, 0) + 1
    ranked = sorted(countMap.items(), key=operator.itemgetter(1), reverse=True)
    # Bug fix: the original printed the winner but returned None,
    # so tree_generate() produced None leaves.
    return ranked[0][0]

if __name__ == "__main__":
    # Guarded so importing this module no longer reads the dataset
    # and opens a plot window as a side effect.
    propertys = read("xigua.txt")
    label = ["色泽", "根蒂", "敲声", "纹理", "脐部", "触感"]
    tree = tree_generate(propertys, label)
    dtPlot.createPlot(tree)