# Project: Machine learning-decision tree
# Author: Lyndon
# date: 2015/10/27

from math import log
from _ast import operator  # BUG: this is the AST node class, not the operator module
import operator  # shadows the erroneous _ast import with the real stdlib module
from treePlot import createPlot

# process training data set
# input: path to the data file
# output: data_set, attribute

def proData(path):
    """Load a tab-separated training file.

    The first line is taken as the attribute (column) names; every
    remaining line is one training sample.

    input:  path - location of the data file
    output: dataset   - list of sample rows (lists of strings)
            attribute - list of attribute names from the header row
    """
    # `with` guarantees the handle is closed (the original leaked it)
    with open(path) as fileset:
        dataset = [data.strip().split('\t') for data in fileset]
    attribute = dataset[0]
    del dataset[0]
    return dataset, attribute

# calculate the information entropy
# input: dataset
# output: entropy

def calcEntropy(dataset):
    """Compute the Shannon entropy of the class labels in *dataset*.

    input:  dataset - list of rows; the last element of each row is the
                      class label
    output: entropy in bits (log base 2); 0.0 for an empty dataset
    """
    from collections import Counter  # local import keeps this edit self-contained

    numEntries = len(dataset)
    if numEntries == 0:
        return 0.0  # original divided by zero on an empty dataset
    # Counter replaces the manual "if key not in dict" counting loop
    labelCounts = Counter(item[-1] for item in dataset)
    entropy = 0.0
    for count in labelCounts.values():
        prob = float(count) / numEntries
        entropy -= prob * log(prob, 2)
    return entropy

# split data based on the value of one attribute
# input: dataset, axis (feature column index), value (feature value to match)
# output: the matching rows with that feature column removed
def splitData(dataset, axis, value):
    """Select the rows of *dataset* whose feature at index *axis* equals
    *value*, returning copies of those rows with that feature removed.
    The input rows are not modified.
    """
    return [row[:axis] + row[axis + 1:]
            for row in dataset
            if row[axis] == value]
    
# calculate the entropy of different features
# input: dataset
# output: best feature
def selectBestFeature(dataset):
    """Pick the feature index with the largest information gain (ID3).

    input:  dataset - rows whose last element is the class label
    output: index of the best feature, or -1 if no feature yields a
            positive information gain
    """
    featureCount = len(dataset[0]) - 1
    baseEntropy = calcEntropy(dataset)
    bestGain, bestIndex = 0.0, -1
    for index in range(featureCount):
        # every distinct value this feature takes in the dataset
        values = {row[index] for row in dataset}
        # expected entropy after splitting on this feature
        weightedEntropy = 0.0
        for val in values:
            subset = splitData(dataset, index, val)
            weight = float(len(subset)) / len(dataset)
            weightedEntropy += weight * calcEntropy(subset)
        gain = baseEntropy - weightedEntropy
        if gain > bestGain:
            bestGain, bestIndex = gain, index
    return bestIndex

# train decision tree ID3
# input: dataset, attribute
# output: decision tree
def createTreeID3(dataset, attributes):
    """Recursively build an ID3 decision tree.

    input:  dataset    - rows whose last element is the class label
            attributes - feature names; NOTE: mutated (best feature removed)
    output: a nested dict {attribute: {value: subtree-or-label}}, or a
            class label when the node is a leaf
    """
    from collections import Counter  # local import keeps this fix self-contained

    classList = [example[-1] for example in dataset]
    # stop splitting when all samples share one label
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # stop splitting when no attributes remain: return the majority label.
    # Counter.most_common replaces the Py2-only dict.iteritems()/operator
    # sort, which crashed under Python 3 (and `operator` was mis-imported
    # from _ast at the top of the file).
    if len(dataset[0]) == 1:
        return Counter(classList).most_common(1)[0][0]
    bestFeature = selectBestFeature(dataset)
    bestAttribute = attributes[bestFeature]
    myTree = {bestAttribute: {}}
    del attributes[bestFeature]
    featureValues = [example[bestFeature] for example in dataset]  # child-node training data
    for value in set(featureValues):
        subattributes = attributes[:]  # copy so sibling branches see the same list
        myTree[bestAttribute][value] = createTreeID3(splitData(dataset, bestFeature, value), subattributes)
    return myTree

# calculate the information gain ratio for different features
# input: dataset
# output: best feature
def selectBestFeature_C4(dataset):
    """Pick the feature index with the largest information gain ratio (C4.5).

    input:  dataset - rows whose last element is the class label
    output: index of the best feature, or -1 if none qualifies
    """
    numFeatures = len(dataset[0]) - 1
    baseEntropy = calcEntropy(dataset)
    bestInfoGainRatio = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        uniqueVals = set(features[i] for features in dataset)  # distinct values of feature i
        newEntropy = 0.0
        splitEntropy = 0.0
        for value in uniqueVals:
            subDataSet = splitData(dataset, i, value)
            prob = float(len(subDataSet)) / len(dataset)
            newEntropy += prob * calcEntropy(subDataSet)
            splitEntropy -= prob * log(prob, 2)
        # a feature with a single distinct value has split entropy 0; the
        # original divided by zero here -- skip such features instead
        if splitEntropy == 0.0:
            continue
        infoGainRatio = (baseEntropy - newEntropy) / splitEntropy
        if infoGainRatio > bestInfoGainRatio:
            bestInfoGainRatio = infoGainRatio
            bestFeature = i
    return bestFeature

# train decision tree C4.5
# input: dataset, attribute
# output: decision tree
def createTreeC4(dataset, attributes):
    """Recursively build a C4.5 decision tree (gain-ratio splitting).

    input:  dataset    - rows whose last element is the class label
            attributes - feature names; NOTE: mutated (best feature removed)
    output: a nested dict {attribute: {value: subtree-or-label}}, or a
            class label when the node is a leaf
    """
    from collections import Counter  # local import keeps this fix self-contained

    classList = [example[-1] for example in dataset]
    # stop splitting when all samples share one label
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # stop splitting when no attributes remain: return the majority label.
    # Counter.most_common replaces the Py2-only dict.iteritems()/operator
    # sort, which crashed under Python 3 (and `operator` was mis-imported
    # from _ast at the top of the file).
    if len(dataset[0]) == 1:
        return Counter(classList).most_common(1)[0][0]
    bestFeature = selectBestFeature_C4(dataset)
    bestAttribute = attributes[bestFeature]
    myTree = {bestAttribute: {}}
    del attributes[bestFeature]
    featureValues = [example[bestFeature] for example in dataset]  # child-node training data
    for value in set(featureValues):
        subattributes = attributes[:]  # copy so sibling branches see the same list
        myTree[bestAttribute][value] = createTreeC4(splitData(dataset, bestFeature, value), subattributes)
    return myTree



def classify(inputtree, featlabels, testvec):
    """Walk a decision tree to predict the class of one sample.

    input:  inputtree  - nested dict produced by createTreeID3/createTreeC4
            featlabels - attribute name for each position of testvec
            testvec    - feature values of the sample to classify
    output: predicted class label, or None when the tree has no branch for
            the sample's value (the original raised UnboundLocalError)
    """
    firststr = list(inputtree.keys())[0]
    seconddict = inputtree[firststr]
    featindex = featlabels.index(firststr)
    classlabel = None  # fallback for a feature value unseen in training
    for key, subtree in seconddict.items():
        if testvec[featindex] == key:
            # isinstance is the idiomatic form of type(...).__name__ == 'dict'
            if isinstance(subtree, dict):
                classlabel = classify(subtree, featlabels, testvec)
            else:
                classlabel = subtree
            break
    return classlabel


# main function: train both trees on the lenses data, plot them, and
# classify one sample
if __name__ == "__main__":
    # raw string: the original literal relied on '\P', '\regex' etc. not
    # being escape sequences, which warns/errors on modern Python
    path = r'D:\PythonWork\数学建模国赛算法准备\规划分类\决策树\Decision_tree\lenses.txt'
    dataset, attributes = proData(path)
    print(dataset, attributes)
    myTreeID3 = createTreeID3(dataset, attributes)
    # tree building mutates dataset/attributes, so reload before C4.5
    dataset, attributes = proData(path)
    myTreeC4 = createTreeC4(dataset, attributes)
    print(str(myTreeID3))
    createPlot(myTreeID3)
    print(str(myTreeC4))
    createPlot(myTreeC4)
    labels = ['age', 'prescriptor', 'astigmatic', 'tearRate']  # typo 'lable' fixed
    testvec = ['young', 'myope', 'no', 'reduced']
    print('result:', classify(myTreeC4, labels, testvec))