#!/usr/bin/env python3

"""
Decision Trees
https://en.wikipedia.org/wiki/Decision_tree_learning

The problem of learning an optimal decision tree is known to be
NP-complete under several aspects of optimality and even for
simple concepts. Consequently, practical decision-tree learning
algorithms are based on heuristics such as the greedy algorithm
where locally-optimal decisions are made at each node. Such
algorithms cannot guarantee to return the globally-optimal
decision tree.
"""

from collections import Counter
from math import log

# This DecisionTree is supposed to operate on data (x1, x2, .., xn, Y)
# where each x indicates a feature and Y is considered the label.
#
# e.g.
# dt = DecisionTree(dataset, feature_names, False)
# label = dt.apply(target_data)
# dt.train(dataset2)
# label2 = dt.apply(target_data)
#
class DecisionTree(object):
    """ID3-style decision tree classifier.

    Operates on rows of the form (x1, x2, .., xn, Y), where each x is a
    feature value and Y is the label.  The trained tree is stored as nested
    dicts: an internal node looks like {feature: {value: subtree, ...}} and
    a leaf is a plain label.
    """

    def __init__(self, dataset=None, feature_names=None, strict=True):
        """
        dataset: dataset to construct the tree from; if None, call train() later
        feature_names: optional human-readable names used as tree keys
                       instead of feature indices
        strict: if features run out before labels are pure, error out if True
                and take a majority vote if False
        """
        self.dataset = dataset
        self.feature_names = feature_names
        self.strict = strict
        self.tree = None
        if self.dataset:
            # Pass strict explicitly: train()'s own default (True) used to
            # silently override a strict=False given to the constructor.
            self.train(self.dataset, self.feature_names, self.strict)

    def _validate(self):
        """Reject datasets this implementation cannot handle."""
        if not self.dataset:
            raise Exception("dataset is empty")
        # dataset[-1] is the label; dict labels are rejected because apply()
        # distinguishes leaves from internal nodes by "is it a dict".
        if isinstance(self.dataset[0][-1], dict):
            raise Exception("This implementation of DecisionTree does not support label as dict")

    @staticmethod
    def calcEntropy(dataset):
        """
        Calculate the Shannon entropy of the labels in dataset.
        Higher entropy indicates more mixed-up data.
        H = -(p1*log(p1, 2) + p2*log(p2, 2) + ... + pn*log(pn, 2))
        dataset: rows (x1, x2, .., xn, Y); only the label column Y is used.
        Returns 0.0 for an empty dataset.
        """
        datalen = len(dataset)
        if datalen == 0:
            return 0.0
        label_counts = Counter(data[-1] for data in dataset)
        entropy = 0.0
        for count in label_counts.values():
            prob = count / datalen
            entropy -= prob * log(prob, 2)
        return entropy

    def _split_dataset(self, dataset, idx, value):
        """Return rows with data[idx] == value, with column idx removed."""
        return [data[:idx] + data[idx + 1:] for data in dataset if data[idx] == value]

    def _choose_best_feature_to_split(self, dataset):
        """Return the index of the feature with the highest information gain."""
        datalen = len(dataset)
        if datalen <= 1:
            # A bare `raise` with no active exception (as before) would only
            # produce an opaque RuntimeError; raise a descriptive error.
            raise Exception("need at least two rows to choose a split")
        num_features = len(dataset[0]) - 1
        base_entropy = self.calcEntropy(dataset)
        best_info_gain = 0.0
        best_feature = -1
        # information gain = base entropy - weighted entropy of the splits:
        # new_entropy = prob1 * H(split1) + prob2 * H(split2) + ...
        for feature_idx in range(num_features):
            feature_values = {data[feature_idx] for data in dataset}
            new_entropy = 0.0
            for fv in feature_values:
                subset = self._split_dataset(dataset, feature_idx, fv)
                prob = len(subset) / datalen
                new_entropy += prob * self.calcEntropy(subset)
            info_gain = base_entropy - new_entropy
            if info_gain > best_info_gain:
                best_info_gain = info_gain
                best_feature = feature_idx
        return best_feature

    @staticmethod
    def _majority_vote(labels):
        """Return the label with the highest count in labels.

        Ties resolve to the first-encountered label, matching the behavior
        of the previous stable-sort implementation.
        """
        return Counter(labels).most_common(1)[0][0]

    def _create_tree(self, dataset, feature_names=None):
        """Recursively construct the decision tree (nested dicts, labels at leaves)."""
        labels = [data[-1] for data in dataset]
        # every row carries the same label -> this is a leaf
        if len(set(labels)) == 1:
            return labels[0]
        # features exhausted but labels still mixed: vote or error out
        if len(dataset[0]) == 1:
            if self.strict:
                raise Exception("dataset does not have enough features to split")
            return self._majority_vote(labels)

        best_feature_idx = self._choose_best_feature_to_split(dataset)
        best_feature = feature_names[best_feature_idx] if feature_names else best_feature_idx
        my_tree = {best_feature: {}}
        new_fnames = None
        if feature_names:
            # drop the consumed feature name for the subtrees
            new_fnames = feature_names[:best_feature_idx] + feature_names[best_feature_idx + 1:]
        for fv in {data[best_feature_idx] for data in dataset}:
            subdataset = self._split_dataset(dataset, best_feature_idx, fv)
            my_tree[best_feature][fv] = self._create_tree(subdataset, new_fnames)
        return my_tree

    def train(self, dataset, feature_names=None, strict=True):
        """Construct the decision tree from dataset (replaces any previous tree)."""
        self.dataset = dataset
        self.feature_names = feature_names
        self.strict = strict
        self._validate()
        self.tree = self._create_tree(dataset, feature_names)

    def apply(self, target_data):
        """Apply the decision tree to target_data, return the predicted label."""
        if not self.tree:
            raise Exception("Decision Tree hasn't been constructed.")
        node = self.tree
        # Walk from the root: leaves are the only non-dict nodes.  This no
        # longer needs self.dataset, so a tree loaded via restore() works,
        # and dict labels are no longer a special problem for detection.
        while isinstance(node, dict):
            # each internal node has exactly one key: the feature it tests
            key = next(iter(node))
            # map the feature back to its position in target_data
            # NOTE(review): without feature_names the key is an index into
            # the *reduced* dataset at that depth, so unnamed trees with
            # several features may consult the wrong column (pre-existing
            # limitation of the tree representation).
            idx = self.feature_names.index(key) if self.feature_names else key
            node = node[key][target_data[idx]]
        return node

    def show_tree(self):
        """Print the raw nested-dict representation of the tree."""
        if not self.tree:
            raise Exception("Decision Tree hasn't been constructed.")
        print(self.tree)

    def plot_tree(self):
        """Render the tree with matplotlib annotations (opens a window)."""
        if not self.tree:
            raise Exception("Decision Tree hasn't been constructed.")
        # imported lazily so the class works without matplotlib installed
        import matplotlib.pyplot as plt

        decisionNode = dict(boxstyle="sawtooth", fc="0.8")
        leafNode = dict(boxstyle="round4", fc="0.8")
        arrow_args = dict(arrowstyle="<-")

        def getNumLeafs(myTree):
            # leaves determine the horizontal span of the plot
            numLeafs = 0
            if not len(myTree) == 1:
                raise Exception("the dict representing tree has more than one element")
            root = list(myTree.keys())[0]
            root_value = myTree[root]
            for key in root_value.keys():
                if type(root_value[key]).__name__ == 'dict':
                    numLeafs += getNumLeafs(root_value[key])
                else:
                    numLeafs += 1
            return numLeafs

        def getTreeDepth(myTree):
            # depth determines the vertical span of the plot
            maxDepth = 0
            if not len(myTree) == 1:
                raise Exception("the dict representing tree has more than one element")
            root = list(myTree.keys())[0]
            root_value = myTree[root]
            for key in root_value.keys():
                if type(root_value[key]).__name__ == 'dict':
                    thisDepth = 1 + getTreeDepth(root_value[key])
                else:
                    thisDepth = 1
                if thisDepth > maxDepth:
                    maxDepth = thisDepth
            return maxDepth

        def plotNode(nodeTxt, centerPt, parentPt, nodeType):
            createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',
                                    xytext=centerPt, textcoords='axes fraction',
                                    va='center', ha='center', bbox=nodeType,
                                    arrowprops=arrow_args)

        def plotMidText(cntrPt, parentPt, txtString):
            # label the edge halfway between parent and child
            xMid = (parentPt[0] - cntrPt[0])/2.0 + cntrPt[0]
            yMid = (parentPt[1] - cntrPt[1])/2.0 + cntrPt[1]
            createPlot.ax1.text(xMid, yMid, txtString)

        def plotTree(myTree, parentPt, nodeTxt):
            # xOff/yOff/totalW/totalD are function attributes seeded by createPlot
            numLeafs = getNumLeafs(myTree)
            root = list(myTree.keys())[0]
            cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW,
                      plotTree.yOff)
            plotMidText(cntrPt, parentPt, nodeTxt)
            plotNode(root, cntrPt, parentPt, decisionNode)
            root_value = myTree[root]
            plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD
            for key in root_value.keys():
                if type(root_value[key]).__name__ == 'dict':
                    plotTree(root_value[key], cntrPt, str(key))
                else:
                    plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
                    plotNode(root_value[key], (plotTree.xOff, plotTree.yOff),
                             cntrPt, leafNode)
                    plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
            plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD

        def createPlot(myTree):
            fig = plt.figure(1, facecolor='white')
            fig.clf()
            axprops = dict(xticks=[], yticks=[])
            createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)
            plotTree.totalW = float(getNumLeafs(myTree))
            plotTree.totalD = float(getTreeDepth(myTree))
            plotTree.xOff = -0.5/plotTree.totalW
            plotTree.yOff = 1.0
            plotTree(myTree, (0.5, 1.0), '')
            plt.show()

        createPlot(self.tree)

    def store(self, filepath):
        """Store the generated decision tree to filepath (pickle format)."""
        import pickle
        if not self.tree:
            raise Exception("decision tree not generated yet!")
        with open(filepath, 'wb') as f:
            pickle.dump(self.tree, f)

    def restore(self, filepath):
        """Restore the decision tree from filepath.

        NOTE(review): pickle is unsafe on untrusted input; only load files
        this class itself produced.
        """
        import pickle
        with open(filepath, 'rb') as f:
            self.tree = pickle.load(f)
        
if __name__ == "__main__":
    # Demo / self-test datasets; each row is (features..., label).
    sample_data1 = [[1, 1, 'yes'],
                    [1, 1, 'yes'],
                    [1, 0, 'no'],
                    [0, 1, 'no'],
                    [0, 1, 'no']]

    sample_data2 = [[1, 1, 'maybe'],
                    [1, 1, 'yes'],
                    [1, 0, 'no'],
                    [0, 1, 'no'],
                    [0, 1, 'no']]

    sample_data3 = [[1, 1, 'yes'],
                    [1, 1, 'yes'],
                    [1, 0, 'no'],
                    [0, 0, 'no'],
                    [0, 0, 'no']]

    sample_data4 = [[0, 'yes'],
                    [0, 'yes'],
                    [0, 'no'],
                    [1, 'no'],
                    [1, 'no']]

    sample_data5 = [[1, 1, 1, 'maybe'],
                    [1, 1, 0, 'yes'],
                    [1, 0, 0, 'no'],
                    [0, 1, 0, 'no'],
                    [0, 1, 0, 'no'],
                    [0, 0, 1, 'maybe'],
                    [1, 0, 1, 'maybe'],
                    [0, 0, 0, 'no']]

    # three labels (data2) are more mixed-up than two (data1)
    dt = DecisionTree()
    assert dt.calcEntropy(sample_data1) < dt.calcEntropy(sample_data2)
    # in sample_data3 the second feature perfectly predicts the label
    assert dt._choose_best_feature_to_split(sample_data3) == 1

    dt_test = DecisionTree(sample_data3, ["_notused", "determine"], strict=True)
    dt_test.show_tree()
    dt_test.train(sample_data1, ["no surfacing", "flippers"], strict=False)
    dt_test.show_tree()

    dt_test.train(sample_data4, strict=False)
    dt_test.show_tree()

    # strict training on exhausted features must fail...
    try:
        dt_test.train(sample_data4, strict=True)
    except Exception as e:
        print("expected exception here: %s" % e)
    finally:
        # ...while the previously built tree survives the failed training
        dt_test.show_tree()

    label = dt_test.apply([0])
    print(label)
    assert label == 'yes'

    dt_test.train(sample_data1, ["no surfacing", "flippers"])
    label = dt_test.apply([1, 1])
    dt_test.show_tree()
    print(label)

    dt_test.train(sample_data5, ['no surfacing', 'fippers', 'not sure'], strict=True)

    # round-trip the tree through pickle, then display it (interactive)
    dt_test.store('/tmp/decision_tree')
    dt_test2 = DecisionTree()
    dt_test2.restore('/tmp/decision_tree')
    dt_test2.plot_tree()
