__author__ = 'HEEYOUNG'

import util
import classificationMethod
import math
import types
import copy
import collections

class Tree(object):
    """A binary decision-tree node.

    For internal nodes, ``feature`` holds the splitting feature and
    ``left``/``right`` hold child ``Tree`` instances; ``id3_training``
    re-purposes the same slots for leaves (label in ``feature``, sample
    count in ``left``, sentinel 999 in ``right``).
    """

    def __init__(self):
        # Every slot starts unset; training fills them in.
        self.left = self.right = self.feature = None

class ID3Classifier(classificationMethod.ClassificationMethod):
  """
  Decision-tree classifier trained with the ID3 algorithm.

  Training greedily grows a binary tree by always splitting on the feature
  with maximum information gain, then repeatedly applies reduced-error
  pruning against the held-out validation set until no prune improves
  validation accuracy.

  Tree encoding (see Tree): an internal node stores the splitting feature
  in ``feature`` and child trees in ``left``/``right``; a leaf stores the
  predicted label in ``feature``, its sample count in ``left``, and the
  sentinel 999 in ``right``.
  """

  def __init__(self, legalLabels):
    """
    For digits dataset, the set of legal labels will be 0,1,..,9
    For faces dataset, the set of legal labels will be 0 (non-face) or 1 (face)
    """
    self.legalLabels = legalLabels

  def Entropy(self, Data):
    """Return the Shannon entropy (in bits) of the count vector ``Data``."""
    total = sum(Data)   # hoisted: the original recomputed sum(Data) per element (O(n^2))
    if total == 0:
        return 0
    H_x = 0
    for count in Data:
        P_i = float(count) / total
        if P_i != 0:    # p*log2(p) -> 0 as p -> 0, so zero bins contribute nothing
            H_x -= P_i * math.log(P_i, 2)
    return H_x

  def CntEvnt(self, EvntList):
    """Return a histogram of ``EvntList`` indexed by label value."""
    NumOfEvnt = [0] * len(self.legalLabels)
    for label in EvntList:          # iterate values directly instead of range(len(...))
        NumOfEvnt[label] += 1
    return NumOfEvnt

  def GetSubset(self, Labels, Data, feature):
    """Partition the data points on the given binary feature.

    Returns ``[subsetData, subsetLabel]`` where index 0 holds the points
    whose feature value is 0 and index 1 holds all the others.
    """
    subsetData = [[], []]
    subsetLabel = [[], []]
    for datum, label in zip(Data, Labels):
        branch = 0 if datum[feature] == 0 else 1
        subsetData[branch].append(datum)
        subsetLabel[branch].append(label)
    return [subsetData, subsetLabel]

  def id3_training(self, Labels, Data):
    """Recursively build an ID3 decision tree for (Labels, Data)."""
    id3_tree = Tree()

    NumOfEvnt = self.CntEvnt(Labels)
    H_x = self.Entropy(NumOfEvnt)
    total = float(sum(NumOfEvnt))   # hoisted out of the gain loop

    # Information gain of each feature: H(S) - sum_v |S_v|/|S| * H(S_v)
    InfoGain = [0] * len(self.features)
    for i, feature in enumerate(self.features):
        subsetData, subsetLabel = self.GetSubset(Labels, Data, feature)
        Plus = self.CntEvnt(subsetLabel[1])
        Minus = self.CntEvnt(subsetLabel[0])
        InfoGain[i] = H_x - (sum(Plus) / total * self.Entropy(Plus)
                             + sum(Minus) / total * self.Entropy(Minus))

    # Split on the feature with maximum gain.
    id3_tree.feature = self.features[InfoGain.index(max(InfoGain))]
    subsetData, subsetLabel = self.GetSubset(Labels, Data, id3_tree.feature)

    if len(subsetLabel[0]) == 0:
        # Every point went right: make this a leaf.
        # NOTE(review): the label is taken from the first point, which
        # assumes the subset is pure here — confirm against the data.
        id3_tree.left = len(subsetData[1])      # sample count, used by pruning
        id3_tree.feature = subsetLabel[1][0]    # predicted label
        id3_tree.right = 999                    # leaf sentinel
    elif len(subsetLabel[1]) == 0:
        # Every point went left: make this a leaf (same encoding).
        id3_tree.left = len(subsetData[0])
        id3_tree.feature = subsetLabel[0][0]
        id3_tree.right = 999
    else:
        # Both sides non-empty: grow child subtrees.
        id3_tree.left = self.id3_training(subsetLabel[0], subsetData[0])
        id3_tree.right = self.id3_training(subsetLabel[1], subsetData[1])
    return id3_tree

  def id3_pruning(self, id3_tree, validationLabels, validationData):
    """One pass of reduced-error pruning.

    Collapses an internal node whose two children are both leaves whenever
    replacing it with a single leaf improves validation accuracy.  Returns
    the number of nodes pruned under ``id3_tree`` (0 when nothing changed,
    which is the caller's stop condition).
    """
    flag = [0, 0]
    # Children are leaves when their `feature` holds a label rather than a feature key.
    if id3_tree.left.feature not in self.features and id3_tree.right.feature not in self.features:
        guesses = self.classify(validationData)
        BfPrn = [guesses[i] == validationLabels[i] for i in range(len(validationLabels))].count(True)
        tmp = copy.deepcopy(id3_tree)   # snapshot so an unhelpful prune can be rolled back
        f = id3_tree.feature
        if id3_tree.left.left != id3_tree.right.left:
            # Children cover different sample counts: try the majority child's label.
            if id3_tree.left.left > id3_tree.right.left:
                id3_tree.feature = id3_tree.left.feature
            else:
                id3_tree.feature = id3_tree.right.feature
            guesses = self.classify(validationData)
            AfPrn = [guesses[i] == validationLabels[i] for i in range(len(validationLabels))].count(True)
            if BfPrn >= AfPrn:
                # No improvement: restore the snapshot and report "unchanged".
                id3_tree.feature = tmp.feature
                id3_tree.left = tmp.left
                id3_tree.right = tmp.right
                return 0
            else:
                # print() call: the original py2 print statement is a SyntaxError on py3
                print('{0} is pruned'.format(f))
        else:
            # Tie on sample counts: evaluate both candidate labels.
            id3_tree.feature = id3_tree.left.feature
            guesses = self.classify(validationData)
            AfPrn_1 = [guesses[i] == validationLabels[i] for i in range(len(validationLabels))].count(True)
            id3_tree.feature = id3_tree.right.feature
            guesses = self.classify(validationData)
            AfPrn_2 = [guesses[i] == validationLabels[i] for i in range(len(validationLabels))].count(True)
            if BfPrn >= AfPrn_1 and BfPrn >= AfPrn_2:
                # Neither candidate helps: roll back.
                id3_tree.feature = tmp.feature
                id3_tree.left = tmp.left
                id3_tree.right = tmp.right
                return 0
            elif AfPrn_1 > AfPrn_2:
                id3_tree.feature = id3_tree.left.feature
                print('{0} is pruned'.format(f))
            else:
                # `feature` already holds the right child's label from the trial above.
                print('{0} is pruned'.format(f))
        # Commit the prune: this node becomes a leaf with the merged count.
        id3_tree.left = id3_tree.left.left + id3_tree.right.left
        id3_tree.right = 999
        return 1
    else:
        # At least one child is still an internal node: recurse into it.
        if id3_tree.left.feature in self.features:
            flag[0] = self.id3_pruning(id3_tree.left, validationLabels, validationData)
        if id3_tree.right.feature in self.features:
            flag[1] = self.id3_pruning(id3_tree.right, validationLabels, validationData)
    return sum(flag)

  def id3_classify(self, id3_tree, Data):
    """Walk the tree for a single datum and return the leaf's label."""
    if id3_tree.feature in self.features:
        # Internal node: descend on this datum's value for the split feature.
        if Data[id3_tree.feature] == 0:
            return self.id3_classify(id3_tree.left, Data)
        return self.id3_classify(id3_tree.right, Data)
    # Leaf: `feature` holds the predicted label.
    return id3_tree.feature

  def train(self, trainingData, trainingLabels, validationData, validationLabels):
    """
    This is the supervised training function for the classifier.  Two sets of
    labeled data are passed in: a large training set and a small validation set.

    The training set drives tree construction (ID3); the validation set
    drives reduced-error pruning.

    To make the classifier generic to multiple problems, the data should be
    represented as lists of Counters containing feature descriptions and
    their counts.
    """
    # Collect every feature key seen anywhere in the training data.
    self.features = list(set(f for datum in trainingData for f in datum.keys()))
    self.id3_tree = self.id3_training(trainingLabels, trainingData)

    # Prune repeatedly until a full pass removes nothing.
    while True:
        if self.id3_pruning(self.id3_tree, validationLabels, validationData) == 0:
            break

  def classify(self, data):
    """
    This function returns a list of labels, each drawn from the set of legal
    labels provided to the classifier upon construction.

    To make the classifier generic to multiple problems, the data should be
    represented as lists of Counters containing feature descriptions and
    their counts.
    """
    return [self.id3_classify(self.id3_tree, datum) for datum in data]


