import copy
import sets

# Decision node in the decision tree
# Decision node in the decision tree
class DecisionNode():
  """A single node of a decision tree.

  Internal nodes store the attribute they split on and one child per
  observed attribute value (kept in two parallel lists); leaf nodes
  store only an output label.
  """

  def __init__(self):
    self.attribute = None       # attribute this node splits on (None at leaves)
    self.attribute_vals = []    # attribute_vals[i] leads to children[i]
    self.children = []          # child DecisionNodes, parallel to attribute_vals
    self.label = None           # prediction returned when no split applies

  # Set the attribute for the split at this node
  def set_attribute(self, attribute):
    self.attribute = attribute

  # Set the output label
  def set_label(self, label):
    self.label = label

  # Add a child reached when the split attribute equals attribute_value
  def add_child(self, decision_node, attribute_value):
    self.attribute_vals.append(attribute_value)
    self.children.append(decision_node)

  # Follow the decision path for input_vector and return the label of
  # the node where the walk stops.
  def decide(self, input_vector):
    # Descend only when the input provides this node's split attribute
    # AND the value was seen during training. (The original called
    # .keys() needlessly and raised ValueError on unseen values; now we
    # fall back to this node's label instead.)
    if self.attribute in input_vector:
      attribute_value = input_vector[self.attribute]
      if attribute_value in self.attribute_vals:
        attribute_value_idx = self.attribute_vals.index(attribute_value)
        return self.children[attribute_value_idx].decide(input_vector)
    # Else return the label set at this node.
    return self.label

  # Pretty-print the subtree rooted here, indenting one level per depth.
  def print_tree(self, level_prefix):
    if len(self.children) == 0:
      # Leaf: show the prediction. print(...) works on Python 2 and 3.
      display_str = 'Prediction: "%s"' % self.label
      print(level_prefix + display_str)
      return

    for idx in range(len(self.children)):
      print(level_prefix + self.attribute + ' == ' + self.attribute_vals[idx])
      self.children[idx].print_tree(level_prefix + '    ')

# Decision tree learning/predicting algorithm (ID3-style).
class DecisionTree():
  """Learns a decision tree from labelled examples and predicts labels.

  examples: list of (feature_dict, label) pairs.
  attributes: dict mapping attribute name -> iterable of possible values.
  default: label to predict when no examples are available.
  attribute_chooser: object exposing choose_best_attribute(examples,
      attributes), which picks the attribute to split on.
  """

  def __init__(self, examples, attributes, default, attribute_chooser):
    self.examples = examples
    self.attributes = attributes
    self.default = default
    self.root_node = None
    self.attribute_chooser = attribute_chooser
    self.__learn()

  # Check if all examples share a single label.
  # Uses the builtin set (the deprecated `sets.Set` was removed in Python 3).
  def is_pure(self, examples):
    unique_labels = set(example[1] for example in examples)
    return len(unique_labels) == 1

  # Get the most frequent label among the examples (ties broken arbitrarily,
  # matching the original's arbitrary set-iteration tie-break).
  def majority_label(self, examples):
    labels = [example[1] for example in examples]
    return max(set(labels), key=labels.count)

  # Get the subset of examples that have the given value for the attribute.
  def get_subset(self, examples, attribute, attribute_value):
    return [example for example in examples
            if attribute in example[0]
            and example[0][attribute] == attribute_value]

  # Recursively learn the decision rules (classic ID3 recursion).
  def __learn_internal(self, examples, attributes, default):
    current_node = DecisionNode()
    if len(examples) == 0:
      # No examples reach this branch: fall back to the caller's default.
      current_node.set_label(default)
    elif self.is_pure(examples):
      # All examples agree: make a leaf with that label.
      current_node.set_label(examples[0][1])
    elif len(attributes) == 0:
      # Attributes exhausted but labels mixed: predict the majority.
      current_node.set_label(self.majority_label(examples))
    else:
      best_attribute = self.attribute_chooser.choose_best_attribute(
          examples, attributes)
      current_node.set_attribute(best_attribute)
      # Remove the chosen attribute for the subtrees; deepcopy so sibling
      # branches do not see each other's deletions.
      attribute_subset = copy.deepcopy(attributes)
      del attribute_subset[best_attribute]
      # BUG FIX: the original passed an uninitialized `majority_label`
      # (always None) as the child default, so empty-subset children
      # predicted None. ID3 passes the current node's majority label.
      majority = self.majority_label(examples)
      # Create a subtree for each value of the best attribute.
      for attribute_value in attributes[best_attribute]:
        subset_examples = self.get_subset(examples,
            best_attribute,
            attribute_value)
        child_node = self.__learn_internal(subset_examples,
            attribute_subset,
            majority)
        current_node.add_child(child_node, attribute_value)

    # Return the created tree
    return current_node

  # Learn the decision tree rules from the examples
  def __learn(self):
    self.root_node = self.__learn_internal(self.examples,
        self.attributes, self.default)

  # Decide the output label for the input vector
  def decide(self, input_vector):
    return self.root_node.decide(input_vector)

  # Print the tree
  def print_tree(self):
    self.root_node.print_tree("")
