# coding=utf-8
from math import log
import operator


# calculate the shannon entropy
def cal_shannon_ent(data_set):
    """Return the Shannon entropy of data_set.

    Each row of data_set is a feature vector whose last element is the
    class label.  Entropy is computed over the label distribution:
    H = -sum(p * log2(p)) over the distinct labels.
    """
    num_len = len(data_set)
    label_count = {}
    for vec in data_set:
        label = vec[-1]
        # Count one occurrence of this label (0 if unseen so far);
        # dict.get replaces the not-in-keys + manual-init dance.
        label_count[label] = label_count.get(label, 0) + 1

    shannon_ent = 0.0
    for count in label_count.values():
        # Probability that a randomly drawn row carries this label.
        prob = float(count) / num_len
        shannon_ent -= prob * log(prob, 2)

    return shannon_ent


def create_data_set():
    """Return a tiny toy data set and its feature names.

    Each row holds two binary features followed by a 'yes'/'no' class
    label; the second return value names the two feature columns.
    """
    samples = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    feature_names = ['no surfacing', 'flippers']
    return samples, feature_names


# split a data_set, need feature's index and feature's val.
def split_data_set(data_set, axis, val):
    """Return the rows whose feature at index `axis` equals `val`,
    with that feature column removed from every returned row."""
    return [row[:axis] + row[axis + 1:]
            for row in data_set
            if row[axis] == val]


# choose a best feature to split
def choose_feature(data_set):
    """Return the index of the feature whose split yields the largest
    information gain, or -1 if no split improves on zero gain."""
    # The last column is the class label, not a feature.
    n_features = len(data_set[0]) - 1

    # Entropy of the unsplit data set; gains are measured against it.
    base_ent = cal_shannon_ent(data_set)
    total = float(len(data_set))

    best_gain, best_idx = 0.0, -1
    for idx in range(n_features):
        # Weighted entropy of the partitions this feature induces.
        split_ent = 0.0
        for value in {row[idx] for row in data_set}:
            subset = split_data_set(data_set, idx, value)
            split_ent += (len(subset) / total) * cal_shannon_ent(subset)

        # Information gain = reduction in entropy from this split.
        gain = base_ent - split_ent
        if gain > best_gain:
            best_gain, best_idx = gain, idx
    return best_idx


# select the most frequent element in list and return its val.
def majority_cnt(class_list):
    """Return the most common element of class_list.

    Ties go to the element seen first.  Fixed for Python 3: the old
    code called dict.iteritems(), which no longer exists; the winner is
    now picked with max() over the count table, which matches the
    original stable descending sort (first key to reach the top count
    wins in both versions).
    """
    class_count = {}
    for vote in class_list:
        class_count[vote] = class_count.get(vote, 0) + 1
    return max(class_count, key=class_count.get)


def create_decision_tree(data_set, labels):
    """Recursively build an ID3 decision tree.

    data_set rows are feature values followed by a class label; labels
    names each feature column.  NOTE: labels is mutated in place (the
    chosen feature's name is deleted) — pass a copy if the caller still
    needs the full list.

    Returns either a bare class label (leaf) or a nested dict of the
    form {feature_name: {feature_value: subtree_or_label}}.
    """
    class_list = [row[-1] for row in data_set]

    # all classes are same, stop.
    if class_list.count(class_list[0]) == len(class_list):
        return class_list[0]

    # No features remain (rows hold only the label): majority vote.
    # Bug fix: the old test `len(data_set) == 1` checked the number of
    # ROWS instead of the number of remaining COLUMNS, so a single
    # impure row (or exhausted features with several rows) was handled
    # incorrectly.
    if len(data_set[0]) == 1:
        return majority_cnt(class_list)

    # choose the feature to classify this time
    best_feature = choose_feature(data_set)
    best_feature_label = labels[best_feature]

    # decision tree to return.
    decision_tree = {best_feature_label: {}}

    # remove the selected feature in labels.
    del labels[best_feature]

    # Branch once per distinct value of the selected feature.
    feature_val_set = set(row[best_feature] for row in data_set)
    for val in feature_val_set:
        sub_labels = labels[:]  # protect base labels.
        decision_tree[best_feature_label][val] = create_decision_tree(
            split_data_set(data_set, best_feature, val), sub_labels)

    return decision_tree


# classify by a decision tree.
# this tree should be already trained.
def classify(tree, labels, vec):
    """Walk a trained decision tree and return the class for vec.

    tree   -- nested dict as built by create_decision_tree.
    labels -- feature names, used to map tree nodes onto vec indices.
    vec    -- feature values of the sample being classified.

    Returns None when vec's feature value has no branch in the tree.
    Fixed for Python 3: dict.keys() is a view and cannot be indexed
    with [0], so the root feature name is taken via next(iter(...)).
    """
    first_str = next(iter(tree))
    second_dic = tree[first_str]
    feat_index = labels.index(first_str)
    class_label = None
    for key, subtree in second_dic.items():
        if vec[feat_index] == key:
            if isinstance(subtree, dict):
                # Internal node: keep descending.
                class_label = classify(subtree, labels, vec)
            else:
                # Leaf: the stored class label.
                class_label = subtree
    return class_label


# store a tree in disk.
def store_tree(tree, filename):
    """Serialize tree to filename with pickle.

    Fixed for Python 3: pickle writes bytes, so the file must be
    opened in binary mode ("wb", not "w"); the with-block guarantees
    the handle is closed even if dump raises.
    """
    import pickle
    with open(filename, "wb") as fw:
        pickle.dump(tree, fw)


# get a tree from disk.
def grab_tree(filename):
    """Load and return a pickled tree from filename.

    Fixed for Python 3: the pickle stream is binary, so the file must
    be opened with "rb"; the with-block also closes the handle the
    original version leaked.
    NOTE(security): pickle.load runs arbitrary code from the file —
    only read trees this program stored itself.
    """
    import pickle
    with open(filename, "rb") as fr:
        return pickle.load(fr)
