# coding=utf-8
from math import log
import operator


def majority_cnt(class_list):
    """Return the most common class label in class_list.

    Used as a fallback when a leaf is still impure but no features
    remain to split on.  Ties are broken by whichever label appears
    first after the stable sort.
    """
    class_count = {}
    for vote in class_list:
        class_count[vote] = class_count.get(vote, 0) + 1
    # Bug fix: the keyword argument is `reverse`, not `reversed` --
    # the original raised TypeError on every call.
    sorted_class_count = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)
    return sorted_class_count[0][0]


"""
得到决策树, 需要样本集, 每个属性的名字, 那些是连续的
"""


def create_tree(data_set, labels, series):
    """Recursively build an ID3/C4.5-style decision tree.

    data_set: rows of feature values; the last column is the class label.
    labels:   feature names, parallel to the feature columns.
    series:   per-feature flags, True where the feature is continuous.

    Discrete features are consumed (their column removed) as the tree
    descends; continuous features are split on a midpoint threshold and
    may be reused deeper in the tree.  Returns either a class label
    (leaf) or a nested dict keyed by feature name / threshold.
    """
    class_list = [row[-1] for row in data_set]
    if class_list.count(class_list[0]) == len(class_list):
        # All remaining samples share one class: leaf node.
        return class_list[0]
    if len(data_set[0]) == 1:
        # Bug fix: no features left to split on -- the original fell
        # through and indexed labels[-1], corrupting the tree.
        return majority_cnt(class_list)
    best_feat, positive, negative, mid_val = choose_best_feature_to_split(data_set, series)
    if best_feat == -1:
        # No split yields any information gain: majority-vote leaf.
        return majority_cnt(class_list)
    best_feat_label = labels[best_feat]
    if not series[best_feat]:
        my_tree = {best_feat_label: {}}
        # Bug fix: build reduced copies instead of deleting from the
        # caller's lists -- the original mutated the shared `series`
        # (and `labels`), so sibling subtrees saw corrupted indices.
        sub_labels = labels[:best_feat] + labels[best_feat + 1:]
        sub_series = series[:best_feat] + series[best_feat + 1:]
        feat_values = set(row[best_feat] for row in data_set)
        for value in feat_values:
            my_tree[best_feat_label][value] = create_tree(
                split_data_set(data_set, best_feat, value), sub_labels[:], sub_series[:])
    else:
        print("mid_val = " + str(mid_val))  # parenthesized: valid in py2 and py3
        node_key = best_feat_label + str(mid_val)
        my_tree = {node_key: {}}
        # Continuous feature is kept, so labels/series pass through unchanged.
        my_tree[node_key]['p'] = create_tree(positive, labels, series)
        my_tree[node_key]['n'] = create_tree(negative, labels, series)
    return my_tree


"""
计算香农熵, 需要样本集, 用最后一行的分类计算(好瓜 ? 坏瓜?)
"""


def calc_shannon_ent(data_set):
    """Shannon entropy of the class labels (last column of each row).

    ent = -sum(p_k * log2(p_k)) over the class frequencies p_k.
    """
    total = len(data_set)
    counts = {}
    for row in data_set:
        label = row[-1]
        counts[label] = counts.get(label, 0) + 1
    entropy = 0.0
    for count in counts.values():
        prob = count / float(total)
        entropy -= prob * log(prob, 2)
    return entropy


"""
离散值: 按axis下标得到该列属性值等于value的样本集(并去掉该列)
"""


def split_data_set(data_set, axis, value):
    """Select rows whose discrete feature at `axis` equals `value`.

    The matched column itself is removed from each returned row, since
    a discrete feature is consumed once the tree splits on it.
    """
    return [row[:axis] + row[axis + 1:] for row in data_set if row[axis] == value]


"""
连续值按axis下标得到比value值大的或者小的样本集(大小取决于larger)
"""


def split_data_set_series(data_set, axis, value, larger):
    """Filter rows of a continuous feature against a threshold.

    Keeps rows where row[axis] > value when `larger` is truthy, and
    rows where row[axis] <= value otherwise.  Unlike the discrete
    splitter, the feature column is NOT removed, because a continuous
    feature may be split on again deeper in the tree.
    """
    threshold = float(value)
    kept = []
    for row in data_set:
        exceeds = float(row[axis]) > threshold
        if exceeds == bool(larger):
            kept.append(row)
    return kept


"""
输入 : 样本集， 那些样本是连续的
返回 : 信息增益最大的属性的下标, 如果是连续值, 外加分成的+-两个分组
"""


def choose_best_feature_to_split(data_set, series):
    """Pick the feature (column index) with the largest information gain.

    data_set: rows of feature values, last column is the class label.
    series:   per-feature flags, True where the feature is continuous.

    Returns (best_feature, positive, negative, mid_val).  For a winning
    continuous feature, `positive`/`negative` are the rows above/below
    the chosen midpoint `mid_val`; for a discrete winner they keep their
    defaults ([], [], 0).  best_feature is -1 when no split improves on
    zero gain.
    """
    num_features = len(data_set[0]) - 1
    base_entropy = calc_shannon_ent(data_set)
    total = float(len(data_set))
    best_info_gain = 0.0
    best_feature = -1
    positive_ret, negative_ret, mid_val_ret = [], [], 0
    for feat in range(num_features):
        column = [row[feat] for row in data_set]
        if not series[feat]:
            # Discrete: Gain(D) = Ent(D) - sum(|D_v|/|D| * Ent(D_v))
            weighted_entropy = 0.0
            for value in set(column):
                subset = split_data_set(data_set, feat, value)
                weighted_entropy += len(subset) / total * calc_shannon_ent(subset)
            gain = base_entropy - weighted_entropy
            if gain > best_info_gain:  # strict >: first maximum wins ties
                best_info_gain = gain
                best_feature = feat
        else:
            # Continuous: try every midpoint between consecutive sorted
            # values as a binary <=/> split and keep the best one.
            values = sorted(float(v) for v in column)
            for low, high in zip(values, values[1:]):
                mid = (low + high) / 2.0
                above = split_data_set_series(data_set, feat, mid, True)
                below = split_data_set_series(data_set, feat, mid, False)
                cond_entropy = (len(above) / total * calc_shannon_ent(above)
                                + len(below) / total * calc_shannon_ent(below))
                gain = base_entropy - cond_entropy
                if gain > best_info_gain:
                    best_info_gain = gain
                    best_feature = feat
                    positive_ret, negative_ret, mid_val_ret = above, below, mid
    return best_feature, positive_ret, negative_ret, mid_val_ret


class DecisionTree(object):
    """Bundles a training set, its feature labels, and per-feature
    continuous-value flags for decision-tree construction.

    Instance attributes (set in __init__):
      data_set -- rows of feature values, last column is the class label
      label    -- feature names, parallel to the feature columns
      series   -- per-feature flags, True where the feature is continuous
    """

    # NOTE: the original class defined __init__ twice; Python silently
    # keeps only the last definition, so the (data_set, label)
    # constructor was unreachable dead code and has been removed.
    # The shared mutable class attributes (data_set = [[]], label = [])
    # were removed too -- instances always set their own.

    def __init__(self, data_type):
        """Load one of the built-in data sets.

        data_type: 0 -> toy fish data (all-discrete features),
                   1 -> watermelon 2.0 data (all-discrete features).
        Raises ValueError for any other value (the original used
        `assert False`, which disappears under `python -O`).
        """
        if data_type == 0:
            self.data_set, self.label = self.create_data_fish()
            # Fish data has no explicit series list: every feature is discrete.
            self.series = [False] * len(self.label)
        elif data_type == 1:
            # Bug fix: this factory returns a 3-tuple; the original
            # 2-name unpack raised ValueError on every call.
            self.data_set, self.label, self.series = self.create_data_watermelon2_0()
        else:
            raise ValueError("unknown data_type: %r" % (data_type,))

    @staticmethod
    def create_data_fish():
        """Toy 'is it a fish?' data: two binary features + yes/no class.

        Returns (data_set, label) -- no series list; all features discrete.
        """
        data_set = [[1, 1, u'yes'],
                    [1, 1, u'yes'],
                    [1, 0, 'no'],
                    [0, 1, 'no'],
                    [0, 1, 'no']]
        label = ['no surfacing', 'flippers']
        return data_set, label

    @staticmethod
    def create_data_watermelon2_0():
        """Watermelon 2.0 data set: six discrete features + class.

        Returns (data_set, label, series); the last column of each row
        is the class label (good melon / bad melon).
        """
        data_set = [[u'青绿', u'蜷缩', u'浊响', u'清晰', u'凹陷', u'硬滑', u'好瓜'],
                    [u'乌黑', u'蜷缩', u'沉闷', u'清晰', u'凹陷', u'硬滑', u'好瓜'],
                    [u'乌黑', u'蜷缩', u'浊响', u'清晰', u'凹陷', u'硬滑', u'好瓜'],
                    [u'青绿', u'蜷缩', u'沉闷', u'清晰', u'凹陷', u'硬滑', u'好瓜'],
                    [u'浅白', u'蜷缩', u'浊响', u'清晰', u'凹陷', u'硬滑', u'好瓜'],
                    [u'青绿', u'稍蜷', u'浊响', u'清晰', u'稍凸', u'软粘', u'好瓜'],
                    [u'乌黑', u'稍蜷', u'浊响', u'稍糊', u'稍凸', u'软粘', u'好瓜'],
                    [u'乌黑', u'稍蜷', u'浊响', u'清晰', u'稍凸', u'硬滑', u'好瓜'],
                    [u'乌黑', u'稍蜷', u'沉闷', u'稍糊', u'稍凸', u'硬滑', u'坏瓜'],
                    [u'青绿', u'硬挺', u'清脆', u'清晰', u'平坦', u'软粘', u'坏瓜'],
                    [u'浅白', u'硬挺', u'清脆', u'模糊', u'平坦', u'硬滑', u'坏瓜'],
                    [u'浅白', u'蜷缩', u'浊响', u'模糊', u'平坦', u'软粘', u'坏瓜'],
                    [u'青绿', u'稍蜷', u'浊响', u'稍糊', u'凹陷', u'硬滑', u'坏瓜'],
                    [u'浅白', u'稍蜷', u'沉闷', u'稍糊', u'凹陷', u'硬滑', u'坏瓜'],
                    [u'乌黑', u'稍蜷', u'浊响', u'清晰', u'稍凸', u'软粘', u'坏瓜'],
                    [u'浅白', u'蜷缩', u'浊响', u'模糊', u'平坦', u'硬滑', u'坏瓜'],
                    [u'青绿', u'蜷缩', u'沉闷', u'稍糊', u'稍凸', u'硬滑', u'坏瓜'],
                    ]
        label = [u'色泽', u'根蒂', u'敲声', u'纹理', u'脐部', u'触感']
        series = [False, False, False, False, False, False]
        # Sanity checks on the static data (one class column on the end).
        assert len(data_set[0]) - 1 == len(label)
        assert len(data_set[0]) - 1 == len(series)
        return data_set, label, series

    @staticmethod
    def create_data_watermelon3_0():
        """Watermelon 3.0 data set: six discrete features plus two
        continuous ones (density, sugar content) stored as strings.

        Returns (data_set, label, series); series marks the last two
        feature columns as continuous.
        """
        data_set = [[u'青绿', u'蜷缩', u'浊响', u'清晰', u'凹陷', u'硬滑', '0.697', '0.460', u'好瓜'],
                    [u'乌黑', u'蜷缩', u'沉闷', u'清晰', u'凹陷', u'硬滑', '0.774', '0.376', u'好瓜'],
                    [u'乌黑', u'蜷缩', u'浊响', u'清晰', u'凹陷', u'硬滑', '0.634', '0.264', u'好瓜'],
                    [u'青绿', u'蜷缩', u'沉闷', u'清晰', u'凹陷', u'硬滑', '0.608', '0.318', u'好瓜'],
                    [u'浅白', u'蜷缩', u'浊响', u'清晰', u'凹陷', u'硬滑', '0.556', '0.215', u'好瓜'],
                    [u'青绿', u'稍蜷', u'浊响', u'清晰', u'稍凸', u'软粘', '0.403', '0.237', u'好瓜'],
                    [u'乌黑', u'稍蜷', u'浊响', u'稍糊', u'稍凸', u'软粘', '0.481', '0.149', u'好瓜'],
                    [u'乌黑', u'稍蜷', u'浊响', u'清晰', u'稍凸', u'硬滑', '0.437', '0.211', u'好瓜'],
                    [u'乌黑', u'稍蜷', u'沉闷', u'稍糊', u'稍凸', u'硬滑', '0.666', '0.091', u'坏瓜'],
                    [u'青绿', u'硬挺', u'清脆', u'清晰', u'平坦', u'软粘', '0.243', '0.267', u'坏瓜'],
                    [u'浅白', u'硬挺', u'清脆', u'模糊', u'平坦', u'硬滑', '0.245', '0.057', u'坏瓜'],
                    [u'浅白', u'蜷缩', u'浊响', u'模糊', u'平坦', u'软粘', '0.343', '0.099', u'坏瓜'],
                    [u'青绿', u'稍蜷', u'浊响', u'稍糊', u'凹陷', u'硬滑', '0.639', '0.161', u'坏瓜'],
                    [u'浅白', u'稍蜷', u'沉闷', u'稍糊', u'凹陷', u'硬滑', '0.657', '0.198', u'坏瓜'],
                    [u'乌黑', u'稍蜷', u'浊响', u'清晰', u'稍凸', u'软粘', '0.360', '0.370', u'坏瓜'],
                    [u'浅白', u'蜷缩', u'浊响', u'模糊', u'平坦', u'硬滑', '0.593', '0.042', u'坏瓜'],
                    [u'青绿', u'蜷缩', u'沉闷', u'稍糊', u'稍凸', u'硬滑', '0.719', '0.103', u'坏瓜'],
                    ]
        label = [u'色泽', u'根蒂', u'敲声', u'纹理', u'脐部', u'触感', u'密度', u'含糖率']
        series = [False, False, False, False, False, False, True, True]
        # Sanity checks on the static data (one class column on the end).
        assert len(data_set[0]) - 1 == len(label)
        assert len(data_set[0]) - 1 == len(series)
        return data_set, label, series
