#!/usr/bin/env python
# -*- coding: utf-8 -*-
from math import log
import operator
import matplotlib.pyplot as plt


def creat_data():
    """Return the toy fish-classification dataset.

    Returns:
        (samples, feature_names): samples is a list of rows, each row being
        two binary feature values followed by a 'yes'/'no' class label;
        feature_names names the two feature columns.
    """
    samples = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    feature_names = ['no surfacing', 'flippers']
    return samples, feature_names

def calculate_entropy(data_set):
    """Compute the Shannon entropy of the class labels in data_set.

    Each row's last element is taken as its class label; the entropy is
    -sum(p * log2(p)) over the label frequencies.
    """
    total = len(data_set)
    # Tally how many rows carry each class label.
    counts = {}
    for row in data_set:
        label = row[-1]
        counts[label] = counts.get(label, 0) + 1
    entropy = 0.0
    for count in counts.values():
        prob = float(count) / total
        entropy -= prob * log(prob, 2)
    return entropy

def split_data(data_set, axis, values):
    """Select rows whose feature at index `axis` equals `values`.

    The matching feature column is removed from each returned row, so the
    result is ready for the next level of tree building.
    """
    return [row[:axis] + row[axis + 1:]
            for row in data_set
            if row[axis] == values]

def choose_best_split(data_set):
    """Pick the feature index with the highest information gain (ID3).

    Returns -1 when no feature yields a positive gain over the base entropy.
    """
    feature_count = len(data_set[0]) - 1        # last column is the class label
    base_entropy = calculate_entropy(data_set)  # entropy before any split
    best_gain = 0.0
    best_feature = -1
    for idx in range(feature_count):
        # Weighted entropy after partitioning on each distinct value of feature idx.
        distinct_values = set(row[idx] for row in data_set)
        split_entropy = 0.0
        for value in distinct_values:
            subset = split_data(data_set, idx, value)
            weight = len(subset) / len(data_set)
            split_entropy += weight * calculate_entropy(subset)
        gain = base_entropy - split_entropy
        if gain > best_gain:
            best_gain = gain
            best_feature = idx
    return best_feature

def major_cnt(class_list):
    """Return the most frequent class label in class_list (majority vote).

    Used by creat_tree as the leaf label when features are exhausted, so it
    must return a single label, not a list.
    """
    # Bug fixes vs. the original:
    #  - `class_list.key()` raised AttributeError (wrong object, wrong method);
    #  - the increment was indented inside the `if`, so every count stayed at 1;
    #  - sorted() was applied to the dict's keys (itemgetter(1) indexed into the
    #    label strings) instead of to .items() by count, and the whole sorted
    #    list was returned instead of the winning label.
    class_count = {}
    for vote in class_list:
        class_count[vote] = class_count.get(vote, 0) + 1
    sorted_class_count = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)
    return sorted_class_count[0][0]


# Build the module-level dataset and feature-name list in one call.
data_set, labels = creat_data()
def creat_tree(data_set, labels):
    """Recursively build an ID3 decision tree as nested dicts.

    The tree maps a feature name to {feature value: subtree-or-label}.
    NOTE: `labels` is mutated in place (the chosen feature name is deleted),
    matching the original behavior callers rely on.
    """
    classes = [row[-1] for row in data_set]
    # Stop: every remaining row has the same class.
    if classes.count(classes[0]) == len(classes):
        return classes[0]
    # Stop: only the class column is left; fall back to majority vote.
    if len(data_set[0]) == 1:
        return major_cnt(classes)
    best = choose_best_split(data_set)
    best_label = labels[best]
    tree = {best_label: {}}
    del labels[best]
    for value in set(row[best] for row in data_set):
        # Each child gets its own copy of the remaining labels.
        tree[best_label][value] = creat_tree(split_data(data_set, best, value), labels[:])
    return tree


# Build the tree once at import time. NOTE: creat_tree mutates `labels`.
mytree = creat_tree(data_set, labels)
def get_num_leafs(mytree):
    """Count the leaf nodes of a nested-dict decision tree."""
    root_label = list(mytree.keys())[0]
    leaf_total = 0
    for subtree in mytree[root_label].values():
        if type(subtree).__name__ == "dict":
            leaf_total += get_num_leafs(subtree)   # internal node: recurse
        else:
            leaf_total += 1                        # plain value: one leaf
    return leaf_total

def get_tree_depth(mytree):
    """Return the maximum depth (number of decision levels) of the tree."""
    root_label = list(mytree.keys())[0]
    deepest = 0
    for subtree in mytree[root_label].values():
        if type(subtree).__name__ == "dict":
            branch_depth = 1 + get_tree_depth(subtree)  # decision node below
        else:
            branch_depth = 1                            # leaf directly below
        deepest = max(deepest, branch_depth)
    return deepest

# matplotlib annotate() style settings used by the tree-plotting helpers.
decisionNode = {'boxstyle': 'sawtooth', 'fc': '0.8'}  # box for internal decision nodes
leafNode = {'boxstyle': 'round4', 'fc': '0.8'}        # box for leaf nodes
arrow_args = {'arrowstyle': "<-"}                     # edge arrow between nodes
def plot_node(nodetxt, centrpt, parentpt, node_type):
    """Draw one annotated tree node with an arrow from parentpt to centrpt.

    Requires creat_plot() to have run first so creat_plot.ax1 exists.
    nodetxt   -- text placed inside the node box
    centrpt   -- (x, y) of this node, in axes-fraction coordinates
    parentpt  -- (x, y) the arrow originates from
    node_type -- bbox style dict (decisionNode or leafNode)
    """
    creat_plot.ax1.annotate(nodetxt, xy=parentpt, xycoords = 'axes fraction', xytext = centrpt,
                            textcoords = 'axes fraction', va='center', ha='center', bbox=node_type,
                            arrowprops=arrow_args)

# def creatplot():
#     fig = plt.figure(1, facecolor='white')
#     fig.clf()
#     creatplot.ax1 = plt.subplot(111, frameon=False)
#     plot_node('0', (0.5, 0.1), (0.1, 0.5), decisionNode)
#     plot_node('0', (0.8, 0.1), (0.3, 0.8), leafNode)
#     plt.show()
#
# creatplot()
def plot_mid_text(cntrpt, parentpy, txtstring):
    """Write txtstring at the midpoint of the edge between child and parent."""
    mid_x = cntrpt[0] + (parentpy[0] - cntrpt[0]) / 2.0
    mid_y = cntrpt[1] + (parentpy[1] - cntrpt[1]) / 2.0
    creat_plot.ax1.text(mid_x, mid_y, txtstring)

def plot_tree(mytree, parentpt, node_txt):
    """Recursively draw `mytree` below parentpt, labelling the incoming edge with node_txt.

    Relies on function attributes initialised by creat_plot():
    plot_tree.totalW / plot_tree.totalD -- leaf count and depth of the whole tree,
    plot_tree.xOff / plot_tree.yOff     -- the current drawing cursor position.
    """
    num_leafs = get_num_leafs(mytree)
    depth = get_tree_depth(mytree)      # NOTE(review): computed but never used below
    first_str = list(mytree.keys())[0]  # root feature label of this subtree
    # Centre this subtree horizontally over the span of leaves it will occupy.
    cntrpt = (plot_tree.xOff+(1.0+float(num_leafs))/2.0/plot_tree.totalW, plot_tree.yOff)
    plot_mid_text(cntrpt, parentpt, node_txt)
    plot_node(first_str, cntrpt, parentpt, decisionNode)
    second_dict = mytree[first_str]
    # Move the y cursor down one level before drawing the children.
    plot_tree.yOff = plot_tree.yOff - 1.0/plot_tree.totalD
    for key in second_dict.keys():
        if type(second_dict[key]).__name__ == 'dict':
            # Internal node: recurse with this node as the parent.
            plot_tree(second_dict[key], cntrpt, str(key))
        else:
            # Leaf: advance the x cursor, draw the leaf box and its edge label.
            plot_tree.xOff = plot_tree.xOff + 1.0/plot_tree.totalW
            plot_node(second_dict[key], (plot_tree.xOff, plot_tree.yOff), cntrpt, leafNode)
            plot_mid_text((plot_tree.xOff, plot_tree.yOff), cntrpt, str(key))
    # Restore the y cursor as the recursion unwinds.
    plot_tree.yOff = plot_tree.yOff + 1.0/plot_tree.totalD

def creat_plot(in_tree):
    """Render the decision tree `in_tree` in a matplotlib figure and show it.

    Initialises the shared axes (creat_plot.ax1) and the layout attributes
    that plot_tree() reads, then kicks off the recursive drawing.
    """
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    # Frameless axes with no ticks, shared with the node/text helpers.
    creat_plot.ax1 = plt.subplot(111, frameon=False, xticks=[], yticks=[])
    plot_tree.totalW = float(get_num_leafs(in_tree))
    plot_tree.totalD = float(get_tree_depth(in_tree))
    plot_tree.xOff = -0.5 / plot_tree.totalW
    plot_tree.yOff = 1.0
    plot_tree(in_tree, (0.5, 1.0), '')
    plt.show()


def retrieveTree(i):
    """Return one of two pre-built sample trees (index 0 or 1) for plotting tests."""
    sample_trees = (
        {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},
        {'no surfacing': {0: 'no', 1: {'flippers': {0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}},
    )
    return sample_trees[i]

