# 1. 导入依赖
import numpy as np
import pandas as pd
from math import log
import operator  # 执行python操作符
import pickle  # 对python对象进行序列化和反序列化

class DecisionTreeUtils:
    """ID3 decision-tree utilities.

    Builds a nested-dict decision tree over a categorical dataset where each
    sample is a list of feature values followed by a class label in the last
    position. Feature selection uses information gain (Shannon entropy).
    """

    def __init__(self):
        pass

    # 2. Create the toy loan-approval dataset.
    def create_data(self):
        """Return (data, labels).

        data: list of samples; columns 0-3 are categorical features, the
              last column is the class label ('yes' / 'no').
        labels: human-readable names for the four feature columns.
        """
        data = [[0, 0, 0, 0, 'no'],
                [0, 0, 0, 1, 'no'],
                [0, 1, 0, 1, 'yes'],
                [0, 1, 1, 0, 'yes'],
                [0, 0, 0, 0, 'no'],
                [1, 0, 0, 0, 'no'],
                [1, 0, 0, 1, 'no'],
                [1, 1, 1, 1, 'yes'],
                [1, 0, 1, 2, 'yes'],
                [1, 0, 1, 2, 'yes'],
                [2, 0, 1, 2, 'yes'],
                [2, 0, 1, 1, 'yes'],
                [2, 1, 0, 1, 'yes'],
                [2, 1, 0, 2, 'yes'],
                [2, 0, 0, 0, 'no']]
        labels = ['F1-AGE', 'F2-WORK', 'F3-HOME', 'F4-LOAN']
        return data, labels

    # 3. Build the decision tree recursively (ID3).
    def create_decision_tree(self, data, labels, target_label):
        """Recursively build an ID3 tree.

        data: samples (features + class label in last column).
        labels: names of the feature columns still present in `data`.
        target_label: output list; the chosen split features are appended
                      to it in the order they are selected (mutated in place).
        Returns either a class-label string (leaf) or a nested dict
        {feature_name: {feature_value: subtree}}.
        """
        class_list = [example[-1] for example in data]
        # Base case 1: every sample has the same class -> pure leaf.
        if class_list.count(class_list[0]) == len(class_list):
            return class_list[0]
        # Base case 2: only the class column remains -> majority vote.
        # (Reachable now that split_data drops each consumed feature column.)
        if len(data[0]) == 1:
            return self.majorityCount(data)
        # Choose the feature with maximum information gain.
        best_feature_index = self.choose_best_feature(data, labels)
        best_feature_label = labels[best_feature_index]
        # Remove the chosen feature's label; split_data removes its column,
        # keeping labels and data columns aligned in the recursion.
        remaining_labels = labels[:best_feature_index] + labels[best_feature_index + 1:]
        target_label.append(best_feature_label)
        decision_tree = {best_feature_label: {}}
        # Recurse once per observed value of the chosen feature.
        for value in set(example[best_feature_index] for example in data):
            sub_data = self.split_data(data, best_feature_index, value)
            # Pass a copy of the label list so sibling branches stay independent.
            decision_tree[best_feature_label][value] = self.create_decision_tree(
                sub_data, remaining_labels[:], target_label)
        return decision_tree

    # 4. Majority vote over the class labels.
    def majorityCount(self, data):
        """Return the most frequent class label in `data` (ties: first seen)."""
        class_count = {}
        for vote in data:
            class_count[vote[-1]] = class_count.get(vote[-1], 0) + 1
        sorted_class_count = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)
        return sorted_class_count[0][0]

    # 5. Pick the feature with the highest information gain.
    def choose_best_feature(self, data, labels):
        """Return the column index of the feature with maximum information gain.

        `labels` is accepted for interface compatibility but not used.
        """
        num_features = len(data[0]) - 1    # last column is the class label
        base_entropy = self.calc_entropy(data)
        best_info_gain = 0.0
        # BUGFIX: was -1, which (when every gain is 0) made the caller split
        # on labels[-1] / the class column. Default to feature 0 instead.
        best_feature_index = 0
        for i in range(num_features):
            feature_values = set(example[i] for example in data)
            # Weighted entropy of the partition induced by feature i.
            new_entropy = 0.0
            for value in feature_values:
                sub_data = self.split_data(data, i, value)
                prob = len(sub_data) / float(len(data))
                new_entropy += prob * self.calc_entropy(sub_data)
            info_gain = base_entropy - new_entropy
            if info_gain > best_info_gain:
                best_info_gain = info_gain
                best_feature_index = i
        return best_feature_index

    # 6. Shannon entropy of the class-label distribution.
    def calc_entropy(self, data):
        """Return H(data) = -sum(p * log2(p)) over the class labels."""
        num_entries = len(data)
        label_counts = {}
        for example in data:
            label_counts[example[-1]] = label_counts.get(example[-1], 0) + 1
        entropy = 0.0
        for count in label_counts.values():
            prob = float(count) / num_entries
            entropy -= prob * log(prob, 2)
        return entropy

    # 7. Partition the dataset on one feature value.
    def split_data(self, data, feature_index, value):
        """Return the samples where column `feature_index` equals `value`,
        with that column removed.

        BUGFIX: the original kept the consumed feature column while the
        caller removed the matching entry from `labels`, so label indices
        and data columns drifted apart in deeper recursion levels (and the
        `len(data[0]) == 1` termination check could never fire). Standard
        ID3 drops the column here.
        """
        subset = []
        for example in data:
            if example[feature_index] == value:
                subset.append(example[:feature_index] + example[feature_index + 1:])
        return subset

