#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Created on 2022-04-01 09:08:04
@author: DengLibin 榆霖
@description: Decision tree (ID3) built from scratch
'''

from collections import Counter
from math import log

import numpy as np


def calc_shannon_ent(data_set):
    """Compute the Shannon entropy of a data set's class labels.

    Entropy measures the disorder of the data: the more mixed the
    labels in the last column, the higher the entropy.

    Args:
        data_set: list of rows; each row's last element is the class
            label.

    Returns:
        float: -sum(p * log2(p)) over the label probabilities.
        An empty data set yields 0.0 (the loop body never runs).
    """
    num_entries = len(data_set)
    # Counter replaces the hand-rolled counting loop over labels.
    label_counts = Counter(row[-1] for row in data_set)
    shannon_ent = 0.0
    for count in label_counts.values():
        # Probability of this label in the data set.
        prob = count / num_entries
        shannon_ent -= prob * log(prob, 2)
    return shannon_ent

def create_data_set():
    """Build a tiny toy data set for the decision-tree demo.

    Returns:
        tuple: ``(data_set, labels)`` where each row of ``data_set`` is
        ``[feature1, feature2, class_label]`` and ``labels`` names the
        two feature columns.
    """
    # Names of feature columns 1 and 2.
    labels = ['no surfacing', 'flippers']
    data_set = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    return data_set, labels

def split_data_set(data_set, axis, value):
    """Select the rows whose feature at ``axis`` equals ``value``,
    dropping that feature column from each selected row.

    Args:
        data_set: data set to partition (list of rows).
        axis: index of the feature column to split on.
        value: required value of that feature.

    Returns:
        list: matching rows, each without column ``axis``.
    """
    # Concatenating the two slices removes column ``axis`` — same
    # effect as the slice-then-extend idiom, in one expression.
    return [row[:axis] + row[axis + 1:]
            for row in data_set
            if row[axis] == value]


def choose_best_feature_to_split(data_set):
    """Pick the feature column whose split gives the largest
    information gain.

    Args:
        data_set: list of rows; the last column is the class label.

    Returns:
        int: index of the best feature column, or -1 if no split
        yields a positive information gain.
    """
    num_features = len(data_set[0]) - 1
    # Entropy of the unsplit data; gains are measured against it.
    base_entropy = calc_shannon_ent(data_set)
    best_gain, best_feature = 0.0, -1
    for feat in range(num_features):
        # Distinct values taken by this feature column.
        candidate_values = {row[feat] for row in data_set}
        # Weighted entropy of the partitions induced by this feature.
        split_entropy = 0.0
        for candidate in candidate_values:
            subset = split_data_set(data_set, feat, candidate)
            weight = len(subset) / len(data_set)
            split_entropy += weight * calc_shannon_ent(subset)
        gain = base_entropy - split_entropy
        # Keep the column with the highest gain seen so far
        # (equivalently, the lowest weighted split entropy).
        if gain > best_gain:
            best_gain, best_feature = gain, feat
    return best_feature


def majority_cnt(class_list):
    """Return the most frequent label in ``class_list``.

    Args:
        class_list: non-empty sequence of class labels.

    Returns:
        The label with the highest count. On a tie, the label that
        first appears in ``class_list`` wins (``Counter`` preserves
        insertion order).

    Raises:
        ValueError: if ``class_list`` is empty (the old code raised an
            accidental ``UnboundLocalError`` in that case).
    """
    if not class_list:
        raise ValueError("class_list must not be empty")
    # Counter replaces the hand-rolled count-and-track loop.
    return Counter(class_list).most_common(1)[0][0]

def create_tree(data_set, labels):
    """Recursively build an ID3 decision tree.

    The tree is a nested dict ``{feature_name: {feature_value:
    subtree}}``; a leaf is a plain class label. Building the tree is
    expensive; classifying with a built tree is fast.

    Args:
        data_set: list of rows; the last column is the class label.
        labels: names of the feature columns. Fix: the caller's list
            is no longer mutated (the old code ``del``-ed from it in
            place); a private copy is used internally.

    Returns:
        dict or label: the decision (sub)tree, or a single class
        label when the recursion bottoms out.
    """
    class_list = [row[-1] for row in data_set]
    # Stop: every row has the same class -> pure leaf.
    if class_list.count(class_list[0]) == len(class_list):
        return class_list[0]
    # Stop: only the label column is left -> majority vote.
    if len(data_set[0]) == 1:
        return majority_cnt(class_list)

    # Index of the feature with the highest information gain.
    best_feat = choose_best_feature_to_split(data_set)
    best_feat_label = labels[best_feat]
    # The tree is stored as nested dicts keyed by feature name.
    my_tree = {best_feat_label: {}}
    # Remaining feature names without mutating the caller's list.
    sub_labels = labels[:best_feat] + labels[best_feat + 1:]
    # Distinct values of the chosen feature.
    unique_vals = {row[best_feat] for row in data_set}
    for value in unique_vals:
        # Recurse on the partition; each call gets its own copy of
        # the remaining labels, matching the original semantics.
        my_tree[best_feat_label][value] = create_tree(
            split_data_set(data_set, best_feat, value), sub_labels[:])
    return my_tree


def label_index(labels, label):
    """Return the index of ``label`` in ``labels``, or -1 if absent.

    Args:
        labels: list of feature names to search.
        label: feature name to look up.

    Returns:
        int: first index of ``label``, or -1 when not found.
    """
    # EAFP: list.index is the built-in linear search the old loop
    # re-implemented by hand.
    try:
        return labels.index(label)
    except ValueError:
        return -1

# 使用决策树预测
def tree_predict(tree, inv, labels):
    # label 特征
    key = list(tree.keys())[0]
    # 特征的索引
    i = label_index(labels, key)
    # 输入向量特征的值
    v = inv[i]
    
    c = tree[key][v]
    # 结果不是字典 则是最终结论
    if type(c)!= dict:
        return c
    return tree_predict(tree[key][v], inv, labels)

if __name__ == '__main__':
    my_data, labels = create_data_set()
    # print(split_data_set(my_data, 0, 1))
    # print(choose_best_feature_to_split(my_data))
    # Build the decision tree; expected shape:
    # {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
    # Reading: first test the 'no surfacing' feature — if 0 the answer is
    # 'no'; if 1, go on to test 'flippers': 0 -> 'no', 1 -> 'yes'.
    tree = create_tree(my_data, labels)
    c = tree_predict(tree, [1, 1], ['no surfacing', 'flippers'])
    print(c)
