# __author__ = 'heyin'
# __date__ = '2018/12/27 9:11'
import numpy as np
import pandas as pd


def cal_ent(dataset):
    """Return the Shannon entropy of the final (class-label) column.

    Only the last column of *dataset* is inspected; the entropy is
    computed over the empirical label frequencies: H = -sum(p * log2(p)).
    """
    labels = dataset.iloc[:, -1]
    # Empirical probability of each distinct label.
    probs = labels.value_counts() / len(labels)
    return (probs * -np.log2(probs)).sum()


def create_data_set():
    """Build the 5-row toy data set (fish classification by two binary features)."""
    return pd.DataFrame({
        'no surfacing': [1, 1, 1, 0, 0],
        'flippers': [1, 1, 0, 1, 1],
        'fish': ['yes', 'yes', 'no', 'no', 'no'],
    })


def best_split(dataset):
    """Return the index of the feature column with maximal information gain.

    Every column except the last (the label column) is a candidate split.
    Returns -1 when no split yields a strictly positive gain.
    """
    base_ent = cal_ent(dataset)
    n_rows = dataset.shape[0]
    best_gain, best_axis = 0, -1
    for col in range(dataset.shape[1] - 1):
        feature = dataset.iloc[:, col]
        # Conditional entropy: weighted entropy of each feature-value subset.
        cond_ent = 0
        for level in feature.value_counts().index:
            subset = dataset[feature == level]
            cond_ent += subset.shape[0] / n_rows * cal_ent(subset)
        gain = base_ent - cond_ent
        if gain > best_gain:
            best_gain, best_axis = gain, col
    return best_axis


def my_split(dataset, f_axis, value):
    """Return the rows where column *f_axis* equals *value*, with that column dropped."""
    feature_name = dataset.columns[f_axis]
    mask = dataset[feature_name] == value
    return dataset[mask].drop(columns=feature_name)


def create_tree(dataset):
    """Recursively build an ID3 decision tree from *dataset*.

    The last column of *dataset* holds the class labels; every other
    column is a candidate feature. Returns a nested dict of the form
    ``{feature_name: {feature_value: subtree_or_label, ...}}``, or a
    bare class label when the node is a leaf.
    """
    classlist = dataset.iloc[:, -1].value_counts()
    # Leaf: all rows carry the same label, or only the label column is left.
    if classlist.iloc[0] == dataset.shape[0] or dataset.shape[1] == 1:
        return classlist.index[0]
    axis = best_split(dataset)
    # No feature gives positive information gain: fall back to majority label.
    if axis == -1:
        return classlist.index[0]
    best_feat = dataset.columns[axis]
    tree = {best_feat: {}}
    # Branch on every observed value of the chosen feature and recurse on
    # the subset with that feature column removed.
    for value in dataset.iloc[:, axis].value_counts().index:
        tree[best_feat][value] = create_tree(my_split(dataset, axis, value))
    return tree
    # 判断最多标签数目是否等于数据集行数

def skdtc():
    """Placeholder: build the decision tree directly with sklearn (not implemented)."""
    # TODO: implement with sklearn's tree module — presumably a
    # DecisionTreeClassifier fit on create_data_set(); confirm intent.
    pass

if __name__ == '__main__':
    # Build the toy data set, locate the best root split, then grow the tree.
    data = create_data_set()
    root_axis = best_split(data)
    create_tree(data)