# -*- encoding: utf-8 -*-
"""
ID3算法：
1. 计算训练数据原始熵
2. 分别根据每个特征划分数据集后计算熵
3. 根据信息增益最大找出最优划分特征
4. 以此类推递归计算每个子数据集并创建递归树，直到分类标签只有一个值
5. 测试递归树
"""
from collections import Counter
from math import log


def calculate_entropy(dataset):
    """Compute the Shannon entropy (in bits) of a dataset's class labels.

    The class label is assumed to be the last element of each row.

    :param dataset: list of rows, each row ending with its class label
    :return: entropy as a float; 0.0 for an empty dataset
    """
    total = len(dataset)
    if total == 0:
        # Matches the original behavior: an empty dataset has entropy 0.0.
        return 0.0
    # One O(n) counting pass instead of list.count() per distinct label,
    # which was O(n * k) for k distinct labels.
    label_counts = Counter(row[-1] for row in dataset)
    entropy = 0.0
    for count in label_counts.values():
        prob = count / total
        entropy -= prob * log(prob, 2)
    return entropy


def split_dataset(dataset, index, value):
    """Select the rows whose feature at ``index`` equals ``value``.

    The matched feature column is removed from each returned row, so the
    result is ready for a recursive split on the remaining features.

    :param dataset: list of rows (feature values followed by a class label)
    :param index: position of the feature to filter on
    :param value: feature value a row must have to be kept
    :return: new list of rows with column ``index`` dropped
    """
    return [
        row[:index] + row[index + 1:]
        for row in dataset
        if row[index] == value
    ]


def find_best_feature(dataset):
    """Return the index of the feature with the largest information gain.

    For each feature, partitions the dataset by that feature's values,
    computes the weighted conditional entropy of the partition, and keeps
    the feature whose information gain (base entropy minus conditional
    entropy) is largest.

    :param dataset: list of rows; every column but the last is a feature,
        the last column is the class label
    :return: index of the best feature, or -1 when no feature yields a
        strictly positive gain (original behavior preserved)
    """
    # Hoisted out of the loop: the dataset's base entropy is invariant,
    # but was recomputed once per feature in the original.
    base_entropy = calculate_entropy(dataset)
    total = len(dataset)
    best_gain = 0.0
    best_feature_index = -1
    feature_count = len(dataset[0]) - 1
    for i in range(feature_count):
        # Weighted entropy of the partition induced by feature i.
        conditional_entropy = 0.0
        for value in set(row[i] for row in dataset):
            subset = split_dataset(dataset, i, value)
            conditional_entropy += len(subset) / total * calculate_entropy(subset)
        gain = base_entropy - conditional_entropy
        if gain > best_gain:
            best_gain = gain
            best_feature_index = i
    return best_feature_index
