from collections import Counter
from math import log2

import tree_plotter


class ID3Tree(object):
    """ID3 decision-tree classifier using information-gain splitting."""

    def __init__(self):
        self.decision_tree = {}  # trained tree: nested dicts, leaves are class labels
        self.dataset = []  # training samples; last column of each row is the class label
        self.labels = []  # feature names, one per non-label column

    def set_dataset(self, dataset, labels):
        """
        Set the training data.

        :param dataset: list of samples; each sample is a list of feature
                        values followed by the class label
        :param labels: feature names matching the feature columns
        """
        self.dataset = dataset
        self.labels = labels

    def train(self):
        """Build the decision tree from the stored dataset and labels."""
        self.decision_tree = self.build_tree(self.dataset, self.labels)

    def build_tree(self, dataset, labels):
        """
        Recursively build the decision tree.

        :param dataset: samples for this subtree
        :param labels: feature names for the remaining columns
        :return: a class label (leaf) or a nested dict (internal node)
        """
        class_list = [sample[-1] for sample in dataset]
        # All samples share one class: this branch is a pure leaf.
        if class_list.count(class_list[0]) == len(class_list):
            return class_list[0]
        # No feature columns left: fall back to the majority class.
        if len(dataset[0]) == 1:
            return self.majority_class_label(class_list)

        best_feature_index = self.find_best_split(dataset)
        best_feature_label = labels[best_feature_index]
        tree = {best_feature_label: {}}  # new internal node
        # Feature names for the columns that remain after the split.
        # Built as a copy (not `del labels[...]`) so the caller's list —
        # including self.labels — is never mutated by training.
        sub_labels = labels[:best_feature_index] + labels[best_feature_index + 1:]

        unique_feature_values = {sample[best_feature_index] for sample in dataset}
        for value in unique_feature_values:
            sub_dataset = self.split_dataset(dataset, best_feature_index, value)
            tree[best_feature_label][value] = self.build_tree(sub_dataset, sub_labels)

        return tree

    @staticmethod
    def majority_class_label(class_list):
        """
        Return the most common class label.

        Ties are broken in favour of the label encountered first
        (Counter.most_common preserves first-seen order on ties).

        :param class_list: list of class labels
        :return: the majority class label
        """
        # The original {count: label} dict silently dropped labels that
        # shared a count; Counter handles duplicates correctly.
        return Counter(class_list).most_common(1)[0][0]

    def find_best_split(self, dataset):
        """
        Find the feature with the highest information gain.

        :param dataset: samples to evaluate
        :return: index of the best feature, or -1 if no split improves entropy
        """
        num_features = len(dataset[0]) - 1  # last column is the class label
        base_entropy = self.calculate_entropy(dataset)
        num_samples = float(len(dataset))
        best_info_gain = 0.0
        best_feature_index = -1

        # Evaluate each feature column in turn.
        for i in range(num_features):
            unique_feature_values = {sample[i] for sample in dataset}
            split_entropy = 0.0

            # Weighted entropy of the partitions induced by feature i.
            for value in unique_feature_values:
                sub_dataset = self.split_dataset(dataset, i, value)
                prob = len(sub_dataset) / num_samples
                split_entropy += prob * self.calculate_entropy(sub_dataset)
            info_gain = base_entropy - split_entropy

            if info_gain > best_info_gain:
                best_info_gain = info_gain  # reuse the gain already computed
                best_feature_index = i

        return best_feature_index

    @staticmethod
    def split_dataset(dataset, feature_index, value):
        """
        Select the samples whose feature at `feature_index` equals `value`,
        with that feature column removed.

        :param dataset: samples to filter
        :param feature_index: index of the feature column
        :param value: feature value to match
        :return: new list of reduced samples (input is not modified)
        """
        return [sample[:feature_index] + sample[feature_index + 1:]
                for sample in dataset
                if sample[feature_index] == value]

    @staticmethod
    def calculate_entropy(dataset):
        """
        Compute the Shannon entropy of the class-label column.

        :param dataset: samples; last column of each row is the class label
        :return: entropy in bits
        """
        num_samples = len(dataset)
        # Counter gives one O(n) pass instead of list.count per unique label.
        label_counts = Counter(sample[-1] for sample in dataset)
        return -sum((count / num_samples) * log2(count / num_samples)
                    for count in label_counts.values())

    @staticmethod
    def predict(tree, dataset):
        """
        Classify one sample by walking the tree to a leaf.

        :param tree: trained decision-tree model
        :param dataset: mapping from feature name to feature value
        :return: predicted class label
        """
        while isinstance(tree, dict):  # internal nodes are dicts, leaves are labels
            feature = next(iter(tree))
            tree = tree[feature][dataset[feature]]
        return tree


if __name__ == '__main__':
    def create_dataset():
        """Return the toy weather dataset and its feature names."""
        samples = [
            [2, 1, 0, 1, 'No'],
            [2, 1, 0, 0, 'No'],
            [0, 1, 0, 1, 'Yes'],
            [1, 2, 0, 1, 'Yes'],
            [1, 0, 1, 1, 'Yes'],
            [1, 0, 1, 0, 'No'],
            [0, 0, 1, 0, 'Yes'],
            [2, 2, 0, 1, 'No'],
            [2, 0, 1, 1, 'Yes'],
            [1, 2, 1, 1, 'Yes'],
            [2, 2, 1, 0, 'Yes'],
            [0, 2, 0, 0, 'Yes'],
            [0, 1, 1, 1, 'Yes'],
            [1, 2, 0, 0, 'No'],
        ]
        feature_names = ['Outlook', 'Temp', 'Humidity', 'Windy']
        return samples, feature_names


    tree_model = ID3Tree()
    train_samples, train_features = create_dataset()
    tree_model.set_dataset(train_samples, train_features)
    tree_model.train()  # fit the decision tree on the toy dataset
    print("决策树:", tree_model.decision_tree)
    query = {'Outlook': 1, 'Temp': 1, 'Humidity': 0, 'Windy': 1}
    print("预测结果:", tree_model.predict(tree_model.decision_tree, query))
    tree_plotter.create_plot(tree_model.decision_tree)
