import numpy as np
from sklearn import tree
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
import matplotlib.pyplot as plt
import graphviz
from io import StringIO
import pydotplus
import pickle


class DecisionTreeCode(object):
    """
    ID3 decision tree implemented from scratch.

    A tree is a nested dict: {feature_label: {feature_value: subtree_or_class}},
    where a leaf is a plain class label.
    """
    @staticmethod
    def majorty_cnt(class_list):
        """
        Return the most frequent class label in class_list.

        :param class_list: list of class labels (must be non-empty)
        :return: the majority label (ties broken by first occurrence)
        """
        class_count = dict()
        for vote in class_list:
            class_count[vote] = class_count.get(vote, 0) + 1
        # max() picks the first key reaching the maximum count, matching a
        # stable sort-descending-and-take-first.
        return max(class_count, key=class_count.get)

    @staticmethod
    def calc_entropy(arr):
        """
        Shannon entropy of a label sequence: -sum(p(x) * log2(p(x))).

        :param arr: iterable of labels; an empty sequence yields 0
        :return: entropy in bits (float)
        """
        entropy = 0
        label_dict = {}
        for label in arr:
            label_dict[label] = label_dict.get(label, 0) + 1

        for k, v in label_dict.items():
            p_value = v / len(arr)
            entropy -= p_value * np.log2(p_value)
        return entropy

    @staticmethod
    def split_data_by_feature(dataset, axis, value):
        """
        Return the subset of rows whose column `axis` equals `value`,
        with that column removed (the feature is consumed by the split).

        :param dataset: 2-D list; rows are feature vectors ending in a label
        :param axis: column index of the splitting feature
        :param value: feature value selecting the branch
        :return: new 2-D list (input is not modified)
        """
        ret_dataset = []
        for feat_vec in dataset:
            if feat_vec[axis] == value:
                # Drop the splitting column; relies on list concatenation.
                ret_dataset.append(feat_vec[: axis] + feat_vec[axis + 1:])
        return ret_dataset

    def get_best_feature_split(self, dataset):
        """
        Find the feature with the highest information gain.

        :param dataset: 2-D list; last column is the class label
        :return: (best_feature_index, best_info_gain); best_feature_index is
                 -1 when no feature yields positive gain
        """
        num_features = len(dataset[0]) - 1
        base_label = [example[-1] for example in dataset]
        base_entropy = self.calc_entropy(base_label)
        best_info_gain = 0
        best_feature = -1

        for num in range(num_features):
            feat_list = [example[num] for example in dataset]
            new_entropy = 0
            # Weighted entropy of the partition induced by this feature.
            for value in set(feat_list):
                sub_dataset = self.split_data_by_feature(dataset, num, value)
                prob = len(sub_dataset) / len(dataset)
                sub_label = [example[-1] for example in sub_dataset]
                new_entropy += prob * self.calc_entropy(sub_label)
            info_gain = base_entropy - new_entropy
            if info_gain > best_info_gain:
                best_info_gain = info_gain
                best_feature = num
        return best_feature, best_info_gain

    def create_tree(self, dataset, labels):
        """
        Recursively build the decision tree.

        :param dataset: 2-D list; last column is the class label
        :param labels: feature names aligned with dataset columns; the
                       caller's list is no longer mutated (bug fix)
        :return: nested-dict tree, or a class label for a leaf
        """
        class_list = [example[-1] for example in dataset]
        # All samples share one class: pure leaf.
        if class_list.count(class_list[0]) == len(class_list):
            return class_list[0]
        # No features left: majority vote.
        if len(dataset[0]) == 1:
            return self.majorty_cnt(class_list)

        best_feat, best_feat_gain = self.get_best_feature_split(dataset)
        # No feature gives positive gain: fall back to majority vote instead
        # of misindexing labels with -1.
        if best_feat == -1:
            return self.majorty_cnt(class_list)

        best_feat_label = labels[best_feat]
        my_tree = {best_feat_label: {}}
        # Work on a copy so the caller's labels list is left intact
        # (the original removed from the caller's list in place).
        remaining_labels = labels[:best_feat] + labels[best_feat + 1:]

        feat_value = [example[best_feat] for example in dataset]
        for value in set(feat_value):
            sub_dataset = self.split_data_by_feature(dataset, best_feat, value)
            my_tree[best_feat_label][value] = self.create_tree(sub_dataset, remaining_labels)
        return my_tree

    def predict(self, input_tree, feat_label, test_value):
        """
        Classify a feature vector by walking the tree.

        :param input_tree: nested-dict tree from create_tree
        :param feat_label: full ordered feature-name list
        :param test_value: feature vector to classify
        :return: predicted class label, or None if the vector's feature value
                 has no branch in the tree
        """
        first_label = list(input_tree.keys())[0]
        second_dict = input_tree[first_label]
        feat_index = feat_label.index(first_label)
        for k, v in second_dict.items():
            if test_value[feat_index] == k:
                if isinstance(v, dict):
                    class_label = self.predict(v, feat_label, test_value)
                else:
                    class_label = v
                return class_label


class DecisionTreeSklearn(object):
    """
    Decision tree via sklearn: train, persist with pickle, visualize with
    graphviz, and optionally predict.
    """
    @staticmethod
    def model_save(input_tree, file_name):
        """
        Persist a fitted model to disk with pickle.

        :param input_tree: fitted sklearn estimator
        :param file_name: destination path (directory must exist)
        :return: None
        """
        with open(file_name, 'wb') as fw:
            pickle.dump(input_tree, fw)

    @staticmethod
    def model_load(file_name):
        """
        Load a pickled model.

        NOTE(review): pickle.load is unsafe on untrusted files — only load
        files this program wrote itself.

        :param file_name: path of a file written by model_save
        :return: the unpickled estimator
        """
        with open(file_name, 'rb') as fr:
            return pickle.load(fr)

    def model_tree(self, dataset, labels, test_value=None):
        """
        Fit a DecisionTreeClassifier, round-trip it through pickle, render
        the tree with graphviz, and optionally classify test_value.

        :param dataset: 2-D list; last column is the class label
        :param labels: feature names (len(dataset[0]) - 1 entries)
        :param test_value: optional feature vector to classify
        :return: predicted label for test_value, else None
        """
        clf = tree.DecisionTreeClassifier()
        x = [example[: -1] for example in dataset]
        y = [example[-1] for example in dataset]
        model = clf.fit(x, y)

        # Persist, then reload to verify the save/load round trip.
        self.model_save(model, file_name='./data/model/tree')
        model_read = self.model_load('./data/model/tree')

        # Render the tree. class_names must follow sklearn's sorted class
        # order (model.classes_); the previous list(set(y)) had arbitrary
        # order and could mislabel the plotted nodes.
        dot_data = tree.export_graphviz(model, out_file=None,
                                        feature_names=labels,
                                        class_names=[str(c) for c in model.classes_],
                                        filled=True, rounded=True)
        graph = graphviz.Source(dot_data)
        graph.view()

        if test_value:
            # Predict with the reloaded model so the round trip is exercised
            # (previously model_read was loaded but never used).
            return model_read.predict([test_value])[0]


def get_birds_data():
    """
    Toy animal dataset from "Machine Learning in Action" chapter 3.

    :return: (dataset, labels); each row ends with the class label
    """
    labels = ["不浮出水面是否可以生存", "是否有脚蹼"]
    dataset = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    return dataset, labels


def get_lenses_data():
    """
    Load the tab-separated lenses dataset from the ML-in-Action data dir.

    :return: (rows, labels); each row ends with the class label
    :raises FileNotFoundError: if the data file is not present
    """
    file = './data/machinelearninginaction/Ch03/lenses.txt'
    with open(file, 'r') as f:
        # Iterate the file object directly; the old readlines() plus an
        # identity comprehension built two throwaway copies of the lines.
        file_arr = [info.strip().split('\t') for info in f]

    lenses_label = ['年龄', '处方', '散光', '撕裂率']
    return file_arr, lenses_label


def code_run(dataset, labels, test_data=None):
    """
    Drive the from-scratch decision tree: build it, print it, and classify
    each test vector.

    :param dataset: 2-D list; last column is the class label
    :param labels: feature names
    :param test_data: optional list of feature vectors to classify
    :return: None
    """
    decisiontree_code = DecisionTreeCode()
    # Keep a copy for prediction: create_tree historically consumed its
    # labels argument in place.
    feat_label = labels[:]

    code_tree = decisiontree_code.create_tree(dataset, labels)
    print(code_tree)

    # Bug fix: the old 'len(test_data) > 1' silently skipped a single test case.
    if test_data and isinstance(test_data, list):
        for test_info in test_data:
            print('-' * 30)
            pred = decisiontree_code.predict(code_tree, feat_label, test_value=test_info)
            print(test_info)
            print(pred)


def sklearn_run(dataset, labels, test_data=None):
    """
    Drive the sklearn decision tree.

    :param dataset: 2-D list; last column is the class label
    :param labels: feature names
    :param test_data: optional list of feature vectors to classify
    :return: None
    """
    decisiontree_sklearn = DecisionTreeSklearn()
    # Bug fix: the old 'len(test_data) > 1' silently skipped a single test case.
    if test_data and isinstance(test_data, list):
        for test_info in test_data:
            print('-' * 30)
            pred = decisiontree_sklearn.model_tree(dataset, labels, test_info)
            print(test_info)
            print(pred)
    else:
        # Train/visualize once. Previously this call also ran after the test
        # loop, retraining the model and rendering the graph a second time.
        decisiontree_sklearn.model_tree(dataset, labels)


def run():
    """
    Entry point: load data, ordinal-encode the categorical features, and run
    the sklearn pipeline.

    :return: None
    """
    # Bird toy data (loaded but immediately shadowed by the lenses data below).
    dataset, labels = get_birds_data()
    test_data = [[1, 0], [1, 1]]

    # Contact-lenses data.
    dataset, labels = get_lenses_data()
    print(dataset)

    # print('源码')
    # code_label = labels[:]
    # code_run(dataset, code_label, test_data=None)
    #
    # print('*' * 30)

    print('sklearn')
    # Encode every categorical column as ordinal integers; the raw class
    # label column is put back afterwards.
    encoder = OrdinalEncoder()
    encoded_rows = encoder.fit_transform(dataset)

    dataset_train = [
        list(enc_row[: -1]) + [raw_row[-1]]
        for enc_row, raw_row in zip(encoded_rows, dataset)
    ]
    print(dataset_train)

    sklearn_run(dataset_train, labels[:], test_data=None)


# Script entry point.
if __name__ == '__main__':
    run()
