# powered by wit_lyp
import pandas as pd
import numpy as np
from math import log
from graphviz import Digraph


class DecisionTree:
    """ID3 decision tree learned from an Excel spreadsheet.

    The first Excel column is treated as a row index and dropped; the last
    column is the label; the columns in between are categorical features.

    Instance attributes (all set in __init__):
        data_path     -- path of the Excel training file
        data          -- ndarray of all rows without the index column
        train_data    -- feature columns of ``data``
        train_label   -- label column of ``data``
        feature_map   -- {feature index: list of values that feature takes}
        label_map     -- list of all distinct label values
        feature_name  -- {feature index: header name from the spreadsheet}
        feature_color -- {feature index: color used when drawing the tree}
        tree          -- the trained decision tree (nested dict / leaf labels)
    """

    def __init__(self, data_path):
        """Load the dataset at *data_path* and immediately train the tree.

        :param data_path: location of the Excel data file
        """
        self.data_path = data_path
        self.data = self.load_dataset()
        self.train_data = self.data[:, :-1]
        self.train_label = self.data[:, -1]
        # One display color per feature index, used when rendering the tree.
        color = ["red", "blue", "yellow", "orange", "black", "pink", "green"]
        n_features = len(self.train_data[0])
        self.feature_color = dict(enumerate(color[:n_features]))
        self.feature_map = self.load_feature_map()
        self.label_map = self.load_label_map()
        self.feature_name = self.load_feature_name()

        self.tree = self.createTree(self.data, list(self.feature_map.keys()))

# ####################### data preparation ######################### #
    def load_dataset(self):
        """Return every row as an ndarray, dropping the leading index column."""
        df = pd.read_excel(self.data_path)
        return df.to_numpy()[:, 1:]

    def load_feature_name(self):
        """Map each feature index to its spreadsheet header name.

        The index column and the label column are excluded.
        """
        name = list(pd.read_excel(self.data_path).keys()[1:-1])
        return dict(enumerate(name))

    def load_feature_map(self):
        """Map each feature index to the list of values seen for it.

        Values keep their first-seen order, which later fixes the order of
        branches in the tree and of nodes in the drawing.
        """
        feature = list(pd.read_excel(self.data_path).keys())[1:-1]
        feature_map = {i: [] for i in range(len(feature))}
        for i in range(len(feature)):
            for value in self.data[:, i]:
                if value not in feature_map[i]:
                    feature_map[i].append(value)
        return feature_map

    def load_label_map(self):
        """Return all distinct label values, in first-seen order."""
        label_map = []
        for label in self.train_label:
            if label not in label_map:
                label_map.append(label)
        return label_map
# ############################################################## #

    def createTree(self, subDataset, restFeatures, currentFeatureType=None, currentFeatureName=None):
        """Recursively build an ID3 decision tree.

        :param subDataset: (ndarray) samples belonging to this subtree
        :param restFeatures: (list) feature indices not yet split on this path
        :param currentFeatureType: (int) feature index the parent split on
        :param currentFeatureName: feature value that selected this node
        :return: a nested dict ({"feature": idx, value: subtree/leaf, ...})
                 or a bare label for a leaf
        """
        dataset = subDataset
        # Copy so the caller's remaining-feature list is never mutated.
        featureList = restFeatures.copy()

        # Empty node: fall back to the majority label among all training
        # rows that share the parent's feature value.
        if len(subDataset) == 0:
            result = dict.fromkeys(self.label_map, 0)
            for item in self.data:
                if item[currentFeatureType] == currentFeatureName:
                    result[item[-1]] += 1
            maxValue = max(result.values())
            for label, count in result.items():
                if count == maxValue:
                    return label

        # All samples share one label: return it directly. This acts as a
        # natural pre-pruning step and keeps the tree small.
        if np.all(subDataset[:, -1] == subDataset[:, -1][0]):
            return subDataset[:, -1][0]

        # No features left to split on: return this node's majority label.
        if len(featureList) == 0:
            counts = np.array([np.sum(dataset[:, -1] == label)
                               for label in self.label_map])
            return self.label_map[counts.argmax()]

        # Choose the remaining feature with the largest information gain.
        gainList = np.array([self.calInfoGain(dataset, i) for i in featureList])
        maxGainPos = gainList.argmax()
        maxFeatureType = featureList[maxGainPos]
        del featureList[maxGainPos]

        tempTree = {"feature": maxFeatureType}

        # Recurse once per possible value of the chosen feature.
        for value in self.feature_map[maxFeatureType]:
            tempTree[value] = self.createTree(
                self.splitDataset(dataset, value, maxFeatureType),
                featureList, maxFeatureType, value)

        return tempTree

    def splitDataset(self, subDataset, subFeatureName, maxFeatureType):
        """Return the rows of *subDataset* whose feature matches a value.

        :param subDataset: (ndarray) dataset to split
        :param subFeatureName: feature value to keep
        :param maxFeatureType: (int) feature index to test, chosen by max gain
        :return: (ndarray) matching rows (may be empty)
        """
        return np.array([row for row in subDataset
                         if row[maxFeatureType] == subFeatureName])

    def calInfoGain(self, subDataset, featureType):
        """Information gain of splitting *subDataset* on *featureType*.

        gain = H(parent) - sum of weighted child entropies.
        """
        labelCount = self.featureCount(subDataset, featureType)
        children = sum(self.calEntropy("Child", subDataset, labelCount[value])
                       for value in labelCount)
        return self.calEntropy("Parent", subDataset) - children

    def calEntropy(self, calType, subDataset, labelCount=None):
        """Weighted Shannon entropy of a node.

        :param calType: "Parent" counts labels over subDataset itself;
                        "Child" uses the supplied *labelCount*
        :param subDataset: (ndarray) current dataset (used for the weight)
        :param labelCount: (dict) label -> occurrence count ("Child" mode)
        :return: (float) entropy scaled by the node's share of subDataset
                 (the weight is 1 in "Parent" mode, since the counts cover
                 the whole dataset)
        """
        if calType == "Parent":
            labelCount = dict.fromkeys(self.label_map, 0)
            for item in subDataset:
                labelCount[item[-1]] += 1

        itemSize = sum(labelCount[label] for label in self.label_map)
        result = 0
        for label in self.label_map:
            if labelCount[label] == 0:
                continue  # 0 * log(0) contributes nothing by convention
            pi = labelCount[label] / itemSize
            result -= pi * log(pi, 2)
        return itemSize / len(subDataset) * result

    def featureCount(self, subDataset, featureType):
        """Count label occurrences per value of one feature.

        :param subDataset: (ndarray) current dataset
        :param featureType: (int) feature index to tally
        :return: (dict) {feature value: {label: count}}
        """
        featureCount = {}
        for value in self.feature_map[featureType]:
            featureCount[value] = dict.fromkeys(self.label_map, 0)
        for item in subDataset:
            featureCount[item[featureType]][item[-1]] += 1
        return featureCount

    def classify(self, feature, tree):
        """Predict the label for one sample by walking the tree.

        :param feature: (list) feature values, indexed by feature index
        :param tree: (dict) decision tree produced by createTree
        :return: predicted label
        """
        branch = tree[feature[tree["feature"]]]
        if isinstance(branch, dict):
            # Inner node: descend into the matching subtree.
            return self.classify(feature, branch)
        return branch

    def getNumberLeafs(self, tree):
        """Return the number of leaf nodes in *tree*."""
        number = 0
        for value in self.feature_map[tree["feature"]]:
            child = tree[value]
            number += self.getNumberLeafs(child) if isinstance(child, dict) else 1
        return number

    def getTreeDepth(self, tree):
        """Return the depth of *tree*; a single split node has depth 1.

        Fixed: the previous version accumulated depth across sibling
        subtrees instead of evaluating each child independently, so trees
        with several nested siblings reported an inflated depth.
        """
        maxDepth = 1
        for value in self.feature_map[tree["feature"]]:
            child = tree[value]
            if isinstance(child, dict):
                maxDepth = max(maxDepth, 1 + self.getTreeDepth(child))
        return maxDepth

    def showTree(self, tree):
        """Render *tree* with graphviz and open the resulting file."""
        g = Digraph("DecisionTree")
        fileName = "DecisionTree.gv"
        self.createNode(tree, g, 0, "1")
        g.view(fileName)

    def createNode(self, tree, graph, depth, id):
        """Add the node for *tree* and all its children to *graph*.

        :param tree: (dict) subtree to draw
        :param graph: (Digraph) graph being assembled
        :param depth: (int) current drawing depth
        :param id: (str) unique identifier of this node within its depth

        Note: internal helper, only called by showTree/itself.
        """
        currentFeatureType = tree["feature"]
        currentFeatureName = self.feature_name[currentFeatureType]
        if depth == 0:
            # The root is not created by any parent call, so draw it here.
            graph.node(name=currentFeatureName + str(depth) + id,
                       color=self.feature_color[currentFeatureType],
                       fontname="Microsoft YaHei",
                       label=currentFeatureName)

        count = 1
        for value in self.feature_map[currentFeatureType]:
            child = tree[value]
            if isinstance(child, dict):
                # Inner child: draw its feature node, link it, then recurse.
                nextFeature = child["feature"]
                childNodeName = self.feature_name[nextFeature] + str(depth + 1) + str(count)
                graph.node(name=childNodeName,
                           color=self.feature_color[nextFeature],
                           fontname="Microsoft YaHei",
                           label=self.feature_name[nextFeature]
                           )
                graph.edge(currentFeatureName + str(depth) + id,
                           childNodeName,
                           color="green",
                           label=value,
                           fontname="Microsoft YaHei")
                self.createNode(child, graph, depth + 1, str(count))
            else:
                # Leaf child: draw the label node and the edge to it.
                leafNodeName = child + str(depth + 1) + str(count)
                graph.node(name=leafNodeName,
                           color="purple",
                           fontname="Microsoft YaHei",
                           label=child
                           )
                graph.edge(currentFeatureName + str(depth) + id,
                           leafNodeName,
                           color="green",
                           label=value,
                           fontname="Microsoft YaHei"
                           )
            count += 1


if __name__ == '__main__':
    # Train a tree from the spreadsheet, draw it, then run one prediction.
    dt = DecisionTree("data.xlsx")
    dt.showTree(dt.tree)

    # One watermelon sample, listed in feature-index order.
    sample = ['青绿', '蜷缩', '浊响', '清晰', '凹陷', '硬滑']
    print(dt.classify(sample, dt.tree))

    print(dt.tree)
    print(dt.getNumberLeafs(dt.tree))
    print(dt.getTreeDepth(dt.tree))





