{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "    @file Sklearn\n",
    "    @description Hand-written CART decision tree using the Gini index (sklearn is used only for the confusion matrix)\n",
    "    @author Synhard\n",
    "    @tel 13001321080\n",
    "    @id 21126338\n",
    "    @email 823436512@qq.com\n",
    "    @date 2021-09-25 14:43\n",
    "    @version 1.0\n",
    "\"\"\"\n",
    "import xlrd\n",
    "import numpy as np\n",
    "from collections import Counter\n",
    "import matplotlib.pyplot as plt\n",
    "import matplotlib\n",
    "from sklearn.metrics import confusion_matrix\n",
    "\n",
    "# --- Visualization setup ---\n",
    "matplotlib.rcParams['font.family'] = 'SimHei'  # SimHei font so Chinese text renders correctly\n",
    "plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign rendering correctly with this font\n",
    "\n",
    "# Annotation styles shared by the tree-plotting helpers below.\n",
    "decisionNode = dict(boxstyle=\"sawtooth\", fc=\"0.8\")  # style for internal (decision) nodes\n",
    "leafNode = dict(boxstyle=\"round4\", fc=\"0.8\")  # style for leaf nodes\n",
    "arrow_args = dict(arrowstyle=\"<-\")  # arrow drawn from child back to parent\n",
    "\n",
    "\n",
    "def getNumLeafs(myTree):\n",
    "    numLeafs = 0\n",
    "    firstStr = list(myTree.keys())[0]\n",
    "    secondDict = myTree[firstStr]\n",
    "    for key in secondDict.keys():\n",
    "        if type(secondDict[\n",
    "                    key]).__name__ == 'dict':\n",
    "            numLeafs += getNumLeafs(secondDict[key])\n",
    "        else:\n",
    "            numLeafs += 1\n",
    "    return numLeafs\n",
    "\n",
    "\n",
    "def getTreeDepth(myTree):\n",
    "    maxDepth = 0\n",
    "    firstStr = list(myTree.keys())[0]\n",
    "    secondDict = myTree[firstStr]\n",
    "    for key in secondDict.keys():\n",
    "        if type(secondDict[\n",
    "                    key]).__name__ == 'dict':\n",
    "            thisDepth = 1 + getTreeDepth(secondDict[key])\n",
    "        else:\n",
    "            thisDepth = 1\n",
    "        if thisDepth > maxDepth: maxDepth = thisDepth\n",
    "    return maxDepth\n",
    "\n",
    "\n",
    "def plotNode(nodeTxt, centerPt, parentPt, nodeType):\n",
    "    \"\"\"Draw one tree node at centerPt with an arrow coming from parentPt.\n",
    "\n",
    "    Draws on the axes created by createPlot (createPlot.ax1); coordinates are\n",
    "    in axes-fraction units. nodeType is decisionNode or leafNode defined above.\n",
    "    \"\"\"\n",
    "    createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',\n",
    "                            xytext=centerPt, textcoords='axes fraction',\n",
    "                            va=\"center\", ha=\"center\", bbox=nodeType, arrowprops=arrow_args)\n",
    "\n",
    "\n",
    "def plotMidText(cntrPt, parentPt, txtString):\n",
    "    \"\"\"Write txtString (e.g. 'Y'/'N') at the midpoint of the parent->child edge.\"\"\"\n",
    "    mid_x = cntrPt[0] + (parentPt[0] - cntrPt[0]) / 2.0\n",
    "    mid_y = cntrPt[1] + (parentPt[1] - cntrPt[1]) / 2.0\n",
    "    createPlot.ax1.text(mid_x, mid_y, txtString, va=\"center\", ha=\"center\", rotation=30)\n",
    "\n",
    "\n",
    "def plotTree(myTree, parentPt, nodeTxt):\n",
    "    \"\"\"Recursively draw the subtree myTree, hanging from parentPt.\n",
    "\n",
    "    Shared plotting state lives in function attributes set by createPlot:\n",
    "    totalW/totalD are the whole tree's leaf count and depth, xOff/yOff track\n",
    "    the current drawing cursor in axes-fraction coordinates.\n",
    "    \"\"\"\n",
    "    numLeafs = getNumLeafs(myTree)\n",
    "    firstStr = list(myTree.keys())[0]\n",
    "    # Centre this node above its leaves, offset from the current x cursor.\n",
    "    cntrPt = (plotTree.xOff + (2 + float(numLeafs)) / 2.0 / plotTree.totalW, plotTree.yOff)\n",
    "    plotMidText(cntrPt, parentPt, nodeTxt)  # label on the edge from the parent\n",
    "    plotNode(firstStr, cntrPt, parentPt, decisionNode)\n",
    "    secondDict = myTree[firstStr]\n",
    "    plotTree.yOff = plotTree.yOff - 1.0 / plotTree.totalD  # descend one level\n",
    "    for key in secondDict.keys():\n",
    "        if type(secondDict[\n",
    "                    key]).__name__ == 'dict':\n",
    "            plotTree(secondDict[key], cntrPt, str(key))  # internal node: recurse\n",
    "        else:\n",
    "            # Leaf: advance the x cursor and draw the class label.\n",
    "            plotTree.xOff = plotTree.xOff + 1.1 / plotTree.totalW\n",
    "            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)\n",
    "            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))\n",
    "    plotTree.yOff = plotTree.yOff + 1.0 / plotTree.totalD  # restore the level on the way back up\n",
    "\n",
    "\n",
    "def createPlot(myTree):\n",
    "    \"\"\"Create the figure, draw the whole decision tree via plotTree, and show it.\"\"\"\n",
    "    fig = plt.figure(1, facecolor='white', figsize=(8, 8))\n",
    "    fig.clf()\n",
    "    axprops = dict(xticks=[], yticks=[])  # hide tick marks on both axes\n",
    "    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)  # axes shared with plotNode/plotMidText\n",
    "    plotTree.totalW = float(getNumLeafs(myTree))  # leaf count -> horizontal spacing unit\n",
    "    plotTree.totalD = float(getTreeDepth(myTree))  # depth -> vertical spacing unit\n",
    "    plotTree.xOff = -1.5 / plotTree.totalW  # initial x cursor position\n",
    "    plotTree.yOff = 1.1  # initial y cursor (slightly above the axes top)\n",
    "    plotTree(myTree, (0.5, 1.0), '')\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "def read_xslx(xslx_path):\n",
    "    \"\"\"Read the first worksheet and split its rows ~70/30 into train/test arrays.\n",
    "\n",
    "    The header row (row 0) ends up as the first row of BOTH returned arrays:\n",
    "    the training slice naturally starts at row 0, and the header is prepended\n",
    "    to the testing rows so downstream code can read the column names.\n",
    "    \"\"\"\n",
    "    workbook = xlrd.open_workbook(xslx_path)\n",
    "    sheet = workbook.sheet_by_index(0)  # worksheet #1\n",
    "    split_at = int(0.7 * sheet.nrows)  # first 70% of rows go to training\n",
    "\n",
    "    training_rows = [sheet.row_values(i) for i in range(split_at)]\n",
    "\n",
    "    testing_rows = [sheet.row_values(0)]  # repeat the header for the test split\n",
    "    for i in range(split_at, int(sheet.nrows)):\n",
    "        testing_rows.append(sheet.row_values(i))\n",
    "\n",
    "    return np.array(training_rows), np.array(testing_rows)\n",
    "\n",
    "\n",
    "# 计算结点GINI值\n",
    "def calcGini(dataSet):\n",
    "    numTotal = dataSet.shape[0]  # 记录本数据集总条数\n",
    "    length = len(dataSet[0])  # 计算特征列数\n",
    "    frequent_0 = 0.0  # 记录两种样本出现次数\n",
    "    frequent_1 = 0.0\n",
    "    for i in range(0, numTotal):\n",
    "        if dataSet[i][length - 1] == '0.0':\n",
    "            frequent_0 += 1\n",
    "        elif dataSet[i][length - 1] == '1.0':\n",
    "            frequent_1 += 1\n",
    "    gini = 1 - (frequent_0 / numTotal) ** 2 - (frequent_1 / numTotal) ** 2\n",
    "    return gini\n",
    "\n",
    "\n",
    "# 根据条件分离数据集\n",
    "def splitDataSet(dataSet, n, value, type):\n",
    "    subDataSet = []\n",
    "    numTotal = dataSet.shape[0]  # 记录本数据集总条数\n",
    "    if type == 1:  # type==1对应小于等于value的情况\n",
    "        for i in range(0, numTotal):\n",
    "            if float(dataSet[i][n]) <= float(value):\n",
    "                subDataSet.append(dataSet[i])\n",
    "    elif type == 2:  # type==2对应大于value的情况\n",
    "        for i in range(0, numTotal):\n",
    "            if float(dataSet[i][n]) > float(value):\n",
    "                subDataSet.append(dataSet[i])\n",
    "    subDataSet = np.array(subDataSet)  # 强制转换为array类型\n",
    "    pass\n",
    "    return subDataSet, len(subDataSet)\n",
    "\n",
    "\n",
    "# Choose the best feature/threshold: returns the best split key and each column's best Gini\n",
    "def FindBestFeature(dataSet):\n",
    "    \"\"\"Scan every feature column for the binary split with the lowest weighted Gini.\n",
    "\n",
    "    Candidate thresholds are midpoints between consecutive sorted values of a\n",
    "    column. For each column the best split is recorded in columnFeaGini under\n",
    "    the key '<column index>_<threshold>'.\n",
    "\n",
    "    Returns:\n",
    "        bestFeature: the '<column index>_<threshold>' key with the overall lowest Gini.\n",
    "        columnFeaGini: dict mapping each column's best split key to its weighted Gini.\n",
    "    \"\"\"\n",
    "    numTotal = dataSet.shape[0]\n",
    "    numFeatures = len(dataSet[0]) - 1  # last column is the class label\n",
    "    columnFeaGini = {}\n",
    "    # BUG FIX: the original iterated range(0, numFeatures - 1), which never\n",
    "    # considered the final feature column; numFeatures already excludes the label.\n",
    "    for i in range(0, numFeatures):\n",
    "        featListSort = [float(x) for x in dataSet[:, i]]\n",
    "        featListSort.sort()\n",
    "        FeaGinis = []  # weighted Gini for each candidate threshold\n",
    "        FeaGiniv = []  # the candidate thresholds themselves\n",
    "        for j in range(0, len(featListSort) - 1):\n",
    "            value = (featListSort[j] + featListSort[j + 1]) / 2  # midpoint threshold\n",
    "            subDataSet1, sublen1 = splitDataSet(dataSet, i, value, 1)\n",
    "            subDataSet2, sublen2 = splitDataSet(dataSet, i, value, 2)\n",
    "            # Weighted Gini of the two-way split; an empty side contributes 0.\n",
    "            feaGini = 0.0\n",
    "            if sublen1 > 0:\n",
    "                feaGini += (sublen1 / numTotal) * calcGini(subDataSet1)\n",
    "            if sublen2 > 0:\n",
    "                feaGini += (sublen2 / numTotal) * calcGini(subDataSet2)\n",
    "            FeaGinis.append(feaGini)\n",
    "            FeaGiniv.append(value)\n",
    "        best_j = FeaGinis.index(min(FeaGinis))  # first threshold achieving the minimum\n",
    "        columnFeaGini['%d_%f' % (i, FeaGiniv[best_j])] = min(FeaGinis)\n",
    "    bestFeature = min(columnFeaGini, key=columnFeaGini.get)  # overall lowest-Gini split\n",
    "    return bestFeature, columnFeaGini\n",
    "\n",
    "\n",
    "# Build the decision tree. Input: training set D, feature list A. Output: tree T.\n",
    "def createTree(dataSet, features, decisionTree):\n",
    "    \"\"\"Recursively build a CART classification tree as nested dicts.\n",
    "\n",
    "    Node keys are '<feature name>' + newline + '<=<threshold>'; each node has\n",
    "    a 'Y' branch (value <= threshold) and an 'N' branch (value > threshold).\n",
    "    Leaves are label strings ('0.0' / '1.0').\n",
    "    NOTE(review): `features` is mutated (one header deleted per split) and is\n",
    "    expected to still contain the trailing 'target' column name.\n",
    "    \"\"\"\n",
    "    if len(features) > 2:  # at least two x features (plus 'target') remain\n",
    "        bestFeature, columnFeaGini = FindBestFeature(dataSet)\n",
    "        bestFeatureLable = features[int(bestFeature.split('_')[0])]  # header of the best column\n",
    "        NodeName = bestFeatureLable + '\\n' + '<=' + bestFeature.split('_')[1]  # node label: name + threshold\n",
    "        decisionTree = {NodeName: {}}  # subtree rooted at the lowest-Gini feature\n",
    "    else:\n",
    "        return decisionTree\n",
    "\n",
    "    LeftSet, LeftSet_len = splitDataSet(dataSet, int(bestFeature.split('_')[0]), float(bestFeature.split('_')[1]), 1)\n",
    "    RightSet, RightSet_len = splitDataSet(dataSet, int(bestFeature.split('_')[0]), float(bestFeature.split('_')[1]), 2)\n",
    "    del (features[int(bestFeature.split('_')[0])])  # column consumed by this node; drop it for the subtrees\n",
    "\n",
    "    if calcGini(LeftSet) <= 0.1 or len(features) == 2:  # pure enough, or only one x feature left\n",
    "        L_lables_grp = dict(Counter(LeftSet[:, -1]))\n",
    "        L_leaf = max(L_lables_grp, key=L_lables_grp.get)  # majority class becomes the leaf\n",
    "        decisionTree[NodeName]['Y'] = L_leaf  # left (<=) leaf value\n",
    "    elif calcGini(LeftSet) > 0.1:\n",
    "        dataSetNew = np.delete(LeftSet, int(bestFeature.split('_')[0]), axis=1)  # drop the used column from the data\n",
    "        L_subFeatures = features[:]  # copy so the right branch sees the same feature list\n",
    "        decisionTree[NodeName]['Y'] = {'NONE'}  # placeholder, immediately overwritten below\n",
    "        decisionTree[NodeName]['Y'] = createTree(dataSetNew, L_subFeatures, decisionTree[NodeName]['Y'])  # recurse left\n",
    "\n",
    "    if calcGini(RightSet) <= 0.1 or len(features) == 2:  # pure enough, or only one x feature left\n",
    "        R_lables_grp = dict(Counter(RightSet[:, -1]))\n",
    "        R_leaf = max(R_lables_grp, key=R_lables_grp.get)  # majority class becomes the leaf\n",
    "        decisionTree[NodeName]['N'] = R_leaf  # right (>) leaf value\n",
    "    elif calcGini(RightSet) > 0.1:\n",
    "        dataSetNew = np.delete(RightSet, int(bestFeature.split('_')[0]), axis=1)  # drop the used column from the data\n",
    "        R_subFeatures = features[:]  # copy for the right subtree's own mutations\n",
    "        decisionTree[NodeName]['N'] = {'NONE'}  # placeholder, immediately overwritten below\n",
    "        decisionTree[NodeName]['N'] = createTree(dataSetNew, R_subFeatures, decisionTree[NodeName]['N'])  # recurse right\n",
    "\n",
    "    return decisionTree\n",
    "\n",
    "\n",
    "# Evaluate the tree on the test split\n",
    "def testTree(decisontree, dataSet):\n",
    "    \"\"\"Evaluate the tree on dataSet (header row first) and plot a confusion matrix.\n",
    "\n",
    "    Prints TP/FP/TN/FN counts, draws the matrix with matplotlib and saves the\n",
    "    figure as 'confusion_matrix'. Class labels are the strings '0.0'/'1.0'.\n",
    "    \"\"\"\n",
    "    header = dataSet[0]\n",
    "    # One mapping per sample: column name -> value. (The original shadowed the\n",
    "    # builtin `dict` with a local variable; fixed.)\n",
    "    rowDict = [dict(zip(header, dataSet[i])) for i in range(1, dataSet.shape[0])]\n",
    "\n",
    "    label = [row['target'] for row in rowDict]  # ground-truth labels\n",
    "    testmemory = [prediction(decisontree, row) for row in rowDict]  # predicted labels\n",
    "\n",
    "    TP = FP = TN = FN = 0\n",
    "    for predicted, actual in zip(testmemory, label):\n",
    "        if predicted == '1.0' and actual == '1.0':\n",
    "            TP += 1\n",
    "        elif predicted == '1.0' and actual == '0.0':\n",
    "            FP += 1\n",
    "        elif predicted == '0.0' and actual == '0.0':\n",
    "            TN += 1\n",
    "        elif predicted == '0.0' and actual == '1.0':\n",
    "            FN += 1\n",
    "\n",
    "    print('TP:%d' % TP)  # true positives\n",
    "    print('FP:%d' % FP)  # false positives\n",
    "    print('TN:%d' % TN)  # true negatives\n",
    "    print('FN:%d' % FN)  # false negatives\n",
    "\n",
    "    cm = confusion_matrix(label, testmemory, labels=[\"0.0\", \"1.0\"])\n",
    "    plt.rc('figure', figsize=(8, 8))\n",
    "    plt.matshow(cm, cmap=plt.cm.cool)  # background colour map\n",
    "    plt.colorbar()  # colour scale legend\n",
    "    # Write each count at the centre of its cell.\n",
    "    for x in range(len(cm)):\n",
    "        for y in range(len(cm)):\n",
    "            plt.annotate(cm[x, y], xy=(y, x), horizontalalignment='center', verticalalignment='center')\n",
    "    plt.ylabel('True Label')\n",
    "    plt.xlabel('Predicted Label')\n",
    "    plt.title('decision_tree')\n",
    "    plt.savefig(r'confusion_matrix')\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "def prediction(decisontree, row):\n",
    "    if isinstance(decisontree, str):\n",
    "        return decisontree\n",
    "    root = list(decisontree.keys())[0]\n",
    "    attr = root.splitlines()[0]\n",
    "    value = float(str(root.splitlines()[1])[2:])\n",
    "    if float(row.get(attr)) <= value:\n",
    "        return prediction(decisontree.get(root).get('Y'), row)\n",
    "    else:\n",
    "        return prediction(decisontree.get(root).get('N'), row)\n",
    "    pass\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    # Load the heart-disease spreadsheet and split it ~70/30.\n",
    "    trainingData, testingData = read_xslx(r'heart.xlsx')\n",
    "    features = list(trainingData[0])  # header row: feature names + 'target'\n",
    "    trainingDataSet = trainingData[1:]  # training rows without the header\n",
    "    # NOTE(review): this result is unused; createTree recomputes the best split itself.\n",
    "    bestFeature, columnFeaGini = FindBestFeature(trainingDataSet)\n",
    "    decisionTree = createTree(trainingDataSet, features, {})  # build the CART classification tree\n",
    "    testTree(decisionTree, testingData)  # confusion matrix on the test split\n",
    "    createPlot(decisionTree)  # draw the tree"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}