{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 第五题（选做）：请你完成带有后剪枝的决策树"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 实验内容\n",
    "1. 实现带有后剪枝的决策树\n",
    "2. 数据集随意\n",
    "3. 最后对比剪枝和不剪枝的差别"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**（选做：以下答案仅供参考）**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 导入类库\n",
    "import pandas as pd\n",
    "from sklearn.utils import shuffle\n",
    "import numpy as np\n",
    "import math\n",
    "from sklearn.metrics import accuracy_score\n",
    "from sklearn.metrics import precision_score\n",
    "from sklearn.metrics import recall_score\n",
    "from sklearn.metrics import f1_score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "loans = pd.read_csv('data/lendingclub/lending-club-data.csv', low_memory=False)\n",
    "\n",
    "# 对数据进行预处理，将safe_loans作为标记\n",
    "loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1)\n",
    "del loans['bad_loans']\n",
    "\n",
    "features = ['grade',              # grade of the loan\n",
    "            'term',               # the term of the loan\n",
    "            'home_ownership',     # home_ownership status: own, mortgage or rent\n",
    "            'emp_length',         # number of years of employment\n",
    "           ]\n",
    "target = 'safe_loans'\n",
    "loans = loans[features + [target]]\n",
    "\n",
    "loans = shuffle(loans, random_state = 34)\n",
    "\n",
    "split_line1 = int(len(loans) * 0.6)\n",
    "split_line2 = int(len(loans) * 0.8)\n",
    "train_data = loans.iloc[: split_line1]\n",
    "validation_data = loans.iloc[split_line1: split_line2]\n",
    "test_data = loans.iloc[split_line2:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def one_hot_encoding(data, features_categorical):\n",
    "    '''\n",
    "    Parameter\n",
    "    ----------\n",
    "    data: pd.DataFrame\n",
    "    \n",
    "    features_categorical: list(str)\n",
    "    '''\n",
    "    \n",
    "    # 对所有的离散特征遍历\n",
    "    for cat in features_categorical:\n",
    "        \n",
    "        # 对这列进行one-hot编码，前缀为这个变量名\n",
    "        one_encoding = pd.get_dummies(data[cat], prefix = cat)\n",
    "        \n",
    "        # 将生成的one-hot编码与之前的dataframe拼接起来\n",
    "        data = pd.concat([data, one_encoding],axis=1)\n",
    "        \n",
    "        # 删除掉原始的这列离散特征\n",
    "        del data[cat]\n",
    "    \n",
    "    return data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(73564, 25) (24521, 25) (24522, 25)\n",
      "59622\n",
      "13942\n"
     ]
    }
   ],
   "source": [
    "train_data = one_hot_encoding(train_data, features)\n",
    "\n",
    "one_hot_features = train_data.columns.tolist()\n",
    "one_hot_features.remove(target)\n",
    "\n",
    "validation_tmp = one_hot_encoding(validation_data, features)\n",
    "validation_data = pd.DataFrame(columns = train_data.columns)\n",
    "for feature in train_data.columns:\n",
    "    if feature in validation_tmp:\n",
    "        validation_data[feature] = validation_tmp[feature].copy()\n",
    "    else:\n",
    "        validation_data[feature] = np.zeros(len(validation_tmp), dtype = 'uint8')\n",
    "        \n",
    "test_data_tmp = one_hot_encoding(test_data, features)\n",
    "test_data = pd.DataFrame(columns = train_data.columns)\n",
    "for feature in train_data.columns:\n",
    "    if feature in test_data_tmp.columns:\n",
    "        test_data[feature] = test_data_tmp[feature].copy()\n",
    "    else:\n",
    "        test_data[feature] = np.zeros(test_data_tmp.shape[0], dtype = 'uint8')\n",
    "\n",
    "print(train_data.shape, validation_data.shape, test_data.shape)\n",
    "\n",
    "        \n",
    "target_values = train_data[target]\n",
    "print(len(target_values[target_values==1]))\n",
    "print(len(target_values[target_values==-1]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 以下所有函数同前面题中的函数\n",
    "def information_entropy(labels_in_node):\n",
    "    '''\n",
    "    求当前结点的信息熵\n",
    "    \n",
    "    Parameter\n",
    "    ----------\n",
    "    labels_in_node: np.ndarray, 如[-1, 1, -1, 1, 1]\n",
    "    \n",
    "    Returns\n",
    "    ----------\n",
    "    float: information entropy\n",
    "    '''\n",
    "    \n",
    "    # 统计样本总个数\n",
    "    num_of_samples = labels_in_node.shape[0]\n",
    "    \n",
    "    if num_of_samples == 0:\n",
    "        return 0\n",
    "    \n",
    "    # 统计出标记为1的个数\n",
    "    num_of_positive = len(labels_in_node[labels_in_node == 1])\n",
    "    \n",
    "    # 统计出标记为-1的个数\n",
    "    num_of_negative = len(labels_in_node[labels_in_node == -1])                  # YOUR CODE HERE\n",
    "    \n",
    "    # 统计正例的概率\n",
    "    prob_positive = num_of_positive / num_of_samples\n",
    "    \n",
    "    # 统计负例的概率\n",
    "    prob_negative = num_of_negative / num_of_samples                            # YOUR CODE HERE\n",
    "    \n",
    "    if prob_positive == 0:\n",
    "        positive_part = 0\n",
    "    else:\n",
    "        positive_part = prob_positive * np.log2(prob_positive)\n",
    "    \n",
    "    if prob_negative == 0:\n",
    "        negative_part = 0\n",
    "    else:\n",
    "        negative_part = prob_negative * np.log2(prob_negative)\n",
    "    \n",
    "    return - ( positive_part + negative_part )\n",
    "\n",
    "\n",
    "def compute_information_gain_ratios(data, features, target, annotate = False):\n",
    "    '''\n",
    "    计算所有特征的信息增益率并保存起来\n",
    "    \n",
    "    Parameter\n",
    "    ----------\n",
    "    data: pd.DataFrame, 带有特征和标记的数据\n",
    "    \n",
    "    features: list(str)，特征名组成的list\n",
    "    \n",
    "    target: str， 特征的名字\n",
    "    \n",
    "    annotate: boolean, default False，是否打印注释\n",
    "    \n",
    "    Returns\n",
    "    ----------\n",
    "    gain_ratios: dict, key: str, 特征名\n",
    "                       value: float，信息增益率\n",
    "    '''\n",
    "    \n",
    "    gain_ratios = dict()\n",
    "    \n",
    "    # 对所有的特征进行遍历，使用当前的划分方法对每个特征进行计算\n",
    "    for feature in features:\n",
    "        \n",
    "        # 左子树保证所有的样本的这个特征取值为0\n",
    "        left_split_target = data[data[feature] == 0][target]\n",
    "        \n",
    "        # 右子树保证所有的样本的这个特征取值为1\n",
    "        right_split_target =  data[data[feature] == 1][target]\n",
    "            \n",
    "        # 计算左子树的信息熵\n",
    "        left_entropy = information_entropy(left_split_target)\n",
    "        \n",
    "        # 计算左子树的权重\n",
    "        left_weight = len(left_split_target) / (len(left_split_target) + len(right_split_target))\n",
    "\n",
    "        # 计算右子树的信息熵\n",
    "        right_entropy = information_entropy(right_split_target)\n",
    "        \n",
    "        # 计算右子树的权重\n",
    "        right_weight = len(right_split_target) / (len(left_split_target) + len(right_split_target))\n",
    "        \n",
    "        # 计算当前结点的信息熵\n",
    "        current_entropy = information_entropy(data[target])\n",
    "        \n",
    "        # 计算当前结点的信息增益\n",
    "        gain =  current_entropy - (left_weight * left_entropy + right_weight * right_entropy)         # YOUR CODE HERE\n",
    "        \n",
    "        # 计算IV公式中，当前特征为0的值\n",
    "        if left_weight == 0:\n",
    "            left_IV = 0\n",
    "        else:\n",
    "            left_IV =left_weight * np.log2(left_weight)                                        # YOUR CODE HERE\n",
    "        \n",
    "        # 计算IV公式中，当前特征为1的值\n",
    "        if right_weight == 0:\n",
    "            right_IV = 0\n",
    "        else:\n",
    "            right_IV =right_weight * np.log2(right_weight)                                  # YOUR CODE HERE\n",
    "        \n",
    "        # IV 等于所有子树IV之和的相反数\n",
    "        IV = - (left_IV + right_IV)\n",
    "            \n",
    "        # 计算使用当前特征划分的信息增益率\n",
    "        # 这里为了防止IV是0，导致除法得到np.inf，在分母加了一个很小的小数\n",
    "        gain_ratio = gain / (IV + np.finfo(np.longdouble).eps)\n",
    "        \n",
    "        # 信息增益率的存储\n",
    "        gain_ratios[feature] = gain_ratio\n",
    "        \n",
    "        if annotate:\n",
    "            print(\" \", feature, gain_ratio)\n",
    "            \n",
    "    return gain_ratios\n",
    "\n",
    "\n",
    "def best_splitting_feature(data, features, target, criterion = 'gain_ratio', annotate = False):\n",
    "    '''\n",
    "    给定划分方法和数据，找到最优的划分特征\n",
    "    \n",
    "    Parameters\n",
    "    ----------\n",
    "    data: pd.DataFrame, 带有特征和标记的数据\n",
    "    \n",
    "    features: list(str)，特征名组成的list\n",
    "    \n",
    "    target: str， 特征的名字\n",
    "    \n",
    "    criterion: str, 使用哪种指标，三种选项: 'information_gain', 'gain_ratio', 'gini'\n",
    "    \n",
    "    annotate: boolean, default False，是否打印注释\n",
    "    \n",
    "    Returns\n",
    "    ----------\n",
    "    best_feature: str, 最佳的划分特征的名字\n",
    "    \n",
    "    '''\n",
    "    if criterion == 'information_gain':\n",
    "        if annotate:\n",
    "            print('using information gain')\n",
    "        return None\n",
    "\n",
    "    elif criterion == 'gain_ratio':\n",
    "        if annotate:\n",
    "            print('using information gain ratio')\n",
    "        \n",
    "        # 得到当前所有特征的信息增益率\n",
    "        gain_ratios = compute_information_gain_ratios(data, features, target, annotate)\n",
    "    \n",
    "        # 根据这些特征和他们的信息增益率，找到最佳的划分特征\n",
    "        best_feature = max(gain_ratios.items(), key = lambda x: x[1])[0]\n",
    "\n",
    "        return best_feature\n",
    "    \n",
    "    elif criterion == 'gini':\n",
    "        if annotate:\n",
    "            print('using gini')\n",
    "        return None\n",
    "    else:\n",
    "        raise Exception(\"传入的criterion不合规!\", criterion)\n",
    "        \n",
    "\n",
    "def intermediate_node_num_mistakes(labels_in_node):\n",
    "    '''\n",
    "    求树的结点中，样本数少的那个类的样本有多少，比如输入是[1, 1, -1, -1, 1]，返回2\n",
    "    \n",
    "    Parameter\n",
    "    ----------\n",
    "    labels_in_node: np.ndarray, pd.Series\n",
    "    \n",
    "    Returns\n",
    "    ----------\n",
    "    int：个数\n",
    "    \n",
    "    '''\n",
    "    # 如果传入的array为空，返回0\n",
    "    if len(labels_in_node) == 0:\n",
    "        return 0\n",
    "    \n",
    "    # 统计1的个数\n",
    "    num_of_one = len(labels_in_node[labels_in_node == 1])     # YOUR CODE HERE\n",
    "    \n",
    "    # 统计-1的个数\n",
    "    num_of_minus_one = len(labels_in_node[labels_in_node == -1])    # YOUR CODE HERE\n",
    "    \n",
    "    return num_of_one if num_of_minus_one > num_of_one else num_of_minus_one\n",
    "\n",
    "\n",
    "def majority_class(labels_in_node):\n",
    "    '''\n",
    "        求树的结点中，样本数多的那个类是什么\n",
    "    '''\n",
    "    # 如果传入的array为空，返回0\n",
    "    if len(labels_in_node) == 0:\n",
    "        return 0\n",
    "    \n",
    "    # 统计1的个数\n",
    "    num_of_one = len(labels_in_node[labels_in_node == 1])     # YOUR CODE HERE\n",
    "    \n",
    "    # 统计-1的个数\n",
    "    num_of_minus_one = len(labels_in_node[labels_in_node == -1])    # YOUR CODE HERE\n",
    "    \n",
    "    return 1 if num_of_minus_one < num_of_one else -1\n",
    "\n",
    "\n",
    "def create_leaf(target_values):\n",
    "    '''\n",
    "    计算出当前叶子结点的标记是什么，并且将叶子结点信息保存在一个dict中\n",
    "    \n",
    "    Parameter:\n",
    "    ----------\n",
    "    target_values: pd.Series, 当前叶子结点内样本的标记\n",
    "\n",
    "    Returns:\n",
    "    ----------\n",
    "    leaf: dict，表示一个叶结点，\n",
    "            leaf['splitting_features'], None，叶结点不需要划分特征\n",
    "            leaf['left'], None，叶结点没有左子树\n",
    "            leaf['right'], None，叶结点没有右子树\n",
    "            leaf['is_leaf'], True, 是否是叶子结点\n",
    "            leaf['prediction'], int, 表示该叶子结点的预测值\n",
    "    '''\n",
    "    # 创建叶子结点\n",
    "    leaf = {'splitting_feature' : None,\n",
    "            'left' : None,\n",
    "            'right' : None,\n",
    "            'is_leaf': True}\n",
    "   \n",
    "    # 数结点内-1和+1的个数\n",
    "    num_ones = len(target_values[target_values == +1])\n",
    "    num_minus_ones = len(target_values[target_values == -1])    \n",
    "\n",
    "    # 叶子结点的标记使用少数服从多数的原则，为样本数多的那类的标记，保存在 leaf['prediction']\n",
    "    leaf['prediction'] = majority_class(target_values)\n",
    "\n",
    "    # 返回叶子结点\n",
    "    return leaf\n",
    "\n",
    "\n",
    "def classify(tree, x, annotate = False):\n",
    "    '''\n",
    "    递归的进行预测，一次只能预测一个样本\n",
    "    \n",
    "    Parameters\n",
    "    ----------\n",
    "    tree: dict\n",
    "    \n",
    "    x: pd.Series，样本\n",
    "    \n",
    "    x: pd.DataFrame, 待预测的样本\n",
    "    \n",
    "    annotate, boolean, 是否显示注释\n",
    "    \n",
    "    Returns\n",
    "    ----------\n",
    "    返回预测的标记\n",
    "    '''\n",
    "    if tree['is_leaf']:\n",
    "        if annotate:\n",
    "            print (\"At leaf, predicting %s\" % tree['prediction'])\n",
    "        return tree['prediction']\n",
    "    else:\n",
    "        split_feature_value = x[tree['splitting_feature']]\n",
    "        if annotate:\n",
    "             print (\"Split on %s = %s\" % (tree['splitting_feature'], split_feature_value))\n",
    "        if split_feature_value == 0:\n",
    "            return classify(tree['left'], x, annotate)\n",
    "        else:\n",
    "            return classify(tree['right'], x, annotate)\n",
    "        \n",
    "\n",
    "def predict(tree, data):\n",
    "    '''\n",
    "    按行遍历data，对每个样本进行预测，将值存储起来，最后返回np.ndarray\n",
    "    \n",
    "    Parameter\n",
    "    ----------\n",
    "    tree, dict, 模型\n",
    "    \n",
    "    data, pd.DataFrame, 数据\n",
    "    \n",
    "    Returns\n",
    "    ----------\n",
    "    predictions, np.ndarray, 模型对这些样本的预测结果\n",
    "    '''\n",
    "    predictions = np.zeros(len(data))\n",
    "    \n",
    "    # YOUR CODE HERE\n",
    "    for i in range(len(data)):\n",
    "        predictions[i] = classify(tree, data.iloc[i])\n",
    "    \n",
    "    return predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def find_majority_class(labels_in_node):\n",
    "    '''\n",
    "        求树的结点中，样本数多的那个类是什么\n",
    "    '''\n",
    "    # 如果传入的array为空，返回0\n",
    "    if len(labels_in_node) == 0:\n",
    "        return 0\n",
    "    \n",
    "    # 统计1的个数\n",
    "    num_of_one = len(labels_in_node[labels_in_node == 1])     # YOUR CODE HERE\n",
    "    \n",
    "    # 统计-1的个数\n",
    "    num_of_minus_one = len(labels_in_node[labels_in_node == -1])    # YOUR CODE HERE\n",
    "    \n",
    "    return 1 if num_of_minus_one < num_of_one else -1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "def no_pruning_decision_tree_create(data,validation_data, features, target,index_tree, criterion = 'gini', current_depth = 0, max_depth = 10, annotate = False):\n",
    "    '''\n",
    "    Parameter:\n",
    "    ----------\n",
    "    data: pd.DataFrame, 数据\n",
    "\n",
    "    features: iterable, 特征组成的可迭代对象，比如一个list\n",
    "\n",
    "    target: str, 标记的名字\n",
    "\n",
    "    criterion: 'str', 特征划分方法，只支持三种：'information_gain', 'gain_ratio', 'gini'\n",
    "\n",
    "    current_depth: int, 当前深度，递归的时候需要记录\n",
    "\n",
    "    max_depth: int, 树的最大深度，我们设定的树的最大深度，达到最大深度需要终止递归\n",
    "\n",
    "    Returns:\n",
    "    ----------\n",
    "    dict, dict['is_leaf']          : False, 当前顶点不是叶子结点\n",
    "          dict['prediction']       : None, 不是叶子结点就没有预测值\n",
    "          dict['splitting_feature']: splitting_feature, 当前结点是使用哪个特征进行划分的\n",
    "          dict['left']             : dict\n",
    "          dict['right']            : dict\n",
    "    '''\n",
    "    \n",
    "    if criterion not in ['information_gain', 'gain_ratio', 'gini']:\n",
    "        raise Exception(\"传入的criterion不合规!\", criterion)\n",
    "    \n",
    "    # 复制一份特征，存储起来，每使用一个特征进行划分，我们就删除一个\n",
    "    remaining_features = features[:]\n",
    "    \n",
    "    # 取出标记值\n",
    "    target_values = data[target]\n",
    "    validation_values = validation_data[target]\n",
    "    majority_class = find_majority_class(validation_values)\n",
    "    if(majority_class == -1):print(\"HERE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n",
    "    print(\"-\" * 50)\n",
    "    print(\"Subtree, depth = %s (%s data points).\" % (current_depth, len(target_values)))\n",
    "\n",
    "    # 终止条件1\n",
    "    # 如果当前结点内所有样本同属一类，即这个结点中，各类别样本数最小的那个等于0\n",
    "    # 使用前面写的intermediate_node_num_mistakes来完成这个判断\n",
    "    if intermediate_node_num_mistakes(target_values)==0:                                  # YOUR CODE HERE\n",
    "        print(\"Stopping condition 1 reached.\")\n",
    "        return create_leaf(target_values)   # 创建叶子结点\n",
    "    \n",
    "    # 终止条件2\n",
    "    # 如果已经没有剩余的特征可供分割，即remaining_features为空\n",
    "    \n",
    "    if  len(remaining_features)==0:           # YOUR CODE HERE\n",
    "        print(\"Stopping condition 2 reached.\")\n",
    "        return create_leaf(target_values)   # 创建叶子结点\n",
    "    \n",
    "    # 终止条件3\n",
    "    # 如果已经到达了我们要求的最大深度，即当前深度达到了最大深度\n",
    "    \n",
    "    if current_depth==max_depth:             # YOUR CODE HERE\n",
    "        print(\"Reached maximum depth. Stopping for now.\")\n",
    "        return create_leaf(target_values)   # 创建叶子结点\n",
    "\n",
    "    # 找到最优划分特征\n",
    "    # 使用best_splitting_feature这个函数\n",
    "    \n",
    "    splitting_feature =best_splitting_feature(data,remaining_features,target,criterion)          # YOUR CODE HERE\n",
    "    \n",
    "    # 使用我们找到的最优特征将数据划分成两份\n",
    "    # 左子树的数据\n",
    "    left_split = data[data[splitting_feature] == 0]\n",
    "    \n",
    "    # 右子树的数据\n",
    "    right_split = data[data[splitting_feature] == 1]                                    # YOUR CODE HERE\n",
    "    \n",
    "    validation_left_split = validation_data[validation_data[splitting_feature] == 0]\n",
    "    validation_right_split = validation_data[validation_data[splitting_feature] == 1]\n",
    "    \n",
    "    # 现在已经完成划分，我们要从剩余特征中删除掉当前这个特征\n",
    "    remaining_features.remove(splitting_feature)\n",
    "    \n",
    "    # 打印当前划分使用的特征，打印左子树样本个数，右子树样本个数\n",
    "    print(\"Split on feature %s. (%s, %s)\" % (\\\n",
    "                      splitting_feature, len(left_split), len(right_split)))\n",
    "    \n",
    "    # 如果使用当前的特征，将所有的样本都划分到一棵子树中，那么就直接将这棵子树变成叶子结点\n",
    "    # 判断左子树是不是“完美”的\n",
    "    if len(left_split) == len(data):\n",
    "        print(\"Creating leaf node.\")\n",
    "        return create_leaf(left_split[target])\n",
    "    \n",
    "    # 判断右子树是不是“完美”的\n",
    "    if len(right_split) == len(data):\n",
    "        print(\"Creating right node.\")\n",
    "        return create_leaf(right_split[target])                                          # YOUR CODE HERE\n",
    "\n",
    "    # 递归地创建左子树\n",
    "    left_tree = no_pruning_decision_tree_create(left_split,validation_left_split, remaining_features, target,index_tree, criterion, current_depth + 1, max_depth, annotate)\n",
    "    \n",
    "    # 递归地创建右子树\n",
    "    \n",
    "    right_tree = no_pruning_decision_tree_create(right_split,validation_right_split, remaining_features, target,index_tree, criterion, current_depth + 1, max_depth, annotate)\n",
    "    \n",
    "    # 返回树的非叶子结点\n",
    "    thisnode = {'is_leaf'          : False, \n",
    "            'prediction'       : None,\n",
    "            'majority_class'   :majority_class,\n",
    "            'splitting_feature': splitting_feature,\n",
    "            'left'             : left_tree, \n",
    "            'right'            : right_tree}\n",
    "    \n",
    "    index_tree[current_depth].append(thisnode)\n",
    "    \n",
    "    return thisnode"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--------------------------------------------------\n",
      "Subtree, depth = 0 (73564 data points).\n",
      "Split on feature grade_F. (71229, 2335)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 1 (71229 data points).\n",
      "Split on feature grade_A. (57869, 13360)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 2 (57869 data points).\n",
      "Split on feature grade_G. (57232, 637)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (57232 data points).\n",
      "Split on feature grade_E. (51828, 5404)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (51828 data points).\n",
      "Split on feature grade_D. (40326, 11502)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (40326 data points).\n",
      "Split on feature term_ 36 months. (5760, 34566)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (5760 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (34566 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (11502 data points).\n",
      "Split on feature term_ 36 months. (3315, 8187)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (3315 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (8187 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (5404 data points).\n",
      "Split on feature term_ 36 months. (3185, 2219)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (3185 data points).\n",
      "Split on feature home_ownership_OTHER. (3184, 1)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (3184 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (1 data points).\n",
      "Stopping condition 1 reached.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (2219 data points).\n",
      "Split on feature emp_length_1 year. (2011, 208)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (2011 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (208 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (637 data points).\n",
      "Split on feature emp_length_3 years. (590, 47)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (590 data points).\n",
      "Split on feature emp_length_2 years. (541, 49)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (541 data points).\n",
      "Split on feature home_ownership_OWN. (495, 46)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (495 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (46 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (49 data points).\n",
      "Split on feature term_ 36 months. (32, 17)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (32 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (17 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "HERE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (47 data points).\n",
      "Split on feature home_ownership_OTHER. (46, 1)\n",
      "HERE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (46 data points).\n",
      "Split on feature home_ownership_OWN. (44, 2)\n",
      "HERE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (44 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "HERE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (2 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (1 data points).\n",
      "Stopping condition 1 reached.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 2 (13360 data points).\n",
      "Split on feature term_ 36 months. (259, 13101)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (259 data points).\n",
      "Split on feature emp_length_9 years. (252, 7)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (252 data points).\n",
      "Split on feature home_ownership_RENT. (202, 50)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (202 data points).\n",
      "Split on feature emp_length_8 years. (192, 10)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (192 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (10 data points).\n",
      "Stopping condition 1 reached.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (50 data points).\n",
      "Split on feature emp_length_4 years. (48, 2)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (48 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (2 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (7 data points).\n",
      "Stopping condition 1 reached.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (13101 data points).\n",
      "Split on feature home_ownership_MORTGAGE. (5830, 7271)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (5830 data points).\n",
      "Split on feature emp_length_7 years. (5592, 238)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (5592 data points).\n",
      "Split on feature emp_length_3 years. (5045, 547)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (5045 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (547 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (238 data points).\n",
      "Split on feature home_ownership_OWN. (184, 54)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (184 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (54 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (7271 data points).\n",
      "Split on feature emp_length_2 years. (6702, 569)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (6702 data points).\n",
      "Split on feature emp_length_4 years. (6234, 468)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (6234 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (468 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (569 data points).\n",
      "Split on feature emp_length_6 years. (569, 0)\n",
      "Creating leaf node.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 1 (2335 data points).\n",
      "Split on feature emp_length_7 years. (2197, 138)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 2 (2197 data points).\n",
      "Split on feature term_ 36 months. (1719, 478)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (1719 data points).\n",
      "Split on feature home_ownership_OTHER. (1717, 2)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (1717 data points).\n",
      "Split on feature emp_length_3 years. (1577, 140)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (1577 data points).\n",
      "Split on feature home_ownership_RENT. (904, 673)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (904 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (673 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (140 data points).\n",
      "Split on feature home_ownership_RENT. (73, 67)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (73 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (67 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (2 data points).\n",
      "Stopping condition 1 reached.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (478 data points).\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Split on feature emp_length_8 years. (460, 18)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (460 data points).\n",
      "Split on feature emp_length_4 years. (433, 27)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (433 data points).\n",
      "Split on feature home_ownership_MORTGAGE. (287, 146)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (287 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (146 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (27 data points).\n",
      "Split on feature home_ownership_OWN. (25, 2)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (25 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (2 data points).\n",
      "Stopping condition 1 reached.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (18 data points).\n",
      "Split on feature home_ownership_OWN. (17, 1)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (17 data points).\n",
      "Split on feature home_ownership_RENT. (6, 11)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (6 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (11 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (1 data points).\n",
      "Stopping condition 1 reached.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 2 (138 data points).\n",
      "Split on feature term_ 36 months. (109, 29)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (109 data points).\n",
      "Split on feature home_ownership_RENT. (51, 58)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (51 data points).\n",
      "Split on feature home_ownership_OWN. (43, 8)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (43 data points).\n",
      "Split on feature emp_length_6 years. (43, 0)\n",
      "Creating leaf node.\n",
      "HERE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (8 data points).\n",
      "Split on feature emp_length_6 years. (8, 0)\n",
      "Creating leaf node.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (58 data points).\n",
      "Split on feature emp_length_6 years. (58, 0)\n",
      "Creating leaf node.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (29 data points).\n",
      "Split on feature home_ownership_OWN. (25, 4)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (25 data points).\n",
      "Split on feature home_ownership_RENT. (12, 13)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (12 data points).\n",
      "Split on feature emp_length_6 years. (12, 0)\n",
      "Creating leaf node.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (13 data points).\n",
      "Split on feature emp_length_6 years. (13, 0)\n",
      "Creating leaf node.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (4 data points).\n",
      "Split on feature emp_length_6 years. (4, 0)\n",
      "Creating leaf node.\n"
     ]
    }
   ],
   "source": [
    "# Build the unpruned reference tree. index_tree records every internal\n",
    "# node per depth (0..max_depth) so post_pruning can later walk the nodes\n",
    "# bottom-up without re-traversing the tree.\n",
    "max_depth = 6\n",
    "index_tree = dict()\n",
    "for i in range(0,max_depth+1):\n",
    "    index_tree[i] = list()\n",
    "tree_without_pruning = no_pruning_decision_tree_create(train_data,validation_data, one_hot_features, target,index_tree, 'gain_ratio', max_depth = max_depth, annotate = False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "def post_pruning(tree,index_tree,max_depth,validation_data,target):\n",
    "    \"\"\"Reduced-error post-pruning of a decision tree.\n",
    "\n",
    "    Visits the recorded internal nodes from the deepest level up to the\n",
    "    root. Each node is tentatively collapsed into a leaf predicting the\n",
    "    node's majority class; the collapse is kept only when accuracy on the\n",
    "    validation set strictly improves, otherwise the subtree is restored.\n",
    "    Mutates ``tree`` in place and also returns it.\n",
    "\n",
    "    tree:            dict, root node of the decision tree\n",
    "    index_tree:      dict, depth -> list of internal node dicts at that depth\n",
    "    max_depth:       int, maximum depth used when the tree was built\n",
    "    validation_data: DataFrame scored by predict() to compare candidates\n",
    "    target:          str, name of the label column in validation_data\n",
    "    \"\"\"\n",
    "    for i in range(max_depth-1,-1,-1):\n",
    "        for j in range(len(index_tree[i])):\n",
    "            # Save the current node's information\n",
    "            node = index_tree[i][j]\n",
    "            node_label = node['majority_class']\n",
    "            left_label = 1\n",
    "            right_label = 1\n",
    "            if node['left']['is_leaf']==True :\n",
    "                left_label = node['left']['prediction']\n",
    "            else: left_label = node['left']['majority_class']\n",
    "            if node['right']['is_leaf']==True :\n",
    "                right_label = node['right']['prediction']\n",
    "            else: right_label = node['right']['majority_class']\n",
    "            # If this node's majority label equals both children's majority\n",
    "            # labels, collapsing the subtree cannot change validation\n",
    "            # accuracy, so the (expensive) re-scoring can be skipped.\n",
    "            if node_label == left_label and node_label == right_label:\n",
    "                print(\"当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\")\n",
    "                continue\n",
    "                \n",
    "            node_info = dict(node)  # shallow copy so the subtree can be restored\n",
    "            prediction = predict(tree,validation_data)\n",
    "            np_score = accuracy_score(validation_data[target],prediction)\n",
    "            print('未替换前验证精度：',np_score)\n",
    "            \n",
    "            # Tentatively turn the node into a leaf that predicts its\n",
    "            # majority class.\n",
    "            majority_class = node['majority_class']\n",
    "            node['splitting_feature'] = None\n",
    "            node['left'] = None\n",
    "            node['right'] = None\n",
    "            node['is_leaf'] = True\n",
    "            node['prediction'] = majority_class\n",
    "            \n",
    "            #print(tree)\n",
    "            \n",
    "            prediction = predict(tree,validation_data)\n",
    "            p_score = accuracy_score(validation_data[target],prediction)\n",
    "            print('替换后验证精度：',p_score)\n",
    "            \n",
    "            # Keep the pruned leaf only on a strict improvement; otherwise\n",
    "            # restore the saved subtree fields from the shallow copy.\n",
    "            if p_score>np_score :\n",
    "                print('进行剪枝，将%s变为叶子结点'%(node_info['splitting_feature']))\n",
    "            else:\n",
    "                print('不剪枝')\n",
    "                node['splitting_feature'] = node_info['splitting_feature']\n",
    "                node['left'] = node_info['left']\n",
    "                node['right'] = node_info['right']\n",
    "                node['is_leaf'] = node_info['is_leaf']\n",
    "                node['prediction'] = node_info['prediction']\n",
    "                node['majority_class'] = node_info['majority_class']\n",
    "    return tree"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_echarts_data(tree,count):\n",
    "    \"\"\"Convert a decision-tree dict into the nested dict format that the\n",
    "    pyecharts Tree chart consumes, tallying nodes in ``count`` as it goes.\n",
    "\n",
    "    count['left'] is incremented once per leaf and count['bifurcation']\n",
    "    once per internal (splitting) node.\n",
    "    \"\"\"\n",
    "    # Leaf: a terminal chart node labelled with the predicted class.\n",
    "    if tree['is_leaf']:\n",
    "        count['left'] = count['left'] + 1\n",
    "        return {\n",
    "            'value': tree['prediction'],\n",
    "            'name': 'label: %s'%(tree['prediction']),\n",
    "        }\n",
    "    # Internal node: named after its splitting feature, with the two\n",
    "    # subtrees rendered recursively as its children.\n",
    "    count['bifurcation'] = count['bifurcation'] + 1\n",
    "    return {\n",
    "        'name': tree['splitting_feature'],\n",
    "        'children': [\n",
    "            generate_echarts_data(tree['left'],count),\n",
    "            generate_echarts_data(tree['right'],count),\n",
    "        ],\n",
    "    }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "ERROR:lml.utils:failed to import pyecharts_snapshot\n",
      "Traceback (most recent call last):\n",
      "  File \"D:\\Users\\11979\\Anaconda3\\envs\\tens\\lib\\site-packages\\lml\\utils.py\", line 43, in do_import\n",
      "    plugin_module = __import__(plugin_module_name)\n",
      "ImportError: No module named 'pyecharts_snapshot'\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'bifurcation': 44, 'left': 45}\n"
     ]
    }
   ],
   "source": [
    "# from pyecharts.charts import Tree   # (import path for pyecharts >= 1.x)\n",
    "from pyecharts import Tree  # pyecharts 0.x API, matching the .add() kwargs below\n",
    "# Convert the unpruned tree to chart data while counting its leaves and\n",
    "# internal nodes.\n",
    "np_count = dict()\n",
    "np_count['left']=0\n",
    "np_count['bifurcation'] = 0\n",
    "np_data = generate_echarts_data(tree_without_pruning,np_count)\n",
    "# NOTE(review): this rebinds the notebook-level name 'tree' to a chart\n",
    "# object — later cells must not rely on 'tree' still being a decision tree.\n",
    "tree = Tree()\n",
    "tree.add(\"\",\n",
    "         [np_data],\n",
    "         collapse_interval=5,\n",
    "         pos_top=\"5%\",\n",
    "         pos_left=\"0%\",\n",
    "         symbol = 'rect',\n",
    "         symbol_size = 20\n",
    "         )\n",
    "print(np_count)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--------------------------------------------------\n",
      "Subtree, depth = 0 (73564 data points).\n",
      "Split on feature grade_F. (71229, 2335)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 1 (71229 data points).\n",
      "Split on feature grade_A. (57869, 13360)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 2 (57869 data points).\n",
      "Split on feature grade_G. (57232, 637)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (57232 data points).\n",
      "Split on feature grade_E. (51828, 5404)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (51828 data points).\n",
      "Split on feature grade_D. (40326, 11502)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (40326 data points).\n",
      "Split on feature term_ 36 months. (5760, 34566)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (5760 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (34566 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (11502 data points).\n",
      "Split on feature term_ 36 months. (3315, 8187)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (3315 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (8187 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (5404 data points).\n",
      "Split on feature term_ 36 months. (3185, 2219)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (3185 data points).\n",
      "Split on feature home_ownership_OTHER. (3184, 1)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (3184 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (1 data points).\n",
      "Stopping condition 1 reached.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (2219 data points).\n",
      "Split on feature emp_length_1 year. (2011, 208)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (2011 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (208 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (637 data points).\n",
      "Split on feature emp_length_3 years. (590, 47)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (590 data points).\n",
      "Split on feature emp_length_2 years. (541, 49)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (541 data points).\n",
      "Split on feature home_ownership_OWN. (495, 46)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (495 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (46 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (49 data points).\n",
      "Split on feature term_ 36 months. (32, 17)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (32 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (17 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "HERE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (47 data points).\n",
      "Split on feature home_ownership_OTHER. (46, 1)\n",
      "HERE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (46 data points).\n",
      "Split on feature home_ownership_OWN. (44, 2)\n",
      "HERE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (44 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "HERE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (2 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (1 data points).\n",
      "Stopping condition 1 reached.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 2 (13360 data points).\n",
      "Split on feature term_ 36 months. (259, 13101)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (259 data points).\n",
      "Split on feature emp_length_9 years. (252, 7)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (252 data points).\n",
      "Split on feature home_ownership_RENT. (202, 50)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (202 data points).\n",
      "Split on feature emp_length_8 years. (192, 10)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (192 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (10 data points).\n",
      "Stopping condition 1 reached.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (50 data points).\n",
      "Split on feature emp_length_4 years. (48, 2)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (48 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (2 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (7 data points).\n",
      "Stopping condition 1 reached.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (13101 data points).\n",
      "Split on feature home_ownership_MORTGAGE. (5830, 7271)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (5830 data points).\n",
      "Split on feature emp_length_7 years. (5592, 238)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (5592 data points).\n",
      "Split on feature emp_length_3 years. (5045, 547)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (5045 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (547 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (238 data points).\n",
      "Split on feature home_ownership_OWN. (184, 54)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (184 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (54 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (7271 data points).\n",
      "Split on feature emp_length_2 years. (6702, 569)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (6702 data points).\n",
      "Split on feature emp_length_4 years. (6234, 468)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (6234 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (468 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (569 data points).\n",
      "Split on feature emp_length_6 years. (569, 0)\n",
      "Creating leaf node.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 1 (2335 data points).\n",
      "Split on feature emp_length_7 years. (2197, 138)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 2 (2197 data points).\n",
      "Split on feature term_ 36 months. (1719, 478)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (1719 data points).\n",
      "Split on feature home_ownership_OTHER. (1717, 2)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (1717 data points).\n",
      "Split on feature emp_length_3 years. (1577, 140)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (1577 data points).\n",
      "Split on feature home_ownership_RENT. (904, 673)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (904 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (673 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (140 data points).\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Split on feature home_ownership_RENT. (73, 67)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (73 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (67 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (2 data points).\n",
      "Stopping condition 1 reached.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (478 data points).\n",
      "Split on feature emp_length_8 years. (460, 18)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (460 data points).\n",
      "Split on feature emp_length_4 years. (433, 27)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (433 data points).\n",
      "Split on feature home_ownership_MORTGAGE. (287, 146)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (287 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (146 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (27 data points).\n",
      "Split on feature home_ownership_OWN. (25, 2)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (25 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (2 data points).\n",
      "Stopping condition 1 reached.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (18 data points).\n",
      "Split on feature home_ownership_OWN. (17, 1)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (17 data points).\n",
      "Split on feature home_ownership_RENT. (6, 11)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (6 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 6 (11 data points).\n",
      "Reached maximum depth. Stopping for now.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (1 data points).\n",
      "Stopping condition 1 reached.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 2 (138 data points).\n",
      "Split on feature term_ 36 months. (109, 29)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (109 data points).\n",
      "Split on feature home_ownership_RENT. (51, 58)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (51 data points).\n",
      "Split on feature home_ownership_OWN. (43, 8)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (43 data points).\n",
      "Split on feature emp_length_6 years. (43, 0)\n",
      "Creating leaf node.\n",
      "HERE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (8 data points).\n",
      "Split on feature emp_length_6 years. (8, 0)\n",
      "Creating leaf node.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (58 data points).\n",
      "Split on feature emp_length_6 years. (58, 0)\n",
      "Creating leaf node.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 3 (29 data points).\n",
      "Split on feature home_ownership_OWN. (25, 4)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (25 data points).\n",
      "Split on feature home_ownership_RENT. (12, 13)\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (12 data points).\n",
      "Split on feature emp_length_6 years. (12, 0)\n",
      "Creating leaf node.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 5 (13 data points).\n",
      "Split on feature emp_length_6 years. (13, 0)\n",
      "Creating leaf node.\n",
      "--------------------------------------------------\n",
      "Subtree, depth = 4 (4 data points).\n",
      "Split on feature emp_length_6 years. (4, 0)\n",
      "Creating leaf node.\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147302312303739\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147302312303739\n",
      "替换后验证精度： 0.8147710126014437\n",
      "进行剪枝，将home_ownership_OWN变为叶子结点\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8147710126014437\n",
      "替换后验证精度： 0.8148117939725134\n",
      "进行剪枝，将home_ownership_OWN变为叶子结点\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8148117939725134\n",
      "替换后验证精度： 0.8148117939725134\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.8148117939725134\n",
      "替换后验证精度： 0.814852575343583\n",
      "进行剪枝，将home_ownership_OWN变为叶子结点\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.814852575343583\n",
      "替换后验证精度： 0.814852575343583\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.814852575343583\n",
      "替换后验证精度： 0.814852575343583\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.814852575343583\n",
      "替换后验证精度： 0.814852575343583\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "未替换前验证精度： 0.814852575343583\n",
      "替换后验证精度： 0.8147710126014437\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.814852575343583\n",
      "替换后验证精度： 0.814852575343583\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.814852575343583\n",
      "替换后验证精度： 0.814852575343583\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.814852575343583\n",
      "替换后验证精度： 0.814852575343583\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.814852575343583\n",
      "替换后验证精度： 0.814852575343583\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.814852575343583\n",
      "替换后验证精度： 0.814852575343583\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.814852575343583\n",
      "替换后验证精度： 0.814852575343583\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.814852575343583\n",
      "替换后验证精度： 0.8147710126014437\n",
      "不剪枝\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "未替换前验证精度： 0.814852575343583\n",
      "替换后验证精度： 0.8150564821989316\n",
      "进行剪枝，将home_ownership_RENT变为叶子结点\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "当前结点的多数label和他的左右孩子的多数标签是一样的,跳过验证\n",
      "{'bifurcation': 39, 'left': 40}\n",
      "根据两次对分叉结点的统计可以看出，确实进行了剪枝(由于数据问题，如果要通过看图判断的话，会看瞎眼的)\n"
     ]
    }
   ],
   "source": [
    "# Grow a full (unpruned) tree first, then apply post-pruning, which uses\n",
    "# the validation set to decide whether replacing a subtree with a leaf helps.\n",
    "tree_with_pruning = no_pruning_decision_tree_create(train_data,validation_data, one_hot_features, target,index_tree, 'gain_ratio', max_depth = max_depth, annotate = False)\n",
    "tree_with_pruning = post_pruning(tree_with_pruning,index_tree,max_depth,validation_data,target)\n",
    "\n",
    "# Count node kinds of the pruned tree while converting it to echarts data.\n",
    "p_count = {'left': 0, 'bifurcation': 0}\n",
    "p_data = generate_echarts_data(tree_with_pruning, p_count)\n",
    "print(p_count)\n",
    "\n",
    "# If the bifurcation-node counts differ from the unpruned tree's np_count\n",
    "# (computed in an earlier cell), pruning actually removed some splits.\n",
    "if np_count['bifurcation'] != p_count['bifurcation']:\n",
    "    print(\"根据两次对分叉结点的统计可以看出，确实进行了剪枝(由于数据问题，如果要通过看图判断的话，会看瞎眼的)\")\n",
    "\n",
    "# Render the pruned tree in the lower half of the shared echarts figure.\n",
    "tree.add(\"\",\n",
    "         [p_data],\n",
    "         collapse_interval=5,\n",
    "         pos_top=\"55%\",\n",
    "         pos_left=\"0%\",\n",
    "         symbol = 'rect',\n",
    "         symbol_size = 20\n",
    "         )\n",
    "tree.render()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "不剪枝\n",
      "0.8088247288149417 0.8097397556890141 0.9984383658253992 0.8942429164410755\n",
      "后剪枝\n",
      "0.8091917461870973 0.8095821090434214 0.9993451211525868 0.8945102017810844\n"
     ]
    }
   ],
   "source": [
    "# Compare the unpruned and post-pruned trees on the held-out test set.\n",
    "# The original cell copy-pasted the four-metric print block twice; a small\n",
    "# helper keeps the two evaluations guaranteed-identical in methodology.\n",
    "y_true = test_data['safe_loans']\n",
    "\n",
    "def report_metrics(label, tree_model):\n",
    "    # Print accuracy, precision, recall and F1 of one tree on test_data.\n",
    "    pred = predict(tree_model, test_data)\n",
    "    print(label)\n",
    "    print(accuracy_score(y_true, pred), precision_score(y_true, pred),\n",
    "          recall_score(y_true, pred), f1_score(y_true, pred))\n",
    "\n",
    "report_metrics(\"不剪枝\", tree_without_pruning)\n",
    "report_metrics(\"后剪枝\", tree_with_pruning)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 结果对比：剪枝 vs 不剪枝\n",
    "\n",
    "模型|精度|查准率|查全率|F1\n",
    "-|-|-|-|-\n",
    "无后剪枝| 0.8088247288149417  | 0.8097397556890141  | 0.9984383658253992  | 0.8942429164410755\n",
    "有后剪枝| 0.8091917461870973   | 0.8095821090434214   | 0.9993451211525868   | 0.8945102017810844"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
