{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 回归问题\n",
    "- 根据特征X寻找切分点  \n",
    "    - 离散:直接进行分裂，计算每一个分裂点的Y的均值作为分裂的预测值，计算实际值-预测值的误差（均方误差），选取误差最小的分裂点  \n",
    "    - 连续:分别寻找X的间隔的中间数，作为分裂点，计算每一个分裂点的Y的均值作为分裂的预测值，计算实际值-预测值的误差（均方误差），选取误差最小的分裂点  \n",
    "- 计算y-pre的误差记为残差，用残差当新的y值进行下一次迭代（下一棵树）  \n",
    "    特征：X不变  \n",
    "    目标值：y = r（残差）= y上一轮 - pre上一轮  \n",
    "- 重新寻找切分点  \n",
    "\n",
    "## 统计学习上关于提升树的例子\n",
    "x = [1,2,3,4,5,6,7,8,9,10]  \n",
    "y = [5.56,5.70,5.91,6.40,6.80,7.05,8.9,8.70,9.00,9.05]  \n",
    "\n",
    "## 两列特征\n",
    "x = [5,7,21,30]  年龄  \n",
    "x2 = [20,30,70,60] 体重  \n",
    "y = [1.1,1.3,1.7,1.8]   身高  \n",
    "当x_test = [7,30]时， y为多少  \n",
    "如果包含两列特征，需要逐个寻找每一列特征的最优分裂点，比较得出两者之间最优的。这里不修改findBestSplitPoint，在外面遍历最优分裂点"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 0 次迭代 误差： 0.024999999999999998 切分点 14.0 特征索引: 0 残差： [-0.1   0.1  -0.05  0.05]\n",
      "第 1 次迭代 误差： 0.011666666666666659 切分点 6.0 特征索引: 0 残差： [ 0.          0.06666667 -0.08333333  0.01666667]\n",
      "第 2 次迭代 误差： 0.002407407407407397 切分点 65.0 特征索引: 1 残差： [-0.02777778  0.03888889  0.         -0.01111111]\n",
      "第 3 次迭代 误差： 0.0013786008230452578 切分点 6.0 特征索引: 0 残差： [ 0.          0.02962963 -0.00925926 -0.02037037]\n",
      "第 4 次迭代 误差： 0.0005006858710562376 切分点 14.0 特征索引: 0 残差： [-0.01481481  0.01481481  0.00555556 -0.00555556]\n",
      "第 5 次迭代 误差： 0.00020804755372656405 切分点 6.0 特征索引: 0 残差： [ 0.          0.00987654  0.00061728 -0.01049383]\n",
      "第 0 棵树: 1.2000000000000002\n",
      "第 1 棵树: 0.03333333333333329\n",
      "第 2 棵树: 0.027777777777777773\n",
      "第 3 棵树: 0.009259259259259257\n",
      "第 4 棵树: 0.014814814814814767\n",
      "第 5 棵树: 0.004938271604938256\n",
      "年龄体重为[7, 30],y预测身高1.2901234567901234\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "from sklearn.datasets import load_iris\n",
    "\n",
    "# 统计学习书上的例子\n",
    "# x = [1,2,3,4,5,6,7,8,9,10]\n",
    "# y = [5.56,5.70,5.91,6.40,6.80,7.05,8.9,8.70,9.00,9.05]\n",
    "# X_features= [x]\n",
    "\n",
    "# 身高体重的例子\n",
    "x1 = [5, 7, 21, 30]\n",
    "x2 = [20, 30, 70, 60]\n",
    "y = [1.1, 1.3, 1.7, 1.8]\n",
    "X_features = [x1, x2]\n",
    "x_array = np.array(X_features)\n",
    "y_array = np.array(y)\n",
    "\n",
    "'''\n",
    "针对离散数值，按照两数之间的间隔，寻找切分点的数组\n",
    "Arg:\n",
    "   x:特征list\n",
    "   [1,2,3,4,5,6,7,8,9,10]  \n",
    "return:\n",
    "  切分后的list\n",
    "  [1.5,2.5,3.5,4.5,6.5,7.5,8.5,9.5]    \n",
    "'''\n",
    "\n",
    "\n",
    "def find_split_point_array(x):\n",
    "    x = sorted(x)\n",
    "    split_point_array = []\n",
    "    for i in range(len(x) - 1):\n",
    "        split_point_array.append((x[i] + x[i + 1]) / 2)\n",
    "    return split_point_array\n",
    "\n",
    "\n",
    "##测试最优分裂点\n",
    "# x = [1,2,3,4,5,6,7,8,9,10]\n",
    "# find_split_point_array(x)\n",
    "\n",
    "def findBestSplitPoint(x_array, y_array):\n",
    "    \"\"\"Find the (feature, split point) pair minimising squared error.\n",
    "\n",
    "    Args:\n",
    "        x_array: 2-D array-like, one row per feature.\n",
    "        y_array: 1-D array of current targets (residuals from prior trees).\n",
    "    Returns:\n",
    "        (minloss, best_split_point, residual_array, best_c1, best_c2,\n",
    "         feature_index)\n",
    "    \"\"\"\n",
    "    minloss = np.inf\n",
    "    best_split_point = 0\n",
    "    best_c1 = 0\n",
    "    best_c2 = 0\n",
    "    best_index_R1 = None  # fixed typo: was 'bset_index_R1'\n",
    "    best_index_R2 = None\n",
    "    feature_index = 0\n",
    "\n",
    "    for i in range(len(x_array)):\n",
    "        split_point_array = find_split_point_array(x_array[i])\n",
    "\n",
    "        # Try every candidate split point of this feature.\n",
    "        for c in split_point_array:\n",
    "            # Partition samples into R1 (< c) and R2 (>= c) regions.\n",
    "            index_R1 = np.where(x_array[i] < c)\n",
    "            index_R2 = np.where(x_array[i] >= c)\n",
    "            R1 = y_array[index_R1]\n",
    "            R2 = y_array[index_R2]\n",
    "            c1 = np.mean(R1)\n",
    "            c2 = np.mean(R2)\n",
    "\n",
    "            # Squared error of predicting each region by its mean.\n",
    "            loss = np.sum((R1 - c1) ** 2) + np.sum((R2 - c2) ** 2)\n",
    "\n",
    "            if loss < minloss:\n",
    "                minloss = loss\n",
    "                best_split_point = c\n",
    "                best_c1 = c1\n",
    "                best_c2 = c2\n",
    "                best_index_R1 = index_R1\n",
    "                best_index_R2 = index_R2\n",
    "                feature_index = i\n",
    "\n",
    "    # Residuals (negative gradient of squared error: y - f) for the next\n",
    "    # tree. NOTE(review): strictly f should be the sum of all previous\n",
    "    # trees; since y_array already holds the running residuals here,\n",
    "    # subtracting only this tree's leaf value is equivalent.\n",
    "    # Use a copy so the caller's y_array is not mutated in place.\n",
    "    residual_array = y_array.copy()\n",
    "    residual_array[best_index_R1] = y_array[best_index_R1] - best_c1\n",
    "    residual_array[best_index_R2] = y_array[best_index_R2] - best_c2\n",
    "\n",
    "    return minloss, best_split_point, residual_array, best_c1, best_c2, feature_index\n",
    "\n",
    "\n",
    "'''\n",
    "针对多个特征，计算模型输出值，这里模型多了一个特征索引值\n",
    "Args\n",
    "   x:输入的特征\n",
    "   model:list包含4个值\n",
    "          best_split_point:分裂点\n",
    "          best_c1:左节点小于分裂点\n",
    "          best_c2:右节点大于分裂点\n",
    "          index:特征索引\n",
    "return\n",
    "   预测值\n",
    "'''\n",
    "\n",
    "\n",
    "def T_feature_n(x, model):\n",
    "    # 根据切分点，计算预测值\n",
    "    best_split_point = model[0]\n",
    "    best_c1 = model[1]\n",
    "    best_c2 = model[2]\n",
    "    feature_index = model[3]\n",
    "    if x[feature_index] < best_split_point:\n",
    "        return best_c1\n",
    "    else:\n",
    "        return best_c2\n",
    "\n",
    "\n",
    "model_save = []\n",
    "maxiter = 6\n",
    "# 记录下该模型参数\n",
    "for iter_n in range(maxiter):\n",
    "    # 寻找一个分裂点，使得均方误差最小\n",
    "    minloss, best_split_point, residual_array, best_c1, best_c2, feature_index = findBestSplitPoint(x_array, y_array)\n",
    "    model_save.append([best_split_point, best_c1, best_c2, feature_index])\n",
    "    # 利用残差进行更新\n",
    "    y_array = residual_array\n",
    "    print('第', iter_n, '次迭代', '误差：', minloss, '切分点', best_split_point, '特征索引:', feature_index, '残差：', residual_array)\n",
    "\n",
    "# 初始化第0棵树值为0\n",
    "f = 0\n",
    "# 30岁，60斤 的身高是多少\n",
    "x_test = [7, 30]\n",
    "for i in range(len(model_save)):\n",
    "    print('第', i, '棵树:', T_feature_n(x_test, model_save[i]))\n",
    "    f += T_feature_n(x_test, model_save[i])\n",
    "print('年龄体重为{},y预测身高{}'.format(x_test, f))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 分类\n",
    "GBDT的分类算法从思想上和GBDT的回归算法没有区别，但是由于样本输出不是连续的值，而是离散的类别，导致我们无法直接从输出类别去拟合类别输出的误差。为了解决这个问题，主要有两个方法：  \n",
    "- 一个是用指数损失函数，此时GBDT退化为Adaboost算法（这里有疑问，两种算法的思想是不同的，但是在损失函数，计算公式上有相似）。  \n",
    "- 一种方法是用类似于逻辑回归的对数似然损失函数的方法。也就是说，我们用的是类别的预测概率值和真实概率值的差来拟合损失。\n",
    "\n",
    "## 二分类\n",
    "- 与回归类似，只不过需要对预测值进行一个 sigmoid 转化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 181,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.48040220212170437 0.2897596342565125\n",
      "第 0 次迭代 误差： 6.3759296822177145 切分点 5.55 特征索引: 0\n",
      "0.5 0.5\n",
      "第 1 次迭代 误差： 0.0 切分点 4.35 特征索引: 0\n",
      "0.5 0.5\n",
      "第 2 次迭代 误差： 0.0 切分点 4.35 特征索引: 0\n",
      "0.5 0.5\n",
      "第 3 次迭代 误差： 0.0 切分点 4.35 特征索引: 0\n",
      "0.5 0.5\n",
      "第 4 次迭代 误差： 0.0 切分点 4.35 特征索引: 0\n",
      "0.5 0.5\n",
      "第 5 次迭代 误差： 0.0 切分点 4.35 特征索引: 0\n",
      "花的特征为[5.1 3.5],y预测分类0.0784313725490196\n",
      "花的特征为[4.9 3. ],y预测分类0.0784313725490196\n",
      "花的特征为[4.7 3.2],y预测分类0.0784313725490196\n",
      "花的特征为[4.6 3.1],y预测分类0.0784313725490196\n",
      "花的特征为[5.  3.6],y预测分类0.0784313725490196\n",
      "花的特征为[5.4 3.9],y预测分类0.0784313725490196\n",
      "花的特征为[4.6 3.4],y预测分类0.0784313725490196\n",
      "花的特征为[5.  3.4],y预测分类0.0784313725490196\n",
      "花的特征为[4.4 2.9],y预测分类0.0784313725490196\n",
      "花的特征为[4.9 3.1],y预测分类0.0784313725490196\n",
      "花的特征为[5.4 3.7],y预测分类0.0784313725490196\n",
      "花的特征为[4.8 3.4],y预测分类0.0784313725490196\n",
      "花的特征为[4.8 3. ],y预测分类0.0784313725490196\n",
      "花的特征为[4.3 3. ],y预测分类0.0784313725490196\n",
      "花的特征为[5.8 4. ],y预测分类0.896551724137931\n",
      "花的特征为[5.7 4.4],y预测分类0.896551724137931\n",
      "花的特征为[5.4 3.9],y预测分类0.0784313725490196\n",
      "花的特征为[5.1 3.5],y预测分类0.0784313725490196\n",
      "花的特征为[5.7 3.8],y预测分类0.896551724137931\n",
      "花的特征为[5.1 3.8],y预测分类0.0784313725490196\n",
      "花的特征为[5.4 3.4],y预测分类0.0784313725490196\n",
      "花的特征为[5.1 3.7],y预测分类0.0784313725490196\n",
      "花的特征为[4.6 3.6],y预测分类0.0784313725490196\n",
      "花的特征为[5.1 3.3],y预测分类0.0784313725490196\n",
      "花的特征为[4.8 3.4],y预测分类0.0784313725490196\n",
      "花的特征为[5. 3.],y预测分类0.0784313725490196\n",
      "花的特征为[5.  3.4],y预测分类0.0784313725490196\n",
      "花的特征为[5.2 3.5],y预测分类0.0784313725490196\n",
      "花的特征为[5.2 3.4],y预测分类0.0784313725490196\n",
      "花的特征为[4.7 3.2],y预测分类0.0784313725490196\n",
      "花的特征为[4.8 3.1],y预测分类0.0784313725490196\n",
      "花的特征为[5.4 3.4],y预测分类0.0784313725490196\n",
      "花的特征为[5.2 4.1],y预测分类0.0784313725490196\n",
      "花的特征为[5.5 4.2],y预测分类0.0784313725490196\n",
      "花的特征为[4.9 3.1],y预测分类0.0784313725490196\n",
      "花的特征为[5.  3.2],y预测分类0.0784313725490196\n",
      "花的特征为[5.5 3.5],y预测分类0.0784313725490196\n",
      "花的特征为[4.9 3.6],y预测分类0.0784313725490196\n",
      "花的特征为[4.4 3. ],y预测分类0.0784313725490196\n",
      "花的特征为[5.1 3.4],y预测分类0.0784313725490196\n",
      "花的特征为[5.  3.5],y预测分类0.0784313725490196\n",
      "花的特征为[4.5 2.3],y预测分类0.0784313725490196\n",
      "花的特征为[4.4 3.2],y预测分类0.0784313725490196\n",
      "花的特征为[5.  3.5],y预测分类0.0784313725490196\n",
      "花的特征为[5.1 3.8],y预测分类0.0784313725490196\n",
      "花的特征为[4.8 3. ],y预测分类0.0784313725490196\n",
      "花的特征为[5.1 3.8],y预测分类0.0784313725490196\n",
      "花的特征为[4.6 3.2],y预测分类0.0784313725490196\n",
      "花的特征为[5.3 3.7],y预测分类0.0784313725490196\n",
      "花的特征为[5.  3.3],y预测分类0.0784313725490196\n",
      "花的特征为[7.  3.2],y预测分类0.896551724137931\n",
      "花的特征为[6.4 3.2],y预测分类0.896551724137931\n",
      "花的特征为[6.9 3.1],y预测分类0.896551724137931\n",
      "花的特征为[5.5 2.3],y预测分类0.0784313725490196\n",
      "花的特征为[6.5 2.8],y预测分类0.896551724137931\n",
      "花的特征为[5.7 2.8],y预测分类0.896551724137931\n",
      "花的特征为[6.3 3.3],y预测分类0.896551724137931\n",
      "花的特征为[4.9 2.4],y预测分类0.0784313725490196\n",
      "花的特征为[6.6 2.9],y预测分类0.896551724137931\n",
      "花的特征为[5.2 2.7],y预测分类0.0784313725490196\n",
      "花的特征为[5. 2.],y预测分类0.0784313725490196\n",
      "花的特征为[5.9 3. ],y预测分类0.896551724137931\n",
      "花的特征为[6.  2.2],y预测分类0.896551724137931\n",
      "花的特征为[6.1 2.9],y预测分类0.896551724137931\n",
      "花的特征为[5.6 2.9],y预测分类0.896551724137931\n",
      "花的特征为[6.7 3.1],y预测分类0.896551724137931\n",
      "花的特征为[5.6 3. ],y预测分类0.896551724137931\n",
      "花的特征为[5.8 2.7],y预测分类0.896551724137931\n",
      "花的特征为[6.2 2.2],y预测分类0.896551724137931\n",
      "花的特征为[5.6 2.5],y预测分类0.896551724137931\n",
      "花的特征为[5.9 3.2],y预测分类0.896551724137931\n",
      "花的特征为[6.1 2.8],y预测分类0.896551724137931\n",
      "花的特征为[6.3 2.5],y预测分类0.896551724137931\n",
      "花的特征为[6.1 2.8],y预测分类0.896551724137931\n",
      "花的特征为[6.4 2.9],y预测分类0.896551724137931\n",
      "花的特征为[6.6 3. ],y预测分类0.896551724137931\n",
      "花的特征为[6.8 2.8],y预测分类0.896551724137931\n",
      "花的特征为[6.7 3. ],y预测分类0.896551724137931\n",
      "花的特征为[6.  2.9],y预测分类0.896551724137931\n",
      "花的特征为[5.7 2.6],y预测分类0.896551724137931\n"
     ]
    }
   ],
   "source": [
    "from sklearn.datasets import load_iris\n",
    "def sigmoid(f):\n",
    "    # Logistic function: 1 / (1 + e^(-f)).\n",
    "    # Fix: the original used np.exp(f), which computes sigmoid(-f)\n",
    "    # and inverts the predicted probabilities.\n",
    "    return 1 / (1 + np.exp(-f))\n",
    "\n",
    "\n",
    "'''\n",
    "针对离散数值，按照两数之间的间隔，寻找切分点的数组\n",
    "Arg:\n",
    "   x:特征list\n",
    "   [1,2,3,4,5,6,7,8,9,10]  \n",
    "return:\n",
    "  切分后的list\n",
    "  [1.5,2.5,3.5,4.5,6.5,7.5,8.5,9.5]    \n",
    "'''\n",
    "def find_split_point_array(x):\n",
    "    x = sorted(x)\n",
    "    split_point_array = []\n",
    "    for i in range(len(x) - 1):\n",
    "        split_point_array.append((x[i] + x[i + 1]) / 2)\n",
    "    return split_point_array\n",
    "\n",
    "'''\n",
    "针对多个特征，计算模型输出值，这里模型多了一个特征索引值\n",
    "Args\n",
    "   x:输入的特征\n",
    "   model:list包含4个值\n",
    "          best_split_point:分裂点\n",
    "          best_c1:左节点小于分裂点\n",
    "          best_c2:右节点大于分裂点\n",
    "          index:特征索引\n",
    "return\n",
    "   预测值\n",
    "'''\n",
    "def T_feature_n(x, model):\n",
    "    # 根据切分点，计算预测值\n",
    "    best_split_point = model[0]\n",
    "    best_c1 = model[1]\n",
    "    best_c2 = model[2]\n",
    "    feature_index = model[3]\n",
    "    if x[feature_index] < best_split_point:\n",
    "        return best_c1\n",
    "    else:\n",
    "        return best_c2\n",
    "\n",
    "def findBestSplitPoint(x_array, y_array):\n",
    "    minloss = np.inf\n",
    "    best_split_point = 0\n",
    "    best_c1 = 0\n",
    "    best_c2 = 0\n",
    "    best_R1 = 0\n",
    "    best_R2 = 0\n",
    "    # 可以加上学习率，缓慢降低。 学习率为1的话 ，相当于不加\n",
    "    learning_rate = 1\n",
    "    for i in range(len(x_array)):\n",
    "        split_point_array = find_split_point_array(x_array[i])\n",
    "\n",
    "        # 遍历切分点\n",
    "        for c in split_point_array:\n",
    "            loss = 0\n",
    "            # 划分R1/R2区域\n",
    "            index_R1 = np.where(x_array[i] < c)\n",
    "            index_R2 = np.where(x_array[i] >= c)\n",
    "            R1 = y_array[index_R1]\n",
    "            R2 = y_array[index_R2]\n",
    "            c1 = np.mean(R1)\n",
    "            c2 = np.mean(R2)\n",
    "\n",
    "            # 计算均方误差最小的分裂点\n",
    "            loss1 = np.sum((R1 - c1) ** 2)\n",
    "            loss2 = np.sum((R2 - c2) ** 2)\n",
    "            loss = loss1 + loss2\n",
    "\n",
    "            if loss < minloss:\n",
    "                minloss = loss\n",
    "                best_split_point = c\n",
    "                best_c1 = learning_rate * c1\n",
    "                best_c2 = learning_rate * c2\n",
    "                bset_index_R1 = index_R1\n",
    "                bset_index_R2 = index_R2\n",
    "                feature_index = i\n",
    "\n",
    "    # 残值结果，用于下一次拟合\n",
    "    # 均方误差的负梯度是残差 y - f\n",
    "    #这里可以抽出来一个方法 专门计算残差。需要传入模型（叶子结点、分裂点）、原始y、预测值（也就是叶子节点值）\n",
    "    residual_array = y_array\n",
    "    residual_array[bset_index_R1] = y_array[bset_index_R1] - sigmoid(best_c1)\n",
    "    residual_array[bset_index_R2] = y_array[bset_index_R2] - sigmoid(best_c2)\n",
    "    print(sigmoid(best_c1), sigmoid(best_c2))\n",
    "\n",
    "    return minloss, best_split_point, residual_array, best_c1, best_c2, feature_index\n",
    "\n",
    "\n",
    "X, y = load_iris(return_X_y=True)\n",
    "\n",
    "# 如果用前两个特征 并不能很好的将其进行分类， 加入第3 4 个特征可以很好的分类。这个跟花的数据有关，可以打印看看\n",
    "# 而且这里用的树比较简单，只分裂一次，树的高度相当于2。叶子结点为2\n",
    "x_array = X[:80, :2].T\n",
    "y_array = y[:80]\n",
    "\n",
    "model_save = []\n",
    "maxiter = 6\n",
    "# 记录下该模型参数\n",
    "for iter_n in range(maxiter):\n",
    "    # 寻找一个分裂点，使得均方误差最小\n",
    "    minloss, best_split_point, residual_array, best_c1, best_c2, feature_index = findBestSplitPoint(x_array, y_array)\n",
    "    model_save.append([best_split_point, best_c1, best_c2, feature_index])\n",
    "    # Use the residuals as the new targets for the next tree.\n",
    "    # Fix: residual_array aliases y_array inside findBestSplitPoint, so\n",
    "    # 'y_array += residual_array' doubled the values every iteration;\n",
    "    # plain assignment is the correct boosting update.\n",
    "    y_array = residual_array\n",
    "    print('第', iter_n, '次迭代', '误差：', minloss, '切分点', best_split_point, '特征索引:', feature_index)\n",
    "\n",
    "# 初始化第0棵树值为0\n",
    "\n",
    "# 30岁，60斤 的身高是多少\n",
    "for x_test in x_array.T:\n",
    "    f = 0\n",
    "    for i in range(len(model_save)):\n",
    "        # print('第',i,'棵树:',T_feature_n(x_test,model_save[i]))\n",
    "        f += T_feature_n(x_test, model_save[i])\n",
    "    print('花的特征为{},y预测分类{}'.format(x_test, f))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 多分类\n",
    "- 将标签转化成onehot格式，分别训练三棵树\n",
    "- 针对某一个样本，用三棵树分别预测，得到三个预测值，并将三个预测值结果用softmax函数转出成概率\n",
    "- 将转化的概率当成最终的预测值，y-f 得到残差进行下一轮循环"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.97260934 0.01369533 0.01369533]\n",
      "[0.97021863 0.01489068 0.01489068]\n",
      "[0.05292173 0.47353913 0.47353913]\n",
      "[0.05292173 0.47353913 0.47353913]\n",
      "[0.05292173 0.47353913 0.47353913]\n",
      "[0.05292173 0.47353913 0.47353913]\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "def sigmoid(f):\n",
    "    # Logistic function: 1 / (1 + e^(-f)).\n",
    "    # Fix: the original used np.exp(f), which computes sigmoid(-f).\n",
    "    # (Unused in this cell's active code path, kept for consistency.)\n",
    "    return 1 / (1 + np.exp(-f))\n",
    "\n",
    "'''\n",
    "针对多个特征，计算模型输出值，这里模型多了一个特征索引值\n",
    "Args\n",
    "   x:输入的特征\n",
    "   model:list包含4个值\n",
    "          best_split_point:分裂点\n",
    "          best_c1:左节点小于分裂点\n",
    "          best_c2:右节点大于分裂点\n",
    "          index:特征索引\n",
    "return\n",
    "   预测值\n",
    "'''\n",
    "\n",
    "\n",
    "def T_feature_n(x, model):\n",
    "    # 根据切分点，计算预测值\n",
    "    best_split_point = model[0]\n",
    "    best_c1 = model[1]\n",
    "    best_c2 = model[2]\n",
    "    feature_index = model[3]\n",
    "    if x[feature_index] < best_split_point:\n",
    "        return best_c1\n",
    "    else:\n",
    "        return best_c2\n",
    "\n",
    "\n",
    "# 针对多分类如何寻找分割点\n",
    "# 定义训练数据\n",
    "train_data = [[5.1,3.5,1.4,0.2],[4.9,3.0,1.4,0.2],[7.0,3.2,4.7,1.4],[6.4,3.2,4.5,1.5],[6.3,3.3,6.0,2.5],[5.8,2.7,5.1,1.9]]\n",
    "\n",
    "# 定义label\n",
    "label_data = [[1,0,0],[1,0,0],[0,1,0],[0,1,0],[0,0,1],[0,0,1]]\n",
    "raw_label = [[1,0,0],[1,0,0],[0,1,0],[0,1,0],[0,0,1],[0,0,1]]\n",
    "# index 表示的第几棵树\n",
    "def findBestLossAndSplit(train_data,label_data,index):\n",
    "    sample_numbers = len(label_data)\n",
    "    feature_numbers = len(train_data[0])\n",
    "    current_label = []\n",
    "\n",
    "    # define the minLoss\n",
    "    minLoss = 10000000\n",
    "\n",
    "    # feature represents the dimensions of the feature\n",
    "    feature = 0\n",
    "\n",
    "    # split represents the detail split value\n",
    "    split = 0\n",
    "\n",
    "    # get current label\n",
    "    for label_index in range(0,len(label_data)):\n",
    "        current_label.append(label_data[label_index][index])\n",
    "\n",
    "    # trans all features\n",
    "    for feature_index in range(0,feature_numbers):\n",
    "        ## current feature value\n",
    "        current_value = []\n",
    "\n",
    "        for sample_index in range(0,sample_numbers):\n",
    "            current_value.append(train_data[sample_index][feature_index])\n",
    "        ## different split value\n",
    "        #print (current_value)\n",
    "        # NOTE(review): this loop variable shadows the 'index' parameter\n",
    "        # (the tree/class index); current_label was already built above,\n",
    "        # so behavior is safe, but renaming it would be clearer.\n",
    "        for index in range(0,len(current_value)):\n",
    "            # Fix: reset the loss for EACH candidate split. Previously L\n",
    "            # was initialised once per feature and accumulated across all\n",
    "            # split candidates, corrupting the minimum-loss comparison.\n",
    "            L = 0\n",
    "            R1 = []\n",
    "            R2 = []\n",
    "            y1 = 0\n",
    "            y2 = 0\n",
    "\n",
    "            for index_1 in range(0,len(current_value)):\n",
    "                if current_value[index_1] < current_value[index]:\n",
    "                    R1.append(index_1)\n",
    "                else:\n",
    "                    R2.append(index_1)\n",
    "\n",
    "            ## calculate the samples for first class\n",
    "            sum_y = 0\n",
    "            for index_R1 in R1:\n",
    "                sum_y += current_label[index_R1]\n",
    "            if len(R1) != 0:\n",
    "                y1 = float(sum_y) / float(len(R1))\n",
    "            else:\n",
    "                y1 = 0\n",
    "\n",
    "            ## calculate the samples for second class\n",
    "            sum_y = 0\n",
    "            for index_R2 in R2:\n",
    "                sum_y += current_label[index_R2]\n",
    "            if len(R2) != 0:\n",
    "                y2 = float(sum_y) / float(len(R2))\n",
    "            else:\n",
    "                y2 = 0\n",
    "\n",
    "            ## trans all samples to find minium loss and best split\n",
    "            for index_2 in range(0,len(current_value)):\n",
    "                if index_2 in R1:\n",
    "                    L += float((current_label[index_2]-y1))*float((current_label[index_2]-y1))\n",
    "                else:\n",
    "                    L += float((current_label[index_2]-y2))*float((current_label[index_2]-y2))\n",
    "\n",
    "            if L < minLoss:\n",
    "                feature = feature_index\n",
    "                split = current_value[index]\n",
    "                minLoss = L\n",
    "                best_c1 = y1\n",
    "                best_c2 = y2\n",
    "                bset_index_R1 = index_R1\n",
    "                bset_index_R2 = index_R2\n",
    "                \n",
    "    # Residuals for the next round. In the multiclass case they are\n",
    "    # computed by the caller via softmax over the per-class trees, so no\n",
    "    # sigmoid transform is applied here.\n",
    "    # Fix: 'residual_array' was never assigned in this scope; the return\n",
    "    # only worked via a variable leaked from another cell's execution.\n",
    "    # Return an explicit empty placeholder instead (callers ignore it).\n",
    "    residual_array = []\n",
    "    return minLoss,split,feature,best_c1,best_c2,residual_array\n",
    "\n",
    "\n",
    "# muticlass = 3\n",
    "# tree_model = []\n",
    "# #分别训练三棵树，保存树的参数\n",
    "# for tree_index in range(muticlass):\n",
    "#     minLoss,best_split_point,feature_index,best_c1,best_c2,residual_array = findBestLossAndSplit(train_data,label_data,tree_index)\n",
    "#     tree_model.append([best_split_point, best_c1, best_c2, feature_index])\n",
    "# #利用训练好的三棵树，对每一个样本进行预测，并进行softmax\n",
    "# #用真实值与softmax后的概率进行相减，得到残差\n",
    "# #将残差作为新的label进行下一轮迭代\n",
    "# residual = []\n",
    "# for sample_index in range(len(train_data)):\n",
    "#     tree_pre = []\n",
    "#     for tree_index in range(muticlass):\n",
    "#         tree_pre.append(T_feature_n(train_data[sample_index], tree_model[tree_index]))\n",
    "#     softmax_pre = soft_max(tree_pre)\n",
    "#     residual_array = np.array(label_data[sample_index]) - softmax_pre\n",
    "#     #print(softmax_pre,label_data[sample_index])\n",
    "#     residual.append(residual_array.tolist())\n",
    "# print(residual)\n",
    "\n",
    "\n",
    "\n",
    "#训练\n",
    "boost_model = []\n",
    "maxiterate = 10\n",
    "for iterate in range(maxiterate):\n",
    "    muticlass = 3\n",
    "    tree_model = []\n",
    "    #分别训练三棵树，保存树的参数\n",
    "    for tree_index in range(muticlass):\n",
    "        minLoss,best_split_point,feature_index,best_c1,best_c2,residual_array = findBestLossAndSplit(train_data,label_data,tree_index)\n",
    "        tree_model.append([best_split_point, best_c1, best_c2, feature_index])\n",
    "    #利用训练好的三棵树，对每一个样本进行预测，并进行softmax\n",
    "    #用真实值与softmax后的概率进行相减，得到残差\n",
    "    #将残差作为新的label进行下一轮迭代\n",
    "    residual = []\n",
    "    for sample_index in range(len(train_data)):\n",
    "        tree_pre = []\n",
    "        for tree_index in range(muticlass):\n",
    "            tree_pre.append(T_feature_n(train_data[sample_index], tree_model[tree_index]))\n",
    "        softmax_pre = soft_max(tree_pre)\n",
    "        residual_array = np.array(raw_label[sample_index]) - softmax_pre\n",
    "        #print(softmax_pre,label_data[sample_index])\n",
    "        residual.append(residual_array.tolist())\n",
    "    #print(residual)\n",
    "    label_data = residual\n",
    "    boost_model.append(tree_model)\n",
    "\n",
    "\n",
    "#预测\n",
    "for sample_index in range(len(train_data)):\n",
    "    final_pre = np.array([0,0,0],np.float32)\n",
    "    for tree_model in boost_model:\n",
    "        tree_pre = []\n",
    "        for tree_index in range(muticlass):\n",
    "            tree_pre.append(T_feature_n(train_data[sample_index], tree_model[tree_index]))\n",
    "        #softmax_pre = soft_max(tree_pre)\n",
    "        final_pre  = final_pre + np.array(tree_pre)\n",
    "    print(soft_max(final_pre))\n",
    "\n",
    "#tree_pre    \n",
    "# for sample in train_data:\n",
    "#     for tree_index in range(muticlass):\n",
    "#         tree_pre.append(T_feature_n(sample, tree_model[tree_index]))\n",
    "# print(tree_pre)\n",
    "# tree_model   \n",
    "    \n",
    "    \n",
    " "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[5.1, 3.5, 1.4, 0.2]\n",
      "[4.9, 3.0, 1.4, 0.2]\n",
      "[7.0, 3.2, 4.7, 1.4]\n",
      "[6.4, 3.2, 4.5, 1.5]\n",
      "[6.3, 3.3, 6.0, 2.5]\n",
      "[5.8, 2.7, 5.1, 1.9]\n"
     ]
    }
   ],
   "source": [
    "for sample_index in range(len(train_data)):\n",
    "    print(train_data[sample_index])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[5.1, 3.5, 1.4, 0.2],\n",
       "       [4.9, 3. , 1.4, 0.2],\n",
       "       [7. , 3.2, 4.7, 1.4],\n",
       "       [6.4, 3.2, 4.5, 1.5],\n",
       "       [6.3, 3.3, 6. , 2.5],\n",
       "       [5.8, 2.7, 5.1, 1.9]])"
      ]
     },
     "execution_count": 61,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "np.array(train_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([0.57611688, 0.21194156, 0.21194156])"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def soft_max(z):\n",
    "    \"\"\"Softmax over axis 0 of z.\"\"\"\n",
    "    t = np.exp(z)\n",
    "    # Reuse t instead of recomputing np.exp(z) as the original did;\n",
    "    # also removed the dead literal expression that preceded the def.\n",
    "    return t / np.sum(t, axis=0)\n",
    "soft_max([1.0, 0.0, 0.0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 137,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "train_data\n",
    "label_data\n",
    "split:分裂点\n",
    "feature:分裂的特征\n",
    "n:第几个f(x)\n",
    "'''\n",
    "def f(train_data,label_data,index,split,feature):\n",
    "#     train_data\n",
    "#     label_data\n",
    "#     #第1棵树\n",
    "#     index = 0\n",
    "#     #第\n",
    "#     feature_index = 0\n",
    "#     split = 5.1\n",
    "\n",
    "    sample_numbers = len(label_data)\n",
    "    feature_numbers = len(train_data[0])\n",
    "    current_label = []\n",
    "\n",
    "    # Fix: bind the feature (column) index from the 'feature' argument.\n",
    "    # The original assigned 'feature = feature_index', silently reading a\n",
    "    # global leaked from another cell and ignoring the parameter. The\n",
    "    # no-op 'split = split' line is removed; 'split' is used as passed.\n",
    "    feature_index = feature\n",
    "\n",
    "    # get current label\n",
    "    for label_index in range(0, len(label_data)):\n",
    "        current_label.append(label_data[label_index][index])\n",
    "    #print(current_label)\n",
    "\n",
    "\n",
    "    ## current feature value\n",
    "    current_value = []\n",
    "\n",
    "    for sample_index in range(0,sample_numbers):\n",
    "        current_value.append(train_data[sample_index][feature_index])\n",
    "    L = 0\n",
    "    ## different split value\n",
    "    #print (current_value)\n",
    "\n",
    "    R1 = []\n",
    "    R2 = []\n",
    "    y1 = 0\n",
    "    y2 = 0\n",
    "\n",
    "    value_index = current_value.index(split)\n",
    "    for index_1 in range(0, len(current_value)):\n",
    "        if current_value[index_1] < current_value[value_index]:\n",
    "            R1.append(index_1)\n",
    "        else:\n",
    "            R2.append(index_1)\n",
    "    #print(R2)\n",
    "\n",
    "    ## calculate the samples for first class\n",
    "    sum_y = 0\n",
    "    for index_R1 in R1:\n",
    "        sum_y += current_label[index_R1]\n",
    "    if len(R1) != 0:\n",
    "        y1 = float(sum_y) / float(len(R1))\n",
    "    else:\n",
    "        y1 = 0\n",
    "\n",
    "    ## calculate the samples for second class\n",
    "    sum_y = 0\n",
    "    for index_R2 in R2:\n",
    "        sum_y += current_label[index_R2]\n",
    "    if len(R2) != 0:\n",
    "        y2 = float(sum_y) / float(len(R2))\n",
    "    else:\n",
    "        y2 = 0\n",
    "\n",
    "    f_n = y1*len(R1)+y2*len(R2) \n",
    "    #print(y1,y2,f_n)\n",
    "    return f_n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([1.])"
      ]
     },
     "execution_count": 64,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "soft_max([-1.09961279])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[-1.09961279, -1.09961279, -1.09961279],\n",
       "       [-1.09961279, -1.09961279, -1.09961279],\n",
       "       [-1.09961279, -1.09961279, -1.09961279],\n",
       "       [-1.09961279, -1.09961279, -1.09961279]])"
      ]
     },
     "execution_count": 83,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "probas = [[0.333,0.333,0.333],[0.333,0.333,0.333],[0.333,0.333,0.333],[0.333,0.333,0.333]]\n",
    "eps = np.finfo(np.float32).eps\n",
    "#限制最大最小值\n",
    "probas = np.clip(probas, eps, 1 - eps)\n",
    "raw_predictions = np.log(probas).astype(np.float64)\n",
    "raw_predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([-1.09861229, -1.09861229, -1.09861229, -1.09861229])"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from scipy.special import logsumexp\n",
    "import numpy as np\n",
    "raw_predictions =np.array([[-1.09961279, -1.09961279, -1.09961279],[-1.09961279, -1.09961279, -1.09961279],[-1.09961279, -1.09961279, -1.09961279],[-1.09961279, -1.09961279, -1.09961279]]) \n",
    "raw_predictions[:, 0] -logsumexp(raw_predictions, axis=1)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([-1.09961279, -1.09961279, -1.09961279, -1.09961279])"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "raw_predictions[:, 0] "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([-0.0010005, -0.0010005, -0.0010005, -0.0010005])"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "logsumexp(raw_predictions, axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\anaconda3\\lib\\site-packages\\ipykernel_launcher.py:1: RuntimeWarning: divide by zero encountered in log\n",
      "  \"\"\"Entry point for launching an IPython kernel.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "-inf"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "np.log(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 先计算 raw_predict\n",
    "# 再计算 prob = exp(raw_predict)\n",
    "# 再计算 residual = y - exp(raw_predict - logsumexp(raw_predict)) = y - prob"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
       "       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
       "       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
       "       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
       "       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
       "       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1.,\n",
       "       1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
       "       1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
       "       1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X, y = load_iris(return_X_y=True)\n",
    "original_y = y\n",
    "np.array(original_y == 2,dtype=np.float64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "GradientBoostingClassifier(criterion='friedman_mse', init=None,\n",
       "                           learning_rate=0.1, loss='deviance', max_depth=3,\n",
       "                           max_features=None, max_leaf_nodes=None,\n",
       "                           min_impurity_decrease=0.0, min_impurity_split=None,\n",
       "                           min_samples_leaf=1, min_samples_split=2,\n",
       "                           min_weight_fraction_leaf=0.0, n_estimators=100,\n",
       "                           n_iter_no_change=None, presort='auto',\n",
       "                           random_state=None, subsample=1.0, tol=0.0001,\n",
       "                           validation_fraction=0.1, verbose=0,\n",
       "                           warm_start=False)"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.ensemble import GradientBoostingClassifier\n",
    "GradientBoostingClassifier()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## sklearn GradientBoostingRegressor源码分析\n",
    "\n",
    "1. 初始化参数，迭代次数\n",
    "2. 初始化预测值 -回归问题是均值raw_predictions\n",
    "3. boosting算法步骤  \n",
    "    3.1 for i in 迭代次数:\n",
    "           根据预测值得到残差，之后拟合一棵树\n",
    "           计算新的预测值 更新预测值，注意是所有树的求和，而不是当前这棵树！！！！\n",
    "    3.2 循环结束，保存每一棵树，保存初始化的预测值，用于预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 168,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((506, 13), (506,))"
      ]
     },
     "execution_count": 168,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): load_boston was removed in scikit-learn 1.2; this notebook\n",
    "# requires an older scikit-learn (kernel metadata indicates a Python 3.7-era env).\n",
    "from sklearn.tree import DecisionTreeRegressor\n",
    "from sklearn.ensemble import GradientBoostingRegressor\n",
    "from sklearn.datasets import load_boston\n",
    "import numpy as np\n",
    "X, y = load_boston(return_X_y=True)\n",
    "X.shape,y.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 174,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_init_raw_predictions(X,y):\n",
    "    # Initial prediction for regression: the mean of y broadcast to every sample.\n",
    "    constant_ = np.average(y, axis=0)\n",
    "    raw_predictions = y.copy()\n",
    "    raw_predictions[:] = constant_\n",
    "    return raw_predictions\n",
    "\n",
    "def negative_gradient(y, raw_predictions):\n",
    "    # Negative gradient of squared-error loss = the plain residual y - f(x).\n",
    "    return y - raw_predictions.ravel()\n",
    "\n",
    "def loss_(y, raw_predictions):\n",
    "    # Training loss for regression: mean squared error.\n",
    "    return np.mean((y - raw_predictions.ravel())** 2)\n",
    "\n",
    "# Update the predictions -- note this accumulates over ALL trees so far, not just the current one!\n",
    "def update_terminal_regions(tree, X, y, residual, raw_predictions):\n",
    "    pre = tree.predict(X)\n",
    "    raw_predictions +=  pre\n",
    "    return raw_predictions\n",
    "\n",
    "def predict(X,init_raw_predictions,estimators_):\n",
    "    # Start from the stored initial prediction (constant across samples, so\n",
    "    # element 0 suffices), then add every tree's contribution.\n",
    "    raw_predictions = np.full((X.shape[0]),init_raw_predictions[0])\n",
    "    for tree in estimators_:\n",
    "        pre = tree.predict(X.astype(np.float32))\n",
    "        raw_predictions+= pre\n",
    "    return raw_predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 175,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练误差逐渐减小： [46.19909168 31.57830339 28.84084433 25.38314259 23.19508896 20.92134251]\n",
      "\n",
      "预测结果： [23.19416871 23.19416871 40.69684411 40.69684411 35.29381512 23.19416871\n",
      " 23.19416871 15.18681546 15.18681546 15.18681546]\n"
     ]
    }
   ],
   "source": [
    "# Hyper-parameters: number of boosting iterations (= number of trees).\n",
    "n_estimators = 6\n",
    "train_score_ = []\n",
    "estimators_ = DecisionTreeRegressor()\n",
    "estimators_ = np.resize(estimators_,n_estimators)\n",
    "train_score_ = np.resize(train_score_,n_estimators)\n",
    "\n",
    "# Initialization: for regression the initial prediction is the mean of y.\n",
    "# Keep a copy so it can be reused at predict time.\n",
    "raw_predictions = get_init_raw_predictions(X,y)\n",
    "init_raw_predictions = raw_predictions.copy()\n",
    "\n",
    "# Boosting loop\n",
    "for iteration in range(n_estimators):\n",
    "    raw_predictions_copy = raw_predictions.copy()\n",
    "    residual = negative_gradient(y, raw_predictions_copy)\n",
    "\n",
    "    # Fit a tree on the residuals; the simplest tree has two leaves (one split).\n",
    "    tree = DecisionTreeRegressor(max_leaf_nodes=2)\n",
    "    tree.fit(X, residual)\n",
    "\n",
    "    # Update raw_predictions with the freshly fitted tree.\n",
    "    raw_predictions = update_terminal_regions(tree, X, y, residual, raw_predictions)\n",
    "   \n",
    "    # Store the tree so predict() can replay the ensemble.\n",
    "    estimators_[iteration] = tree\n",
    "    #print(tree.tree_.value)\n",
    "    \n",
    "    # Overall training loss (mean squared error).\n",
    "    train_score_[iteration] = loss_(y, raw_predictions)\n",
    "    \n",
    "print('训练误差逐渐减小：',train_score_)\n",
    "print('\\n预测结果：',predict(X[:10,:],init_raw_predictions,estimators_).ravel())\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 160,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练误差逐渐减小： [6.25000000e-03 2.91666667e-03 6.01851852e-04 3.44650206e-04\n",
      " 1.25171468e-04 5.20118884e-05]\n",
      "\n",
      "预测结果： [1.1        1.29012346 1.69938272 1.81049383]\n"
     ]
    }
   ],
   "source": [
    "# Another example: two features (age, weight) -> target height.\n",
    "x1 = [5, 7, 21, 30]\n",
    "x2 = [20, 30, 70, 60]\n",
    "X = np.array([x1,x2]).T.reshape(4,2)\n",
    "y = np.array([1.1, 1.3, 1.7, 1.8])\n",
    "\n",
    "# X = np.array([1,2,3,4,5,6,7,8,9,10]).reshape(10,-1)\n",
    "# y = np.array([5.56,5.70,5.91,6.40,6.80,7.05,8.9,8.70,9.00,9.05])\n",
    "# Initialization: the mean of y; keep a copy for predict time.\n",
    "# NOTE(review): n_estimators/estimators_/train_score_ are reused from the previous cell.\n",
    "raw_predictions = get_init_raw_predictions(X,y)\n",
    "init_raw_predictions = raw_predictions.copy()\n",
    "\n",
    "# Boosting loop\n",
    "for iteration in range(n_estimators):\n",
    "    raw_predictions_copy = raw_predictions.copy()\n",
    "    residual = negative_gradient(y, raw_predictions_copy)\n",
    "\n",
    "    # Fit a tree on the residuals; the simplest tree has two leaves (one split).\n",
    "    tree = DecisionTreeRegressor(max_leaf_nodes=2)\n",
    "    tree.fit(X, residual)\n",
    "\n",
    "    # Update raw_predictions with the freshly fitted tree.\n",
    "    raw_predictions = update_terminal_regions(tree, X, y, residual, raw_predictions)\n",
    "   \n",
    "    # Store the tree so predict() can replay the ensemble.\n",
    "    estimators_[iteration] = tree\n",
    "    \n",
    "    # Overall training loss (mean squared error).\n",
    "    train_score_[iteration] = loss_(y, raw_predictions)\n",
    "    \n",
    "print('训练误差逐渐减小：',train_score_)\n",
    "print('\\n预测结果：',predict(X,init_raw_predictions,estimators_).ravel())\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## sklearn GradientBoostingClassifier 分类源码分析\n",
    "\n",
    "1. 初始化参数，迭代次数\n",
    "2. 初始化预测值 -回归问题是均值raw_predictions\n",
    "3. boosting算法步骤  \n",
    "    3.1 for i in 迭代次数:\n",
    "           根据预测值得到残差，之后拟合一棵树\n",
    "           \n",
    "           计算新的预测值 这里预测值不再是树的叶子节点均值（叶子节点的值其实是残差的均值），\n",
    "           而是通过update_terminal_region计算得到的，相当于一个二阶导数的加权。得到新的叶子结点值后，更新预测值\n",
    "           \n",
    "           更新预测值，注意是所有树的求和，而不是当前这棵树！！！！\n",
    "           \n",
    "           计算训练的损失，这里损失是logloss，参考loss_\n",
    "    3.2 循环结束，保存每一棵树，保存初始化的预测值，用于预测\n",
    "\n",
    "#### 回归、分类对比\n",
    "1. 相较于回归，最关键的一点在于求负梯度时，分类需要用logit（sigmoid）转化一下成概率\n",
    "2. 分类更新叶子节点时，利用了二阶导数加权，非常类似于XGB，回归直接求平均\n",
    "3. 在计算代价函数（所有样本都计算叫代价函数，单个样本计算叫损失函数）时，回归利用了均方差损失函数，分类利用了交叉熵损失函数（对数损失函数）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 176,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.ensemble import GradientBoostingClassifier\n",
    "from sklearn.datasets import load_iris\n",
    "from sklearn.ensemble import GradientBoostingRegressor\n",
    "from sklearn.datasets import load_boston\n",
    "from sklearn.tree import DecisionTreeRegressor\n",
    "import numpy as np\n",
    "from scipy.special import expit\n",
    "from sklearn.tree._tree import TREE_LEAF\n",
    "\n",
    "\n",
    "# log(x / (1 - x)) is the inverse of the sigmoid (expit) function\n",
    "#如果给定的标签值不是0,1，会根据类别标记索引\n",
    "# proba_pos_class  这里会根据先验概率计算得到    prior = pos/(pos+neg)\n",
    "def get_init_raw_predictions(X,y):\n",
    "    # Initial prediction for binary classification: the log-odds of the\n",
    "    # positive-class prior, prior = pos/(pos+neg).\n",
    "    # If labels are not 0/1 they are re-indexed by np.unique below.\n",
    "    classes_, y = np.unique(y, return_inverse=True)\n",
    "    # FIX: pass the dtype np.float64 itself, not an instance np.float64().\n",
    "    y = y.astype(np.float64)\n",
    "    pos = len(y[np.where(y == 1)])\n",
    "    prior_1 = pos/len(y)\n",
    "    proba_pos_class = y.copy()\n",
    "    proba_pos_class[:] = prior_1\n",
    "    # Map the prior probability to a raw score via the inverse sigmoid:\n",
    "    # log(x / (1 - x)) is the inverse of 1/(1+exp(-x)).\n",
    "    raw_predictions = np.log(proba_pos_class / (1 - proba_pos_class))\n",
    "    return raw_predictions\n",
    "\n",
    "\n",
    "# 计算负梯度\n",
    "# expit(x) = 1/(1+exp(-x))\n",
    "def negative_gradient(y,raw_predictions):\n",
    "    return y - expit(raw_predictions.ravel())\n",
    "\n",
    "# Compute the deviance (= 2 * negative log-likelihood).\n",
    "# logloss = - y*logp - (1-y)log(1-p)     这里传进来的是raw_predictions=f   p = 1/(1+exp(-f)) \n",
    "#可以将上式带入，化简得到该式子可以推导下  - (yf-log(1+exp(f)))\n",
    "# logaddexp(0, v) == log(1.0 + exp(v))\n",
    "def loss_(y,raw_predictions):\n",
    "    return  -2 * np.mean((y * raw_predictions) -np.logaddexp(0, raw_predictions))\n",
    "    \n",
    "\n",
    "# Update the predictions -- accumulates over ALL trees, not just the current one!\n",
    "# Each leaf value is re-computed with a Newton step before being added in.\n",
    "def update_terminal_regions(tree, X, y, residual, raw_predictions):\n",
    "\n",
    "    terminal_regions = tree.apply(X.astype(np.float32))\n",
    "\n",
    "    # Mask all samples which are not in the sample mask (here: all are kept).\n",
    "    # FIX: np.bool is deprecated (removed in NumPy 1.24); use the builtin bool.\n",
    "    sample_mask = np.ones((len(y), ), dtype=bool)    \n",
    "    masked_terminal_regions = terminal_regions.copy()\n",
    "    masked_terminal_regions[~sample_mask] = -1\n",
    "    \n",
    "    # Update each leaf (= perform line search); the updated leaf values are not\n",
    "    # the raw residual means but Newton-weighted values (see update_terminal_region).\n",
    "    for leaf in np.where(tree.children_left == TREE_LEAF)[0]:\n",
    "        tree= update_terminal_region(tree, masked_terminal_regions,\n",
    "                                     leaf, X, y, residual,\n",
    "                                     raw_predictions )\n",
    "        \n",
    "    raw_predictions += tree.value[:, 0, 0].take(terminal_regions, axis=0)\n",
    "    return raw_predictions\n",
    "\n",
    "\n",
    "'''\n",
    "Make a single Newton-Raphson step.\n",
    "value = sum((y - prob)) / sum(prob * (1 - prob))\n",
    "y - prob = residual\n",
    "\n",
    "p is the predicted probability: p = sigmoid(raw_pre)\n",
    "logloss = -y*log(p) - (1-y)*log(1-p)\n",
    "first derivative  G: p - y\n",
    "second derivative H: p*(1-p)\n",
    "The leaf value above is the Newton step -G/H built from those derivatives.\n",
    "XGBoost uses the same first/second derivatives plus regularization: w = G/(H+alpha).\n",
    "So the GBDT leaf value is not the raw residual but a second-order weighted value.\n",
    "'''\n",
    "def update_terminal_region(tree, terminal_regions, leaf, X, y, residual, raw_predictions):\n",
    "    # Indices of the samples that fall into this leaf.\n",
    "    terminal_region = np.where(terminal_regions == leaf)[0]\n",
    "    residual = residual.take(terminal_region, axis=0)\n",
    "    y = y.take(terminal_region, axis=0)\n",
    "\n",
    "    # Newton step: sum(residual) / sum(p*(1-p)), where p = y - residual.\n",
    "    numerator = np.sum(residual)\n",
    "    denominator = np.sum((y - residual) * (1 - y + residual))\n",
    "\n",
    "    # prevents overflow and division by zero\n",
    "    if abs(denominator) < 1e-150:\n",
    "        tree.value[leaf, 0, 0] = 0.0\n",
    "    else:\n",
    "        tree.value[leaf, 0, 0] = numerator / denominator\n",
    "    return tree\n",
    "\n",
    "# Predict raw scores: the initial log-odds plus every tree's leaf contribution.\n",
    "def predict(X,init_raw_predictions,estimators_):\n",
    "    # The initial prediction is constant across samples, so element 0 suffices.\n",
    "    raw_predictions = np.full((X.shape[0]),init_raw_predictions[0])\n",
    "    for tree in estimators_:\n",
    "        pre = tree.predict(X)\n",
    "        raw_predictions+= pre\n",
    "    return raw_predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 179,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练误差逐渐减小： [0.62563623 0.35924393 0.25562034 0.19766221 0.15276796 0.10551893]\n",
      "\n",
      "预测结果： [0.03972771 0.03972771 0.03972771 0.03972771 0.03972771 0.00477442\n",
      " 0.03972771 0.03972771 0.04572219 0.03972771 0.03972771 0.03972771\n",
      " 0.03972771 0.00233191 0.13598437 0.13598437 0.00477442 0.03972771\n",
      " 0.13598437 0.00477442 0.03972771 0.03972771 0.03972771 0.03972771\n",
      " 0.03972771 0.03972771 0.03972771 0.03972771 0.03972771 0.03972771\n",
      " 0.03972771 0.03972771 0.00477442 0.00477442 0.03972771 0.03972771\n",
      " 0.03972771 0.03972771 0.00233191 0.03972771 0.03972771 0.29029874\n",
      " 0.00233191 0.03972771 0.00477442 0.03972771 0.00477442 0.03972771\n",
      " 0.03972771 0.03972771 0.92052158 0.92052158 0.92052158 0.87864195\n",
      " 0.99580567 0.96530476 0.92052158 0.87864195 0.99580567 0.87864195\n",
      " 0.87864195 0.92052158 0.99950688 0.99580567 0.96530476 0.92052158\n",
      " 0.57578134 0.99580759 0.99950688 0.99580759 0.92052158 0.99580567\n",
      " 0.99950688 0.99580567 0.99580567 0.92052158 0.99580567 0.92052158\n",
      " 0.99580567 0.99580759]\n"
     ]
    }
   ],
   "source": [
    "X, y = load_iris(return_X_y=True)\n",
    "X = X[:80, :2]\n",
    "y = y[:80]\n",
    "\n",
    "# Hyper-parameters: number of boosting iterations (= number of trees).\n",
    "n_estimators = 6\n",
    "train_score_ = []\n",
    "estimators_ = DecisionTreeRegressor()\n",
    "estimators_ = np.resize(estimators_,n_estimators)\n",
    "train_score_ = np.resize(train_score_,n_estimators)\n",
    "\n",
    "# Initialization: for classification the initial raw score is derived from the prior.\n",
    "raw_predictions = get_init_raw_predictions(X,y)\n",
    "init_raw_predictions = raw_predictions.copy()\n",
    "\n",
    "# Boosting loop\n",
    "for iteration in range(n_estimators):\n",
    "    # Residual = negative gradient of log-loss = y - sigmoid(raw score).\n",
    "    raw_predictions_copy = raw_predictions.copy()\n",
    "    residual = negative_gradient(y, raw_predictions_copy)\n",
    "\n",
    "    # Fit a tree on the residuals; the simplest tree has two leaves (one split).\n",
    "    tree = DecisionTreeRegressor(max_leaf_nodes=2)\n",
    "    tree.fit(X, residual)\n",
    "\n",
    "    # Update raw_predictions using Newton-reweighted leaf values.\n",
    "    raw_predictions = update_terminal_regions(tree.tree_, X, y, residual, raw_predictions)\n",
    "   \n",
    "    # Store the tree so predict() can replay the ensemble.\n",
    "    estimators_[iteration] = tree\n",
    "    \n",
    "    # Overall training loss; for classification this is the deviance (log-loss),\n",
    "    # not mean squared error.\n",
    "    train_score_[iteration] = loss_(y, raw_predictions)\n",
    "    \n",
    "print('训练误差逐渐减小：',train_score_)\n",
    "print('\\n预测结果：',expit(predict(X,init_raw_predictions,estimators_)).ravel())\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
