{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "96d0444f-57b5-4e54-b96f-d291e1b78648",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import matplotlib.cm as cm\n",
    "import seaborn as sns"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4ef9269f-7a9b-4fcf-a2ad-a74ed66af783",
   "metadata": {},
   "source": [
     "## Load Boston dataset from scikit-learn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "129f6ed5-72c6-4429-8c1a-4a06043f7c9a",
   "metadata": {},
   "outputs": [],
   "source": [
     "# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in 1.2\n",
     "# (ethical concerns over the B feature); this cell requires scikit-learn < 1.2.\n",
     "from sklearn.datasets import load_boston\n",
     "boston = load_boston()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "12b3fc08-d88f-407a-9f42-5b37b93ada35",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "dict_keys(['data', 'target', 'feature_names', 'DESCR', 'filename'])"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "boston.keys()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "7a745bb9-1175-460a-9bcf-c566d10206c5",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(506, 13)"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "boston.data.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "e20d1221-892f-4ace-b8f4-15f412b128c7",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(13,)"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "boston.feature_names.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "3dc694c6-11d0-4553-8a9e-fecb7c575674",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(506,)"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "boston.target.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "3ac87e5c-5c0d-45b0-ad5a-535839eefcbc",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\".. _boston_dataset:\\n\\nBoston house prices dataset\\n---------------------------\\n\\n**Data Set Characteristics:**  \\n\\n    :Number of Instances: 506 \\n\\n    :Number of Attributes: 13 numeric/categorical predictive. Median Value (attribute 14) is usually the target.\\n\\n    :Attribute Information (in order):\\n        - CRIM     per capita crime rate by town\\n        - ZN       proportion of residential land zoned for lots over 25,000 sq.ft.\\n        - INDUS    proportion of non-retail business acres per town\\n        - CHAS     Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)\\n        - NOX      nitric oxides concentration (parts per 10 million)\\n        - RM       average number of rooms per dwelling\\n        - AGE      proportion of owner-occupied units built prior to 1940\\n        - DIS      weighted distances to five Boston employment centres\\n        - RAD      index of accessibility to radial highways\\n        - TAX      full-value property-tax rate per $10,000\\n        - PTRATIO  pupil-teacher ratio by town\\n        - B        1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town\\n        - LSTAT    % lower status of the population\\n        - MEDV     Median value of owner-occupied homes in $1000's\\n\\n    :Missing Attribute Values: None\\n\\n    :Creator: Harrison, D. and Rubinfeld, D.L.\\n\\nThis is a copy of UCI ML housing dataset.\\nhttps://archive.ics.uci.edu/ml/machine-learning-databases/housing/\\n\\n\\nThis dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.\\n\\nThe Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic\\nprices and the demand for clean air', J. Environ. Economics & Management,\\nvol.5, 81-102, 1978.   Used in Belsley, Kuh & Welsch, 'Regression diagnostics\\n...', Wiley, 1980.   N.B. 
Various transformations are used in the table on\\npages 244-261 of the latter.\\n\\nThe Boston house-price data has been used in many machine learning papers that address regression\\nproblems.   \\n     \\n.. topic:: References\\n\\n   - Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.\\n   - Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.\\n\""
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "boston.DESCR"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "bc59ef48-381c-4256-aadc-b5405ea82e33",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Convert Numpy array to Pandas Data Frame\n",
    "dataset = pd.DataFrame(boston.data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "a3ca76e6-99a9-4d7c-a9a1-31c8b4d80929",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "      CRIM    ZN  INDUS  CHAS    NOX     RM    AGE     DIS  RAD    TAX  \\\n",
      "0  0.00632  18.0   2.31   0.0  0.538  6.575   65.2  4.0900  1.0  296.0   \n",
      "1  0.02731   0.0   7.07   0.0  0.469  6.421   78.9  4.9671  2.0  242.0   \n",
      "2  0.02729   0.0   7.07   0.0  0.469  7.185   61.1  4.9671  2.0  242.0   \n",
      "3  0.03237   0.0   2.18   0.0  0.458  6.998   45.8  6.0622  3.0  222.0   \n",
      "4  0.06905   0.0   2.18   0.0  0.458  7.147   54.2  6.0622  3.0  222.0   \n",
      "5  0.02985   0.0   2.18   0.0  0.458  6.430   58.7  6.0622  3.0  222.0   \n",
      "6  0.08829  12.5   7.87   0.0  0.524  6.012   66.6  5.5605  5.0  311.0   \n",
      "7  0.14455  12.5   7.87   0.0  0.524  6.172   96.1  5.9505  5.0  311.0   \n",
      "8  0.21124  12.5   7.87   0.0  0.524  5.631  100.0  6.0821  5.0  311.0   \n",
      "9  0.17004  12.5   7.87   0.0  0.524  6.004   85.9  6.5921  5.0  311.0   \n",
      "\n",
      "   PTRATIO       B  LSTAT  \n",
      "0     15.3  396.90   4.98  \n",
      "1     17.8  396.90   9.14  \n",
      "2     17.8  392.83   4.03  \n",
      "3     18.7  394.63   2.94  \n",
      "4     18.7  396.90   5.33  \n",
      "5     18.7  394.12   5.21  \n",
      "6     15.2  395.60  12.43  \n",
      "7     15.2  396.90  19.15  \n",
      "8     15.2  386.63  29.93  \n",
      "9     15.2  386.71  17.10  \n"
     ]
    }
   ],
   "source": [
    "# Add Column Names in the Dataset\n",
    "dataset.columns = boston.feature_names\n",
    "print(dataset.head(10))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fd5353b5-ed3e-486e-9595-d60d87517278",
   "metadata": {},
   "source": [
    "CRIM：城镇人均犯罪率。\n",
    "\n",
    "ZN：城镇超过25000平方英尺的住宅区域的占地比例。\n",
    "\n",
    "INDUS：城镇非零售用地占地比例。\n",
    "\n",
    "CHAS：是否靠近河边，1为靠近，0为远离。\n",
    "\n",
     "NOX：一氧化氮浓度（每千万分之一，parts per 10 million）。\n",
    "\n",
    "RM：每套房产的平均房间个数。\n",
    "\n",
    "AGE：在1940年之前就盖好，且业主自住的房子的比例。\n",
    "\n",
     "DIS：到波士顿五个就业中心的加权距离。\n",
    "\n",
    "RAD：周边高速公路的便利性指数。\n",
    "\n",
    "TAX：每10000美元的财产税率。\n",
    "\n",
     "PTRATIO：城镇的师生比例（学生数/教师数）。\n",
    "\n",
     "B：1000(Bk - 0.63)^2，其中Bk为城镇黑人的比例。\n",
    "\n",
    "LSTAT：地位较低的人口比例"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "1fc17d6d-b6db-4aac-98a7-f8b8abe0d520",
   "metadata": {},
   "outputs": [],
   "source": [
    "dataset['MEDV'] = boston.target"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "39750979-8b59-493c-859f-4de373a0212a",
   "metadata": {},
   "source": [
    "# 1. 数据分析与可视化"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bf09bad8-f7ea-40da-825c-d5258db00caa",
   "metadata": {},
   "source": [
    "## 皮尔森Pearson相关性"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "id": "67d8d329-8802-46b1-b5c8-57a466ad7a9a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "aeca3a921132419995a6d3e2485c0ee4",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "[Text(0.5, 0, 'CRIM'),\n",
       " Text(1.5, 0, 'ZN'),\n",
       " Text(2.5, 0, 'INDUS'),\n",
       " Text(3.5, 0, 'CHAS'),\n",
       " Text(4.5, 0, 'NOX'),\n",
       " Text(5.5, 0, 'RM'),\n",
       " Text(6.5, 0, 'AGE'),\n",
       " Text(7.5, 0, 'DIS'),\n",
       " Text(8.5, 0, 'RAD'),\n",
       " Text(9.5, 0, 'TAX'),\n",
       " Text(10.5, 0, 'PTRATIO'),\n",
       " Text(11.5, 0, 'B'),\n",
       " Text(12.5, 0, 'LSTAT'),\n",
       " Text(13.5, 0, 'MEDV')]"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sns.set(style=\"whitegrid\")\n",
    "%matplotlib widget\n",
    "import palettable\n",
    "fig = plt.figure(figsize=(10,7.5))\n",
    "ax = sns.heatmap(dataset.corr(method = \"pearson\"), \n",
    "                annot=True, \n",
    "                cmap=palettable.cmocean.diverging.Curl_10.mpl_colors,\n",
    "                linewidths=0.5,\n",
    "                linecolor='w')\n",
    "ax.set_title('Pearson Heat Map', fontsize=20)\n",
    "ax.set_xticklabels(ax.get_xticklabels(), rotation=45)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4d97b618-119d-4387-9f19-b30f85b6d620",
   "metadata": {},
   "source": [
    "## Spearman相关性"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "id": "64bdb106-bb64-41f7-a0ab-370070fa3f10",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "ac0b43c448b34e8dae4eff5d571edc09",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "[Text(0.5, 0, 'CRIM'),\n",
       " Text(1.5, 0, 'ZN'),\n",
       " Text(2.5, 0, 'INDUS'),\n",
       " Text(3.5, 0, 'CHAS'),\n",
       " Text(4.5, 0, 'NOX'),\n",
       " Text(5.5, 0, 'RM'),\n",
       " Text(6.5, 0, 'AGE'),\n",
       " Text(7.5, 0, 'DIS'),\n",
       " Text(8.5, 0, 'RAD'),\n",
       " Text(9.5, 0, 'TAX'),\n",
       " Text(10.5, 0, 'PTRATIO'),\n",
       " Text(11.5, 0, 'B'),\n",
       " Text(12.5, 0, 'LSTAT'),\n",
       " Text(13.5, 0, 'MEDV')]"
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sns.set(style=\"whitegrid\")\n",
    "%matplotlib widget\n",
    "fig = plt.figure(figsize=(10,7.5))\n",
    "ax = sns.heatmap(dataset.corr(method = \"spearman\"), \n",
    "                annot=True, \n",
    "                 cmap='Blues',\n",
    "                linewidths=0.5,\n",
    "                linecolor='w')\n",
    "ax.set_title('Spearman Heat Map', fontsize=20)\n",
    "ax.set_xticklabels(ax.get_xticklabels(), rotation=45)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "44bac94b-855f-4168-8192-2e8a4496dd26",
   "metadata": {},
   "source": [
    "## Kendall相关性"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 400,
   "id": "2f008eb6-cc9f-47fd-bcde-1ae7fbf6f327",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "2aeaa0629e6e43a9a28b219830071750",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "[Text(0.5, 0, 'CRIM'),\n",
       " Text(1.5, 0, 'ZN'),\n",
       " Text(2.5, 0, 'INDUS'),\n",
       " Text(3.5, 0, 'CHAS'),\n",
       " Text(4.5, 0, 'NOX'),\n",
       " Text(5.5, 0, 'RM'),\n",
       " Text(6.5, 0, 'AGE'),\n",
       " Text(7.5, 0, 'DIS'),\n",
       " Text(8.5, 0, 'RAD'),\n",
       " Text(9.5, 0, 'TAX'),\n",
       " Text(10.5, 0, 'PTRATIO'),\n",
       " Text(11.5, 0, 'B'),\n",
       " Text(12.5, 0, 'LSTAT'),\n",
       " Text(13.5, 0, 'MEDV')]"
      ]
     },
     "execution_count": 400,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sns.set(style=\"whitegrid\")\n",
    "%matplotlib widget\n",
    "fig = plt.figure(figsize=(10,8))\n",
    "ax = sns.heatmap(dataset.corr(method = \"kendall\"), \n",
    "                annot=True, \n",
    "                linewidths=0.5,\n",
    "                linecolor='w')\n",
    "ax.set_title('Kendall Heat Map', fontsize=20)\n",
    "ax.set_xticklabels(ax.get_xticklabels(), rotation=45)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c80d2021-49a5-4562-93be-a3365993da7f",
   "metadata": {},
   "source": [
    "- Pearson相关系数是在原始数据的方差和协方差基础上计算得到，所以对离群值比较敏感，它度量的是线性相关。因此，即使Pearson相关系数为0，也只能说明变量之间不存在线性相关，但仍有可能存在曲线相关。\n",
    "- Spearman相关系数和Kendall相关系数都是建立在秩和观测值的相对大小的基础上得到，是一种更为一般性的非参数方法，对离群值的敏感度较低，因而也更具有耐受性，度量的主要是变量之间的联系。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "id": "b18b26ac-67a1-419e-b295-fd6527d5a803",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "b49ed7eace28419a8991336e7143057e",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "<seaborn.axisgrid.PairGrid at 0x2c90fd42400>"
      ]
     },
     "execution_count": 64,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sns.set(style=\"whitegrid\")\n",
    "%matplotlib widget\n",
    "g = sns.PairGrid(dataset.iloc[:,::2])\n",
    "g.map_diag(sns.distplot)\n",
    "g.map_upper(plt.scatter)\n",
    "g.map_lower(sns.kdeplot)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "da974307-373e-4816-9738-d4439b980558",
   "metadata": {},
   "source": [
    "## 房价MEDV与各变量的关系"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "61bc1703-6cc1-47c3-b3b1-d56b7b5983f3",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "066a1269b4dc4dcea1bc7346c63e842f",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "Text(0.5, 1.0, 'Correlations between MEDV and variables')"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sns.set(style=\"whitegrid\")\n",
    "%matplotlib widget\n",
    "fig = plt.figure(figsize=(10,7.5))\n",
    "dataset.corr()['MEDV'].sort_values(ascending = False).plot(kind='bar')\n",
    "plt.title(\"Correlations between MEDV and variables\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "id": "31e7b85e-9d97-429e-ae77-0f516100f935",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "460566e15b2b47c9b822d47e0a22d81a",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "Text(0.5, 1, '各属性与房价的关系')"
      ]
     },
     "execution_count": 61,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sns.set(style=\"whitegrid\")\n",
    "%matplotlib widget\n",
    "plt.rcParams[\"font.family\"] = 'SimHei'  # 将字体改为中文\n",
    "plt.rcParams['axes.unicode_minus'] = False  # 设置了中文字体默认后，坐标的\"-\"号无法显示，设置这个参数就可以避免\n",
    "plt.figure(figsize=(16, 10))  # 设置绘图尺寸\n",
    "column = len(dataset.columns)\n",
    "\n",
    "for i in range(column-1):\n",
    "    plt.subplot(4, 4, (i + 1))\n",
    "    plt.scatter(dataset.iloc[:,i], dataset['MEDV'], edgecolors='w', alpha=0.7)\n",
    "    plt.xlabel('{}'.format(dataset.columns[i]), fontsize=10)  # 设置x轴标签文本\n",
    "    plt.ylabel('房价', fontsize=10)\n",
    "    plt.title('波士顿房价与{}关系'.format(dataset.columns[i]), fontsize=10)  # 设置图标题\n",
    "\n",
    "plt.tight_layout(rect=[0,0,1,0.9])                                   #   优化子图与总标题的位置，防止重叠\n",
    "plt.suptitle('各属性与房价的关系', x=0.5, y=1, fontsize=20)          #   设置总标题\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "24575ecc-7e7b-4cd9-80a6-68d79e9f7b12",
   "metadata": {},
   "source": [
    "## 单变量的分布"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "id": "11f9d664-e20d-4d7a-bcd1-743ef0c3ec45",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "aed90aba69494631a3d43b173c647fd2",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "Text(0, 0.5, 'Frequency')"
      ]
     },
     "execution_count": 56,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sns.set(style=\"whitegrid\")\n",
    "%matplotlib widget\n",
    "fig = plt.figure(figsize=(10,7.5))\n",
    "plt.grid(axis='y', alpha=0.5)\n",
    "ax = sns.distplot(dataset['MEDV'], bins=30, hist_kws=dict(edgecolor=\"w\", linewidth=2))\n",
    "ax.set_title('Histogram', fontsize=20)\n",
    "ax.set_xlabel('MEDV or Price', fontsize=20)\n",
    "ax.set_ylabel('Frequency', fontsize=20)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "96d985cc-e8bd-4c54-b3bd-8b7ec90332e5",
   "metadata": {},
   "source": [
    "# 2. 准备数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 500,
   "id": "20c3ac8c-4f15-4e56-b790-49dd1cab2f74",
   "metadata": {},
   "outputs": [],
   "source": [
    "X=dataset.iloc[:,dataset.keys()!='MEDV']\n",
    "# Z-SCORE标准化\n",
    "X=(X-X.mean())/(X.std())  \n",
    "Y = dataset['MEDV'] "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 501,
   "id": "9f960415-7c92-4e46-bb47-aa271f396280",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "X Train:  (404, 13)\n",
      "Y Train:  (404,)\n",
      "X Test:  (102, 13)\n",
      "Y Test:  (102,)\n"
     ]
    }
   ],
   "source": [
    "# 划分数据集\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.20, random_state = 0)\n",
    "print(\"X Train: \", X_train.shape)\n",
    "print(\"Y Train: \", Y_train.shape)\n",
    "print(\"X Test: \", X_test.shape)\n",
    "print(\"Y Test: \", Y_test.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a24c3c67-d0d4-441d-9871-826a1e672226",
   "metadata": {},
   "source": [
    "# 3. 建立模型"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7bb0bafd-6da5-4292-b5a8-dc939a46bf4b",
   "metadata": {},
   "source": [
    "## 3.1 多变量线性回归 LinearRegression"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 502,
   "id": "0dd0a3d2-9966-43f6-bffa-d6844d4640d2",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "LinearRegression()"
      ]
     },
     "execution_count": 502,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.linear_model import LinearRegression\n",
    "lr = LinearRegression()\n",
    "lr.fit(X_train, Y_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 503,
   "id": "45f44d10-b037-4ac4-a44a-92feac3f45a0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[-1.0273982   1.0443783   0.03763083  0.59455017 -1.86836579  2.60580253\n",
      " -0.0878549  -2.91935098  2.12612403 -1.85216165 -2.2643624   0.74041111\n",
      " -3.51906316]\n",
      "22.480352884751223\n"
     ]
    }
   ],
   "source": [
    "# coef_存放回归系数，intercept_则存放截距\n",
    "print(lr.coef_)\n",
    "print(lr.intercept_)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "afaf063f-8901-42c5-a090-1daa0abf6bc9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 504,
   "id": "20c8474a-0504-4bc5-994c-9b2cd7d1d9fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 预测\n",
    "Y_pred = lr.predict(X_test) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 505,
   "id": "ef39f121-9d67-4e10-b674-e77c294cd278",
   "metadata": {},
   "outputs": [],
   "source": [
    "model_lr = pd.DataFrame(X_test)\n",
    "model_lr['MEDV'] = Y_test\n",
    "model_lr['Predicted MEDV'] = Y_pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 506,
   "id": "3fb712bc-7978-4cda-aef3-d00d3c717d03",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>CRIM</th>\n",
       "      <th>ZN</th>\n",
       "      <th>INDUS</th>\n",
       "      <th>CHAS</th>\n",
       "      <th>NOX</th>\n",
       "      <th>RM</th>\n",
       "      <th>AGE</th>\n",
       "      <th>DIS</th>\n",
       "      <th>RAD</th>\n",
       "      <th>TAX</th>\n",
       "      <th>PTRATIO</th>\n",
       "      <th>B</th>\n",
       "      <th>LSTAT</th>\n",
       "      <th>MEDV</th>\n",
       "      <th>Predicted MEDV</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>329</th>\n",
       "      <td>-0.412284</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>-1.151075</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>-0.817198</td>\n",
       "      <td>0.068836</td>\n",
       "      <td>-1.825115</td>\n",
       "      <td>0.674147</td>\n",
       "      <td>-0.637331</td>\n",
       "      <td>0.129128</td>\n",
       "      <td>-0.718509</td>\n",
       "      <td>0.203034</td>\n",
       "      <td>-0.744016</td>\n",
       "      <td>22.6</td>\n",
       "      <td>24.889638</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>371</th>\n",
       "      <td>0.653229</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>1.014995</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>0.658496</td>\n",
       "      <td>-0.097684</td>\n",
       "      <td>1.116390</td>\n",
       "      <td>-1.247058</td>\n",
       "      <td>1.659603</td>\n",
       "      <td>1.529413</td>\n",
       "      <td>0.805778</td>\n",
       "      <td>0.103795</td>\n",
       "      <td>-0.437339</td>\n",
       "      <td>50.0</td>\n",
       "      <td>23.721411</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>219</th>\n",
       "      <td>-0.406819</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>0.401324</td>\n",
       "      <td>3.664771</td>\n",
       "      <td>-0.040517</td>\n",
       "      <td>0.125766</td>\n",
       "      <td>0.846397</td>\n",
       "      <td>-0.205034</td>\n",
       "      <td>-0.522484</td>\n",
       "      <td>-0.784617</td>\n",
       "      <td>-0.949462</td>\n",
       "      <td>0.406003</td>\n",
       "      <td>-0.301505</td>\n",
       "      <td>23.0</td>\n",
       "      <td>29.364999</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>403</th>\n",
       "      <td>2.463299</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>1.014995</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>1.193543</td>\n",
       "      <td>-1.331642</td>\n",
       "      <td>0.974288</td>\n",
       "      <td>-0.993604</td>\n",
       "      <td>1.659603</td>\n",
       "      <td>1.529413</td>\n",
       "      <td>0.805778</td>\n",
       "      <td>0.440616</td>\n",
       "      <td>0.996622</td>\n",
       "      <td>8.3</td>\n",
       "      <td>12.122386</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>78</th>\n",
       "      <td>-0.413538</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>0.246813</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>-1.015684</td>\n",
       "      <td>-0.074912</td>\n",
       "      <td>-0.528437</td>\n",
       "      <td>0.578929</td>\n",
       "      <td>-0.522484</td>\n",
       "      <td>-0.060741</td>\n",
       "      <td>0.112920</td>\n",
       "      <td>0.325604</td>\n",
       "      <td>-0.043840</td>\n",
       "      <td>21.2</td>\n",
       "      <td>21.443823</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>56</th>\n",
       "      <td>-0.417713</td>\n",
       "      <td>3.157316</td>\n",
       "      <td>-1.515487</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>-1.248688</td>\n",
       "      <td>0.139999</td>\n",
       "      <td>-1.167895</td>\n",
       "      <td>2.560921</td>\n",
       "      <td>-0.867024</td>\n",
       "      <td>-0.565081</td>\n",
       "      <td>-0.533747</td>\n",
       "      <td>0.440616</td>\n",
       "      <td>-0.963871</td>\n",
       "      <td>24.7</td>\n",
       "      <td>25.442171</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>455</th>\n",
       "      <td>0.132400</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>1.014995</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>1.366138</td>\n",
       "      <td>0.342100</td>\n",
       "      <td>0.636797</td>\n",
       "      <td>-0.645503</td>\n",
       "      <td>1.659603</td>\n",
       "      <td>1.529413</td>\n",
       "      <td>0.805778</td>\n",
       "      <td>-3.349082</td>\n",
       "      <td>0.766964</td>\n",
       "      <td>14.1</td>\n",
       "      <td>15.571783</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>60</th>\n",
       "      <td>-0.402742</td>\n",
       "      <td>0.584688</td>\n",
       "      <td>-0.875579</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>-0.877607</td>\n",
       "      <td>-0.773728</td>\n",
       "      <td>-0.084369</td>\n",
       "      <td>1.629074</td>\n",
       "      <td>-0.177944</td>\n",
       "      <td>-0.737150</td>\n",
       "      <td>0.574826</td>\n",
       "      <td>0.421009</td>\n",
       "      <td>0.069589</td>\n",
       "      <td>18.7</td>\n",
       "      <td>17.937195</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>213</th>\n",
       "      <td>-0.403765</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>-0.079701</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>-0.566935</td>\n",
       "      <td>0.128613</td>\n",
       "      <td>-1.288681</td>\n",
       "      <td>0.071405</td>\n",
       "      <td>-0.637331</td>\n",
       "      <td>-0.778684</td>\n",
       "      <td>0.066730</td>\n",
       "      <td>0.319141</td>\n",
       "      <td>-0.458344</td>\n",
       "      <td>28.1</td>\n",
       "      <td>25.305888</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>108</th>\n",
       "      <td>-0.405218</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>-0.375604</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>-0.299411</td>\n",
       "      <td>0.269515</td>\n",
       "      <td>1.013366</td>\n",
       "      <td>-0.646880</td>\n",
       "      <td>-0.522484</td>\n",
       "      <td>-0.143809</td>\n",
       "      <td>1.129112</td>\n",
       "      <td>0.422433</td>\n",
       "      <td>-0.053642</td>\n",
       "      <td>19.8</td>\n",
       "      <td>22.373233</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>102 rows × 15 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "         CRIM        ZN     INDUS      CHAS       NOX        RM       AGE  \\\n",
       "329 -0.412284 -0.487240 -1.151075 -0.272329 -0.817198  0.068836 -1.825115   \n",
       "371  0.653229 -0.487240  1.014995 -0.272329  0.658496 -0.097684  1.116390   \n",
       "219 -0.406819 -0.487240  0.401324  3.664771 -0.040517  0.125766  0.846397   \n",
       "403  2.463299 -0.487240  1.014995 -0.272329  1.193543 -1.331642  0.974288   \n",
       "78  -0.413538 -0.487240  0.246813 -0.272329 -1.015684 -0.074912 -0.528437   \n",
       "..        ...       ...       ...       ...       ...       ...       ...   \n",
       "56  -0.417713  3.157316 -1.515487 -0.272329 -1.248688  0.139999 -1.167895   \n",
       "455  0.132400 -0.487240  1.014995 -0.272329  1.366138  0.342100  0.636797   \n",
       "60  -0.402742  0.584688 -0.875579 -0.272329 -0.877607 -0.773728 -0.084369   \n",
       "213 -0.403765 -0.487240 -0.079701 -0.272329 -0.566935  0.128613 -1.288681   \n",
       "108 -0.405218 -0.487240 -0.375604 -0.272329 -0.299411  0.269515  1.013366   \n",
       "\n",
       "          DIS       RAD       TAX   PTRATIO         B     LSTAT  MEDV  \\\n",
       "329  0.674147 -0.637331  0.129128 -0.718509  0.203034 -0.744016  22.6   \n",
       "371 -1.247058  1.659603  1.529413  0.805778  0.103795 -0.437339  50.0   \n",
       "219 -0.205034 -0.522484 -0.784617 -0.949462  0.406003 -0.301505  23.0   \n",
       "403 -0.993604  1.659603  1.529413  0.805778  0.440616  0.996622   8.3   \n",
       "78   0.578929 -0.522484 -0.060741  0.112920  0.325604 -0.043840  21.2   \n",
       "..        ...       ...       ...       ...       ...       ...   ...   \n",
       "56   2.560921 -0.867024 -0.565081 -0.533747  0.440616 -0.963871  24.7   \n",
       "455 -0.645503  1.659603  1.529413  0.805778 -3.349082  0.766964  14.1   \n",
       "60   1.629074 -0.177944 -0.737150  0.574826  0.421009  0.069589  18.7   \n",
       "213  0.071405 -0.637331 -0.778684  0.066730  0.319141 -0.458344  28.1   \n",
       "108 -0.646880 -0.522484 -0.143809  1.129112  0.422433 -0.053642  19.8   \n",
       "\n",
       "     Predicted MEDV  \n",
       "329       24.889638  \n",
       "371       23.721411  \n",
       "219       29.364999  \n",
       "403       12.122386  \n",
       "78        21.443823  \n",
       "..              ...  \n",
       "56        25.442171  \n",
       "455       15.571783  \n",
       "60        17.937195  \n",
       "213       25.305888  \n",
       "108       22.373233  \n",
       "\n",
       "[102 rows x 15 columns]"
      ]
     },
     "execution_count": 506,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Display the test-set frame with actual vs. predicted MEDV for the linear model\n",
     "model_lr"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e37dd0e4-479f-4d8b-b6f8-8d23735396b7",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 507,
   "id": "76396985-ebcd-4eaf-aeda-578c8179899c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "mse:33.44897999767656,mae:3.8429092204444997\n"
     ]
    }
   ],
   "source": [
    "# Get Mean Squared Error (MSE)\n",
    "from sklearn.metrics import mean_squared_error\n",
    "mse = mean_squared_error(Y_test, Y_pred)\n",
    "# Get Mean Absolute Error (MAE)\n",
    "from sklearn.metrics import mean_absolute_error\n",
    "mae = mean_absolute_error(Y_test, Y_pred)\n",
    "print(f\"mse:{mse},mae:{mae}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9cd3f500-f5ba-4d92-9b4e-364f801817f3",
   "metadata": {},
   "source": [
    "线性回归模型常用的优化方法，包括增加多项式特征以及数据归一化处理等\n",
     "## 3.2 多项式回归-非线性 y=wx+b -> y = b + w1x + w2x^2 + w3x^3 + …… "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 508,
   "id": "629103d1-cf57-494b-a731-a67699648bc5",
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.linear_model import LinearRegression\n",
    "from sklearn.preprocessing import PolynomialFeatures\n",
    "from sklearn.pipeline import Pipeline\n",
    "def polynomial_model(degree=1):\n",
    "    polynomial_features = PolynomialFeatures(degree=degree,include_bias=False)\n",
    "    linear_regression = LinearRegression(normalize=True)\n",
    "    # 这是一个流水线，先增加多项式阶数，然后再用线性回归算法来拟合数据\n",
    "    pipeline = Pipeline([(\"polynomial_features\", polynomial_features),\n",
    "                         (\"linear_regression\", linear_regression)])\n",
    "    return pipeline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 509,
   "id": "c2984092-11e6-41bb-9d28-7bd0dbf4bb3f",
   "metadata": {},
   "outputs": [],
   "source": [
     "# 80/20 train/test split; fixed random_state for reproducibility\n",
     "X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.20, random_state = 0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 510,
   "id": "3a353549-b5ee-416e-ba9f-33fdc9ebaa2c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Pipeline(steps=[('polynomial_features', PolynomialFeatures(include_bias=False)),\n",
       "                ('linear_regression', LinearRegression(normalize=True))])"
      ]
     },
     "execution_count": 510,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Fit a degree-2 polynomial regression on the training split\n",
     "poly_model = polynomial_model(degree=2)\n",
     "poly_model.fit(X_train, Y_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 511,
   "id": "a8449d29-a257-46bd-9ec9-62f1bd34338d",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Predict MEDV for the test split\n",
     "Y_pred = poly_model.predict(X_test) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 512,
   "id": "4adfccde-4b13-4df3-9877-d2443bbbdc16",
   "metadata": {},
   "outputs": [],
   "source": [
    "model_poly = pd.DataFrame(X_test)\n",
    "model_poly['MEDV'] = Y_test\n",
    "model_poly['Predicted MEDV'] = Y_pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 513,
   "id": "eeff6c4f-da10-47dd-acb5-e5e629819967",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "mse:31.277814971446688,mae:3.3277633001008353\n"
     ]
    }
   ],
   "source": [
    "# Get Mean Squared Error (MSE)\n",
    "from sklearn.metrics import mean_squared_error\n",
    "mse = mean_squared_error(Y_test, Y_pred)\n",
    "# Get Mean Absolute Error (MAE)\n",
    "from sklearn.metrics import mean_absolute_error\n",
    "mae = mean_absolute_error(Y_test, Y_pred)\n",
    "print(f\"mse:{mse},mae:{mae}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "67083c58-59b2-4ecf-862e-d16f51e96ffd",
   "metadata": {},
   "source": [
    "## 3.3 神经网络+ensemble\n",
    "https://ensemble-pytorch.readthedocs.io/en/stable/introduction.html"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 514,
   "id": "ad92576d-cfd5-4046-af59-dd2e60e77f4a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.nn import functional as F\n",
    "from torch.utils.data import TensorDataset, DataLoader\n",
    "from torchensemble import GradientBoostingRegressor,BaggingRegressor,FusionRegressor,VotingRegressor,SnapshotEnsembleRegressor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 515,
   "id": "533dbb28-5ad1-40ea-8915-74b2dd27c1cc",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1.定义一个model\n",
    "class MLP(nn.Module):\n",
    "\n",
    "    def __init__(self):\n",
    "        super(MLP, self).__init__()\n",
    "        self.linear1 = nn.Linear(13, 128)\n",
    "        self.linear2 = nn.Linear(128, 128)\n",
    "        self.linear3 = nn.Linear(128, 1)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = x.view(x.size()[0], -1)\n",
    "        x = F.relu(self.linear1(x))\n",
    "        x = F.relu(self.linear2(x))\n",
    "        x = self.linear3(x)\n",
    "        return x\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 516,
   "id": "8bb4b05f-089d-4c3d-a3b7-f9b9e7013002",
   "metadata": {},
   "outputs": [],
   "source": [
     "# 2. Define the ensemble\n",
     "# Bagging: 10 MLPs, each trained on a resampled batch; predictions are combined\n",
     "mlp_model = BaggingRegressor(\n",
     "    estimator=MLP,\n",
     "    n_estimators=10,\n",
     "    cuda=False,\n",
     ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 517,
   "id": "f7598049-578d-4276-ad54-d695593fbff9",
   "metadata": {},
   "outputs": [],
   "source": [
     "# 3. Prepare the data: 80/20 train/test split (fixed seed for reproducibility)\n",
     "X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.20, random_state = 0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 518,
   "id": "262441ad-3f21-40e6-a705-f207dc367fbc",
   "metadata": {},
   "outputs": [],
   "source": [
    "X_train=torch.FloatTensor(np.array(X_train))\n",
    "X_test=torch.FloatTensor(np.array(X_test))\n",
    "Y_train=torch.FloatTensor(np.array(Y_train)).reshape(-1, 1)\n",
    "Y_test=torch.FloatTensor(np.array(Y_test)).reshape(-1, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 519,
   "id": "915a394c-ac30-4a10-8b05-a122c415392d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([404, 13])"
      ]
     },
     "execution_count": 519,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Inspect the training feature tensor shape\n",
     "X_train.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 520,
   "id": "d4e86f9e-a4f7-4f0b-846a-d6ba221bb7fe",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([404, 1])"
      ]
     },
     "execution_count": 520,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Inspect the training target tensor shape\n",
     "Y_train.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 521,
   "id": "c1565332-ebcd-4b3b-bcf6-73d2e142c535",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Tensor -> Data loader\n",
     "# Training batches are shuffled each epoch; test batches keep their order\n",
     "train_data = TensorDataset(X_train, Y_train)\n",
     "train_loader = DataLoader(train_data, batch_size=10, shuffle=True)\n",
     "\n",
     "test_data = TensorDataset(X_test, Y_test)\n",
     "test_loader = DataLoader(test_data, batch_size=10, shuffle=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 522,
   "id": "ccfd53cd-41d5-4f51-a9a3-c9a63c15630e",
   "metadata": {},
   "outputs": [],
   "source": [
     "# 4. Configure the optimizer: Adam with L2 weight decay, applied to every base estimator\n",
     "lr = 1e-3\n",
     "weight_decay = 5e-4\n",
     "mlp_model.set_optimizer(\"Adam\", lr=lr, weight_decay=weight_decay)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 523,
   "id": "94fbbd44-8573-430a-aa64-05f7ae4159c0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Estimator: 000 | Epoch: 000 | Batch: 000 | Loss: 492.57272\n",
      "Estimator: 001 | Epoch: 000 | Batch: 000 | Loss: 661.58588\n",
      "Estimator: 002 | Epoch: 000 | Batch: 000 | Loss: 841.02472\n",
      "Estimator: 003 | Epoch: 000 | Batch: 000 | Loss: 675.25623\n",
      "Estimator: 004 | Epoch: 000 | Batch: 000 | Loss: 460.38776\n",
      "Estimator: 005 | Epoch: 000 | Batch: 000 | Loss: 487.92099\n",
      "Estimator: 006 | Epoch: 000 | Batch: 000 | Loss: 756.50970\n",
      "Estimator: 007 | Epoch: 000 | Batch: 000 | Loss: 803.36945\n",
      "Estimator: 008 | Epoch: 000 | Batch: 000 | Loss: 842.34357\n",
      "Estimator: 009 | Epoch: 000 | Batch: 000 | Loss: 893.55109\n",
      "Estimator: 000 | Epoch: 001 | Batch: 000 | Loss: 295.17859\n",
      "Estimator: 001 | Epoch: 001 | Batch: 000 | Loss: 453.42401\n",
      "Estimator: 002 | Epoch: 001 | Batch: 000 | Loss: 296.46353\n",
      "Estimator: 003 | Epoch: 001 | Batch: 000 | Loss: 249.19002\n",
      "Estimator: 004 | Epoch: 001 | Batch: 000 | Loss: 166.94260\n",
      "Estimator: 005 | Epoch: 001 | Batch: 000 | Loss: 239.17291\n",
      "Estimator: 006 | Epoch: 001 | Batch: 000 | Loss: 321.86261\n",
      "Estimator: 007 | Epoch: 001 | Batch: 000 | Loss: 345.22263\n",
      "Estimator: 008 | Epoch: 001 | Batch: 000 | Loss: 380.51654\n",
      "Estimator: 009 | Epoch: 001 | Batch: 000 | Loss: 380.81436\n",
      "Estimator: 000 | Epoch: 002 | Batch: 000 | Loss: 64.55321\n",
      "Estimator: 001 | Epoch: 002 | Batch: 000 | Loss: 44.42136\n",
      "Estimator: 002 | Epoch: 002 | Batch: 000 | Loss: 78.72710\n",
      "Estimator: 003 | Epoch: 002 | Batch: 000 | Loss: 18.25661\n",
      "Estimator: 004 | Epoch: 002 | Batch: 000 | Loss: 32.36075\n",
      "Estimator: 005 | Epoch: 002 | Batch: 000 | Loss: 3.71953\n",
      "Estimator: 006 | Epoch: 002 | Batch: 000 | Loss: 21.79659\n",
      "Estimator: 007 | Epoch: 002 | Batch: 000 | Loss: 96.84765\n",
      "Estimator: 008 | Epoch: 002 | Batch: 000 | Loss: 63.58706\n",
      "Estimator: 009 | Epoch: 002 | Batch: 000 | Loss: 6.67329\n",
      "Estimator: 000 | Epoch: 003 | Batch: 000 | Loss: 10.81219\n",
      "Estimator: 001 | Epoch: 003 | Batch: 000 | Loss: 19.53244\n",
      "Estimator: 002 | Epoch: 003 | Batch: 000 | Loss: 6.35377\n",
      "Estimator: 003 | Epoch: 003 | Batch: 000 | Loss: 10.78554\n",
      "Estimator: 004 | Epoch: 003 | Batch: 000 | Loss: 15.42299\n",
      "Estimator: 005 | Epoch: 003 | Batch: 000 | Loss: 8.51373\n",
      "Estimator: 006 | Epoch: 003 | Batch: 000 | Loss: 19.43707\n",
      "Estimator: 007 | Epoch: 003 | Batch: 000 | Loss: 21.75904\n",
      "Estimator: 008 | Epoch: 003 | Batch: 000 | Loss: 25.13419\n",
      "Estimator: 009 | Epoch: 003 | Batch: 000 | Loss: 17.54169\n",
      "Estimator: 000 | Epoch: 004 | Batch: 000 | Loss: 3.44173\n",
      "Estimator: 001 | Epoch: 004 | Batch: 000 | Loss: 67.90717\n",
      "Estimator: 002 | Epoch: 004 | Batch: 000 | Loss: 11.53385\n",
      "Estimator: 003 | Epoch: 004 | Batch: 000 | Loss: 13.69804\n",
      "Estimator: 004 | Epoch: 004 | Batch: 000 | Loss: 24.59277\n",
      "Estimator: 005 | Epoch: 004 | Batch: 000 | Loss: 10.56685\n",
      "Estimator: 006 | Epoch: 004 | Batch: 000 | Loss: 83.96107\n",
      "Estimator: 007 | Epoch: 004 | Batch: 000 | Loss: 151.87674\n",
      "Estimator: 008 | Epoch: 004 | Batch: 000 | Loss: 4.57750\n",
      "Estimator: 009 | Epoch: 004 | Batch: 000 | Loss: 1.33000\n",
      "Estimator: 000 | Epoch: 005 | Batch: 000 | Loss: 42.79034\n",
      "Estimator: 001 | Epoch: 005 | Batch: 000 | Loss: 22.91041\n",
      "Estimator: 002 | Epoch: 005 | Batch: 000 | Loss: 4.38579\n",
      "Estimator: 003 | Epoch: 005 | Batch: 000 | Loss: 21.14622\n",
      "Estimator: 004 | Epoch: 005 | Batch: 000 | Loss: 5.50317\n",
      "Estimator: 005 | Epoch: 005 | Batch: 000 | Loss: 9.54070\n",
      "Estimator: 006 | Epoch: 005 | Batch: 000 | Loss: 13.34257\n",
      "Estimator: 007 | Epoch: 005 | Batch: 000 | Loss: 4.60881\n",
      "Estimator: 008 | Epoch: 005 | Batch: 000 | Loss: 6.46028\n",
      "Estimator: 009 | Epoch: 005 | Batch: 000 | Loss: 7.01033\n",
      "Estimator: 000 | Epoch: 006 | Batch: 000 | Loss: 6.87150\n",
      "Estimator: 001 | Epoch: 006 | Batch: 000 | Loss: 9.99544\n",
      "Estimator: 002 | Epoch: 006 | Batch: 000 | Loss: 14.20729\n",
      "Estimator: 003 | Epoch: 006 | Batch: 000 | Loss: 11.26515\n",
      "Estimator: 004 | Epoch: 006 | Batch: 000 | Loss: 7.88688\n",
      "Estimator: 005 | Epoch: 006 | Batch: 000 | Loss: 12.05886\n",
      "Estimator: 006 | Epoch: 006 | Batch: 000 | Loss: 10.21295\n",
      "Estimator: 007 | Epoch: 006 | Batch: 000 | Loss: 25.20183\n",
      "Estimator: 008 | Epoch: 006 | Batch: 000 | Loss: 5.84007\n",
      "Estimator: 009 | Epoch: 006 | Batch: 000 | Loss: 15.83749\n",
      "Estimator: 000 | Epoch: 007 | Batch: 000 | Loss: 2.92548\n",
      "Estimator: 001 | Epoch: 007 | Batch: 000 | Loss: 9.49381\n",
      "Estimator: 002 | Epoch: 007 | Batch: 000 | Loss: 1.72904\n",
      "Estimator: 003 | Epoch: 007 | Batch: 000 | Loss: 10.82793\n",
      "Estimator: 004 | Epoch: 007 | Batch: 000 | Loss: 8.77388\n",
      "Estimator: 005 | Epoch: 007 | Batch: 000 | Loss: 13.83766\n",
      "Estimator: 006 | Epoch: 007 | Batch: 000 | Loss: 86.60300\n",
      "Estimator: 007 | Epoch: 007 | Batch: 000 | Loss: 8.21410\n",
      "Estimator: 008 | Epoch: 007 | Batch: 000 | Loss: 6.13435\n",
      "Estimator: 009 | Epoch: 007 | Batch: 000 | Loss: 8.55173\n",
      "Estimator: 000 | Epoch: 008 | Batch: 000 | Loss: 15.31661\n",
      "Estimator: 001 | Epoch: 008 | Batch: 000 | Loss: 82.04492\n",
      "Estimator: 002 | Epoch: 008 | Batch: 000 | Loss: 12.54836\n",
      "Estimator: 003 | Epoch: 008 | Batch: 000 | Loss: 5.99385\n",
      "Estimator: 004 | Epoch: 008 | Batch: 000 | Loss: 11.32175\n",
      "Estimator: 005 | Epoch: 008 | Batch: 000 | Loss: 6.83679\n",
      "Estimator: 006 | Epoch: 008 | Batch: 000 | Loss: 4.83037\n",
      "Estimator: 007 | Epoch: 008 | Batch: 000 | Loss: 11.23558\n",
      "Estimator: 008 | Epoch: 008 | Batch: 000 | Loss: 6.24137\n",
      "Estimator: 009 | Epoch: 008 | Batch: 000 | Loss: 6.13086\n",
      "Estimator: 000 | Epoch: 009 | Batch: 000 | Loss: 4.50770\n",
      "Estimator: 001 | Epoch: 009 | Batch: 000 | Loss: 37.04900\n",
      "Estimator: 002 | Epoch: 009 | Batch: 000 | Loss: 3.61345\n",
      "Estimator: 003 | Epoch: 009 | Batch: 000 | Loss: 12.74670\n",
      "Estimator: 004 | Epoch: 009 | Batch: 000 | Loss: 8.82194\n",
      "Estimator: 005 | Epoch: 009 | Batch: 000 | Loss: 4.59518\n",
      "Estimator: 006 | Epoch: 009 | Batch: 000 | Loss: 16.33167\n",
      "Estimator: 007 | Epoch: 009 | Batch: 000 | Loss: 6.15108\n",
      "Estimator: 008 | Epoch: 009 | Batch: 000 | Loss: 7.51778\n",
      "Estimator: 009 | Epoch: 009 | Batch: 000 | Loss: 5.45424\n"
     ]
    }
   ],
   "source": [
     "# 5. Train the ensemble for 10 epochs\n",
     "mlp_model.fit(train_loader,epochs=10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 524,
   "id": "e67c5bb6-bde7-4813-aac9-b6dbd8f1aad4",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "# Evaluate the ensemble on the test loader (the returned value is stored as MSE;\n",
     "# see torchensemble's evaluate() for the exact metric definition)\n",
     "testing_mse = mlp_model.evaluate(test_loader)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 525,
   "id": "181eb328-6843-4f56-b6a2-67cbc8af2658",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "26.197526411576703"
      ]
     },
     "execution_count": 525,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Display the held-out error reported by evaluate()\n",
     "testing_mse"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 526,
   "id": "e65be3f8-b8bd-434e-a305-053a28486396",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Predict MEDV for the test tensor with the trained ensemble\n",
     "Y_pred = mlp_model.predict(X_test) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 527,
   "id": "409f0518-050f-4bab-9ef9-5f1cc4f54b48",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([102, 1])"
      ]
     },
     "execution_count": 527,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# One prediction per test row, shaped (N, 1)\n",
     "Y_pred.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 528,
   "id": "8c5d632f-6522-4533-bf6b-8a4aee9e8931",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Combine test features, actual MEDV, and ensemble predictions in one frame\n",
     "model_MLP_vote = pd.DataFrame(np.array(X_test))\n",
     "model_MLP_vote['MEDV'] = np.array(Y_test)\n",
     "model_MLP_vote['Predicted MEDV'] = np.array(Y_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 529,
   "id": "22fbb4c5-69a8-4be4-be86-7c257cce8561",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>0</th>\n",
       "      <th>1</th>\n",
       "      <th>2</th>\n",
       "      <th>3</th>\n",
       "      <th>4</th>\n",
       "      <th>5</th>\n",
       "      <th>6</th>\n",
       "      <th>7</th>\n",
       "      <th>8</th>\n",
       "      <th>9</th>\n",
       "      <th>10</th>\n",
       "      <th>11</th>\n",
       "      <th>12</th>\n",
       "      <th>MEDV</th>\n",
       "      <th>Predicted MEDV</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>-0.412284</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>-1.151075</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>-0.817198</td>\n",
       "      <td>0.068836</td>\n",
       "      <td>-1.825115</td>\n",
       "      <td>0.674147</td>\n",
       "      <td>-0.637331</td>\n",
       "      <td>0.129128</td>\n",
       "      <td>-0.718509</td>\n",
       "      <td>0.203034</td>\n",
       "      <td>-0.744016</td>\n",
       "      <td>22.600000</td>\n",
       "      <td>25.338652</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0.653229</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>1.014995</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>0.658496</td>\n",
       "      <td>-0.097684</td>\n",
       "      <td>1.116390</td>\n",
       "      <td>-1.247058</td>\n",
       "      <td>1.659603</td>\n",
       "      <td>1.529413</td>\n",
       "      <td>0.805778</td>\n",
       "      <td>0.103795</td>\n",
       "      <td>-0.437339</td>\n",
       "      <td>50.000000</td>\n",
       "      <td>22.484280</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>-0.406819</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>0.401324</td>\n",
       "      <td>3.664771</td>\n",
       "      <td>-0.040517</td>\n",
       "      <td>0.125766</td>\n",
       "      <td>0.846397</td>\n",
       "      <td>-0.205034</td>\n",
       "      <td>-0.522484</td>\n",
       "      <td>-0.784617</td>\n",
       "      <td>-0.949462</td>\n",
       "      <td>0.406003</td>\n",
       "      <td>-0.301505</td>\n",
       "      <td>23.000000</td>\n",
       "      <td>27.051666</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>2.463299</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>1.014995</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>1.193543</td>\n",
       "      <td>-1.331642</td>\n",
       "      <td>0.974288</td>\n",
       "      <td>-0.993604</td>\n",
       "      <td>1.659603</td>\n",
       "      <td>1.529413</td>\n",
       "      <td>0.805778</td>\n",
       "      <td>0.440616</td>\n",
       "      <td>0.996622</td>\n",
       "      <td>8.300000</td>\n",
       "      <td>11.203319</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>-0.413538</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>0.246813</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>-1.015684</td>\n",
       "      <td>-0.074912</td>\n",
       "      <td>-0.528437</td>\n",
       "      <td>0.578929</td>\n",
       "      <td>-0.522484</td>\n",
       "      <td>-0.060741</td>\n",
       "      <td>0.112920</td>\n",
       "      <td>0.325604</td>\n",
       "      <td>-0.043840</td>\n",
       "      <td>21.200001</td>\n",
       "      <td>18.238495</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>97</th>\n",
       "      <td>-0.417713</td>\n",
       "      <td>3.157316</td>\n",
       "      <td>-1.515487</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>-1.248688</td>\n",
       "      <td>0.139999</td>\n",
       "      <td>-1.167894</td>\n",
       "      <td>2.560921</td>\n",
       "      <td>-0.867024</td>\n",
       "      <td>-0.565081</td>\n",
       "      <td>-0.533747</td>\n",
       "      <td>0.440616</td>\n",
       "      <td>-0.963871</td>\n",
       "      <td>24.700001</td>\n",
       "      <td>25.831537</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>98</th>\n",
       "      <td>0.132400</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>1.014995</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>1.366138</td>\n",
       "      <td>0.342100</td>\n",
       "      <td>0.636797</td>\n",
       "      <td>-0.645503</td>\n",
       "      <td>1.659603</td>\n",
       "      <td>1.529413</td>\n",
       "      <td>0.805778</td>\n",
       "      <td>-3.349082</td>\n",
       "      <td>0.766964</td>\n",
       "      <td>14.100000</td>\n",
       "      <td>14.295522</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>99</th>\n",
       "      <td>-0.402742</td>\n",
       "      <td>0.584688</td>\n",
       "      <td>-0.875579</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>-0.877607</td>\n",
       "      <td>-0.773728</td>\n",
       "      <td>-0.084369</td>\n",
       "      <td>1.629074</td>\n",
       "      <td>-0.177944</td>\n",
       "      <td>-0.737150</td>\n",
       "      <td>0.574826</td>\n",
       "      <td>0.421009</td>\n",
       "      <td>0.069589</td>\n",
       "      <td>18.700001</td>\n",
       "      <td>15.542188</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>100</th>\n",
       "      <td>-0.403765</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>-0.079701</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>-0.566935</td>\n",
       "      <td>0.128613</td>\n",
       "      <td>-1.288681</td>\n",
       "      <td>0.071405</td>\n",
       "      <td>-0.637331</td>\n",
       "      <td>-0.778684</td>\n",
       "      <td>0.066730</td>\n",
       "      <td>0.319141</td>\n",
       "      <td>-0.458344</td>\n",
       "      <td>28.100000</td>\n",
       "      <td>23.538509</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>101</th>\n",
       "      <td>-0.405218</td>\n",
       "      <td>-0.487240</td>\n",
       "      <td>-0.375604</td>\n",
       "      <td>-0.272329</td>\n",
       "      <td>-0.299411</td>\n",
       "      <td>0.269515</td>\n",
       "      <td>1.013366</td>\n",
       "      <td>-0.646880</td>\n",
       "      <td>-0.522484</td>\n",
       "      <td>-0.143809</td>\n",
       "      <td>1.129112</td>\n",
       "      <td>0.422433</td>\n",
       "      <td>-0.053642</td>\n",
       "      <td>19.799999</td>\n",
       "      <td>21.733047</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>102 rows × 15 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "            0         1         2         3         4         5         6  \\\n",
       "0   -0.412284 -0.487240 -1.151075 -0.272329 -0.817198  0.068836 -1.825115   \n",
       "1    0.653229 -0.487240  1.014995 -0.272329  0.658496 -0.097684  1.116390   \n",
       "2   -0.406819 -0.487240  0.401324  3.664771 -0.040517  0.125766  0.846397   \n",
       "3    2.463299 -0.487240  1.014995 -0.272329  1.193543 -1.331642  0.974288   \n",
       "4   -0.413538 -0.487240  0.246813 -0.272329 -1.015684 -0.074912 -0.528437   \n",
       "..        ...       ...       ...       ...       ...       ...       ...   \n",
       "97  -0.417713  3.157316 -1.515487 -0.272329 -1.248688  0.139999 -1.167894   \n",
       "98   0.132400 -0.487240  1.014995 -0.272329  1.366138  0.342100  0.636797   \n",
       "99  -0.402742  0.584688 -0.875579 -0.272329 -0.877607 -0.773728 -0.084369   \n",
       "100 -0.403765 -0.487240 -0.079701 -0.272329 -0.566935  0.128613 -1.288681   \n",
       "101 -0.405218 -0.487240 -0.375604 -0.272329 -0.299411  0.269515  1.013366   \n",
       "\n",
       "            7         8         9        10        11        12       MEDV  \\\n",
       "0    0.674147 -0.637331  0.129128 -0.718509  0.203034 -0.744016  22.600000   \n",
       "1   -1.247058  1.659603  1.529413  0.805778  0.103795 -0.437339  50.000000   \n",
       "2   -0.205034 -0.522484 -0.784617 -0.949462  0.406003 -0.301505  23.000000   \n",
       "3   -0.993604  1.659603  1.529413  0.805778  0.440616  0.996622   8.300000   \n",
       "4    0.578929 -0.522484 -0.060741  0.112920  0.325604 -0.043840  21.200001   \n",
       "..        ...       ...       ...       ...       ...       ...        ...   \n",
       "97   2.560921 -0.867024 -0.565081 -0.533747  0.440616 -0.963871  24.700001   \n",
       "98  -0.645503  1.659603  1.529413  0.805778 -3.349082  0.766964  14.100000   \n",
       "99   1.629074 -0.177944 -0.737150  0.574826  0.421009  0.069589  18.700001   \n",
       "100  0.071405 -0.637331 -0.778684  0.066730  0.319141 -0.458344  28.100000   \n",
       "101 -0.646880 -0.522484 -0.143809  1.129112  0.422433 -0.053642  19.799999   \n",
       "\n",
       "     Predicted MEDV  \n",
       "0         25.338652  \n",
       "1         22.484280  \n",
       "2         27.051666  \n",
       "3         11.203319  \n",
       "4         18.238495  \n",
       "..              ...  \n",
       "97        25.831537  \n",
       "98        14.295522  \n",
       "99        15.542188  \n",
       "100       23.538509  \n",
       "101       21.733047  \n",
       "\n",
       "[102 rows x 15 columns]"
      ]
     },
     "execution_count": 529,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Display actual vs. predicted MEDV for the ensemble\n",
     "model_MLP_vote"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 530,
   "id": "1a207355-b667-4f27-acb8-9ba1e25da0ee",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "mse:27.28972625732422,mae:3.4026076793670654\n"
     ]
    }
   ],
   "source": [
    "Y_test=np.array(Y_test)\n",
    "Y_pred=np.array(Y_pred)\n",
    "# Get Mean Squared Error (MSE)\n",
    "from sklearn.metrics import mean_squared_error\n",
    "mse = mean_squared_error(Y_test, Y_pred)\n",
    "# Get Mean Absolute Error (MAE)\n",
    "from sklearn.metrics import mean_absolute_error\n",
    "mae = mean_absolute_error(Y_test, Y_pred)\n",
    "print(f\"mse:{mse},mae:{mae}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "01175bbb-01b3-4910-a3a9-57c8a60a3714",
   "metadata": {},
   "source": [
    "## 封装神经网络的这个模型，便于之后的检验"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 589,
   "id": "6bec0acd-4b0d-4469-b9a3-3d0cfdde6d76",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ensemble_model():\n",
    "    def __init__(self,ensemble,network,estimators=10,lr=1e-3,weight_decay=5e-4,epoch=50,use_cuda=False):\n",
    "        super(ensemble_model, self).__init__()\n",
    "        self.epoch=epoch\n",
    "        self.model=ensemble(\n",
    "                        estimator=network,\n",
    "                        n_estimators=10,cuda=use_cuda)\n",
    "        #设置优化器optimizer\n",
    "        self.model.set_optimizer(\"Adam\", lr=lr, weight_decay=weight_decay)\n",
    "    def fit(self,X_train,Y_train):\n",
    "        X_train=torch.FloatTensor(np.array(X_train))\n",
    "        Y_train=torch.FloatTensor(np.array(Y_train)).reshape(-1,1)\n",
    "        # Tensor -> Data loader\n",
    "        train_data = TensorDataset(X_train, Y_train)\n",
    "        train_loader = DataLoader(train_data, batch_size=10, shuffle=True)  \n",
    "        self.model.fit(train_loader, epochs=self.epoch)\n",
    "    def predict(self,X_test):\n",
    "        X_test=torch.FloatTensor(np.array(X_test))\n",
    "        # 预测\n",
    "        Y_pred = self.model.predict(X_test)\n",
    "        return np.array(Y_pred)\n",
    "    def score(self,X_test,Y_test):\n",
    "        X_test=torch.FloatTensor(np.array(X_test))\n",
    "        Y_test=torch.FloatTensor(np.array(Y_test)).reshape(-1, 1)\n",
    "        test_data = TensorDataset(X_test, Y_test)\n",
    "        test_loader = DataLoader(test_data, batch_size=10, shuffle=False)\n",
    "        testing_mse = self.model.evaluate(test_loader)\n",
    "        return testing_mse\n",
    "        \n",
    "        "
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8b2024ee-717e-4d5a-bb87-2a37932cc33a",
   "metadata": {},
   "source": [
    "# 4. 模型检验 mae/mse/r2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 532,
   "id": "860477f5-2a8b-4107-b432-a4a427c67758",
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import cross_val_score\n",
    "from sklearn.model_selection import KFold"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 533,
   "id": "7d6482da-da6e-4fb4-943a-a23c8054aa8d",
   "metadata": {},
   "outputs": [],
   "source": [
     "mlp_model=ensemble_model(VotingRegressor,MLP,epoch=10) # only 10 epochs, for demonstration"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 534,
   "id": "8b168d53-0472-4f17-a154-3796cacc088b",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Fresh (unfitted) degree-2 polynomial pipeline for cross-validation\n",
     "poly_model = polynomial_model(degree=2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 535,
   "id": "741d1a75-596a-4648-956f-6d66c6480584",
   "metadata": {},
   "outputs": [],
   "source": [
    "lr = LinearRegression()  # plain ordinary-least-squares baseline for comparison"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5d14a81d-555d-4673-bfdb-0cb8afb9cff6",
   "metadata": {},
   "source": [
    "cross_val_score可以选择的scoring方法：https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 536,
   "id": "3ec62e86-dc17-4b6a-bc64-d9096213b538",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Kfold  7\n",
      "Estimator: 000 | Epoch: 000 | Batch: 000 | Loss: 636.43872\n",
      "Estimator: 001 | Epoch: 000 | Batch: 000 | Loss: 554.03479\n",
      "Estimator: 002 | Epoch: 000 | Batch: 000 | Loss: 356.32129\n",
      "Estimator: 003 | Epoch: 000 | Batch: 000 | Loss: 371.38129\n",
      "Estimator: 004 | Epoch: 000 | Batch: 000 | Loss: 514.46912\n",
      "Estimator: 005 | Epoch: 000 | Batch: 000 | Loss: 646.95233\n",
      "Estimator: 006 | Epoch: 000 | Batch: 000 | Loss: 650.71271\n",
      "Estimator: 007 | Epoch: 000 | Batch: 000 | Loss: 542.82245\n",
      "Estimator: 008 | Epoch: 000 | Batch: 000 | Loss: 309.38565\n",
      "Estimator: 009 | Epoch: 000 | Batch: 000 | Loss: 757.55432\n",
      "Estimator: 000 | Epoch: 001 | Batch: 000 | Loss: 115.45845\n",
      "Estimator: 001 | Epoch: 001 | Batch: 000 | Loss: 186.87463\n",
      "Estimator: 002 | Epoch: 001 | Batch: 000 | Loss: 188.64508\n",
      "Estimator: 003 | Epoch: 001 | Batch: 000 | Loss: 171.39880\n",
      "Estimator: 004 | Epoch: 001 | Batch: 000 | Loss: 162.55998\n",
      "Estimator: 005 | Epoch: 001 | Batch: 000 | Loss: 335.92328\n",
      "Estimator: 006 | Epoch: 001 | Batch: 000 | Loss: 313.36716\n",
      "Estimator: 007 | Epoch: 001 | Batch: 000 | Loss: 165.87553\n",
      "Estimator: 008 | Epoch: 001 | Batch: 000 | Loss: 388.94763\n",
      "Estimator: 009 | Epoch: 001 | Batch: 000 | Loss: 132.59552\n",
      "Estimator: 000 | Epoch: 002 | Batch: 000 | Loss: 239.45111\n",
      "Estimator: 001 | Epoch: 002 | Batch: 000 | Loss: 109.79166\n",
      "Estimator: 002 | Epoch: 002 | Batch: 000 | Loss: 81.54823\n",
      "Estimator: 003 | Epoch: 002 | Batch: 000 | Loss: 59.94487\n",
      "Estimator: 004 | Epoch: 002 | Batch: 000 | Loss: 110.48894\n",
      "Estimator: 005 | Epoch: 002 | Batch: 000 | Loss: 59.04919\n",
      "Estimator: 006 | Epoch: 002 | Batch: 000 | Loss: 126.20172\n",
      "Estimator: 007 | Epoch: 002 | Batch: 000 | Loss: 111.98873\n",
      "Estimator: 008 | Epoch: 002 | Batch: 000 | Loss: 224.25235\n",
      "Estimator: 009 | Epoch: 002 | Batch: 000 | Loss: 187.12318\n",
      "Estimator: 000 | Epoch: 003 | Batch: 000 | Loss: 129.17751\n",
      "Estimator: 001 | Epoch: 003 | Batch: 000 | Loss: 80.87749\n",
      "Estimator: 002 | Epoch: 003 | Batch: 000 | Loss: 68.73672\n",
      "Estimator: 003 | Epoch: 003 | Batch: 000 | Loss: 44.21090\n",
      "Estimator: 004 | Epoch: 003 | Batch: 000 | Loss: 97.08410\n",
      "Estimator: 005 | Epoch: 003 | Batch: 000 | Loss: 140.50128\n",
      "Estimator: 006 | Epoch: 003 | Batch: 000 | Loss: 55.97330\n",
      "Estimator: 007 | Epoch: 003 | Batch: 000 | Loss: 115.31651\n",
      "Estimator: 008 | Epoch: 003 | Batch: 000 | Loss: 168.54305\n",
      "Estimator: 009 | Epoch: 003 | Batch: 000 | Loss: 15.58364\n",
      "Estimator: 000 | Epoch: 004 | Batch: 000 | Loss: 77.17490\n",
      "Estimator: 001 | Epoch: 004 | Batch: 000 | Loss: 94.05092\n",
      "Estimator: 002 | Epoch: 004 | Batch: 000 | Loss: 144.19818\n",
      "Estimator: 003 | Epoch: 004 | Batch: 000 | Loss: 134.17665\n",
      "Estimator: 004 | Epoch: 004 | Batch: 000 | Loss: 176.27615\n",
      "Estimator: 005 | Epoch: 004 | Batch: 000 | Loss: 229.22896\n",
      "Estimator: 006 | Epoch: 004 | Batch: 000 | Loss: 88.32547\n",
      "Estimator: 007 | Epoch: 004 | Batch: 000 | Loss: 59.44048\n",
      "Estimator: 008 | Epoch: 004 | Batch: 000 | Loss: 102.93005\n",
      "Estimator: 009 | Epoch: 004 | Batch: 000 | Loss: 56.84435\n",
      "Estimator: 000 | Epoch: 005 | Batch: 000 | Loss: 75.26352\n",
      "Estimator: 001 | Epoch: 005 | Batch: 000 | Loss: 63.38501\n",
      "Estimator: 002 | Epoch: 005 | Batch: 000 | Loss: 131.12930\n",
      "Estimator: 003 | Epoch: 005 | Batch: 000 | Loss: 115.74700\n",
      "Estimator: 004 | Epoch: 005 | Batch: 000 | Loss: 90.87747\n",
      "Estimator: 005 | Epoch: 005 | Batch: 000 | Loss: 85.27776\n",
      "Estimator: 006 | Epoch: 005 | Batch: 000 | Loss: 73.30087\n",
      "Estimator: 007 | Epoch: 005 | Batch: 000 | Loss: 15.24955\n",
      "Estimator: 008 | Epoch: 005 | Batch: 000 | Loss: 41.18623\n",
      "Estimator: 009 | Epoch: 005 | Batch: 000 | Loss: 246.47321\n",
      "Estimator: 000 | Epoch: 006 | Batch: 000 | Loss: 75.46983\n",
      "Estimator: 001 | Epoch: 006 | Batch: 000 | Loss: 107.23981\n",
      "Estimator: 002 | Epoch: 006 | Batch: 000 | Loss: 41.53188\n",
      "Estimator: 003 | Epoch: 006 | Batch: 000 | Loss: 86.46340\n",
      "Estimator: 004 | Epoch: 006 | Batch: 000 | Loss: 252.80536\n",
      "Estimator: 005 | Epoch: 006 | Batch: 000 | Loss: 46.14600\n",
      "Estimator: 006 | Epoch: 006 | Batch: 000 | Loss: 194.28159\n",
      "Estimator: 007 | Epoch: 006 | Batch: 000 | Loss: 97.34595\n",
      "Estimator: 008 | Epoch: 006 | Batch: 000 | Loss: 179.00531\n",
      "Estimator: 009 | Epoch: 006 | Batch: 000 | Loss: 55.25537\n",
      "Estimator: 000 | Epoch: 007 | Batch: 000 | Loss: 50.37340\n",
      "Estimator: 001 | Epoch: 007 | Batch: 000 | Loss: 141.26773\n",
      "Estimator: 002 | Epoch: 007 | Batch: 000 | Loss: 75.31036\n",
      "Estimator: 003 | Epoch: 007 | Batch: 000 | Loss: 138.07857\n",
      "Estimator: 004 | Epoch: 007 | Batch: 000 | Loss: 58.87871\n",
      "Estimator: 005 | Epoch: 007 | Batch: 000 | Loss: 140.90216\n",
      "Estimator: 006 | Epoch: 007 | Batch: 000 | Loss: 153.68405\n",
      "Estimator: 007 | Epoch: 007 | Batch: 000 | Loss: 83.55695\n",
      "Estimator: 008 | Epoch: 007 | Batch: 000 | Loss: 47.94676\n",
      "Estimator: 009 | Epoch: 007 | Batch: 000 | Loss: 21.82154\n",
      "Estimator: 000 | Epoch: 008 | Batch: 000 | Loss: 69.02205\n",
      "Estimator: 001 | Epoch: 008 | Batch: 000 | Loss: 134.23004\n",
      "Estimator: 002 | Epoch: 008 | Batch: 000 | Loss: 69.46023\n",
      "Estimator: 003 | Epoch: 008 | Batch: 000 | Loss: 51.52736\n",
      "Estimator: 004 | Epoch: 008 | Batch: 000 | Loss: 189.15993\n",
      "Estimator: 005 | Epoch: 008 | Batch: 000 | Loss: 28.78643\n",
      "Estimator: 006 | Epoch: 008 | Batch: 000 | Loss: 53.04337\n",
      "Estimator: 007 | Epoch: 008 | Batch: 000 | Loss: 66.42164\n",
      "Estimator: 008 | Epoch: 008 | Batch: 000 | Loss: 112.69277\n",
      "Estimator: 009 | Epoch: 008 | Batch: 000 | Loss: 32.32143\n",
      "Estimator: 000 | Epoch: 009 | Batch: 000 | Loss: 30.81242\n",
      "Estimator: 001 | Epoch: 009 | Batch: 000 | Loss: 57.27824\n",
      "Estimator: 002 | Epoch: 009 | Batch: 000 | Loss: 133.89796\n",
      "Estimator: 003 | Epoch: 009 | Batch: 000 | Loss: 99.61747\n",
      "Estimator: 004 | Epoch: 009 | Batch: 000 | Loss: 108.27487\n",
      "Estimator: 005 | Epoch: 009 | Batch: 000 | Loss: 60.01418\n",
      "Estimator: 006 | Epoch: 009 | Batch: 000 | Loss: 161.94472\n",
      "Estimator: 007 | Epoch: 009 | Batch: 000 | Loss: 174.85535\n",
      "Estimator: 008 | Epoch: 009 | Batch: 000 | Loss: 147.04488\n",
      "Estimator: 009 | Epoch: 009 | Batch: 000 | Loss: 79.15160\n",
      "Estimator: 000 | Epoch: 000 | Batch: 000 | Loss: 489.32483\n",
      "Estimator: 001 | Epoch: 000 | Batch: 000 | Loss: 475.50415\n",
      "Estimator: 002 | Epoch: 000 | Batch: 000 | Loss: 867.79456\n",
      "Estimator: 003 | Epoch: 000 | Batch: 000 | Loss: 996.14111\n",
      "Estimator: 004 | Epoch: 000 | Batch: 000 | Loss: 668.39960\n",
      "Estimator: 005 | Epoch: 000 | Batch: 000 | Loss: 591.99194\n",
      "Estimator: 006 | Epoch: 000 | Batch: 000 | Loss: 583.09686\n",
      "Estimator: 007 | Epoch: 000 | Batch: 000 | Loss: 421.19308\n",
      "Estimator: 008 | Epoch: 000 | Batch: 000 | Loss: 733.84155\n",
      "Estimator: 009 | Epoch: 000 | Batch: 000 | Loss: 635.50812\n",
      "Estimator: 000 | Epoch: 001 | Batch: 000 | Loss: 299.17694\n",
      "Estimator: 001 | Epoch: 001 | Batch: 000 | Loss: 282.02081\n",
      "Estimator: 002 | Epoch: 001 | Batch: 000 | Loss: 151.60852\n",
      "Estimator: 003 | Epoch: 001 | Batch: 000 | Loss: 172.92628\n",
      "Estimator: 004 | Epoch: 001 | Batch: 000 | Loss: 270.96066\n",
      "Estimator: 005 | Epoch: 001 | Batch: 000 | Loss: 186.96751\n",
      "Estimator: 006 | Epoch: 001 | Batch: 000 | Loss: 317.63486\n",
      "Estimator: 007 | Epoch: 001 | Batch: 000 | Loss: 280.49194\n",
      "Estimator: 008 | Epoch: 001 | Batch: 000 | Loss: 135.23459\n",
      "Estimator: 009 | Epoch: 001 | Batch: 000 | Loss: 145.48262\n",
      "Estimator: 000 | Epoch: 002 | Batch: 000 | Loss: 122.69405\n",
      "Estimator: 001 | Epoch: 002 | Batch: 000 | Loss: 60.33596\n",
      "Estimator: 002 | Epoch: 002 | Batch: 000 | Loss: 99.04739\n",
      "Estimator: 003 | Epoch: 002 | Batch: 000 | Loss: 204.28609\n",
      "Estimator: 004 | Epoch: 002 | Batch: 000 | Loss: 110.97984\n",
      "Estimator: 005 | Epoch: 002 | Batch: 000 | Loss: 91.96847\n",
      "Estimator: 006 | Epoch: 002 | Batch: 000 | Loss: 56.18970\n",
      "Estimator: 007 | Epoch: 002 | Batch: 000 | Loss: 72.52363\n",
      "Estimator: 008 | Epoch: 002 | Batch: 000 | Loss: 41.22575\n",
      "Estimator: 009 | Epoch: 002 | Batch: 000 | Loss: 114.01586\n",
      "Estimator: 000 | Epoch: 003 | Batch: 000 | Loss: 110.13969\n",
      "Estimator: 001 | Epoch: 003 | Batch: 000 | Loss: 153.57066\n",
      "Estimator: 002 | Epoch: 003 | Batch: 000 | Loss: 53.72777\n",
      "Estimator: 003 | Epoch: 003 | Batch: 000 | Loss: 30.96772\n",
      "Estimator: 004 | Epoch: 003 | Batch: 000 | Loss: 63.49573\n",
      "Estimator: 005 | Epoch: 003 | Batch: 000 | Loss: 102.13735\n",
      "Estimator: 006 | Epoch: 003 | Batch: 000 | Loss: 39.55727\n",
      "Estimator: 007 | Epoch: 003 | Batch: 000 | Loss: 46.56932\n",
      "Estimator: 008 | Epoch: 003 | Batch: 000 | Loss: 130.89531\n",
      "Estimator: 009 | Epoch: 003 | Batch: 000 | Loss: 120.83987\n",
      "Estimator: 000 | Epoch: 004 | Batch: 000 | Loss: 46.28624\n",
      "Estimator: 001 | Epoch: 004 | Batch: 000 | Loss: 144.38861\n",
      "Estimator: 002 | Epoch: 004 | Batch: 000 | Loss: 81.73099\n",
      "Estimator: 003 | Epoch: 004 | Batch: 000 | Loss: 99.50321\n",
      "Estimator: 004 | Epoch: 004 | Batch: 000 | Loss: 175.36975\n",
      "Estimator: 005 | Epoch: 004 | Batch: 000 | Loss: 239.37862\n",
      "Estimator: 006 | Epoch: 004 | Batch: 000 | Loss: 20.51536\n",
      "Estimator: 007 | Epoch: 004 | Batch: 000 | Loss: 55.04962\n",
      "Estimator: 008 | Epoch: 004 | Batch: 000 | Loss: 132.86731\n",
      "Estimator: 009 | Epoch: 004 | Batch: 000 | Loss: 104.81673\n",
      "Estimator: 000 | Epoch: 005 | Batch: 000 | Loss: 74.06900\n",
      "Estimator: 001 | Epoch: 005 | Batch: 000 | Loss: 134.52519\n",
      "Estimator: 002 | Epoch: 005 | Batch: 000 | Loss: 170.12703\n",
      "Estimator: 003 | Epoch: 005 | Batch: 000 | Loss: 51.04306\n",
      "Estimator: 004 | Epoch: 005 | Batch: 000 | Loss: 84.55242\n",
      "Estimator: 005 | Epoch: 005 | Batch: 000 | Loss: 149.21326\n",
      "Estimator: 006 | Epoch: 005 | Batch: 000 | Loss: 262.28198\n",
      "Estimator: 007 | Epoch: 005 | Batch: 000 | Loss: 110.50620\n",
      "Estimator: 008 | Epoch: 005 | Batch: 000 | Loss: 69.39069\n",
      "Estimator: 009 | Epoch: 005 | Batch: 000 | Loss: 118.44151\n",
      "Estimator: 000 | Epoch: 006 | Batch: 000 | Loss: 154.54570\n",
      "Estimator: 001 | Epoch: 006 | Batch: 000 | Loss: 119.66482\n",
      "Estimator: 002 | Epoch: 006 | Batch: 000 | Loss: 33.14312\n",
      "Estimator: 003 | Epoch: 006 | Batch: 000 | Loss: 99.46955\n",
      "Estimator: 004 | Epoch: 006 | Batch: 000 | Loss: 76.60827\n",
      "Estimator: 005 | Epoch: 006 | Batch: 000 | Loss: 39.63484\n",
      "Estimator: 006 | Epoch: 006 | Batch: 000 | Loss: 8.28644\n",
      "Estimator: 007 | Epoch: 006 | Batch: 000 | Loss: 61.55563\n",
      "Estimator: 008 | Epoch: 006 | Batch: 000 | Loss: 58.31657\n",
      "Estimator: 009 | Epoch: 006 | Batch: 000 | Loss: 79.61411\n",
      "Estimator: 000 | Epoch: 007 | Batch: 000 | Loss: 93.42599\n",
      "Estimator: 001 | Epoch: 007 | Batch: 000 | Loss: 94.62114\n",
      "Estimator: 002 | Epoch: 007 | Batch: 000 | Loss: 118.07568\n",
      "Estimator: 003 | Epoch: 007 | Batch: 000 | Loss: 135.85721\n",
      "Estimator: 004 | Epoch: 007 | Batch: 000 | Loss: 56.08384\n",
      "Estimator: 005 | Epoch: 007 | Batch: 000 | Loss: 109.96152\n",
      "Estimator: 006 | Epoch: 007 | Batch: 000 | Loss: 231.17804\n",
      "Estimator: 007 | Epoch: 007 | Batch: 000 | Loss: 32.99321\n",
      "Estimator: 008 | Epoch: 007 | Batch: 000 | Loss: 100.45874\n",
      "Estimator: 009 | Epoch: 007 | Batch: 000 | Loss: 120.36988\n",
      "Estimator: 000 | Epoch: 008 | Batch: 000 | Loss: 176.84639\n",
      "Estimator: 001 | Epoch: 008 | Batch: 000 | Loss: 208.45451\n",
      "Estimator: 002 | Epoch: 008 | Batch: 000 | Loss: 84.75038\n",
      "Estimator: 003 | Epoch: 008 | Batch: 000 | Loss: 108.10909\n",
      "Estimator: 004 | Epoch: 008 | Batch: 000 | Loss: 26.41240\n",
      "Estimator: 005 | Epoch: 008 | Batch: 000 | Loss: 96.37271\n",
      "Estimator: 006 | Epoch: 008 | Batch: 000 | Loss: 74.80601\n",
      "Estimator: 007 | Epoch: 008 | Batch: 000 | Loss: 85.04401\n",
      "Estimator: 008 | Epoch: 008 | Batch: 000 | Loss: 98.37039\n",
      "Estimator: 009 | Epoch: 008 | Batch: 000 | Loss: 41.32453\n",
      "Estimator: 000 | Epoch: 009 | Batch: 000 | Loss: 110.34986\n",
      "Estimator: 001 | Epoch: 009 | Batch: 000 | Loss: 244.47328\n",
      "Estimator: 002 | Epoch: 009 | Batch: 000 | Loss: 144.42320\n",
      "Estimator: 003 | Epoch: 009 | Batch: 000 | Loss: 91.75726\n",
      "Estimator: 004 | Epoch: 009 | Batch: 000 | Loss: 59.88339\n",
      "Estimator: 005 | Epoch: 009 | Batch: 000 | Loss: 125.34705\n",
      "Estimator: 006 | Epoch: 009 | Batch: 000 | Loss: 32.08944\n",
      "Estimator: 007 | Epoch: 009 | Batch: 000 | Loss: 61.22128\n",
      "Estimator: 008 | Epoch: 009 | Batch: 000 | Loss: 45.95309\n",
      "Estimator: 009 | Epoch: 009 | Batch: 000 | Loss: 88.90208\n",
      "Estimator: 000 | Epoch: 000 | Batch: 000 | Loss: 655.88861\n",
      "Estimator: 001 | Epoch: 000 | Batch: 000 | Loss: 433.85504\n",
      "Estimator: 002 | Epoch: 000 | Batch: 000 | Loss: 674.07373\n",
      "Estimator: 003 | Epoch: 000 | Batch: 000 | Loss: 687.60760\n",
      "Estimator: 004 | Epoch: 000 | Batch: 000 | Loss: 431.13431\n",
      "Estimator: 005 | Epoch: 000 | Batch: 000 | Loss: 690.45728\n",
      "Estimator: 006 | Epoch: 000 | Batch: 000 | Loss: 503.81964\n",
      "Estimator: 007 | Epoch: 000 | Batch: 000 | Loss: 426.18262\n",
      "Estimator: 008 | Epoch: 000 | Batch: 000 | Loss: 441.81445\n",
      "Estimator: 009 | Epoch: 000 | Batch: 000 | Loss: 592.08667\n",
      "Estimator: 000 | Epoch: 001 | Batch: 000 | Loss: 399.47781\n",
      "Estimator: 001 | Epoch: 001 | Batch: 000 | Loss: 188.64481\n",
      "Estimator: 002 | Epoch: 001 | Batch: 000 | Loss: 247.93402\n",
      "Estimator: 003 | Epoch: 001 | Batch: 000 | Loss: 279.34167\n",
      "Estimator: 004 | Epoch: 001 | Batch: 000 | Loss: 172.20282\n",
      "Estimator: 005 | Epoch: 001 | Batch: 000 | Loss: 274.49670\n",
      "Estimator: 006 | Epoch: 001 | Batch: 000 | Loss: 278.47794\n",
      "Estimator: 007 | Epoch: 001 | Batch: 000 | Loss: 147.42305\n",
      "Estimator: 008 | Epoch: 001 | Batch: 000 | Loss: 213.35562\n",
      "Estimator: 009 | Epoch: 001 | Batch: 000 | Loss: 121.28084\n",
      "Estimator: 000 | Epoch: 002 | Batch: 000 | Loss: 89.38980\n",
      "Estimator: 001 | Epoch: 002 | Batch: 000 | Loss: 40.89177\n",
      "Estimator: 002 | Epoch: 002 | Batch: 000 | Loss: 112.59748\n",
      "Estimator: 003 | Epoch: 002 | Batch: 000 | Loss: 182.77748\n",
      "Estimator: 004 | Epoch: 002 | Batch: 000 | Loss: 96.02211\n",
      "Estimator: 005 | Epoch: 002 | Batch: 000 | Loss: 92.92744\n",
      "Estimator: 006 | Epoch: 002 | Batch: 000 | Loss: 50.41069\n",
      "Estimator: 007 | Epoch: 002 | Batch: 000 | Loss: 98.55201\n",
      "Estimator: 008 | Epoch: 002 | Batch: 000 | Loss: 80.15479\n",
      "Estimator: 009 | Epoch: 002 | Batch: 000 | Loss: 87.75462\n",
      "Estimator: 000 | Epoch: 003 | Batch: 000 | Loss: 99.03756\n",
      "Estimator: 001 | Epoch: 003 | Batch: 000 | Loss: 67.32404\n",
      "Estimator: 002 | Epoch: 003 | Batch: 000 | Loss: 43.45002\n",
      "Estimator: 003 | Epoch: 003 | Batch: 000 | Loss: 162.54045\n",
      "Estimator: 004 | Epoch: 003 | Batch: 000 | Loss: 57.44861\n",
      "Estimator: 005 | Epoch: 003 | Batch: 000 | Loss: 71.31198\n",
      "Estimator: 006 | Epoch: 003 | Batch: 000 | Loss: 48.56250\n",
      "Estimator: 007 | Epoch: 003 | Batch: 000 | Loss: 27.75138\n",
      "Estimator: 008 | Epoch: 003 | Batch: 000 | Loss: 128.26144\n",
      "Estimator: 009 | Epoch: 003 | Batch: 000 | Loss: 54.15859\n",
      "Estimator: 000 | Epoch: 004 | Batch: 000 | Loss: 89.69535\n",
      "Estimator: 001 | Epoch: 004 | Batch: 000 | Loss: 122.82956\n",
      "Estimator: 002 | Epoch: 004 | Batch: 000 | Loss: 31.03759\n",
      "Estimator: 003 | Epoch: 004 | Batch: 000 | Loss: 97.59209\n",
      "Estimator: 004 | Epoch: 004 | Batch: 000 | Loss: 84.72039\n",
      "Estimator: 005 | Epoch: 004 | Batch: 000 | Loss: 56.49178\n",
      "Estimator: 006 | Epoch: 004 | Batch: 000 | Loss: 8.15996\n",
      "Estimator: 007 | Epoch: 004 | Batch: 000 | Loss: 179.12903\n",
      "Estimator: 008 | Epoch: 004 | Batch: 000 | Loss: 124.93533\n",
      "Estimator: 009 | Epoch: 004 | Batch: 000 | Loss: 41.03111\n",
      "Estimator: 000 | Epoch: 005 | Batch: 000 | Loss: 78.10916\n",
      "Estimator: 001 | Epoch: 005 | Batch: 000 | Loss: 168.91733\n",
      "Estimator: 002 | Epoch: 005 | Batch: 000 | Loss: 53.18862\n",
      "Estimator: 003 | Epoch: 005 | Batch: 000 | Loss: 85.65743\n",
      "Estimator: 004 | Epoch: 005 | Batch: 000 | Loss: 123.00359\n",
      "Estimator: 005 | Epoch: 005 | Batch: 000 | Loss: 15.79283\n",
      "Estimator: 006 | Epoch: 005 | Batch: 000 | Loss: 58.45644\n",
      "Estimator: 007 | Epoch: 005 | Batch: 000 | Loss: 165.01849\n",
      "Estimator: 008 | Epoch: 005 | Batch: 000 | Loss: 43.66084\n",
      "Estimator: 009 | Epoch: 005 | Batch: 000 | Loss: 158.13490\n",
      "Estimator: 000 | Epoch: 006 | Batch: 000 | Loss: 54.26421\n",
      "Estimator: 001 | Epoch: 006 | Batch: 000 | Loss: 43.32402\n",
      "Estimator: 002 | Epoch: 006 | Batch: 000 | Loss: 109.57129\n",
      "Estimator: 003 | Epoch: 006 | Batch: 000 | Loss: 128.49623\n",
      "Estimator: 004 | Epoch: 006 | Batch: 000 | Loss: 163.30128\n",
      "Estimator: 005 | Epoch: 006 | Batch: 000 | Loss: 75.05538\n",
      "Estimator: 006 | Epoch: 006 | Batch: 000 | Loss: 38.96085\n",
      "Estimator: 007 | Epoch: 006 | Batch: 000 | Loss: 130.01810\n",
      "Estimator: 008 | Epoch: 006 | Batch: 000 | Loss: 36.92115\n",
      "Estimator: 009 | Epoch: 006 | Batch: 000 | Loss: 159.89105\n",
      "Estimator: 000 | Epoch: 007 | Batch: 000 | Loss: 16.19196\n",
      "Estimator: 001 | Epoch: 007 | Batch: 000 | Loss: 45.54027\n",
      "Estimator: 002 | Epoch: 007 | Batch: 000 | Loss: 27.65985\n",
      "Estimator: 003 | Epoch: 007 | Batch: 000 | Loss: 40.03835\n",
      "Estimator: 004 | Epoch: 007 | Batch: 000 | Loss: 189.49678\n",
      "Estimator: 005 | Epoch: 007 | Batch: 000 | Loss: 150.33430\n",
      "Estimator: 006 | Epoch: 007 | Batch: 000 | Loss: 31.52143\n",
      "Estimator: 007 | Epoch: 007 | Batch: 000 | Loss: 39.39916\n",
      "Estimator: 008 | Epoch: 007 | Batch: 000 | Loss: 105.02559\n",
      "Estimator: 009 | Epoch: 007 | Batch: 000 | Loss: 29.01740\n",
      "Estimator: 000 | Epoch: 008 | Batch: 000 | Loss: 27.98235\n",
      "Estimator: 001 | Epoch: 008 | Batch: 000 | Loss: 77.02473\n",
      "Estimator: 002 | Epoch: 008 | Batch: 000 | Loss: 101.55152\n",
      "Estimator: 003 | Epoch: 008 | Batch: 000 | Loss: 25.69006\n",
      "Estimator: 004 | Epoch: 008 | Batch: 000 | Loss: 43.72138\n",
      "Estimator: 005 | Epoch: 008 | Batch: 000 | Loss: 106.85975\n",
      "Estimator: 006 | Epoch: 008 | Batch: 000 | Loss: 62.31780\n",
      "Estimator: 007 | Epoch: 008 | Batch: 000 | Loss: 99.40746\n",
      "Estimator: 008 | Epoch: 008 | Batch: 000 | Loss: 163.53224\n",
      "Estimator: 009 | Epoch: 008 | Batch: 000 | Loss: 72.90096\n",
      "Estimator: 000 | Epoch: 009 | Batch: 000 | Loss: 37.73698\n",
      "Estimator: 001 | Epoch: 009 | Batch: 000 | Loss: 131.24570\n",
      "Estimator: 002 | Epoch: 009 | Batch: 000 | Loss: 42.73529\n",
      "Estimator: 003 | Epoch: 009 | Batch: 000 | Loss: 116.63683\n",
      "Estimator: 004 | Epoch: 009 | Batch: 000 | Loss: 38.24210\n",
      "Estimator: 005 | Epoch: 009 | Batch: 000 | Loss: 195.13983\n",
      "Estimator: 006 | Epoch: 009 | Batch: 000 | Loss: 77.24512\n",
      "Estimator: 007 | Epoch: 009 | Batch: 000 | Loss: 88.95359\n",
      "Estimator: 008 | Epoch: 009 | Batch: 000 | Loss: 84.04683\n",
      "Estimator: 009 | Epoch: 009 | Batch: 000 | Loss: 30.30892\n",
      "Estimator: 000 | Epoch: 000 | Batch: 000 | Loss: 362.97641\n",
      "Estimator: 001 | Epoch: 000 | Batch: 000 | Loss: 413.07867\n",
      "Estimator: 002 | Epoch: 000 | Batch: 000 | Loss: 528.11841\n",
      "Estimator: 003 | Epoch: 000 | Batch: 000 | Loss: 352.56421\n",
      "Estimator: 004 | Epoch: 000 | Batch: 000 | Loss: 441.21863\n",
      "Estimator: 005 | Epoch: 000 | Batch: 000 | Loss: 432.01141\n",
      "Estimator: 006 | Epoch: 000 | Batch: 000 | Loss: 429.83578\n",
      "Estimator: 007 | Epoch: 000 | Batch: 000 | Loss: 337.60965\n",
      "Estimator: 008 | Epoch: 000 | Batch: 000 | Loss: 357.25345\n",
      "Estimator: 009 | Epoch: 000 | Batch: 000 | Loss: 468.14178\n",
      "Estimator: 000 | Epoch: 001 | Batch: 000 | Loss: 156.13310\n",
      "Estimator: 001 | Epoch: 001 | Batch: 000 | Loss: 71.27221\n",
      "Estimator: 002 | Epoch: 001 | Batch: 000 | Loss: 116.10075\n",
      "Estimator: 003 | Epoch: 001 | Batch: 000 | Loss: 134.84195\n",
      "Estimator: 004 | Epoch: 001 | Batch: 000 | Loss: 200.58362\n",
      "Estimator: 005 | Epoch: 001 | Batch: 000 | Loss: 143.66429\n",
      "Estimator: 006 | Epoch: 001 | Batch: 000 | Loss: 113.89854\n",
      "Estimator: 007 | Epoch: 001 | Batch: 000 | Loss: 111.33877\n",
      "Estimator: 008 | Epoch: 001 | Batch: 000 | Loss: 304.68005\n",
      "Estimator: 009 | Epoch: 001 | Batch: 000 | Loss: 111.50788\n",
      "Estimator: 000 | Epoch: 002 | Batch: 000 | Loss: 57.43023\n",
      "Estimator: 001 | Epoch: 002 | Batch: 000 | Loss: 35.28875\n",
      "Estimator: 002 | Epoch: 002 | Batch: 000 | Loss: 62.93157\n",
      "Estimator: 003 | Epoch: 002 | Batch: 000 | Loss: 61.63730\n",
      "Estimator: 004 | Epoch: 002 | Batch: 000 | Loss: 39.45560\n",
      "Estimator: 005 | Epoch: 002 | Batch: 000 | Loss: 178.36459\n",
      "Estimator: 006 | Epoch: 002 | Batch: 000 | Loss: 150.11903\n",
      "Estimator: 007 | Epoch: 002 | Batch: 000 | Loss: 61.07394\n",
      "Estimator: 008 | Epoch: 002 | Batch: 000 | Loss: 136.61075\n",
      "Estimator: 009 | Epoch: 002 | Batch: 000 | Loss: 222.56145\n",
      "Estimator: 000 | Epoch: 003 | Batch: 000 | Loss: 108.87293\n",
      "Estimator: 001 | Epoch: 003 | Batch: 000 | Loss: 106.22141\n",
      "Estimator: 002 | Epoch: 003 | Batch: 000 | Loss: 59.46272\n",
      "Estimator: 003 | Epoch: 003 | Batch: 000 | Loss: 52.82346\n",
      "Estimator: 004 | Epoch: 003 | Batch: 000 | Loss: 68.13721\n",
      "Estimator: 005 | Epoch: 003 | Batch: 000 | Loss: 41.70947\n",
      "Estimator: 006 | Epoch: 003 | Batch: 000 | Loss: 22.24233\n",
      "Estimator: 007 | Epoch: 003 | Batch: 000 | Loss: 42.90077\n",
      "Estimator: 008 | Epoch: 003 | Batch: 000 | Loss: 129.88725\n",
      "Estimator: 009 | Epoch: 003 | Batch: 000 | Loss: 55.52021\n",
      "Estimator: 000 | Epoch: 004 | Batch: 000 | Loss: 40.77652\n",
      "Estimator: 001 | Epoch: 004 | Batch: 000 | Loss: 112.26426\n",
      "Estimator: 002 | Epoch: 004 | Batch: 000 | Loss: 27.87027\n",
      "Estimator: 003 | Epoch: 004 | Batch: 000 | Loss: 48.81165\n",
      "Estimator: 004 | Epoch: 004 | Batch: 000 | Loss: 26.71304\n",
      "Estimator: 005 | Epoch: 004 | Batch: 000 | Loss: 47.90660\n",
      "Estimator: 006 | Epoch: 004 | Batch: 000 | Loss: 28.66456\n",
      "Estimator: 007 | Epoch: 004 | Batch: 000 | Loss: 32.21216\n",
      "Estimator: 008 | Epoch: 004 | Batch: 000 | Loss: 65.48608\n",
      "Estimator: 009 | Epoch: 004 | Batch: 000 | Loss: 102.62350\n",
      "Estimator: 000 | Epoch: 005 | Batch: 000 | Loss: 142.33888\n",
      "Estimator: 001 | Epoch: 005 | Batch: 000 | Loss: 24.89050\n",
      "Estimator: 002 | Epoch: 005 | Batch: 000 | Loss: 78.59072\n",
      "Estimator: 003 | Epoch: 005 | Batch: 000 | Loss: 114.45013\n",
      "Estimator: 004 | Epoch: 005 | Batch: 000 | Loss: 135.47060\n",
      "Estimator: 005 | Epoch: 005 | Batch: 000 | Loss: 35.26852\n",
      "Estimator: 006 | Epoch: 005 | Batch: 000 | Loss: 39.99913\n",
      "Estimator: 007 | Epoch: 005 | Batch: 000 | Loss: 91.45319\n",
      "Estimator: 008 | Epoch: 005 | Batch: 000 | Loss: 121.38617\n",
      "Estimator: 009 | Epoch: 005 | Batch: 000 | Loss: 20.81186\n",
      "Estimator: 000 | Epoch: 006 | Batch: 000 | Loss: 195.54575\n",
      "Estimator: 001 | Epoch: 006 | Batch: 000 | Loss: 133.70126\n",
      "Estimator: 002 | Epoch: 006 | Batch: 000 | Loss: 109.62691\n",
      "Estimator: 003 | Epoch: 006 | Batch: 000 | Loss: 37.62413\n",
      "Estimator: 004 | Epoch: 006 | Batch: 000 | Loss: 32.69658\n",
      "Estimator: 005 | Epoch: 006 | Batch: 000 | Loss: 142.44997\n",
      "Estimator: 006 | Epoch: 006 | Batch: 000 | Loss: 175.49939\n",
      "Estimator: 007 | Epoch: 006 | Batch: 000 | Loss: 51.00305\n",
      "Estimator: 008 | Epoch: 006 | Batch: 000 | Loss: 87.64482\n",
      "Estimator: 009 | Epoch: 006 | Batch: 000 | Loss: 124.06082\n",
      "Estimator: 000 | Epoch: 007 | Batch: 000 | Loss: 45.14317\n",
      "Estimator: 001 | Epoch: 007 | Batch: 000 | Loss: 54.77616\n",
      "Estimator: 002 | Epoch: 007 | Batch: 000 | Loss: 129.13220\n",
      "Estimator: 003 | Epoch: 007 | Batch: 000 | Loss: 69.86085\n",
      "Estimator: 004 | Epoch: 007 | Batch: 000 | Loss: 47.10130\n",
      "Estimator: 005 | Epoch: 007 | Batch: 000 | Loss: 150.15016\n",
      "Estimator: 006 | Epoch: 007 | Batch: 000 | Loss: 85.84776\n",
      "Estimator: 007 | Epoch: 007 | Batch: 000 | Loss: 13.46936\n",
      "Estimator: 008 | Epoch: 007 | Batch: 000 | Loss: 189.63393\n",
      "Estimator: 009 | Epoch: 007 | Batch: 000 | Loss: 64.08327\n",
      "Estimator: 000 | Epoch: 008 | Batch: 000 | Loss: 42.13285\n",
      "Estimator: 001 | Epoch: 008 | Batch: 000 | Loss: 57.19910\n",
      "Estimator: 002 | Epoch: 008 | Batch: 000 | Loss: 117.47462\n",
      "Estimator: 003 | Epoch: 008 | Batch: 000 | Loss: 45.66689\n",
      "Estimator: 004 | Epoch: 008 | Batch: 000 | Loss: 81.48116\n",
      "Estimator: 005 | Epoch: 008 | Batch: 000 | Loss: 25.73149\n",
      "Estimator: 006 | Epoch: 008 | Batch: 000 | Loss: 97.36018\n",
      "Estimator: 007 | Epoch: 008 | Batch: 000 | Loss: 113.31258\n",
      "Estimator: 008 | Epoch: 008 | Batch: 000 | Loss: 29.31687\n",
      "Estimator: 009 | Epoch: 008 | Batch: 000 | Loss: 150.78717\n",
      "Estimator: 000 | Epoch: 009 | Batch: 000 | Loss: 34.93911\n",
      "Estimator: 001 | Epoch: 009 | Batch: 000 | Loss: 57.92937\n",
      "Estimator: 002 | Epoch: 009 | Batch: 000 | Loss: 55.91811\n",
      "Estimator: 003 | Epoch: 009 | Batch: 000 | Loss: 11.57419\n",
      "Estimator: 004 | Epoch: 009 | Batch: 000 | Loss: 68.53705\n",
      "Estimator: 005 | Epoch: 009 | Batch: 000 | Loss: 128.30998\n",
      "Estimator: 006 | Epoch: 009 | Batch: 000 | Loss: 150.25372\n",
      "Estimator: 007 | Epoch: 009 | Batch: 000 | Loss: 62.41103\n",
      "Estimator: 008 | Epoch: 009 | Batch: 000 | Loss: 53.03269\n",
      "Estimator: 009 | Epoch: 009 | Batch: 000 | Loss: 87.92886\n",
      "Estimator: 000 | Epoch: 000 | Batch: 000 | Loss: 460.03561\n",
      "Estimator: 001 | Epoch: 000 | Batch: 000 | Loss: 461.57840\n",
      "Estimator: 002 | Epoch: 000 | Batch: 000 | Loss: 387.38760\n",
      "Estimator: 003 | Epoch: 000 | Batch: 000 | Loss: 489.29141\n",
      "Estimator: 004 | Epoch: 000 | Batch: 000 | Loss: 662.35529\n",
      "Estimator: 005 | Epoch: 000 | Batch: 000 | Loss: 769.14752\n",
      "Estimator: 006 | Epoch: 000 | Batch: 000 | Loss: 875.63843\n",
      "Estimator: 007 | Epoch: 000 | Batch: 000 | Loss: 489.96326\n",
      "Estimator: 008 | Epoch: 000 | Batch: 000 | Loss: 286.61972\n",
      "Estimator: 009 | Epoch: 000 | Batch: 000 | Loss: 790.79266\n",
      "Estimator: 000 | Epoch: 001 | Batch: 000 | Loss: 101.36494\n",
      "Estimator: 001 | Epoch: 001 | Batch: 000 | Loss: 281.89017\n",
      "Estimator: 002 | Epoch: 001 | Batch: 000 | Loss: 447.71710\n",
      "Estimator: 003 | Epoch: 001 | Batch: 000 | Loss: 378.16031\n",
      "Estimator: 004 | Epoch: 001 | Batch: 000 | Loss: 319.08438\n",
      "Estimator: 005 | Epoch: 001 | Batch: 000 | Loss: 169.97633\n",
      "Estimator: 006 | Epoch: 001 | Batch: 000 | Loss: 164.18910\n",
      "Estimator: 007 | Epoch: 001 | Batch: 000 | Loss: 257.02054\n",
      "Estimator: 008 | Epoch: 001 | Batch: 000 | Loss: 250.89781\n",
      "Estimator: 009 | Epoch: 001 | Batch: 000 | Loss: 133.44693\n",
      "Estimator: 000 | Epoch: 002 | Batch: 000 | Loss: 154.22632\n",
      "Estimator: 001 | Epoch: 002 | Batch: 000 | Loss: 89.52396\n",
      "Estimator: 002 | Epoch: 002 | Batch: 000 | Loss: 53.99009\n",
      "Estimator: 003 | Epoch: 002 | Batch: 000 | Loss: 128.92737\n",
      "Estimator: 004 | Epoch: 002 | Batch: 000 | Loss: 142.28540\n",
      "Estimator: 005 | Epoch: 002 | Batch: 000 | Loss: 125.68271\n",
      "Estimator: 006 | Epoch: 002 | Batch: 000 | Loss: 53.06829\n",
      "Estimator: 007 | Epoch: 002 | Batch: 000 | Loss: 132.19106\n",
      "Estimator: 008 | Epoch: 002 | Batch: 000 | Loss: 112.84965\n",
      "Estimator: 009 | Epoch: 002 | Batch: 000 | Loss: 20.24762\n",
      "Estimator: 000 | Epoch: 003 | Batch: 000 | Loss: 151.98598\n",
      "Estimator: 001 | Epoch: 003 | Batch: 000 | Loss: 57.67062\n",
      "Estimator: 002 | Epoch: 003 | Batch: 000 | Loss: 49.70166\n",
      "Estimator: 003 | Epoch: 003 | Batch: 000 | Loss: 84.32114\n",
      "Estimator: 004 | Epoch: 003 | Batch: 000 | Loss: 56.54406\n",
      "Estimator: 005 | Epoch: 003 | Batch: 000 | Loss: 114.18184\n",
      "Estimator: 006 | Epoch: 003 | Batch: 000 | Loss: 107.23786\n",
      "Estimator: 007 | Epoch: 003 | Batch: 000 | Loss: 18.14593\n",
      "Estimator: 008 | Epoch: 003 | Batch: 000 | Loss: 135.88103\n",
      "Estimator: 009 | Epoch: 003 | Batch: 000 | Loss: 29.16192\n",
      "Estimator: 000 | Epoch: 004 | Batch: 000 | Loss: 164.90031\n",
      "Estimator: 001 | Epoch: 004 | Batch: 000 | Loss: 87.07342\n",
      "Estimator: 002 | Epoch: 004 | Batch: 000 | Loss: 107.07805\n",
      "Estimator: 003 | Epoch: 004 | Batch: 000 | Loss: 113.35739\n",
      "Estimator: 004 | Epoch: 004 | Batch: 000 | Loss: 99.04356\n",
      "Estimator: 005 | Epoch: 004 | Batch: 000 | Loss: 46.88649\n",
      "Estimator: 006 | Epoch: 004 | Batch: 000 | Loss: 72.45716\n",
      "Estimator: 007 | Epoch: 004 | Batch: 000 | Loss: 151.65643\n",
      "Estimator: 008 | Epoch: 004 | Batch: 000 | Loss: 47.52844\n",
      "Estimator: 009 | Epoch: 004 | Batch: 000 | Loss: 55.85634\n",
      "Estimator: 000 | Epoch: 005 | Batch: 000 | Loss: 41.60020\n",
      "Estimator: 001 | Epoch: 005 | Batch: 000 | Loss: 183.74239\n",
      "Estimator: 002 | Epoch: 005 | Batch: 000 | Loss: 43.87394\n",
      "Estimator: 003 | Epoch: 005 | Batch: 000 | Loss: 100.56651\n",
      "Estimator: 004 | Epoch: 005 | Batch: 000 | Loss: 125.96526\n",
      "Estimator: 005 | Epoch: 005 | Batch: 000 | Loss: 95.77510\n",
      "Estimator: 006 | Epoch: 005 | Batch: 000 | Loss: 41.53514\n",
      "Estimator: 007 | Epoch: 005 | Batch: 000 | Loss: 113.01056\n",
      "Estimator: 008 | Epoch: 005 | Batch: 000 | Loss: 63.34706\n",
      "Estimator: 009 | Epoch: 005 | Batch: 000 | Loss: 46.81182\n",
      "Estimator: 000 | Epoch: 006 | Batch: 000 | Loss: 32.06839\n",
      "Estimator: 001 | Epoch: 006 | Batch: 000 | Loss: 46.52098\n",
      "Estimator: 002 | Epoch: 006 | Batch: 000 | Loss: 40.83058\n",
      "Estimator: 003 | Epoch: 006 | Batch: 000 | Loss: 61.50181\n",
      "Estimator: 004 | Epoch: 006 | Batch: 000 | Loss: 232.51318\n",
      "Estimator: 005 | Epoch: 006 | Batch: 000 | Loss: 99.67606\n",
      "Estimator: 006 | Epoch: 006 | Batch: 000 | Loss: 113.45193\n",
      "Estimator: 007 | Epoch: 006 | Batch: 000 | Loss: 173.50166\n",
      "Estimator: 008 | Epoch: 006 | Batch: 000 | Loss: 27.97440\n",
      "Estimator: 009 | Epoch: 006 | Batch: 000 | Loss: 58.63695\n",
      "Estimator: 000 | Epoch: 007 | Batch: 000 | Loss: 80.24673\n",
      "Estimator: 001 | Epoch: 007 | Batch: 000 | Loss: 74.04800\n",
      "Estimator: 002 | Epoch: 007 | Batch: 000 | Loss: 101.86437\n",
      "Estimator: 003 | Epoch: 007 | Batch: 000 | Loss: 112.65276\n",
      "Estimator: 004 | Epoch: 007 | Batch: 000 | Loss: 62.61613\n",
      "Estimator: 005 | Epoch: 007 | Batch: 000 | Loss: 151.46803\n",
      "Estimator: 006 | Epoch: 007 | Batch: 000 | Loss: 42.43872\n",
      "Estimator: 007 | Epoch: 007 | Batch: 000 | Loss: 187.82959\n",
      "Estimator: 008 | Epoch: 007 | Batch: 000 | Loss: 96.43221\n",
      "Estimator: 009 | Epoch: 007 | Batch: 000 | Loss: 59.49332\n",
      "Estimator: 000 | Epoch: 008 | Batch: 000 | Loss: 166.31039\n",
      "Estimator: 001 | Epoch: 008 | Batch: 000 | Loss: 107.09124\n",
      "Estimator: 002 | Epoch: 008 | Batch: 000 | Loss: 63.29414\n",
      "Estimator: 003 | Epoch: 008 | Batch: 000 | Loss: 253.00449\n",
      "Estimator: 004 | Epoch: 008 | Batch: 000 | Loss: 109.00948\n",
      "Estimator: 005 | Epoch: 008 | Batch: 000 | Loss: 42.15845\n",
      "Estimator: 006 | Epoch: 008 | Batch: 000 | Loss: 127.51241\n",
      "Estimator: 007 | Epoch: 008 | Batch: 000 | Loss: 21.54238\n",
      "Estimator: 008 | Epoch: 008 | Batch: 000 | Loss: 68.81788\n",
      "Estimator: 009 | Epoch: 008 | Batch: 000 | Loss: 92.18494\n",
      "Estimator: 000 | Epoch: 009 | Batch: 000 | Loss: 129.80386\n",
      "Estimator: 001 | Epoch: 009 | Batch: 000 | Loss: 130.70695\n",
      "Estimator: 002 | Epoch: 009 | Batch: 000 | Loss: 212.10330\n",
      "Estimator: 003 | Epoch: 009 | Batch: 000 | Loss: 84.55911\n",
      "Estimator: 004 | Epoch: 009 | Batch: 000 | Loss: 96.73795\n",
      "Estimator: 005 | Epoch: 009 | Batch: 000 | Loss: 45.18566\n",
      "Estimator: 006 | Epoch: 009 | Batch: 000 | Loss: 76.97635\n",
      "Estimator: 007 | Epoch: 009 | Batch: 000 | Loss: 61.51091\n",
      "Estimator: 008 | Epoch: 009 | Batch: 000 | Loss: 46.65325\n",
      "Estimator: 009 | Epoch: 009 | Batch: 000 | Loss: 133.44296\n",
      "Estimator: 000 | Epoch: 000 | Batch: 000 | Loss: 633.12823\n",
      "Estimator: 001 | Epoch: 000 | Batch: 000 | Loss: 591.97772\n",
      "Estimator: 002 | Epoch: 000 | Batch: 000 | Loss: 735.70081\n",
      "Estimator: 003 | Epoch: 000 | Batch: 000 | Loss: 466.14587\n",
      "Estimator: 004 | Epoch: 000 | Batch: 000 | Loss: 854.54956\n",
      "Estimator: 005 | Epoch: 000 | Batch: 000 | Loss: 730.28888\n",
      "Estimator: 006 | Epoch: 000 | Batch: 000 | Loss: 465.82980\n",
      "Estimator: 007 | Epoch: 000 | Batch: 000 | Loss: 584.29730\n",
      "Estimator: 008 | Epoch: 000 | Batch: 000 | Loss: 717.95349\n",
      "Estimator: 009 | Epoch: 000 | Batch: 000 | Loss: 642.11523\n",
      "Estimator: 000 | Epoch: 001 | Batch: 000 | Loss: 293.23248\n",
      "Estimator: 001 | Epoch: 001 | Batch: 000 | Loss: 547.27057\n",
      "Estimator: 002 | Epoch: 001 | Batch: 000 | Loss: 171.86115\n",
      "Estimator: 003 | Epoch: 001 | Batch: 000 | Loss: 212.86281\n",
      "Estimator: 004 | Epoch: 001 | Batch: 000 | Loss: 183.19649\n",
      "Estimator: 005 | Epoch: 001 | Batch: 000 | Loss: 179.55676\n",
      "Estimator: 006 | Epoch: 001 | Batch: 000 | Loss: 317.44681\n",
      "Estimator: 007 | Epoch: 001 | Batch: 000 | Loss: 189.29958\n",
      "Estimator: 008 | Epoch: 001 | Batch: 000 | Loss: 192.46840\n",
      "Estimator: 009 | Epoch: 001 | Batch: 000 | Loss: 247.79417\n",
      "Estimator: 000 | Epoch: 002 | Batch: 000 | Loss: 47.59914\n",
      "Estimator: 001 | Epoch: 002 | Batch: 000 | Loss: 170.23277\n",
      "Estimator: 002 | Epoch: 002 | Batch: 000 | Loss: 84.23380\n",
      "Estimator: 003 | Epoch: 002 | Batch: 000 | Loss: 62.98834\n",
      "Estimator: 004 | Epoch: 002 | Batch: 000 | Loss: 101.26014\n",
      "Estimator: 005 | Epoch: 002 | Batch: 000 | Loss: 132.00864\n",
      "Estimator: 006 | Epoch: 002 | Batch: 000 | Loss: 26.91745\n",
      "Estimator: 007 | Epoch: 002 | Batch: 000 | Loss: 70.59267\n",
      "Estimator: 008 | Epoch: 002 | Batch: 000 | Loss: 49.35103\n",
      "Estimator: 009 | Epoch: 002 | Batch: 000 | Loss: 76.78267\n",
      "Estimator: 000 | Epoch: 003 | Batch: 000 | Loss: 57.79201\n",
      "Estimator: 001 | Epoch: 003 | Batch: 000 | Loss: 39.09267\n",
      "Estimator: 002 | Epoch: 003 | Batch: 000 | Loss: 31.07136\n",
      "Estimator: 003 | Epoch: 003 | Batch: 000 | Loss: 171.95004\n",
      "Estimator: 004 | Epoch: 003 | Batch: 000 | Loss: 131.51057\n",
      "Estimator: 005 | Epoch: 003 | Batch: 000 | Loss: 50.93884\n",
      "Estimator: 006 | Epoch: 003 | Batch: 000 | Loss: 158.06532\n",
      "Estimator: 007 | Epoch: 003 | Batch: 000 | Loss: 31.53970\n",
      "Estimator: 008 | Epoch: 003 | Batch: 000 | Loss: 40.52443\n",
      "Estimator: 009 | Epoch: 003 | Batch: 000 | Loss: 221.58253\n",
      "Estimator: 000 | Epoch: 004 | Batch: 000 | Loss: 77.90822\n",
      "Estimator: 001 | Epoch: 004 | Batch: 000 | Loss: 51.08113\n",
      "Estimator: 002 | Epoch: 004 | Batch: 000 | Loss: 55.83382\n",
      "Estimator: 003 | Epoch: 004 | Batch: 000 | Loss: 109.68015\n",
      "Estimator: 004 | Epoch: 004 | Batch: 000 | Loss: 90.74895\n",
      "Estimator: 005 | Epoch: 004 | Batch: 000 | Loss: 70.07961\n",
      "Estimator: 006 | Epoch: 004 | Batch: 000 | Loss: 184.14594\n",
      "Estimator: 007 | Epoch: 004 | Batch: 000 | Loss: 70.38927\n",
      "Estimator: 008 | Epoch: 004 | Batch: 000 | Loss: 59.24060\n",
      "Estimator: 009 | Epoch: 004 | Batch: 000 | Loss: 18.06568\n",
      "Estimator: 000 | Epoch: 005 | Batch: 000 | Loss: 26.78623\n",
      "Estimator: 001 | Epoch: 005 | Batch: 000 | Loss: 107.54848\n",
      "Estimator: 002 | Epoch: 005 | Batch: 000 | Loss: 11.18219\n",
      "Estimator: 003 | Epoch: 005 | Batch: 000 | Loss: 57.45922\n",
      "Estimator: 004 | Epoch: 005 | Batch: 000 | Loss: 66.76395\n",
      "Estimator: 005 | Epoch: 005 | Batch: 000 | Loss: 53.57102\n",
      "Estimator: 006 | Epoch: 005 | Batch: 000 | Loss: 100.93122\n",
      "Estimator: 007 | Epoch: 005 | Batch: 000 | Loss: 42.53246\n",
      "Estimator: 008 | Epoch: 005 | Batch: 000 | Loss: 33.25098\n",
      "Estimator: 009 | Epoch: 005 | Batch: 000 | Loss: 68.77298\n",
      "Estimator: 000 | Epoch: 006 | Batch: 000 | Loss: 117.29234\n",
      "Estimator: 001 | Epoch: 006 | Batch: 000 | Loss: 127.24065\n",
      "Estimator: 002 | Epoch: 006 | Batch: 000 | Loss: 39.36067\n",
      "Estimator: 003 | Epoch: 006 | Batch: 000 | Loss: 19.67039\n",
      "Estimator: 004 | Epoch: 006 | Batch: 000 | Loss: 66.17732\n",
      "Estimator: 005 | Epoch: 006 | Batch: 000 | Loss: 31.70319\n",
      "Estimator: 006 | Epoch: 006 | Batch: 000 | Loss: 146.75362\n",
      "Estimator: 007 | Epoch: 006 | Batch: 000 | Loss: 47.90514\n",
      "Estimator: 008 | Epoch: 006 | Batch: 000 | Loss: 22.91331\n",
      "Estimator: 009 | Epoch: 006 | Batch: 000 | Loss: 63.62617\n",
      "Estimator: 000 | Epoch: 007 | Batch: 000 | Loss: 73.22619\n",
      "Estimator: 001 | Epoch: 007 | Batch: 000 | Loss: 63.44228\n",
      "Estimator: 002 | Epoch: 007 | Batch: 000 | Loss: 99.50688\n",
      "Estimator: 003 | Epoch: 007 | Batch: 000 | Loss: 188.98705\n",
      "Estimator: 004 | Epoch: 007 | Batch: 000 | Loss: 41.47540\n",
      "Estimator: 005 | Epoch: 007 | Batch: 000 | Loss: 46.43586\n",
      "Estimator: 006 | Epoch: 007 | Batch: 000 | Loss: 54.32506\n",
      "Estimator: 007 | Epoch: 007 | Batch: 000 | Loss: 130.40929\n",
      "Estimator: 008 | Epoch: 007 | Batch: 000 | Loss: 48.21869\n",
      "Estimator: 009 | Epoch: 007 | Batch: 000 | Loss: 96.86635\n",
      "Estimator: 000 | Epoch: 008 | Batch: 000 | Loss: 88.81997\n",
      "Estimator: 001 | Epoch: 008 | Batch: 000 | Loss: 90.15924\n",
      "Estimator: 002 | Epoch: 008 | Batch: 000 | Loss: 40.91479\n",
      "Estimator: 003 | Epoch: 008 | Batch: 000 | Loss: 52.04247\n",
      "Estimator: 004 | Epoch: 008 | Batch: 000 | Loss: 35.25963\n",
      "Estimator: 005 | Epoch: 008 | Batch: 000 | Loss: 112.97691\n",
      "Estimator: 006 | Epoch: 008 | Batch: 000 | Loss: 63.45551\n",
      "Estimator: 007 | Epoch: 008 | Batch: 000 | Loss: 34.26460\n",
      "Estimator: 008 | Epoch: 008 | Batch: 000 | Loss: 219.78307\n",
      "Estimator: 009 | Epoch: 008 | Batch: 000 | Loss: 67.60757\n",
      "Estimator: 000 | Epoch: 009 | Batch: 000 | Loss: 68.45484\n",
      "Estimator: 001 | Epoch: 009 | Batch: 000 | Loss: 61.60154\n",
      "Estimator: 002 | Epoch: 009 | Batch: 000 | Loss: 86.33959\n",
      "Estimator: 003 | Epoch: 009 | Batch: 000 | Loss: 141.87154\n",
      "Estimator: 004 | Epoch: 009 | Batch: 000 | Loss: 123.00283\n",
      "Estimator: 005 | Epoch: 009 | Batch: 000 | Loss: 130.98111\n",
      "Estimator: 006 | Epoch: 009 | Batch: 000 | Loss: 138.88396\n",
      "Estimator: 007 | Epoch: 009 | Batch: 000 | Loss: 102.73585\n",
      "Estimator: 008 | Epoch: 009 | Batch: 000 | Loss: 96.49963\n",
      "Estimator: 009 | Epoch: 009 | Batch: 000 | Loss: 33.84228\n",
      "Estimator: 000 | Epoch: 000 | Batch: 000 | Loss: 572.65082\n",
      "Estimator: 001 | Epoch: 000 | Batch: 000 | Loss: 706.19531\n",
      "Estimator: 002 | Epoch: 000 | Batch: 000 | Loss: 482.72757\n",
      "Estimator: 003 | Epoch: 000 | Batch: 000 | Loss: 348.14468\n",
      "Estimator: 004 | Epoch: 000 | Batch: 000 | Loss: 654.22968\n",
      "Estimator: 005 | Epoch: 000 | Batch: 000 | Loss: 747.78461\n",
      "Estimator: 006 | Epoch: 000 | Batch: 000 | Loss: 559.34607\n",
      "Estimator: 007 | Epoch: 000 | Batch: 000 | Loss: 550.92877\n",
      "Estimator: 008 | Epoch: 000 | Batch: 000 | Loss: 463.06171\n",
      "Estimator: 009 | Epoch: 000 | Batch: 000 | Loss: 626.26398\n",
      "Estimator: 000 | Epoch: 001 | Batch: 000 | Loss: 293.14938\n",
      "Estimator: 001 | Epoch: 001 | Batch: 000 | Loss: 145.40430\n",
      "Estimator: 002 | Epoch: 001 | Batch: 000 | Loss: 527.95056\n",
      "Estimator: 003 | Epoch: 001 | Batch: 000 | Loss: 168.24814\n",
      "Estimator: 004 | Epoch: 001 | Batch: 000 | Loss: 345.88550\n",
      "Estimator: 005 | Epoch: 001 | Batch: 000 | Loss: 201.84676\n",
      "Estimator: 006 | Epoch: 001 | Batch: 000 | Loss: 419.46945\n",
      "Estimator: 007 | Epoch: 001 | Batch: 000 | Loss: 167.36110\n",
      "Estimator: 008 | Epoch: 001 | Batch: 000 | Loss: 316.24564\n",
      "Estimator: 009 | Epoch: 001 | Batch: 000 | Loss: 152.88274\n",
      "Estimator: 000 | Epoch: 002 | Batch: 000 | Loss: 32.85944\n",
      "Estimator: 001 | Epoch: 002 | Batch: 000 | Loss: 98.34849\n",
      "Estimator: 002 | Epoch: 002 | Batch: 000 | Loss: 53.82914\n",
      "Estimator: 003 | Epoch: 002 | Batch: 000 | Loss: 87.85693\n",
      "Estimator: 004 | Epoch: 002 | Batch: 000 | Loss: 196.10294\n",
      "Estimator: 005 | Epoch: 002 | Batch: 000 | Loss: 58.17375\n",
      "Estimator: 006 | Epoch: 002 | Batch: 000 | Loss: 37.19869\n",
      "Estimator: 007 | Epoch: 002 | Batch: 000 | Loss: 112.88619\n",
      "Estimator: 008 | Epoch: 002 | Batch: 000 | Loss: 122.75806\n",
      "Estimator: 009 | Epoch: 002 | Batch: 000 | Loss: 58.70250\n",
      "Estimator: 000 | Epoch: 003 | Batch: 000 | Loss: 32.42068\n",
      "Estimator: 001 | Epoch: 003 | Batch: 000 | Loss: 109.97833\n",
      "Estimator: 002 | Epoch: 003 | Batch: 000 | Loss: 79.09044\n",
      "Estimator: 003 | Epoch: 003 | Batch: 000 | Loss: 62.26065\n",
      "Estimator: 004 | Epoch: 003 | Batch: 000 | Loss: 58.13915\n",
      "Estimator: 005 | Epoch: 003 | Batch: 000 | Loss: 71.76749\n",
      "Estimator: 006 | Epoch: 003 | Batch: 000 | Loss: 73.37105\n",
      "Estimator: 007 | Epoch: 003 | Batch: 000 | Loss: 34.20566\n",
      "Estimator: 008 | Epoch: 003 | Batch: 000 | Loss: 105.71352\n",
      "Estimator: 009 | Epoch: 003 | Batch: 000 | Loss: 124.46085\n",
      "Estimator: 000 | Epoch: 004 | Batch: 000 | Loss: 107.51099\n",
      "Estimator: 001 | Epoch: 004 | Batch: 000 | Loss: 59.35466\n",
      "Estimator: 002 | Epoch: 004 | Batch: 000 | Loss: 112.17169\n",
      "Estimator: 003 | Epoch: 004 | Batch: 000 | Loss: 97.95198\n",
      "Estimator: 004 | Epoch: 004 | Batch: 000 | Loss: 53.43457\n",
      "Estimator: 005 | Epoch: 004 | Batch: 000 | Loss: 119.39871\n",
      "Estimator: 006 | Epoch: 004 | Batch: 000 | Loss: 138.54279\n",
      "Estimator: 007 | Epoch: 004 | Batch: 000 | Loss: 40.53479\n",
      "Estimator: 008 | Epoch: 004 | Batch: 000 | Loss: 116.08458\n",
      "Estimator: 009 | Epoch: 004 | Batch: 000 | Loss: 50.74737\n",
      "Estimator: 000 | Epoch: 005 | Batch: 000 | Loss: 73.09402\n",
      "Estimator: 001 | Epoch: 005 | Batch: 000 | Loss: 40.80655\n",
      "Estimator: 002 | Epoch: 005 | Batch: 000 | Loss: 31.00625\n",
      "Estimator: 003 | Epoch: 005 | Batch: 000 | Loss: 216.38673\n",
      "Estimator: 004 | Epoch: 005 | Batch: 000 | Loss: 134.84593\n",
      "Estimator: 005 | Epoch: 005 | Batch: 000 | Loss: 153.38589\n",
      "Estimator: 006 | Epoch: 005 | Batch: 000 | Loss: 57.59130\n",
      "Estimator: 007 | Epoch: 005 | Batch: 000 | Loss: 59.83628\n",
      "Estimator: 008 | Epoch: 005 | Batch: 000 | Loss: 136.70256\n",
      "Estimator: 009 | Epoch: 005 | Batch: 000 | Loss: 117.84671\n",
      "Estimator: 000 | Epoch: 006 | Batch: 000 | Loss: 90.28272\n",
      "Estimator: 001 | Epoch: 006 | Batch: 000 | Loss: 75.19530\n",
      "Estimator: 002 | Epoch: 006 | Batch: 000 | Loss: 246.72914\n",
      "Estimator: 003 | Epoch: 006 | Batch: 000 | Loss: 183.89178\n",
      "Estimator: 004 | Epoch: 006 | Batch: 000 | Loss: 62.29859\n",
      "Estimator: 005 | Epoch: 006 | Batch: 000 | Loss: 196.52992\n",
      "Estimator: 006 | Epoch: 006 | Batch: 000 | Loss: 129.45332\n",
      "Estimator: 007 | Epoch: 006 | Batch: 000 | Loss: 89.38226\n",
      "Estimator: 008 | Epoch: 006 | Batch: 000 | Loss: 143.18958\n",
      "Estimator: 009 | Epoch: 006 | Batch: 000 | Loss: 94.89451\n",
      "Estimator: 000 | Epoch: 007 | Batch: 000 | Loss: 53.09309\n",
      "Estimator: 001 | Epoch: 007 | Batch: 000 | Loss: 53.97768\n",
      "Estimator: 002 | Epoch: 007 | Batch: 000 | Loss: 27.25607\n",
      "Estimator: 003 | Epoch: 007 | Batch: 000 | Loss: 88.22240\n",
      "Estimator: 004 | Epoch: 007 | Batch: 000 | Loss: 118.98615\n",
      "Estimator: 005 | Epoch: 007 | Batch: 000 | Loss: 135.17145\n",
      "Estimator: 006 | Epoch: 007 | Batch: 000 | Loss: 51.20808\n",
      "Estimator: 007 | Epoch: 007 | Batch: 000 | Loss: 132.11472\n",
      "Estimator: 008 | Epoch: 007 | Batch: 000 | Loss: 40.99625\n",
      "Estimator: 009 | Epoch: 007 | Batch: 000 | Loss: 150.12619\n",
      "Estimator: 000 | Epoch: 008 | Batch: 000 | Loss: 164.37053\n",
      "Estimator: 001 | Epoch: 008 | Batch: 000 | Loss: 113.31824\n",
      "Estimator: 002 | Epoch: 008 | Batch: 000 | Loss: 139.83369\n",
      "Estimator: 003 | Epoch: 008 | Batch: 000 | Loss: 68.78366\n",
      "Estimator: 004 | Epoch: 008 | Batch: 000 | Loss: 30.97492\n",
      "Estimator: 005 | Epoch: 008 | Batch: 000 | Loss: 33.94147\n",
      "Estimator: 006 | Epoch: 008 | Batch: 000 | Loss: 204.85945\n",
      "Estimator: 007 | Epoch: 008 | Batch: 000 | Loss: 158.37848\n",
      "Estimator: 008 | Epoch: 008 | Batch: 000 | Loss: 110.42283\n",
      "Estimator: 009 | Epoch: 008 | Batch: 000 | Loss: 251.59312\n",
      "Estimator: 000 | Epoch: 009 | Batch: 000 | Loss: 85.48763\n",
      "Estimator: 001 | Epoch: 009 | Batch: 000 | Loss: 145.96667\n",
      "Estimator: 002 | Epoch: 009 | Batch: 000 | Loss: 58.84143\n",
      "Estimator: 003 | Epoch: 009 | Batch: 000 | Loss: 44.27253\n",
      "Estimator: 004 | Epoch: 009 | Batch: 000 | Loss: 49.25546\n",
      "Estimator: 005 | Epoch: 009 | Batch: 000 | Loss: 106.32261\n",
      "Estimator: 006 | Epoch: 009 | Batch: 000 | Loss: 49.68790\n",
      "Estimator: 007 | Epoch: 009 | Batch: 000 | Loss: 60.59935\n",
      "Estimator: 008 | Epoch: 009 | Batch: 000 | Loss: 50.09193\n",
      "Estimator: 009 | Epoch: 009 | Batch: 000 | Loss: 46.39286\n",
      "神经网络集成学习 , MSE:  84.13543530973119\n",
      "线性回归, MSE:  37.28723086835304\n",
      "多项式回归, MSE:  5.96855946951234e+25\n"
     ]
    }
   ],
   "source": [
    "regressors = [mlp_model,lr, poly_model]\n",
    "kfolds = [7]\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "for kfold in kfolds :\n",
    "    print (\"Kfold \", kfold)\n",
    "    for regressor in regressors :\n",
    "        kf=KFold(kfold, random_state = 0)\n",
    "        cv_results=[]\n",
    "        for train_index, test_index in kf.split(X):\n",
    "            X_train, X_test = X.iloc[train_index], X.iloc[test_index]\n",
    "            y_train, y_test = Y.iloc[train_index], Y.iloc[test_index]\n",
    "            regressor.fit(X_train,y_train)\n",
    "            y_pred=regressor.predict(X_test)\n",
    "            mse = mean_squared_error(y_test, y_pred)\n",
    "            cv_results.append(mse)\n",
    "        if regressor == lr :\n",
    "                print(\"线性回归, MSE: \", (np.array(cv_results)).mean())\n",
    "        elif regressor == poly_model:\n",
    "            print(\"多项式回归, MSE: \", (np.array(cv_results)).mean())\n",
    "        else:\n",
    "            print(\"神经网络集成学习 , MSE: \", np.array(cv_results).mean())\n",
    "            "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 567,
   "id": "863610b5-bd4d-4952-b0bb-51b04aca88c4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "神经网梯度提升集成学习 , MSE:18.09613218514352,mae:2.775951684689989,train_r2:0.9606113327419143,test_r2:0.7777664364732888\n",
      "线性回归, MSE:33.44897999767656,mae:3.8429092204444997,train_r2:0.7730135569264234,test_r2:0.5892223849182505\n",
      "多项式回归, MSE:31.277814971446688,mae:3.3277633001008353,train_r2:0.9490240966612832,test_r2:0.6158858584078923\n"
     ]
    }
   ],
   "source": [
    "mlp_model=ensemble_model(GradientBoostingRegressor,MLP,epoch=10) #只设置10，演示用\n",
    "poly_model = polynomial_model(degree=2)\n",
    "lr = LinearRegression()\n",
    "regressors = [mlp_model,lr, poly_model]\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "for regressor in regressors :\n",
    "    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.20, random_state = 0)\n",
    "    regressor.fit(X_train,Y_train)\n",
    "    Y_pred=regressor.predict(X_test)\n",
    "    from sklearn.metrics import mean_squared_error\n",
    "    mse = mean_squared_error(Y_test, Y_pred)\n",
    "    # Get Mean Absolute Error (MAE)\n",
    "    from sklearn.metrics import mean_absolute_error\n",
    "    mae = mean_absolute_error(Y_test, Y_pred)\n",
    "    from  sklearn.metrics import r2_score\n",
    "    train_r2=r2_score( Y_train,regressor.predict(X_train))\n",
    "    test_r2=r2_score( Y_test,Y_pred)\n",
    "    if regressor == lr:\n",
    "        print(f\"线性回归, MSE:{mse},mae:{mae},train_r2:{train_r2},test_r2:{test_r2}\")\n",
    "    elif regressor == poly_model:\n",
    "        print(f\"多项式回归, MSE:{mse},mae:{mae},train_r2:{train_r2},test_r2:{test_r2}\")\n",
    "    else:\n",
    "        print(f\"神经网梯度提升集成学习 , MSE:{mse},mae:{mae},train_r2:{train_r2},test_r2:{test_r2}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d10f0aff-8102-4d29-944d-9fb56ba5ae55",
   "metadata": {},
   "source": [
    "通过运算决定系数 R2 来量化模型的表现。模型的决定系数是回归分析中十分常用的统计信息，经常被当作衡量模型预测能力好坏的标准。\n",
    "\n",
     "R2 的数值通常介于 0 至 1 之间，表示目标变量的变化中能够由特征解释的比例。一个 R2 值为 0 的模型还不如直接用平均值来预测；而 R2 值为 1 的模型则可以对目标变量进行完美的预测。介于 0 至 1 之间的数值，表示目标变量中有百分之多少的变化能够用特征来解释。模型也可能出现负值的 R2，这种情况下模型所做的预测比直接使用目标变量的平均值还要差很多。"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0c29a8b7-ddfa-4df2-9d12-63077eda39c3",
   "metadata": {},
   "source": [
    "# 5. 可视化结果"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a5e72e36-95dc-4dfe-8938-c56110e32983",
   "metadata": {},
   "source": [
    "## 多项式 阶数拟合图"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 577,
   "id": "47f99284-c691-4ad9-b398-c98686df1329",
   "metadata": {},
   "outputs": [],
   "source": [
     "from sklearn.model_selection import learning_curve\n",
     "import numpy as np\n",
     "\n",
     "def plot_learning_curve(plt, estimator, title, X, y, ylim=None, cv=None,\n",
     "                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\n",
     "    \"\"\"\n",
     "    Generate a simple plot of the test and training learning curve.\n",
     "\n",
     "    Parameters\n",
     "    ----------\n",
     "    estimator : object type that implements the \"fit\" and \"predict\" methods\n",
     "        An object of that type which is cloned for each validation.\n",
     "\n",
     "    title : string\n",
     "        Title for the chart.\n",
     "\n",
     "    X : array-like, shape (n_samples, n_features)\n",
     "        Training vector, where n_samples is the number of samples and\n",
     "        n_features is the number of features.\n",
     "\n",
     "    y : array-like, shape (n_samples) or (n_samples, n_features), optional\n",
     "        Target relative to X for classification or regression;\n",
     "        None for unsupervised learning.\n",
     "\n",
     "    ylim : tuple, shape (ymin, ymax), optional\n",
     "        Defines minimum and maximum yvalues plotted.\n",
     "\n",
     "    cv : int, cross-validation generator or an iterable, optional\n",
     "        Determines the cross-validation splitting strategy.\n",
     "        Possible inputs for cv are:\n",
     "          - None, to use scikit-learn's default cross-validation splitter,\n",
     "          - integer, to specify the number of folds.\n",
     "          - An object to be used as a cross-validation generator.\n",
     "          - An iterable yielding train/test splits.\n",
     "\n",
     "        For integer/None inputs, if ``y`` is binary or multiclass,\n",
     "        :class:`StratifiedKFold` used. If the estimator is not a classifier\n",
     "        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.\n",
     "\n",
     "        Refer :ref:`User Guide <cross_validation>` for the various\n",
     "        cross-validators that can be used here.\n",
     "\n",
     "    n_jobs : integer, optional\n",
     "        Number of jobs to run in parallel (default 1).\n",
     "\n",
     "    Returns\n",
     "    -------\n",
     "    plt : the pyplot module, with the learning curve drawn on the current axes.\n",
     "    \"\"\"\n",
     "    plt.title(title)\n",
     "    if ylim is not None:\n",
     "        plt.ylim(*ylim)\n",
     "    plt.xlabel(\"Training examples\")\n",
     "    plt.ylabel(\"Score\")\n",
     "    # train_scores/test_scores have shape (n_train_sizes, n_cv_folds);\n",
     "    # each row is summarized across the CV folds below.\n",
     "    train_sizes, train_scores, test_scores = learning_curve(\n",
     "        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n",
     "    train_scores_mean = np.mean(train_scores, axis=1)\n",
     "    train_scores_std = np.std(train_scores, axis=1)\n",
     "    test_scores_mean = np.mean(test_scores, axis=1)\n",
     "    test_scores_std = np.std(test_scores, axis=1)\n",
     "    plt.grid()\n",
     "\n",
     "    # Shaded bands show the mean score +/- one standard deviation across folds.\n",
     "    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n",
     "                     train_scores_mean + train_scores_std, alpha=0.1,\n",
     "                     color=\"r\")\n",
     "    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n",
     "                     test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n",
     "    plt.plot(train_sizes, train_scores_mean, 'o--', color=\"r\",\n",
     "             label=\"Training score\")\n",
     "    plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n",
     "             label=\"Cross-validation score\")\n",
     "\n",
     "    plt.legend(loc=\"best\")\n",
     "    return plt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 578,
   "id": "cf254bec-bed2-4b8b-bdb7-b75060632241",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f24191e8187c468aa120defb95160bb9",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "sns.set(style=\"whitegrid\")\n",
    "%matplotlib widget\n",
    "from sklearn.model_selection import ShuffleSplit\n",
    "cv = ShuffleSplit(n_splits=10,test_size=0.2,random_state=0)\n",
    "plt.figure(figsize=(15,3))\n",
    "title = 'Learning Curves (degree={0})'\n",
    "degrees = [1,2,3]\n",
    "for i in range(len(degrees)):\n",
    "    plt.subplot(1,3,i+1)\n",
    "    plot_learning_curve(plt,polynomial_model(degrees[i]),title.format(degrees[i]),\n",
    "                        X,Y,ylim=(0.01,1.01),cv=cv)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "32356a14-a91d-4851-ab30-0293dabfb6ee",
   "metadata": {},
   "source": [
    "## 各种神经网络集成方法拟合图"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 632,
   "id": "da542a1f-c8c1-4899-8c5b-9af9e78a926f",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Estimator: 000 | Epoch: 000 | Batch: 000 | Loss: 749.24146\n",
      "Estimator: 001 | Epoch: 000 | Batch: 000 | Loss: 696.75574\n",
      "Estimator: 002 | Epoch: 000 | Batch: 000 | Loss: 994.71356\n",
      "Estimator: 003 | Epoch: 000 | Batch: 000 | Loss: 733.96899\n",
      "Estimator: 004 | Epoch: 000 | Batch: 000 | Loss: 334.13699\n",
      "Estimator: 005 | Epoch: 000 | Batch: 000 | Loss: 637.71661\n",
      "Estimator: 006 | Epoch: 000 | Batch: 000 | Loss: 399.10434\n",
      "Estimator: 007 | Epoch: 000 | Batch: 000 | Loss: 1198.55042\n",
      "Estimator: 008 | Epoch: 000 | Batch: 000 | Loss: 385.29056\n",
      "Estimator: 009 | Epoch: 000 | Batch: 000 | Loss: 736.40723\n",
      "Estimator: 000 | Epoch: 001 | Batch: 000 | Loss: 218.81308\n",
      "Estimator: 001 | Epoch: 001 | Batch: 000 | Loss: 191.41740\n",
      "Estimator: 002 | Epoch: 001 | Batch: 000 | Loss: 282.68179\n",
      "Estimator: 003 | Epoch: 001 | Batch: 000 | Loss: 215.46555\n",
      "Estimator: 004 | Epoch: 001 | Batch: 000 | Loss: 163.02173\n",
      "Estimator: 005 | Epoch: 001 | Batch: 000 | Loss: 187.42685\n",
      "Estimator: 006 | Epoch: 001 | Batch: 000 | Loss: 504.41968\n",
      "Estimator: 007 | Epoch: 001 | Batch: 000 | Loss: 244.32930\n",
      "Estimator: 008 | Epoch: 001 | Batch: 000 | Loss: 631.88550\n",
      "Estimator: 009 | Epoch: 001 | Batch: 000 | Loss: 272.25699\n",
      "Estimator: 000 | Epoch: 002 | Batch: 000 | Loss: 11.67692\n",
      "Estimator: 001 | Epoch: 002 | Batch: 000 | Loss: 32.24753\n",
      "Estimator: 002 | Epoch: 002 | Batch: 000 | Loss: 104.35062\n",
      "Estimator: 003 | Epoch: 002 | Batch: 000 | Loss: 157.31253\n",
      "Estimator: 004 | Epoch: 002 | Batch: 000 | Loss: 15.24693\n",
      "Estimator: 005 | Epoch: 002 | Batch: 000 | Loss: 14.58890\n",
      "Estimator: 006 | Epoch: 002 | Batch: 000 | Loss: 10.46272\n",
      "Estimator: 007 | Epoch: 002 | Batch: 000 | Loss: 38.53964\n",
      "Estimator: 008 | Epoch: 002 | Batch: 000 | Loss: 30.13626\n",
      "Estimator: 009 | Epoch: 002 | Batch: 000 | Loss: 21.89109\n",
      "Estimator: 000 | Epoch: 003 | Batch: 000 | Loss: 43.91519\n",
      "Estimator: 001 | Epoch: 003 | Batch: 000 | Loss: 5.70909\n",
      "Estimator: 002 | Epoch: 003 | Batch: 000 | Loss: 6.49834\n",
      "Estimator: 003 | Epoch: 003 | Batch: 000 | Loss: 15.58304\n",
      "Estimator: 004 | Epoch: 003 | Batch: 000 | Loss: 5.61300\n",
      "Estimator: 005 | Epoch: 003 | Batch: 000 | Loss: 8.06375\n",
      "Estimator: 006 | Epoch: 003 | Batch: 000 | Loss: 25.56878\n",
      "Estimator: 007 | Epoch: 003 | Batch: 000 | Loss: 59.38055\n",
      "Estimator: 008 | Epoch: 003 | Batch: 000 | Loss: 30.33938\n",
      "Estimator: 009 | Epoch: 003 | Batch: 000 | Loss: 33.20668\n",
      "Estimator: 000 | Epoch: 004 | Batch: 000 | Loss: 5.40285\n",
      "Estimator: 001 | Epoch: 004 | Batch: 000 | Loss: 4.68547\n",
      "Estimator: 002 | Epoch: 004 | Batch: 000 | Loss: 14.26468\n",
      "Estimator: 003 | Epoch: 004 | Batch: 000 | Loss: 11.85713\n",
      "Estimator: 004 | Epoch: 004 | Batch: 000 | Loss: 10.11875\n",
      "Estimator: 005 | Epoch: 004 | Batch: 000 | Loss: 21.27861\n",
      "Estimator: 006 | Epoch: 004 | Batch: 000 | Loss: 7.92298\n",
      "Estimator: 007 | Epoch: 004 | Batch: 000 | Loss: 6.59219\n",
      "Estimator: 008 | Epoch: 004 | Batch: 000 | Loss: 7.17308\n",
      "Estimator: 009 | Epoch: 004 | Batch: 000 | Loss: 32.54437\n",
      "Estimator: 000 | Epoch: 005 | Batch: 000 | Loss: 6.98096\n",
      "Estimator: 001 | Epoch: 005 | Batch: 000 | Loss: 8.72468\n",
      "Estimator: 002 | Epoch: 005 | Batch: 000 | Loss: 12.54459\n",
      "Estimator: 003 | Epoch: 005 | Batch: 000 | Loss: 11.01079\n",
      "Estimator: 004 | Epoch: 005 | Batch: 000 | Loss: 10.44972\n",
      "Estimator: 005 | Epoch: 005 | Batch: 000 | Loss: 13.79897\n",
      "Estimator: 006 | Epoch: 005 | Batch: 000 | Loss: 23.69768\n",
      "Estimator: 007 | Epoch: 005 | Batch: 000 | Loss: 3.90892\n",
      "Estimator: 008 | Epoch: 005 | Batch: 000 | Loss: 52.55056\n",
      "Estimator: 009 | Epoch: 005 | Batch: 000 | Loss: 8.58532\n",
      "Estimator: 000 | Epoch: 006 | Batch: 000 | Loss: 18.86224\n",
      "Estimator: 001 | Epoch: 006 | Batch: 000 | Loss: 5.96779\n",
      "Estimator: 002 | Epoch: 006 | Batch: 000 | Loss: 1.73038\n",
      "Estimator: 003 | Epoch: 006 | Batch: 000 | Loss: 12.59964\n",
      "Estimator: 004 | Epoch: 006 | Batch: 000 | Loss: 8.18423\n",
      "Estimator: 005 | Epoch: 006 | Batch: 000 | Loss: 5.84235\n",
      "Estimator: 006 | Epoch: 006 | Batch: 000 | Loss: 3.69512\n",
      "Estimator: 007 | Epoch: 006 | Batch: 000 | Loss: 5.14997\n",
      "Estimator: 008 | Epoch: 006 | Batch: 000 | Loss: 14.46783\n",
      "Estimator: 009 | Epoch: 006 | Batch: 000 | Loss: 5.62276\n",
      "Estimator: 000 | Epoch: 007 | Batch: 000 | Loss: 15.92823\n",
      "Estimator: 001 | Epoch: 007 | Batch: 000 | Loss: 3.71103\n",
      "Estimator: 002 | Epoch: 007 | Batch: 000 | Loss: 12.64446\n",
      "Estimator: 003 | Epoch: 007 | Batch: 000 | Loss: 23.19324\n",
      "Estimator: 004 | Epoch: 007 | Batch: 000 | Loss: 4.21747\n",
      "Estimator: 005 | Epoch: 007 | Batch: 000 | Loss: 13.53011\n",
      "Estimator: 006 | Epoch: 007 | Batch: 000 | Loss: 5.47688\n",
      "Estimator: 007 | Epoch: 007 | Batch: 000 | Loss: 15.40279\n",
      "Estimator: 008 | Epoch: 007 | Batch: 000 | Loss: 5.11876\n",
      "Estimator: 009 | Epoch: 007 | Batch: 000 | Loss: 12.95925\n",
      "Estimator: 000 | Epoch: 008 | Batch: 000 | Loss: 6.76985\n",
      "Estimator: 001 | Epoch: 008 | Batch: 000 | Loss: 6.03963\n",
      "Estimator: 002 | Epoch: 008 | Batch: 000 | Loss: 11.22871\n",
      "Estimator: 003 | Epoch: 008 | Batch: 000 | Loss: 11.85721\n",
      "Estimator: 004 | Epoch: 008 | Batch: 000 | Loss: 4.19734\n",
      "Estimator: 005 | Epoch: 008 | Batch: 000 | Loss: 8.36229\n",
      "Estimator: 006 | Epoch: 008 | Batch: 000 | Loss: 2.89658\n",
      "Estimator: 007 | Epoch: 008 | Batch: 000 | Loss: 4.08160\n",
      "Estimator: 008 | Epoch: 008 | Batch: 000 | Loss: 7.20705\n",
      "Estimator: 009 | Epoch: 008 | Batch: 000 | Loss: 8.46026\n",
      "Estimator: 000 | Epoch: 009 | Batch: 000 | Loss: 11.89309\n",
      "Estimator: 001 | Epoch: 009 | Batch: 000 | Loss: 7.48001\n",
      "Estimator: 002 | Epoch: 009 | Batch: 000 | Loss: 3.96016\n",
      "Estimator: 003 | Epoch: 009 | Batch: 000 | Loss: 5.12641\n",
      "Estimator: 004 | Epoch: 009 | Batch: 000 | Loss: 1.21219\n",
      "Estimator: 005 | Epoch: 009 | Batch: 000 | Loss: 17.69534\n",
      "Estimator: 006 | Epoch: 009 | Batch: 000 | Loss: 7.46944\n",
      "Estimator: 007 | Epoch: 009 | Batch: 000 | Loss: 7.17614\n",
      "Estimator: 008 | Epoch: 009 | Batch: 000 | Loss: 6.26987\n",
      "Estimator: 009 | Epoch: 009 | Batch: 000 | Loss: 5.63158\n",
      "Estimator: 000 | Epoch: 000 | Batch: 000 | Loss: 795.75177\n",
      "Estimator: 001 | Epoch: 000 | Batch: 000 | Loss: 406.49789\n",
      "Estimator: 002 | Epoch: 000 | Batch: 000 | Loss: 761.63593\n",
      "Estimator: 003 | Epoch: 000 | Batch: 000 | Loss: 460.56274\n",
      "Estimator: 004 | Epoch: 000 | Batch: 000 | Loss: 751.87543\n",
      "Estimator: 005 | Epoch: 000 | Batch: 000 | Loss: 502.01480\n",
      "Estimator: 006 | Epoch: 000 | Batch: 000 | Loss: 666.49951\n",
      "Estimator: 007 | Epoch: 000 | Batch: 000 | Loss: 457.29630\n",
      "Estimator: 008 | Epoch: 000 | Batch: 000 | Loss: 745.25690\n",
      "Estimator: 009 | Epoch: 000 | Batch: 000 | Loss: 477.85489\n",
      "Estimator: 000 | Epoch: 001 | Batch: 000 | Loss: 294.73859\n",
      "Estimator: 001 | Epoch: 001 | Batch: 000 | Loss: 231.65039\n",
      "Estimator: 002 | Epoch: 001 | Batch: 000 | Loss: 268.32642\n",
      "Estimator: 003 | Epoch: 001 | Batch: 000 | Loss: 437.72208\n",
      "Estimator: 004 | Epoch: 001 | Batch: 000 | Loss: 157.27542\n",
      "Estimator: 005 | Epoch: 001 | Batch: 000 | Loss: 309.76459\n",
      "Estimator: 006 | Epoch: 001 | Batch: 000 | Loss: 125.33368\n",
      "Estimator: 007 | Epoch: 001 | Batch: 000 | Loss: 154.42728\n",
      "Estimator: 008 | Epoch: 001 | Batch: 000 | Loss: 251.69926\n",
      "Estimator: 009 | Epoch: 001 | Batch: 000 | Loss: 278.75009\n",
      "Estimator: 000 | Epoch: 002 | Batch: 000 | Loss: 23.47667\n",
      "Estimator: 001 | Epoch: 002 | Batch: 000 | Loss: 21.61066\n",
      "Estimator: 002 | Epoch: 002 | Batch: 000 | Loss: 67.09584\n",
      "Estimator: 003 | Epoch: 002 | Batch: 000 | Loss: 34.68600\n",
      "Estimator: 004 | Epoch: 002 | Batch: 000 | Loss: 32.36581\n",
      "Estimator: 005 | Epoch: 002 | Batch: 000 | Loss: 30.42886\n",
      "Estimator: 006 | Epoch: 002 | Batch: 000 | Loss: 112.91225\n",
      "Estimator: 007 | Epoch: 002 | Batch: 000 | Loss: 29.12949\n",
      "Estimator: 008 | Epoch: 002 | Batch: 000 | Loss: 19.33382\n",
      "Estimator: 009 | Epoch: 002 | Batch: 000 | Loss: 41.73723\n",
      "Estimator: 000 | Epoch: 003 | Batch: 000 | Loss: 18.47356\n",
      "Estimator: 001 | Epoch: 003 | Batch: 000 | Loss: 24.42707\n",
      "Estimator: 002 | Epoch: 003 | Batch: 000 | Loss: 14.44370\n",
      "Estimator: 003 | Epoch: 003 | Batch: 000 | Loss: 10.08873\n",
      "Estimator: 004 | Epoch: 003 | Batch: 000 | Loss: 12.44604\n",
      "Estimator: 005 | Epoch: 003 | Batch: 000 | Loss: 20.35165\n",
      "Estimator: 006 | Epoch: 003 | Batch: 000 | Loss: 14.24166\n",
      "Estimator: 007 | Epoch: 003 | Batch: 000 | Loss: 23.99514\n",
      "Estimator: 008 | Epoch: 003 | Batch: 000 | Loss: 25.84081\n",
      "Estimator: 009 | Epoch: 003 | Batch: 000 | Loss: 20.48953\n",
      "Estimator: 000 | Epoch: 004 | Batch: 000 | Loss: 25.60865\n",
      "Estimator: 001 | Epoch: 004 | Batch: 000 | Loss: 9.28163\n",
      "Estimator: 002 | Epoch: 004 | Batch: 000 | Loss: 10.36909\n",
      "Estimator: 003 | Epoch: 004 | Batch: 000 | Loss: 15.32417\n",
      "Estimator: 004 | Epoch: 004 | Batch: 000 | Loss: 17.92773\n",
      "Estimator: 005 | Epoch: 004 | Batch: 000 | Loss: 8.66640\n",
      "Estimator: 006 | Epoch: 004 | Batch: 000 | Loss: 6.84383\n",
      "Estimator: 007 | Epoch: 004 | Batch: 000 | Loss: 14.81144\n",
      "Estimator: 008 | Epoch: 004 | Batch: 000 | Loss: 21.41713\n",
      "Estimator: 009 | Epoch: 004 | Batch: 000 | Loss: 21.79226\n",
      "Estimator: 000 | Epoch: 005 | Batch: 000 | Loss: 2.84823\n",
      "Estimator: 001 | Epoch: 005 | Batch: 000 | Loss: 38.82938\n",
      "Estimator: 002 | Epoch: 005 | Batch: 000 | Loss: 10.39920\n",
      "Estimator: 003 | Epoch: 005 | Batch: 000 | Loss: 13.02404\n",
      "Estimator: 004 | Epoch: 005 | Batch: 000 | Loss: 27.93639\n",
      "Estimator: 005 | Epoch: 005 | Batch: 000 | Loss: 16.71595\n",
      "Estimator: 006 | Epoch: 005 | Batch: 000 | Loss: 79.12387\n",
      "Estimator: 007 | Epoch: 005 | Batch: 000 | Loss: 6.26910\n",
      "Estimator: 008 | Epoch: 005 | Batch: 000 | Loss: 38.06765\n",
      "Estimator: 009 | Epoch: 005 | Batch: 000 | Loss: 11.62410\n",
      "Estimator: 000 | Epoch: 006 | Batch: 000 | Loss: 8.18183\n",
      "Estimator: 001 | Epoch: 006 | Batch: 000 | Loss: 4.98494\n",
      "Estimator: 002 | Epoch: 006 | Batch: 000 | Loss: 3.92231\n",
      "Estimator: 003 | Epoch: 006 | Batch: 000 | Loss: 6.47588\n",
      "Estimator: 004 | Epoch: 006 | Batch: 000 | Loss: 10.66014\n",
      "Estimator: 005 | Epoch: 006 | Batch: 000 | Loss: 5.87665\n",
      "Estimator: 006 | Epoch: 006 | Batch: 000 | Loss: 15.25414\n",
      "Estimator: 007 | Epoch: 006 | Batch: 000 | Loss: 19.89437\n",
      "Estimator: 008 | Epoch: 006 | Batch: 000 | Loss: 6.86892\n",
      "Estimator: 009 | Epoch: 006 | Batch: 000 | Loss: 11.09460\n",
      "Estimator: 000 | Epoch: 007 | Batch: 000 | Loss: 3.64968\n",
      "Estimator: 001 | Epoch: 007 | Batch: 000 | Loss: 7.82581\n",
      "Estimator: 002 | Epoch: 007 | Batch: 000 | Loss: 4.69777\n",
      "Estimator: 003 | Epoch: 007 | Batch: 000 | Loss: 8.15261\n",
      "Estimator: 004 | Epoch: 007 | Batch: 000 | Loss: 21.86896\n",
      "Estimator: 005 | Epoch: 007 | Batch: 000 | Loss: 8.46948\n",
      "Estimator: 006 | Epoch: 007 | Batch: 000 | Loss: 12.06732\n",
      "Estimator: 007 | Epoch: 007 | Batch: 000 | Loss: 3.91807\n",
      "Estimator: 008 | Epoch: 007 | Batch: 000 | Loss: 3.46096\n",
      "Estimator: 009 | Epoch: 007 | Batch: 000 | Loss: 6.60669\n",
      "Estimator: 000 | Epoch: 008 | Batch: 000 | Loss: 9.35258\n",
      "Estimator: 001 | Epoch: 008 | Batch: 000 | Loss: 5.24265\n",
      "Estimator: 002 | Epoch: 008 | Batch: 000 | Loss: 13.52185\n",
      "Estimator: 003 | Epoch: 008 | Batch: 000 | Loss: 9.82367\n",
      "Estimator: 004 | Epoch: 008 | Batch: 000 | Loss: 5.87540\n",
      "Estimator: 005 | Epoch: 008 | Batch: 000 | Loss: 14.07308\n",
      "Estimator: 006 | Epoch: 008 | Batch: 000 | Loss: 4.75839\n",
      "Estimator: 007 | Epoch: 008 | Batch: 000 | Loss: 7.12221\n",
      "Estimator: 008 | Epoch: 008 | Batch: 000 | Loss: 10.83623\n",
      "Estimator: 009 | Epoch: 008 | Batch: 000 | Loss: 7.38165\n",
      "Estimator: 000 | Epoch: 009 | Batch: 000 | Loss: 3.09892\n",
      "Estimator: 001 | Epoch: 009 | Batch: 000 | Loss: 39.56350\n",
      "Estimator: 002 | Epoch: 009 | Batch: 000 | Loss: 5.40393\n",
      "Estimator: 003 | Epoch: 009 | Batch: 000 | Loss: 2.85382\n",
      "Estimator: 004 | Epoch: 009 | Batch: 000 | Loss: 6.37047\n",
      "Estimator: 005 | Epoch: 009 | Batch: 000 | Loss: 7.01445\n",
      "Estimator: 006 | Epoch: 009 | Batch: 000 | Loss: 49.27558\n",
      "Estimator: 007 | Epoch: 009 | Batch: 000 | Loss: 7.37517\n",
      "Estimator: 008 | Epoch: 009 | Batch: 000 | Loss: 42.57196\n",
      "Estimator: 009 | Epoch: 009 | Batch: 000 | Loss: 14.70802\n"
     ]
    }
   ],
   "source": [
    "# Hold out 20% of the data for evaluation; fixed random_state for reproducibility.\n",
    "X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.20, random_state = 0)\n",
    "method_name=['GradientBoosting','Bagging','Fusion','Voting','SnapshotEnsemble']\n",
    "# Wrap the same MLP base learner in each ensemble strategy; each trains for 10 epochs.\n",
    "method=[ensemble_model(i,MLP,epoch=10) for i in [GradientBoostingRegressor,BaggingRegressor,FusionRegressor,VotingRegressor,SnapshotEnsembleRegressor]]\n",
    "# Collect per-method test predictions and the (identical) ground truth for later plotting.\n",
    "predict={}\n",
    "true_={}\n",
    "for i in range(len(method)):\n",
    "    model=method[i]\n",
    "    model.fit(X_train,Y_train)\n",
    "    # NOTE(review): mse is computed but never used afterwards -- TODO confirm it can be dropped.\n",
    "    mse=model.score(X_test,Y_test)\n",
    "    Y_predict=model.predict(X_test)\n",
    "    # assumes predict() returns a 2-D (n_samples, 1) array; column 0 is the prediction -- TODO confirm\n",
    "    predict[method_name[i]]=Y_predict[:,0]\n",
    "    true_[method_name[i]]=np.array(Y_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 633,
   "id": "d092c983-c689-4e6e-a7c6-79fd767a3f86",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'GradientBoosting': array([23.142233 , 27.035881 , 25.940426 , 11.426883 , 18.971087 ,\n",
       "        19.044994 , 23.095572 , 20.864798 , 19.074602 , 16.378284 ,\n",
       "         8.44351  , 11.745795 , 14.757016 ,  8.180301 , 45.76589  ,\n",
       "        32.74307  , 24.438744 , 38.323494 , 31.00717  , 22.418756 ,\n",
       "        23.268154 , 20.745571 , 20.638798 , 26.456526 , 21.699106 ,\n",
       "        24.688036 , 16.539196 , 16.262987 , 41.698795 , 17.774061 ,\n",
       "        15.747303 , 17.281044 , 19.908312 , 19.62324  , 26.655733 ,\n",
       "        21.913189 ,  7.6598372, 34.46279  , 14.853894 , 13.954255 ,\n",
       "        23.794075 , 22.04856  , 19.462353 , 18.116491 , 22.749931 ,\n",
       "        23.837893 , 19.59913  , 16.08849  , 15.682152 , 22.583906 ,\n",
       "        13.872931 , 22.395012 , 21.474892 , 41.625053 , 12.580174 ,\n",
       "        19.173922 , 17.161795 , 18.30543  , 14.922227 , 21.302345 ,\n",
       "        20.16074  , 19.680872 , 33.314163 , 31.406013 , 19.283857 ,\n",
       "        28.803602 , 15.593161 , 21.821983 , 12.716979 , 22.95363  ,\n",
       "        19.872358 , 21.00489  , 25.676058 , 27.680279 , 27.57983  ,\n",
       "         7.563188 , 41.87129  , 21.947437 , 26.416887 , 18.598501 ,\n",
       "        25.135872 , 19.42042  , 23.059805 , 42.599495 , 45.56816  ,\n",
       "        22.851772 , 24.898668 , 14.35182  , 27.921152 , 15.232912 ,\n",
       "        15.321322 ,  8.827341 , 21.677317 , 29.140953 , 20.895851 ,\n",
       "        20.487225 , 11.232611 , 23.91061  , 13.32474  , 17.582813 ,\n",
       "        24.007944 , 20.373257 ], dtype=float32),\n",
       " 'Bagging': array([25.158932 , 22.70275  , 27.311237 , 11.178988 , 17.707207 ,\n",
       "        19.300694 , 21.07274  , 21.364975 , 17.346128 , 17.877926 ,\n",
       "         9.3004465, 12.219922 , 15.138456 , 10.5361805, 43.363758 ,\n",
       "        33.22053  , 21.75949  , 40.00661  , 31.296667 , 22.325573 ,\n",
       "        23.689508 , 21.025677 , 19.410912 , 26.072159 , 22.63215  ,\n",
       "        19.749775 , 16.968786 , 16.789385 , 43.486683 , 16.956333 ,\n",
       "        15.221985 , 17.794239 , 17.391079 , 18.886316 , 28.219746 ,\n",
       "        21.544294 ,  8.197737 , 25.712357 , 14.128267 , 12.553707 ,\n",
       "        25.480684 , 21.01855  , 21.403057 , 15.98551  , 24.301159 ,\n",
       "        25.069511 , 18.534466 , 20.914104 , 14.858019 , 23.496916 ,\n",
       "        18.269444 , 16.038412 , 19.576902 , 37.86866  , 13.026293 ,\n",
       "        17.753208 , 18.1979   , 17.637283 , 12.925754 , 21.38001  ,\n",
       "        21.248621 , 17.697649 , 31.236362 , 30.445297 , 16.123995 ,\n",
       "        29.231716 , 16.461836 , 18.054117 , 12.093325 , 21.780409 ,\n",
       "        18.90424  , 21.03613  , 28.320593 , 26.143732 , 25.17309  ,\n",
       "         9.107503 , 41.69787  , 20.858013 , 27.542547 , 15.750353 ,\n",
       "        24.795956 , 19.307863 , 18.347225 , 43.1747   , 46.701344 ,\n",
       "        23.904835 , 25.204334 , 14.551575 , 27.340466 , 15.983617 ,\n",
       "        16.204752 ,  9.969607 , 24.502127 , 30.01275  , 23.337675 ,\n",
       "        18.688465 , 12.533313 , 25.922718 , 14.45618  , 15.357226 ,\n",
       "        23.290903 , 21.631048 ], dtype=float32),\n",
       " 'Fusion': array([25.454563 , 23.441952 , 26.240002 , 11.205146 , 18.191732 ,\n",
       "        19.559732 , 21.759974 , 21.401958 , 17.862877 , 18.499025 ,\n",
       "         9.83271  , 12.963162 , 15.10889  , 10.703906 , 43.56698  ,\n",
       "        32.987072 , 22.486141 , 39.76583  , 31.179834 , 22.377598 ,\n",
       "        23.611351 , 21.313206 , 19.576214 , 25.614075 , 22.770737 ,\n",
       "        20.961206 , 17.390324 , 16.789665 , 42.929516 , 16.968706 ,\n",
       "        15.396322 , 17.786758 , 17.514984 , 18.663631 , 27.75896  ,\n",
       "        22.408825 ,  7.739221 , 27.426245 , 15.085385 , 12.9496975,\n",
       "        25.945047 , 21.302559 , 21.62268  , 16.567245 , 24.383553 ,\n",
       "        24.857334 , 19.048534 , 19.856339 , 14.630014 , 23.431011 ,\n",
       "        17.836695 , 16.348503 , 19.692667 , 37.763695 , 13.162231 ,\n",
       "        17.88734  , 18.264301 , 18.261753 , 14.521616 , 20.824602 ,\n",
       "        21.081127 , 17.738323 , 31.155085 , 30.638382 , 16.844784 ,\n",
       "        29.303852 , 16.375813 , 17.947186 , 12.690714 , 21.829308 ,\n",
       "        19.217945 , 21.505598 , 27.828098 , 25.966105 , 25.920385 ,\n",
       "         9.159673 , 41.252853 , 20.913507 , 27.217514 , 15.907339 ,\n",
       "        24.452988 , 18.875921 , 19.175343 , 42.703667 , 46.113228 ,\n",
       "        23.750917 , 25.112223 , 15.357435 , 27.399103 , 15.8621855,\n",
       "        16.422693 ,  9.896867 , 24.029438 , 29.565922 , 22.945282 ,\n",
       "        18.782574 , 12.980496 , 25.397625 , 14.636856 , 15.622145 ,\n",
       "        23.278017 , 21.5686   ], dtype=float32),\n",
       " 'Voting': array([25.633993 , 23.381891 , 27.080097 , 10.928111 , 18.102417 ,\n",
       "        19.245274 , 21.416576 , 21.146885 , 17.754314 , 17.622194 ,\n",
       "         9.447882 , 12.396652 , 14.809427 , 10.242834 , 44.029858 ,\n",
       "        33.59993  , 22.373709 , 40.540085 , 31.693491 , 22.149866 ,\n",
       "        23.673456 , 21.292515 , 19.441189 , 26.227978 , 22.634644 ,\n",
       "        19.9541   , 17.110521 , 16.851643 , 43.758385 , 16.924555 ,\n",
       "        15.045321 , 17.417727 , 17.566702 , 19.087906 , 28.128662 ,\n",
       "        21.840794 ,  7.741023 , 26.821796 , 14.411647 , 12.346877 ,\n",
       "        26.166805 , 21.214624 , 21.674263 , 15.87398  , 24.305248 ,\n",
       "        25.000492 , 19.013279 , 20.222042 , 15.137418 , 23.48881  ,\n",
       "        17.85131  , 16.071445 , 19.583752 , 38.164165 , 13.1332035,\n",
       "        17.788864 , 18.6682   , 17.970196 , 13.344576 , 21.503485 ,\n",
       "        21.452251 , 17.539669 , 31.67185  , 31.066833 , 16.200186 ,\n",
       "        29.969687 , 16.053936 , 17.90152  , 12.2081175, 21.759876 ,\n",
       "        19.15094  , 21.569597 , 28.64733  , 26.088028 , 26.126995 ,\n",
       "         8.755997 , 42.04226  , 20.889603 , 27.537176 , 15.705823 ,\n",
       "        24.90958  , 18.775318 , 18.731451 , 43.596752 , 47.059288 ,\n",
       "        23.757442 , 25.376007 , 14.972394 , 27.344675 , 15.613785 ,\n",
       "        16.228191 ,  9.6830635, 24.756039 , 30.290958 , 23.131771 ,\n",
       "        19.010138 , 12.0739   , 26.034958 , 14.372873 , 15.548561 ,\n",
       "        23.357214 , 21.57565  ], dtype=float32),\n",
       " 'SnapshotEnsemble': array([20.629566 , 16.907085 , 25.286686 , 11.802556 , 13.470537 ,\n",
       "        14.7794285, 12.836233 , 17.59872  , 12.81814  , 14.085233 ,\n",
       "        13.937787 , 12.02921  , 14.223234 , 12.241922 , 36.182922 ,\n",
       "        28.67922  , 12.840889 , 33.429012 , 24.1834   , 17.657267 ,\n",
       "        18.503757 , 15.108786 , 13.527794 , 21.80891  , 19.667332 ,\n",
       "        14.302908 , 12.613904 , 16.577694 , 32.888332 , 11.522276 ,\n",
       "        12.7612095, 14.065699 , 17.121407 , 17.60139  , 21.451313 ,\n",
       "        19.883287 , 11.219809 , 20.983826 , 12.080892 , 11.720954 ,\n",
       "        20.499294 , 18.3643   , 18.024014 , 12.231576 , 19.690903 ,\n",
       "        20.3373   , 15.31679  , 22.979902 , 15.026217 , 17.36701  ,\n",
       "        18.888268 , 11.58286  , 14.446231 , 29.304214 , 11.8358345,\n",
       "        15.138789 , 18.835787 , 17.291613 , 13.3261385, 20.017767 ,\n",
       "        24.347996 , 13.602702 , 24.57867  , 22.933344 , 11.105974 ,\n",
       "        23.487255 , 14.714653 , 13.403188 , 11.993733 , 17.539534 ,\n",
       "        13.180539 , 15.399237 , 26.374857 , 22.347452 , 14.871435 ,\n",
       "        11.657855 , 29.866602 , 16.380539 , 21.724016 , 10.528838 ,\n",
       "        21.234962 , 16.04342  , 14.725728 , 31.093359 , 33.68859  ,\n",
       "        19.609509 , 19.900887 , 11.694164 , 22.943348 , 13.551538 ,\n",
       "        17.584486 , 12.139372 , 26.418371 , 26.623669 , 17.68967  ,\n",
       "        18.579227 , 13.484528 , 27.370731 , 13.596812 , 15.228468 ,\n",
       "        18.236216 , 15.823385 ], dtype=float32)}"
      ]
     },
     "execution_count": 633,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "predict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 659,
   "id": "81ad2b87-3864-48cb-86d0-f7ad44e23f0b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def vis_regressor(ax,y_true,y_predict,title):\n",
    "    \"\"\"Draw predictions as a line and ground truth as red triangles on ax.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    ax : matplotlib Axes to draw on (modified in place).\n",
    "    y_true : 1-D array-like of ground-truth targets.\n",
    "    y_predict : 1-D array-like of predictions, same length as y_true.\n",
    "    title : str, subplot title.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    The same Axes with line, scatter, title and legend applied.\n",
    "    \"\"\"\n",
    "    # Column names: '原始值' = true value, '预测值' = predicted value (kept as plotted labels).\n",
    "    res_df = pd.concat([pd.DataFrame({'原始值': y_true}), pd.DataFrame({'预测值': y_predict})], axis=1)\n",
    "    sns.lineplot(x=res_df.index.tolist(), y=res_df['预测值'], linewidth=2, ax=ax)\n",
    "    sns.scatterplot(x=res_df.index.tolist(), y=res_df['原始值'], s=60, color='r', marker='v', ax=ax)\n",
    "    ax.set_ylabel('')\n",
    "    ax.set_title(title)\n",
    "    # Legend labels follow draw order: line ('预测值' / predicted) first, scatter ('真实值' / true) second.\n",
    "    ax.legend(labels=['预测值', '真实值'],loc='upper left', fontsize=15, frameon=True, fancybox=True, framealpha=1, borderpad=0.3,\n",
    "           ncol=1, markerfirst=True, markerscale=1, numpoints=1, handlelength=3.5)\n",
    "    return ax"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 660,
   "id": "710cd2de-a8f8-4404-a6d5-ed962096e22f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "24dcc693269f41a19e89dd4875101ff5",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "%matplotlib widget\n",
    "\n",
    "# Global plot style; set once (the original called sns.set twice with the same args).\n",
    "sns.set(style=\"whitegrid\")\n",
    "# Font config is loop-invariant, so hoist it out of the plotting loop.\n",
    "plt.rcParams[\"font.family\"] = 'SimHei'  # CJK-capable font so Chinese labels render\n",
    "plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign visible under a CJK font\n",
    "\n",
    "plt.figure(figsize=(20,5))\n",
    "title = 'method={0}'\n",
    "layout = (1, 5)  # one row, one panel per ensemble method\n",
    "# One subplot per method: predicted line vs. true scatter for the held-out set.\n",
    "for i in range(len(method_name)):\n",
    "    ax = plt.subplot2grid(layout,(0,i))\n",
    "    vis_regressor(ax,true_[method_name[i]],predict[method_name[i]],title=title.format(method_name[i]))\n",
    "plt.tight_layout()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
