{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 293,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import math"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 294,
   "metadata": {},
   "outputs": [],
   "source": [
    "# sklearn部分功能库 请勿修改\n",
    "# 数据集划分\n",
    "from sklearn.model_selection import train_test_split\n",
    "# 数据归一化\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "# 模型评估\n",
    "from sklearn.metrics import r2_score\n",
    "from sklearn.metrics import mean_absolute_error\n",
    "from sklearn.metrics import mean_squared_error\n",
    "from sklearn.metrics import mean_absolute_percentage_error"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 295,
   "metadata": {},
   "outputs": [],
   "source": [
    "data = pd.read_excel('data.xlsx')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 296,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_label = data.columns[:3]\n",
    "y_label = data.columns[3:]\n",
    "X = data.iloc[:, :3].values\n",
    "Y = data.iloc[:, 3:].values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 297,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 298,
   "metadata": {},
   "outputs": [],
   "source": [
    "# knn\n",
    "from sklearn.neighbors import KNeighborsRegressor\n",
    "# 随机森林\n",
    "from sklearn.ensemble import RandomForestRegressor\n",
    "# ada boost\n",
    "from sklearn.ensemble import AdaBoostRegressor\n",
    "# 梯度提升\n",
    "from sklearn.ensemble import GradientBoostingRegressor\n",
    "# 决策树\n",
    "from sklearn.tree import DecisionTreeRegressor\n",
    "# 支持向量机\n",
    "from sklearn.svm import SVR\n",
    "# lasso\n",
    "from sklearn.linear_model import Lasso\n",
    "# 弹性网络\n",
    "from sklearn.linear_model import ElasticNet\n",
    "# xgboost\n",
    "from xgboost import XGBRegressor\n",
    "# lightgbm\n",
    "from lightgbm import LGBMRegressor\n",
    "# 额外树\n",
    "from sklearn.ensemble import ExtraTreesRegressor\n",
    "# 直方图梯度提升\n",
    "from sklearn.ensemble import HistGradientBoostingRegressor\n",
    "# catboost\n",
    "from catboost import CatBoostRegressor\n",
    "# 线性回归\n",
    "from sklearn.linear_model import LinearRegression\n",
    "# 岭回归\n",
    "from sklearn.linear_model import Ridge\n",
    "# 多项式回归\n",
    "from sklearn.preprocessing import PolynomialFeatures\n",
    "from sklearn.pipeline import make_pipeline\n",
    "# bagging回归\n",
    "from sklearn.ensemble import BaggingRegressor\n",
    "# 贝叶斯回归\n",
    "from sklearn.linear_model import BayesianRidge"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 299,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 模型评估\n",
    "from sklearn.metrics import mean_squared_error, r2_score\n",
    "# mape\n",
    "from sklearn.metrics import mean_absolute_percentage_error\n",
    "# mae\n",
    "from sklearn.metrics import mean_absolute_error\n",
    "# rmse\n",
    "from sklearn.metrics import mean_squared_error"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 300,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import learning_curve\n",
    "def draw_learning_curve(model, model_name, X, Y, type):\n",
    "    train_sizes, train_scores, test_scores = learning_curve(estimator=model, X=X_train, y=Y_train, train_sizes=np.linspace(0.1, 1.0, 10), cv=10, n_jobs=-1, scoring=type)\n",
    "    train_mean = np.mean(train_scores, axis=1)\n",
    "    train_std = np.std(train_scores, axis=1)\n",
    "    test_mean = np.mean(test_scores, axis=1)\n",
    "    test_std = np.std(test_scores, axis=1)\n",
    "\n",
    "    plt.figure(figsize=(10, 6))\n",
    "    plt.plot(train_sizes, train_mean, label='Training score')\n",
    "    plt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std, alpha=0.2)\n",
    "    plt.plot(train_sizes, test_mean, label='Cross-validation score')\n",
    "    plt.fill_between(train_sizes, test_mean - test_std, test_mean + test_std, alpha=0.2)\n",
    "    plt.title(f'{model_name} Learning Curve({type})')\n",
    "    plt.xlabel('Training Set Size')\n",
    "    plt.ylabel(f'{type} Score')\n",
    "    plt.legend(loc='best')\n",
    "    plt.grid(True)\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 301,
   "metadata": {},
   "outputs": [],
   "source": [
    "models = {\n",
    "    \"KNN Regressor\": KNeighborsRegressor(),\n",
    "    \"Random Forest Regressor\": RandomForestRegressor(random_state=42),\n",
    "    \"Decision Tree Regressor\": DecisionTreeRegressor(random_state=42),\n",
    "    \"Lasso\": Lasso(),\n",
    "    \"Elastic Net\": ElasticNet(),\n",
    "    \"XGBoost\": XGBRegressor(),\n",
    "    \"Extra Trees Regressor\": ExtraTreesRegressor(random_state=42),\n",
    "    \"Linear Regression\": LinearRegression(),\n",
    "    \"Ridge\": Ridge(),\n",
    "    \"Polynomial Regression\": make_pipeline(PolynomialFeatures(2), LinearRegression()),\n",
    "    \"Bagging Regressor\": BaggingRegressor()\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 302,
   "metadata": {},
   "outputs": [],
   "source": [
    "multi_output_models = {\n",
    "    \"Ada Boost Regressor\": AdaBoostRegressor(random_state=42),\n",
    "    \"Gradient Boosting Regressor\": GradientBoostingRegressor(random_state=42),\n",
    "    \"SVR\": SVR(),\n",
    "    \"LightGBM\": LGBMRegressor(),\n",
    "    \"Hist Gradient Boosting Regressor\": HistGradientBoostingRegressor(random_state=42),\n",
    "    \"CatBoost\": CatBoostRegressor(verbose=0),\n",
    "    \"Bayesian Ridge\": BayesianRidge()\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 303,
   "metadata": {},
   "outputs": [],
   "source": [
    "mse= {}\n",
    "r2 = {}\n",
    "# mape = {}\n",
    "mae = {}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 304,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training KNN Regressor\n",
      "Training Random Forest Regressor\n",
      "Training Decision Tree Regressor\n",
      "Training Lasso\n",
      "Training Elastic Net\n",
      "Training XGBoost\n",
      "Training Extra Trees Regressor\n",
      "Training Linear Regression\n",
      "Training Ridge\n",
      "Training Polynomial Regression\n",
      "Training Bagging Regressor\n"
     ]
    }
   ],
   "source": [
    "# 训练模型\n",
    "for name, model in models.items():\n",
    "    print(f\"Training {name}\")\n",
    "    model.fit(X_train, Y_train)\n",
    "    Y_pred = model.predict(X_test)\n",
    "    # mse\n",
    "    mse[name] = mean_squared_error(Y_test, Y_pred)\n",
    "    # r2\n",
    "    r2[name] = r2_score(Y_test, Y_pred)\n",
    "    # mape\n",
    "    # mape[name] = mean_absolute_percentage_error(Y_test, Y_pred)\n",
    "    # mae\n",
    "    mae[name] = mean_absolute_error(Y_test, Y_pred)\n",
    "    # draw_learning_curve(model, name, X_train, Y_train, 'neg_mean_squared_error')\n",
    "    # draw_learning_curve(model, name, X_train, Y_train, 'r2')\n",
    "    # draw_learning_curve(model, name, X_train, Y_train, 'neg_mean_absolute_error')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Ada Boost Regressor\n",
      "Training Gradient Boosting Regressor\n",
      "Training SVR\n",
      "Training LightGBM\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000418 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.030190\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000412 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.000270\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000386 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.000228\n",
      "[LightGBM] [Info] Auto-choosing row-wise multi-threading, the overhead of testing was 0.000468 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.064817\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000385 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.060448\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000428 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.064643\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000463 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.062572\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000483 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.060819\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000482 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.064323\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000397 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.071603\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000470 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.067889\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000392 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.077458\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000444 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.002035\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000397 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.157556\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000398 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.002200\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000536 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.006546\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.000399 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 761\n",
      "[LightGBM] [Info] Number of data points in the train set: 7936, number of used features: 3\n",
      "[LightGBM] [Info] Start training from score 0.121921\n",
      "Training Hist Gradient Boosting Regressor\n",
      "Training CatBoost\n",
      "Training Bayesian Ridge\n"
     ]
    }
   ],
   "source": [
    "from sklearn.multioutput import MultiOutputRegressor\n",
    "for name, model in multi_output_models.items():\n",
    "    print(f\"Training {name}\")\n",
    "    model = MultiOutputRegressor(model)\n",
    "    model.fit(X_train, Y_train)\n",
    "    Y_pred = model.predict(X_test)\n",
    "    # mse\n",
    "    mse[name] = mean_squared_error(Y_test, Y_pred)\n",
    "    # r2\n",
    "    r2[name] = r2_score(Y_test, Y_pred)\n",
    "    # mape\n",
    "    # mape[name] = mean_absolute_percentage_error(Y_test, Y_pred)\n",
    "    # mae\n",
    "    mae[name] = mean_absolute_error(Y_test, Y_pred)\n",
    "    # draw_learning_curve(model, name, X_train, Y_train, 'neg_mean_squared_error')\n",
    "    # draw_learning_curve(model, name, X_train, Y_train, 'r2')\n",
    "    # draw_learning_curve(model, name, X_train, Y_train, 'neg_mean_absolute_error')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 306,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>MSE</th>\n",
       "      <th>R2</th>\n",
       "      <th>MAE</th>\n",
       "      <th>MAPE</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>KNN Regressor</th>\n",
       "      <td>0.000006</td>\n",
       "      <td>0.999361</td>\n",
       "      <td>0.000321</td>\n",
       "      <td>9.223917e+09</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Random Forest Regressor</th>\n",
       "      <td>0.000003</td>\n",
       "      <td>0.998508</td>\n",
       "      <td>0.000191</td>\n",
       "      <td>2.089233e+09</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Decision Tree Regressor</th>\n",
       "      <td>0.000004</td>\n",
       "      <td>0.997899</td>\n",
       "      <td>0.000245</td>\n",
       "      <td>6.275278e+07</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Lasso</th>\n",
       "      <td>0.020934</td>\n",
       "      <td>-0.000282</td>\n",
       "      <td>0.086060</td>\n",
       "      <td>1.973319e+14</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Elastic Net</th>\n",
       "      <td>0.020934</td>\n",
       "      <td>-0.000282</td>\n",
       "      <td>0.086060</td>\n",
       "      <td>1.973319e+14</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>XGBoost</th>\n",
       "      <td>0.000002</td>\n",
       "      <td>0.999155</td>\n",
       "      <td>0.000247</td>\n",
       "      <td>2.914644e+10</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Extra Trees Regressor</th>\n",
       "      <td>0.000001</td>\n",
       "      <td>0.999433</td>\n",
       "      <td>0.000148</td>\n",
       "      <td>8.157286e+09</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Linear Regression</th>\n",
       "      <td>0.001134</td>\n",
       "      <td>0.975886</td>\n",
       "      <td>0.007872</td>\n",
       "      <td>8.703085e+12</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Ridge</th>\n",
       "      <td>0.001157</td>\n",
       "      <td>0.975272</td>\n",
       "      <td>0.008085</td>\n",
       "      <td>9.015223e+12</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Polynomial Regression</th>\n",
       "      <td>0.000095</td>\n",
       "      <td>0.996902</td>\n",
       "      <td>0.002407</td>\n",
       "      <td>2.371486e+12</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Bagging Regressor</th>\n",
       "      <td>0.000003</td>\n",
       "      <td>0.998074</td>\n",
       "      <td>0.000205</td>\n",
       "      <td>6.741053e+07</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Ada Boost Regressor</th>\n",
       "      <td>0.000022</td>\n",
       "      <td>0.998131</td>\n",
       "      <td>0.001314</td>\n",
       "      <td>3.893268e+11</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Gradient Boosting Regressor</th>\n",
       "      <td>0.000004</td>\n",
       "      <td>0.999378</td>\n",
       "      <td>0.000411</td>\n",
       "      <td>1.301613e+11</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>SVR</th>\n",
       "      <td>0.005774</td>\n",
       "      <td>-0.146419</td>\n",
       "      <td>0.063107</td>\n",
       "      <td>2.538780e+14</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>LightGBM</th>\n",
       "      <td>0.000002</td>\n",
       "      <td>0.999354</td>\n",
       "      <td>0.000237</td>\n",
       "      <td>2.344284e+10</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Hist Gradient Boosting Regressor</th>\n",
       "      <td>0.000004</td>\n",
       "      <td>0.999005</td>\n",
       "      <td>0.000344</td>\n",
       "      <td>3.544413e+10</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>CatBoost</th>\n",
       "      <td>0.000002</td>\n",
       "      <td>0.999262</td>\n",
       "      <td>0.000291</td>\n",
       "      <td>1.122619e+11</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Bayesian Ridge</th>\n",
       "      <td>0.001134</td>\n",
       "      <td>0.975886</td>\n",
       "      <td>0.007872</td>\n",
       "      <td>8.703365e+12</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                       MSE        R2       MAE          MAPE\n",
       "KNN Regressor                     0.000006  0.999361  0.000321  9.223917e+09\n",
       "Random Forest Regressor           0.000003  0.998508  0.000191  2.089233e+09\n",
       "Decision Tree Regressor           0.000004  0.997899  0.000245  6.275278e+07\n",
       "Lasso                             0.020934 -0.000282  0.086060  1.973319e+14\n",
       "Elastic Net                       0.020934 -0.000282  0.086060  1.973319e+14\n",
       "XGBoost                           0.000002  0.999155  0.000247  2.914644e+10\n",
       "Extra Trees Regressor             0.000001  0.999433  0.000148  8.157286e+09\n",
       "Linear Regression                 0.001134  0.975886  0.007872  8.703085e+12\n",
       "Ridge                             0.001157  0.975272  0.008085  9.015223e+12\n",
       "Polynomial Regression             0.000095  0.996902  0.002407  2.371486e+12\n",
       "Bagging Regressor                 0.000003  0.998074  0.000205  6.741053e+07\n",
       "Ada Boost Regressor               0.000022  0.998131  0.001314  3.893268e+11\n",
       "Gradient Boosting Regressor       0.000004  0.999378  0.000411  1.301613e+11\n",
       "SVR                               0.005774 -0.146419  0.063107  2.538780e+14\n",
       "LightGBM                          0.000002  0.999354  0.000237  2.344284e+10\n",
       "Hist Gradient Boosting Regressor  0.000004  0.999005  0.000344  3.544413e+10\n",
       "CatBoost                          0.000002  0.999262  0.000291  1.122619e+11\n",
       "Bayesian Ridge                    0.001134  0.975886  0.007872  8.703365e+12"
      ]
     },
     "execution_count": 306,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 将多个评价指标合并为一个dataframe\n",
    "evaluation = pd.DataFrame([mse, r2, mae], index=['MSE', 'R2', 'MAE']).T\n",
    "evaluation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 307,
   "metadata": {},
   "outputs": [],
   "source": [
    "from scipy.stats import entropy\n",
    "\n",
    "def normalize_data(df, maximize_columns):\n",
    "    normalized_df = df.copy()\n",
    "    \n",
    "    for column in df.columns:\n",
    "        if column in maximize_columns:\n",
    "            # 对于需要最大化的指标，使用归一化处理\n",
    "            normalized_df[column] = (df[column] - df[column].min()) / (df[column].max() - df[column].min())\n",
    "        else:\n",
    "            # 对于需要最小化的指标，使用逆归一化处理\n",
    "            normalized_df[column] = (df[column].max() - df[column]) / (df[column].max() - df[column].min())\n",
    "    \n",
    "    return normalized_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 308,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>MSE</th>\n",
       "      <th>R2</th>\n",
       "      <th>MAE</th>\n",
       "      <th>MAPE</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>KNN Regressor</th>\n",
       "      <td>0.999791</td>\n",
       "      <td>0.999937</td>\n",
       "      <td>0.997982</td>\n",
       "      <td>0.999964</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Random Forest Regressor</th>\n",
       "      <td>0.999920</td>\n",
       "      <td>0.999193</td>\n",
       "      <td>0.999490</td>\n",
       "      <td>0.999992</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Decision Tree Regressor</th>\n",
       "      <td>0.999853</td>\n",
       "      <td>0.998661</td>\n",
       "      <td>0.998866</td>\n",
       "      <td>1.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Lasso</th>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.127536</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.222729</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Elastic Net</th>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.127536</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.222729</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>XGBoost</th>\n",
       "      <td>0.999945</td>\n",
       "      <td>0.999758</td>\n",
       "      <td>0.998848</td>\n",
       "      <td>0.999885</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Extra Trees Regressor</th>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.999968</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Linear Regression</th>\n",
       "      <td>0.945864</td>\n",
       "      <td>0.979451</td>\n",
       "      <td>0.910088</td>\n",
       "      <td>0.965720</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Ridge</th>\n",
       "      <td>0.944762</td>\n",
       "      <td>0.978915</td>\n",
       "      <td>0.907605</td>\n",
       "      <td>0.964490</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Polynomial Regression</th>\n",
       "      <td>0.995527</td>\n",
       "      <td>0.997791</td>\n",
       "      <td>0.973704</td>\n",
       "      <td>0.990659</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Bagging Regressor</th>\n",
       "      <td>0.999917</td>\n",
       "      <td>0.998814</td>\n",
       "      <td>0.999325</td>\n",
       "      <td>1.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Ada Boost Regressor</th>\n",
       "      <td>0.999014</td>\n",
       "      <td>0.998864</td>\n",
       "      <td>0.986420</td>\n",
       "      <td>0.998467</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Gradient Boosting Regressor</th>\n",
       "      <td>0.999848</td>\n",
       "      <td>0.999952</td>\n",
       "      <td>0.996934</td>\n",
       "      <td>0.999488</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>SVR</th>\n",
       "      <td>0.724242</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.267164</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>LightGBM</th>\n",
       "      <td>0.999952</td>\n",
       "      <td>0.999931</td>\n",
       "      <td>0.998960</td>\n",
       "      <td>0.999908</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Hist Gradient Boosting Regressor</th>\n",
       "      <td>0.999870</td>\n",
       "      <td>0.999627</td>\n",
       "      <td>0.997710</td>\n",
       "      <td>0.999861</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>CatBoost</th>\n",
       "      <td>0.999956</td>\n",
       "      <td>0.999851</td>\n",
       "      <td>0.998330</td>\n",
       "      <td>0.999558</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Bayesian Ridge</th>\n",
       "      <td>0.945862</td>\n",
       "      <td>0.979450</td>\n",
       "      <td>0.910091</td>\n",
       "      <td>0.965719</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                       MSE        R2       MAE      MAPE\n",
       "KNN Regressor                     0.999791  0.999937  0.997982  0.999964\n",
       "Random Forest Regressor           0.999920  0.999193  0.999490  0.999992\n",
       "Decision Tree Regressor           0.999853  0.998661  0.998866  1.000000\n",
       "Lasso                             0.000000  0.127536  0.000000  0.222729\n",
       "Elastic Net                       0.000000  0.127536  0.000000  0.222729\n",
       "XGBoost                           0.999945  0.999758  0.998848  0.999885\n",
       "Extra Trees Regressor             1.000000  1.000000  1.000000  0.999968\n",
       "Linear Regression                 0.945864  0.979451  0.910088  0.965720\n",
       "Ridge                             0.944762  0.978915  0.907605  0.964490\n",
       "Polynomial Regression             0.995527  0.997791  0.973704  0.990659\n",
       "Bagging Regressor                 0.999917  0.998814  0.999325  1.000000\n",
       "Ada Boost Regressor               0.999014  0.998864  0.986420  0.998467\n",
       "Gradient Boosting Regressor       0.999848  0.999952  0.996934  0.999488\n",
       "SVR                               0.724242  0.000000  0.267164  0.000000\n",
       "LightGBM                          0.999952  0.999931  0.998960  0.999908\n",
       "Hist Gradient Boosting Regressor  0.999870  0.999627  0.997710  0.999861\n",
       "CatBoost                          0.999956  0.999851  0.998330  0.999558\n",
       "Bayesian Ridge                    0.945862  0.979450  0.910091  0.965719"
      ]
     },
     "execution_count": 308,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model_data = evaluation.copy()\n",
    "maximize_columns = ['R2']\n",
    "normalized_data = normalize_data(model_data, maximize_columns)\n",
    "normalized_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 309,
   "metadata": {},
   "outputs": [],
   "source": [
    "def calculate_entropy_weights(df):\n",
    "    # 计算每个特征的熵值\n",
    "    k = 1.0 / np.log(len(df))\n",
    "    entropy_values = []\n",
    "    for column in df.columns:\n",
    "        # 计算每个特征的概率分布\n",
    "        p = df[column] / df[column].sum()\n",
    "        # 计算熵值\n",
    "        ent = entropy(p, base=2)\n",
    "        entropy_values.append(ent)\n",
    "    \n",
    "    # 计算权重\n",
    "    weights = [(1 - ent) * k for ent in entropy_values]\n",
    "    weight_sum = sum(weights)\n",
    "    weights = [w / weight_sum for w in weights]\n",
    "    \n",
    "    return weights"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 310,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.2506331968648897,\n",
       " 0.24936348169701825,\n",
       " 0.24805826058927613,\n",
       " 0.25194506084881596]"
      ]
     },
     "execution_count": 310,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Per-criterion weights derived from the normalized evaluation table\n",
     "weights = calculate_entropy_weights(normalized_data)\n",
     "weights"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 311,
   "metadata": {},
   "outputs": [],
   "source": [
    "def calculate_topsis(df, weights):\n",
    "    # 计算理想解和负理想解\n",
    "    ideal_solution = df.max()\n",
    "    negative_ideal_solution = df.min()\n",
    "    \n",
    "    # 计算欧氏距离\n",
    "    positive_dist = np.sqrt(((df - ideal_solution) ** 2).sum(axis=1))\n",
    "    negative_dist = np.sqrt(((df - negative_ideal_solution) ** 2).sum(axis=1))\n",
    "    \n",
    "    # 计算相对接近度\n",
    "    topsis_scores = negative_dist / (positive_dist + negative_dist)\n",
    "    \n",
    "    return topsis_scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 312,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Extra Trees Regressor               0.999984\n",
       "Random Forest Regressor             0.999521\n",
       "LightGBM                            0.999476\n",
       "XGBoost                             0.999408\n",
       "Bagging Regressor                   0.999317\n",
       "CatBoost                            0.999133\n",
       "Decision Tree Regressor             0.999120\n",
       "KNN Regressor                       0.998985\n",
       "Hist Gradient Boosting Regressor    0.998836\n",
       "Gradient Boosting Regressor         0.998445\n",
       "Ada Boost Regressor                 0.993143\n",
       "Polynomial Regression               0.985877\n",
       "Bayesian Ridge                      0.944227\n",
       "Linear Regression                   0.944227\n",
       "Ridge                               0.942750\n",
       "SVR                                 0.323200\n",
       "Elastic Net                         0.122736\n",
       "Lasso                               0.122736\n",
       "dtype: float64"
      ]
     },
     "execution_count": 312,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "topsis_scores = calculate_topsis(normalized_data, weights)\n",
     "# Sort TOPSIS scores in descending order (best model first)\n",
     "topsis_scores = topsis_scores.sort_values(ascending=False)\n",
     "topsis_scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 313,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['Extra Trees Regressor', 'Random Forest Regressor', 'LightGBM', 'XGBoost']"
      ]
     },
     "execution_count": 313,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Build the stacking ensemble model\n",
     "from sklearn.ensemble import StackingRegressor\n",
     "\n",
     "# Select the four top-scoring models from the TOPSIS ranking\n",
     "top_models = list(topsis_scores.head(4).index)\n",
     "top_models\n",
     "# Tree model: Extra Trees\n",
     "# Boosting algorithm: LightGBM\n",
     "# Nearest-neighbour algorithm: KNN\n",
     "# Linear model: Bayesian Ridge"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 338,
   "metadata": {},
   "outputs": [],
   "source": [
    "# estimators = [\n",
    "#     ('Extra Trees Regressor', ExtraTreesRegressor(random_state=42)),\n",
    "#     ('LightGBM', LGBMRegressor()),\n",
    "#     ('KNN Regressor', KNeighborsRegressor()),\n",
    "#     ('Bayesian Ridge', BayesianRidge()),\n",
    "# ]\n",
    "\n",
    "estimators = [\n",
    "    ('Extra Trees Regressor', ExtraTreesRegressor(random_state=42)),\n",
    "    ('BayesianRidge', BayesianRidge()),\n",
    "]\n",
    "\n",
    "stacking_regressor = StackingRegressor(estimators=estimators, final_estimator=Ridge())\n",
    "\n",
    "\n",
    "multi_output_model = MultiOutputRegressor(stacking_regressor)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 339,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<style>#sk-container-id-21 {\n",
       "  /* Definition of color scheme common for light and dark mode */\n",
       "  --sklearn-color-text: black;\n",
       "  --sklearn-color-line: gray;\n",
       "  /* Definition of color scheme for unfitted estimators */\n",
       "  --sklearn-color-unfitted-level-0: #fff5e6;\n",
       "  --sklearn-color-unfitted-level-1: #f6e4d2;\n",
       "  --sklearn-color-unfitted-level-2: #ffe0b3;\n",
       "  --sklearn-color-unfitted-level-3: chocolate;\n",
       "  /* Definition of color scheme for fitted estimators */\n",
       "  --sklearn-color-fitted-level-0: #f0f8ff;\n",
       "  --sklearn-color-fitted-level-1: #d4ebff;\n",
       "  --sklearn-color-fitted-level-2: #b3dbfd;\n",
       "  --sklearn-color-fitted-level-3: cornflowerblue;\n",
       "\n",
       "  /* Specific color for light theme */\n",
       "  --sklearn-color-text-on-default-background: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, black)));\n",
       "  --sklearn-color-background: var(--sg-background-color, var(--theme-background, var(--jp-layout-color0, white)));\n",
       "  --sklearn-color-border-box: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, black)));\n",
       "  --sklearn-color-icon: #696969;\n",
       "\n",
       "  @media (prefers-color-scheme: dark) {\n",
       "    /* Redefinition of color scheme for dark theme */\n",
       "    --sklearn-color-text-on-default-background: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, white)));\n",
       "    --sklearn-color-background: var(--sg-background-color, var(--theme-background, var(--jp-layout-color0, #111)));\n",
       "    --sklearn-color-border-box: var(--sg-text-color, var(--theme-code-foreground, var(--jp-content-font-color1, white)));\n",
       "    --sklearn-color-icon: #878787;\n",
       "  }\n",
       "}\n",
       "\n",
       "#sk-container-id-21 {\n",
       "  color: var(--sklearn-color-text);\n",
       "}\n",
       "\n",
       "#sk-container-id-21 pre {\n",
       "  padding: 0;\n",
       "}\n",
       "\n",
       "#sk-container-id-21 input.sk-hidden--visually {\n",
       "  border: 0;\n",
       "  clip: rect(1px 1px 1px 1px);\n",
       "  clip: rect(1px, 1px, 1px, 1px);\n",
       "  height: 1px;\n",
       "  margin: -1px;\n",
       "  overflow: hidden;\n",
       "  padding: 0;\n",
       "  position: absolute;\n",
       "  width: 1px;\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-dashed-wrapped {\n",
       "  border: 1px dashed var(--sklearn-color-line);\n",
       "  margin: 0 0.4em 0.5em 0.4em;\n",
       "  box-sizing: border-box;\n",
       "  padding-bottom: 0.4em;\n",
       "  background-color: var(--sklearn-color-background);\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-container {\n",
       "  /* jupyter's `normalize.less` sets `[hidden] { display: none; }`\n",
       "     but bootstrap.min.css set `[hidden] { display: none !important; }`\n",
       "     so we also need the `!important` here to be able to override the\n",
       "     default hidden behavior on the sphinx rendered scikit-learn.org.\n",
       "     See: https://github.com/scikit-learn/scikit-learn/issues/21755 */\n",
       "  display: inline-block !important;\n",
       "  position: relative;\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-text-repr-fallback {\n",
       "  display: none;\n",
       "}\n",
       "\n",
       "div.sk-parallel-item,\n",
       "div.sk-serial,\n",
       "div.sk-item {\n",
       "  /* draw centered vertical line to link estimators */\n",
       "  background-image: linear-gradient(var(--sklearn-color-text-on-default-background), var(--sklearn-color-text-on-default-background));\n",
       "  background-size: 2px 100%;\n",
       "  background-repeat: no-repeat;\n",
       "  background-position: center center;\n",
       "}\n",
       "\n",
       "/* Parallel-specific style estimator block */\n",
       "\n",
       "#sk-container-id-21 div.sk-parallel-item::after {\n",
       "  content: \"\";\n",
       "  width: 100%;\n",
       "  border-bottom: 2px solid var(--sklearn-color-text-on-default-background);\n",
       "  flex-grow: 1;\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-parallel {\n",
       "  display: flex;\n",
       "  align-items: stretch;\n",
       "  justify-content: center;\n",
       "  background-color: var(--sklearn-color-background);\n",
       "  position: relative;\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-parallel-item {\n",
       "  display: flex;\n",
       "  flex-direction: column;\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-parallel-item:first-child::after {\n",
       "  align-self: flex-end;\n",
       "  width: 50%;\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-parallel-item:last-child::after {\n",
       "  align-self: flex-start;\n",
       "  width: 50%;\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-parallel-item:only-child::after {\n",
       "  width: 0;\n",
       "}\n",
       "\n",
       "/* Serial-specific style estimator block */\n",
       "\n",
       "#sk-container-id-21 div.sk-serial {\n",
       "  display: flex;\n",
       "  flex-direction: column;\n",
       "  align-items: center;\n",
       "  background-color: var(--sklearn-color-background);\n",
       "  padding-right: 1em;\n",
       "  padding-left: 1em;\n",
       "}\n",
       "\n",
       "\n",
       "/* Toggleable style: style used for estimator/Pipeline/ColumnTransformer box that is\n",
       "clickable and can be expanded/collapsed.\n",
       "- Pipeline and ColumnTransformer use this feature and define the default style\n",
       "- Estimators will overwrite some part of the style using the `sk-estimator` class\n",
       "*/\n",
       "\n",
       "/* Pipeline and ColumnTransformer style (default) */\n",
       "\n",
       "#sk-container-id-21 div.sk-toggleable {\n",
       "  /* Default theme specific background. It is overwritten whether we have a\n",
       "  specific estimator or a Pipeline/ColumnTransformer */\n",
       "  background-color: var(--sklearn-color-background);\n",
       "}\n",
       "\n",
       "/* Toggleable label */\n",
       "#sk-container-id-21 label.sk-toggleable__label {\n",
       "  cursor: pointer;\n",
       "  display: block;\n",
       "  width: 100%;\n",
       "  margin-bottom: 0;\n",
       "  padding: 0.5em;\n",
       "  box-sizing: border-box;\n",
       "  text-align: center;\n",
       "}\n",
       "\n",
       "#sk-container-id-21 label.sk-toggleable__label-arrow:before {\n",
       "  /* Arrow on the left of the label */\n",
       "  content: \"▸\";\n",
       "  float: left;\n",
       "  margin-right: 0.25em;\n",
       "  color: var(--sklearn-color-icon);\n",
       "}\n",
       "\n",
       "#sk-container-id-21 label.sk-toggleable__label-arrow:hover:before {\n",
       "  color: var(--sklearn-color-text);\n",
       "}\n",
       "\n",
       "/* Toggleable content - dropdown */\n",
       "\n",
       "#sk-container-id-21 div.sk-toggleable__content {\n",
       "  max-height: 0;\n",
       "  max-width: 0;\n",
       "  overflow: hidden;\n",
       "  text-align: left;\n",
       "  /* unfitted */\n",
       "  background-color: var(--sklearn-color-unfitted-level-0);\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-toggleable__content.fitted {\n",
       "  /* fitted */\n",
       "  background-color: var(--sklearn-color-fitted-level-0);\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-toggleable__content pre {\n",
       "  margin: 0.2em;\n",
       "  border-radius: 0.25em;\n",
       "  color: var(--sklearn-color-text);\n",
       "  /* unfitted */\n",
       "  background-color: var(--sklearn-color-unfitted-level-0);\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-toggleable__content.fitted pre {\n",
       "  /* unfitted */\n",
       "  background-color: var(--sklearn-color-fitted-level-0);\n",
       "}\n",
       "\n",
       "#sk-container-id-21 input.sk-toggleable__control:checked~div.sk-toggleable__content {\n",
       "  /* Expand drop-down */\n",
       "  max-height: 200px;\n",
       "  max-width: 100%;\n",
       "  overflow: auto;\n",
       "}\n",
       "\n",
       "#sk-container-id-21 input.sk-toggleable__control:checked~label.sk-toggleable__label-arrow:before {\n",
       "  content: \"▾\";\n",
       "}\n",
       "\n",
       "/* Pipeline/ColumnTransformer-specific style */\n",
       "\n",
       "#sk-container-id-21 div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {\n",
       "  color: var(--sklearn-color-text);\n",
       "  background-color: var(--sklearn-color-unfitted-level-2);\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-label.fitted input.sk-toggleable__control:checked~label.sk-toggleable__label {\n",
       "  background-color: var(--sklearn-color-fitted-level-2);\n",
       "}\n",
       "\n",
       "/* Estimator-specific style */\n",
       "\n",
       "/* Colorize estimator box */\n",
       "#sk-container-id-21 div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {\n",
       "  /* unfitted */\n",
       "  background-color: var(--sklearn-color-unfitted-level-2);\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-estimator.fitted input.sk-toggleable__control:checked~label.sk-toggleable__label {\n",
       "  /* fitted */\n",
       "  background-color: var(--sklearn-color-fitted-level-2);\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-label label.sk-toggleable__label,\n",
       "#sk-container-id-21 div.sk-label label {\n",
       "  /* The background is the default theme color */\n",
       "  color: var(--sklearn-color-text-on-default-background);\n",
       "}\n",
       "\n",
       "/* On hover, darken the color of the background */\n",
       "#sk-container-id-21 div.sk-label:hover label.sk-toggleable__label {\n",
       "  color: var(--sklearn-color-text);\n",
       "  background-color: var(--sklearn-color-unfitted-level-2);\n",
       "}\n",
       "\n",
       "/* Label box, darken color on hover, fitted */\n",
       "#sk-container-id-21 div.sk-label.fitted:hover label.sk-toggleable__label.fitted {\n",
       "  color: var(--sklearn-color-text);\n",
       "  background-color: var(--sklearn-color-fitted-level-2);\n",
       "}\n",
       "\n",
       "/* Estimator label */\n",
       "\n",
       "#sk-container-id-21 div.sk-label label {\n",
       "  font-family: monospace;\n",
       "  font-weight: bold;\n",
       "  display: inline-block;\n",
       "  line-height: 1.2em;\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-label-container {\n",
       "  text-align: center;\n",
       "}\n",
       "\n",
       "/* Estimator-specific */\n",
       "#sk-container-id-21 div.sk-estimator {\n",
       "  font-family: monospace;\n",
       "  border: 1px dotted var(--sklearn-color-border-box);\n",
       "  border-radius: 0.25em;\n",
       "  box-sizing: border-box;\n",
       "  margin-bottom: 0.5em;\n",
       "  /* unfitted */\n",
       "  background-color: var(--sklearn-color-unfitted-level-0);\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-estimator.fitted {\n",
       "  /* fitted */\n",
       "  background-color: var(--sklearn-color-fitted-level-0);\n",
       "}\n",
       "\n",
       "/* on hover */\n",
       "#sk-container-id-21 div.sk-estimator:hover {\n",
       "  /* unfitted */\n",
       "  background-color: var(--sklearn-color-unfitted-level-2);\n",
       "}\n",
       "\n",
       "#sk-container-id-21 div.sk-estimator.fitted:hover {\n",
       "  /* fitted */\n",
       "  background-color: var(--sklearn-color-fitted-level-2);\n",
       "}\n",
       "\n",
       "/* Specification for estimator info (e.g. \"i\" and \"?\") */\n",
       "\n",
       "/* Common style for \"i\" and \"?\" */\n",
       "\n",
       ".sk-estimator-doc-link,\n",
       "a:link.sk-estimator-doc-link,\n",
       "a:visited.sk-estimator-doc-link {\n",
       "  float: right;\n",
       "  font-size: smaller;\n",
       "  line-height: 1em;\n",
       "  font-family: monospace;\n",
       "  background-color: var(--sklearn-color-background);\n",
       "  border-radius: 1em;\n",
       "  height: 1em;\n",
       "  width: 1em;\n",
       "  text-decoration: none !important;\n",
       "  margin-left: 1ex;\n",
       "  /* unfitted */\n",
       "  border: var(--sklearn-color-unfitted-level-1) 1pt solid;\n",
       "  color: var(--sklearn-color-unfitted-level-1);\n",
       "}\n",
       "\n",
       ".sk-estimator-doc-link.fitted,\n",
       "a:link.sk-estimator-doc-link.fitted,\n",
       "a:visited.sk-estimator-doc-link.fitted {\n",
       "  /* fitted */\n",
       "  border: var(--sklearn-color-fitted-level-1) 1pt solid;\n",
       "  color: var(--sklearn-color-fitted-level-1);\n",
       "}\n",
       "\n",
       "/* On hover */\n",
       "div.sk-estimator:hover .sk-estimator-doc-link:hover,\n",
       ".sk-estimator-doc-link:hover,\n",
       "div.sk-label-container:hover .sk-estimator-doc-link:hover,\n",
       ".sk-estimator-doc-link:hover {\n",
       "  /* unfitted */\n",
       "  background-color: var(--sklearn-color-unfitted-level-3);\n",
       "  color: var(--sklearn-color-background);\n",
       "  text-decoration: none;\n",
       "}\n",
       "\n",
       "div.sk-estimator.fitted:hover .sk-estimator-doc-link.fitted:hover,\n",
       ".sk-estimator-doc-link.fitted:hover,\n",
       "div.sk-label-container:hover .sk-estimator-doc-link.fitted:hover,\n",
       ".sk-estimator-doc-link.fitted:hover {\n",
       "  /* fitted */\n",
       "  background-color: var(--sklearn-color-fitted-level-3);\n",
       "  color: var(--sklearn-color-background);\n",
       "  text-decoration: none;\n",
       "}\n",
       "\n",
       "/* Span, style for the box shown on hovering the info icon */\n",
       ".sk-estimator-doc-link span {\n",
       "  display: none;\n",
       "  z-index: 9999;\n",
       "  position: relative;\n",
       "  font-weight: normal;\n",
       "  right: .2ex;\n",
       "  padding: .5ex;\n",
       "  margin: .5ex;\n",
       "  width: min-content;\n",
       "  min-width: 20ex;\n",
       "  max-width: 50ex;\n",
       "  color: var(--sklearn-color-text);\n",
       "  box-shadow: 2pt 2pt 4pt #999;\n",
       "  /* unfitted */\n",
       "  background: var(--sklearn-color-unfitted-level-0);\n",
       "  border: .5pt solid var(--sklearn-color-unfitted-level-3);\n",
       "}\n",
       "\n",
       ".sk-estimator-doc-link.fitted span {\n",
       "  /* fitted */\n",
       "  background: var(--sklearn-color-fitted-level-0);\n",
       "  border: var(--sklearn-color-fitted-level-3);\n",
       "}\n",
       "\n",
       ".sk-estimator-doc-link:hover span {\n",
       "  display: block;\n",
       "}\n",
       "\n",
       "/* \"?\"-specific style due to the `<a>` HTML tag */\n",
       "\n",
       "#sk-container-id-21 a.estimator_doc_link {\n",
       "  float: right;\n",
       "  font-size: 1rem;\n",
       "  line-height: 1em;\n",
       "  font-family: monospace;\n",
       "  background-color: var(--sklearn-color-background);\n",
       "  border-radius: 1rem;\n",
       "  height: 1rem;\n",
       "  width: 1rem;\n",
       "  text-decoration: none;\n",
       "  /* unfitted */\n",
       "  color: var(--sklearn-color-unfitted-level-1);\n",
       "  border: var(--sklearn-color-unfitted-level-1) 1pt solid;\n",
       "}\n",
       "\n",
       "#sk-container-id-21 a.estimator_doc_link.fitted {\n",
       "  /* fitted */\n",
       "  border: var(--sklearn-color-fitted-level-1) 1pt solid;\n",
       "  color: var(--sklearn-color-fitted-level-1);\n",
       "}\n",
       "\n",
       "/* On hover */\n",
       "#sk-container-id-21 a.estimator_doc_link:hover {\n",
       "  /* unfitted */\n",
       "  background-color: var(--sklearn-color-unfitted-level-3);\n",
       "  color: var(--sklearn-color-background);\n",
       "  text-decoration: none;\n",
       "}\n",
       "\n",
       "#sk-container-id-21 a.estimator_doc_link.fitted:hover {\n",
       "  /* fitted */\n",
       "  background-color: var(--sklearn-color-fitted-level-3);\n",
       "}\n",
       "</style><div id=\"sk-container-id-21\" class=\"sk-top-container\"><div class=\"sk-text-repr-fallback\"><pre>MultiOutputRegressor(estimator=StackingRegressor(estimators=[(&#x27;Extra Trees &#x27;\n",
       "                                                              &#x27;Regressor&#x27;,\n",
       "                                                              ExtraTreesRegressor(random_state=42)),\n",
       "                                                             (&#x27;BayesianRidge&#x27;,\n",
       "                                                              BayesianRidge())],\n",
       "                                                 final_estimator=Ridge()))</pre><b>In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook. <br />On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.</b></div><div class=\"sk-container\" hidden><div class=\"sk-item sk-dashed-wrapped\"><div class=\"sk-label-container\"><div class=\"sk-label fitted sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"sk-estimator-id-135\" type=\"checkbox\" ><label for=\"sk-estimator-id-135\" class=\"sk-toggleable__label fitted sk-toggleable__label-arrow fitted\">&nbsp;&nbsp;MultiOutputRegressor<a class=\"sk-estimator-doc-link fitted\" rel=\"noreferrer\" target=\"_blank\" href=\"https://scikit-learn.org/1.5/modules/generated/sklearn.multioutput.MultiOutputRegressor.html\">?<span>Documentation for MultiOutputRegressor</span></a><span class=\"sk-estimator-doc-link fitted\">i<span>Fitted</span></span></label><div class=\"sk-toggleable__content fitted\"><pre>MultiOutputRegressor(estimator=StackingRegressor(estimators=[(&#x27;Extra Trees &#x27;\n",
       "                                                              &#x27;Regressor&#x27;,\n",
       "                                                              ExtraTreesRegressor(random_state=42)),\n",
       "                                                             (&#x27;BayesianRidge&#x27;,\n",
       "                                                              BayesianRidge())],\n",
       "                                                 final_estimator=Ridge()))</pre></div> </div></div><div class=\"sk-parallel\"><div class=\"sk-parallel-item\"><div class=\"sk-item\"><div class=\"sk-label-container\"><div class=\"sk-label fitted sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"sk-estimator-id-136\" type=\"checkbox\" ><label for=\"sk-estimator-id-136\" class=\"sk-toggleable__label fitted sk-toggleable__label-arrow fitted\">estimator: StackingRegressor</label><div class=\"sk-toggleable__content fitted\"><pre>StackingRegressor(estimators=[(&#x27;Extra Trees Regressor&#x27;,\n",
       "                               ExtraTreesRegressor(random_state=42)),\n",
       "                              (&#x27;BayesianRidge&#x27;, BayesianRidge())],\n",
       "                  final_estimator=Ridge())</pre></div> </div></div><div class=\"sk-serial\"><div class=\"sk-item\"><div class=\"sk-serial\"><div class=\"sk-item\"><div class=\"sk-parallel\"><div class=\"sk-parallel-item\"><div class=\"sk-item\"><div class=\"sk-label-container\"><div class=\"sk-label fitted sk-toggleable\"><label>Extra Trees Regressor</label></div></div><div class=\"sk-serial\"><div class=\"sk-item\"><div class=\"sk-estimator fitted sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"sk-estimator-id-137\" type=\"checkbox\" ><label for=\"sk-estimator-id-137\" class=\"sk-toggleable__label fitted sk-toggleable__label-arrow fitted\">&nbsp;ExtraTreesRegressor<a class=\"sk-estimator-doc-link fitted\" rel=\"noreferrer\" target=\"_blank\" href=\"https://scikit-learn.org/1.5/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html\">?<span>Documentation for ExtraTreesRegressor</span></a></label><div class=\"sk-toggleable__content fitted\"><pre>ExtraTreesRegressor(random_state=42)</pre></div> </div></div></div></div></div><div class=\"sk-parallel-item\"><div class=\"sk-item\"><div class=\"sk-label-container\"><div class=\"sk-label fitted sk-toggleable\"><label>BayesianRidge</label></div></div><div class=\"sk-serial\"><div class=\"sk-item\"><div class=\"sk-estimator fitted sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"sk-estimator-id-138\" type=\"checkbox\" ><label for=\"sk-estimator-id-138\" class=\"sk-toggleable__label fitted sk-toggleable__label-arrow fitted\">&nbsp;BayesianRidge<a class=\"sk-estimator-doc-link fitted\" rel=\"noreferrer\" target=\"_blank\" href=\"https://scikit-learn.org/1.5/modules/generated/sklearn.linear_model.BayesianRidge.html\">?<span>Documentation for BayesianRidge</span></a></label><div class=\"sk-toggleable__content fitted\"><pre>BayesianRidge()</pre></div> </div></div></div></div></div></div></div><div class=\"sk-item\"><div class=\"sk-parallel\"><div 
class=\"sk-parallel-item\"><div class=\"sk-item\"><div class=\"sk-label-container\"><div class=\"sk-label fitted sk-toggleable\"><label>final_estimator</label></div></div><div class=\"sk-serial\"><div class=\"sk-item\"><div class=\"sk-estimator fitted sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"sk-estimator-id-139\" type=\"checkbox\" ><label for=\"sk-estimator-id-139\" class=\"sk-toggleable__label fitted sk-toggleable__label-arrow fitted\">&nbsp;Ridge<a class=\"sk-estimator-doc-link fitted\" rel=\"noreferrer\" target=\"_blank\" href=\"https://scikit-learn.org/1.5/modules/generated/sklearn.linear_model.Ridge.html\">?<span>Documentation for Ridge</span></a></label><div class=\"sk-toggleable__content fitted\"><pre>Ridge()</pre></div> </div></div></div></div></div></div></div></div></div></div></div></div></div></div></div></div>"
      ],
      "text/plain": [
       "MultiOutputRegressor(estimator=StackingRegressor(estimators=[('Extra Trees '\n",
       "                                                              'Regressor',\n",
       "                                                              ExtraTreesRegressor(random_state=42)),\n",
       "                                                             ('BayesianRidge',\n",
       "                                                              BayesianRidge())],\n",
       "                                                 final_estimator=Ridge()))"
      ]
     },
     "execution_count": 339,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Fit the stacking ensemble on the training split\n",
     "multi_output_model.fit(X_train, Y_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 340,
   "metadata": {},
   "outputs": [],
   "source": [
    "name = 'Stacking Regressor'\n",
    "Y_pred = multi_output_model.predict(X_test)\n",
    "# mse\n",
    "mse[name] = mean_squared_error(Y_test, Y_pred)\n",
    "# r2\n",
    "r2[name] = r2_score(Y_test, Y_pred)\n",
    "# mape\n",
    "# mape[name] = mean_absolute_percentage_error(Y_test, Y_pred)\n",
    "# mae\n",
    "mae[name] = mean_absolute_error(Y_test, Y_pred)\n",
    "# draw_learning_curve(multi_output_model, name, X_train, Y_train, 'neg_mean_squared_error')\n",
    "# draw_learning_curve(multi_output_model, name, X_train, Y_train, 'r2')\n",
    "# draw_learning_curve(multi_output_model, name, X_train, Y_train, 'neg_mean_absolute_error')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 341,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>MSE</th>\n",
       "      <th>R2</th>\n",
       "      <th>MAE</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>KNN Regressor</th>\n",
       "      <td>0.000006</td>\n",
       "      <td>0.999361</td>\n",
       "      <td>0.000321</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Random Forest Regressor</th>\n",
       "      <td>0.000003</td>\n",
       "      <td>0.998508</td>\n",
       "      <td>0.000191</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Decision Tree Regressor</th>\n",
       "      <td>0.000004</td>\n",
       "      <td>0.997899</td>\n",
       "      <td>0.000245</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Lasso</th>\n",
       "      <td>0.020934</td>\n",
       "      <td>-0.000282</td>\n",
       "      <td>0.086060</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Elastic Net</th>\n",
       "      <td>0.020934</td>\n",
       "      <td>-0.000282</td>\n",
       "      <td>0.086060</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>XGBoost</th>\n",
       "      <td>0.000002</td>\n",
       "      <td>0.999155</td>\n",
       "      <td>0.000247</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Extra Trees Regressor</th>\n",
       "      <td>0.000001</td>\n",
       "      <td>0.999433</td>\n",
       "      <td>0.000148</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Linear Regression</th>\n",
       "      <td>0.001134</td>\n",
       "      <td>0.975886</td>\n",
       "      <td>0.007872</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Ridge</th>\n",
       "      <td>0.001157</td>\n",
       "      <td>0.975272</td>\n",
       "      <td>0.008085</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Polynomial Regression</th>\n",
       "      <td>0.000095</td>\n",
       "      <td>0.996902</td>\n",
       "      <td>0.002407</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Bagging Regressor</th>\n",
       "      <td>0.000003</td>\n",
       "      <td>0.998074</td>\n",
       "      <td>0.000205</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Ada Boost Regressor</th>\n",
       "      <td>0.000022</td>\n",
       "      <td>0.998131</td>\n",
       "      <td>0.001314</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Gradient Boosting Regressor</th>\n",
       "      <td>0.000004</td>\n",
       "      <td>0.999378</td>\n",
       "      <td>0.000411</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>SVR</th>\n",
       "      <td>0.005774</td>\n",
       "      <td>-0.146419</td>\n",
       "      <td>0.063107</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>LightGBM</th>\n",
       "      <td>0.000002</td>\n",
       "      <td>0.999354</td>\n",
       "      <td>0.000237</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Hist Gradient Boosting Regressor</th>\n",
       "      <td>0.000004</td>\n",
       "      <td>0.999005</td>\n",
       "      <td>0.000344</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>CatBoost</th>\n",
       "      <td>0.000002</td>\n",
       "      <td>0.999262</td>\n",
       "      <td>0.000291</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Bayesian Ridge</th>\n",
       "      <td>0.001134</td>\n",
       "      <td>0.975886</td>\n",
       "      <td>0.007872</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Stacking Regressor</th>\n",
       "      <td>0.000008</td>\n",
       "      <td>0.815420</td>\n",
       "      <td>0.001300</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                       MSE        R2       MAE\n",
       "KNN Regressor                     0.000006  0.999361  0.000321\n",
       "Random Forest Regressor           0.000003  0.998508  0.000191\n",
       "Decision Tree Regressor           0.000004  0.997899  0.000245\n",
       "Lasso                             0.020934 -0.000282  0.086060\n",
       "Elastic Net                       0.020934 -0.000282  0.086060\n",
       "XGBoost                           0.000002  0.999155  0.000247\n",
       "Extra Trees Regressor             0.000001  0.999433  0.000148\n",
       "Linear Regression                 0.001134  0.975886  0.007872\n",
       "Ridge                             0.001157  0.975272  0.008085\n",
       "Polynomial Regression             0.000095  0.996902  0.002407\n",
       "Bagging Regressor                 0.000003  0.998074  0.000205\n",
       "Ada Boost Regressor               0.000022  0.998131  0.001314\n",
       "Gradient Boosting Regressor       0.000004  0.999378  0.000411\n",
       "SVR                               0.005774 -0.146419  0.063107\n",
       "LightGBM                          0.000002  0.999354  0.000237\n",
       "Hist Gradient Boosting Regressor  0.000004  0.999005  0.000344\n",
       "CatBoost                          0.000002  0.999262  0.000291\n",
       "Bayesian Ridge                    0.001134  0.975886  0.007872\n",
       "Stacking Regressor                0.000008  0.815420  0.001300"
      ]
     },
     "execution_count": 341,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Assemble the per-model MSE / R2 / MAE dicts into one comparison table\n",
    "# (models as rows, metrics as columns)\n",
    "evaluation = pd.DataFrame([mse, r2, mae], index=['MSE', 'R2', 'MAE']).T\n",
    "evaluation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 342,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'KNN Regressor': 5.528190367404337e-06,\n",
       " 'Random Forest Regressor': 2.8174354790892455e-06,\n",
       " 'Decision Tree Regressor': 4.230420589951267e-06,\n",
       " 'Lasso': 0.020934441796832105,\n",
       " 'Elastic Net': 0.020934441796832105,\n",
       " 'XGBoost': 2.2939647190995077e-06,\n",
       " 'Extra Trees Regressor': 1.1509644435514137e-06,\n",
       " 'Linear Regression': 0.0011343882020023721,\n",
       " 'Ridge': 0.001157472158898901,\n",
       " 'Polynomial Regression': 9.478091382573779e-05,\n",
       " 'Bagging Regressor': 2.8835102831157015e-06,\n",
       " 'Ada Boost Regressor': 2.1787178080725862e-05,\n",
       " 'Gradient Boosting Regressor': 4.333139486135479e-06,\n",
       " 'SVR': 0.005773681250300531,\n",
       " 'LightGBM': 2.1535195216130873e-06,\n",
       " 'Hist Gradient Boosting Regressor': 3.882174154477993e-06,\n",
       " 'CatBoost': 2.0675619747997908e-06,\n",
       " 'Bayesian Ridge': 0.0011344444024724287,\n",
       " 'Stacking Regressor': 8.149512441282864e-06}"
      ]
     },
     "execution_count": 342,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the raw per-model MSE values behind the comparison table\n",
    "mse"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 343,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.002854735091262036"
      ]
     },
     "execution_count": 343,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# RMSE of the Stacking Regressor (square root of its test-set MSE).\n",
    "# `math` is already imported in the first cell, so the duplicate\n",
    "# in-cell `import math` has been removed (keep imports at the top).\n",
    "math.sqrt(mse['Stacking Regressor'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 344,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.0010728301093609434"
      ]
     },
     "execution_count": 344,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# RMSE of the Extra Trees regressor (lowest MSE in the comparison table)\n",
    "math.sqrt(mse['Extra Trees Regressor'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 355,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.008794075687970271"
      ]
     },
     "execution_count": 355,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# K-fold cross-validation\n",
    "from sklearn.model_selection import cross_val_score\n",
    "\n",
    "# 10-fold CV on the full data; sklearn returns negated MSE, so negate and\n",
    "# take the square root to get per-fold RMSE, then average across folds.\n",
    "# NOTE(review): `multi_output_model` is defined earlier in the notebook.\n",
    "scores = cross_val_score(multi_output_model, X, Y, cv=10, scoring='neg_mean_squared_error')\n",
    "scores = np.sqrt(-scores)\n",
    "scores.mean()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 356,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([0.00780779, 0.00098579, 0.00091777, 0.00092949, 0.0010044 ,\n",
       "       0.00122946, 0.02229253, 0.03302013, 0.01873292, 0.00102047])"
      ]
     },
     "execution_count": 356,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Per-fold RMSE; the spread across folds is large (~0.0009 to ~0.033)\n",
    "scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 363,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "-0.3920293399733893"
      ]
     },
     "execution_count": 363,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Same 10-fold CV, scored with R2 instead of MSE\n",
    "scores = cross_val_score(multi_output_model, X, Y, cv=10, scoring='r2')\n",
    "scores.mean()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 364,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([ 0.76725733,  0.        ,  0.        ,  0.        ,  0.        ,\n",
       "        0.        ,  0.70886074, -6.10870004,  0.71228857,  0.        ])"
      ]
     },
     "execution_count": 364,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Per-fold R2; several folds score exactly 0 and one is strongly negative,\n",
    "# which suggests the folds are not homogeneous -- worth investigating\n",
    "scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 359,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.007886676049097442"
      ]
     },
     "execution_count": 359,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Validate the Extra Trees model with the same 10-fold RMSE protocol\n",
    "scores = cross_val_score(ExtraTreesRegressor(random_state=42), X, Y, cv=10, scoring='neg_mean_squared_error')\n",
    "scores = np.sqrt(-scores)\n",
    "scores.mean()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 360,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([0.00497945, 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.02629104, 0.03183502, 0.01576125, 0.        ])"
      ]
     },
     "execution_count": 360,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Per-fold RMSE for Extra Trees; several folds report exactly 0 error,\n",
    "# which is unusually perfect -- verify there is no leakage between folds\n",
    "scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 361,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.9454217145389971"
      ]
     },
     "execution_count": 361,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 10-fold CV of Extra Trees scored with R2\n",
    "scores = cross_val_score(ExtraTreesRegressor(random_state=42), X, Y, cv=10, scoring='r2')\n",
    "scores.mean()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 362,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([0.99545358, 1.        , 1.        , 1.        , 1.        ,\n",
       "       1.        , 0.91469015, 0.5512332 , 0.99284021, 1.        ])"
      ]
     },
     "execution_count": 362,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Per-fold R2 for Extra Trees; multiple folds score exactly 1.0, which may\n",
    "# indicate duplicated or near-duplicated samples across folds -- verify\n",
    "scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.21"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
