{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 模型构建\n",
    "- 针对训练集 train + target 以及测试集 test，最终给出预测结果\n",
    "- 针对两种方式的训练：\n",
    "- 1. 利用交叉CV5次，以及早停机制，建立5个模型 CV值在11/12左右  stackingCV 结果：12.263240421771934\n",
    "- 2. 利用固定的模型调节出参数，如LGBMRegressor 进行CV调参，CV值在20.85 \n",
    "- 可以看到还是第一种方式占优"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a toy dataset.\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.datasets import load_boston\n",
    "# NOTE(review): load_boston was removed in scikit-learn 1.2 — this cell\n",
    "# requires scikit-learn < 1.2 (or swap in another regression dataset).\n",
    "\n",
    "X,y = load_boston(return_X_y = True)\n",
    "X = pd.DataFrame(X,columns=['col_'+ str(i) for i in range(X.shape[1]) ])\n",
    "y = pd.DataFrame(y,columns=['target'])\n",
    "\n",
    "# Build a train set (features + target) and a test set without the target.\n",
    "train = pd.concat([X,y],axis=1)\n",
    "test = X.iloc[:10,:]\n",
    "\n",
    "# train = features + target\n",
    "# test  = features only\n",
    "\n",
    "x_train = X.copy()\n",
    "y_train = y.copy()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### LGB"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import lightgbm as lgb\n",
    "from sklearn.model_selection import KFold, RepeatedKFold\n",
    "from sklearn.metrics import mean_squared_error as mse\n",
    "\n",
    "\n",
    "import warnings\n",
    "warnings.simplefilter('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def lgb_cv(train, test, params, fit_params,cat_features, feature_names, nfold = 5, seed = 0):\n",
    "    \"\"\"Train LightGBM with nfold-CV and early stopping; return predictions.\n",
    "\n",
    "    train: DataFrame with feature_names columns plus a 'target' column.\n",
    "    test: DataFrame with feature_names columns (no target).\n",
    "    params / fit_params: forwarded to lgb.train (fit_params carries\n",
    "        num_boost_round, early_stopping_rounds, verbose_eval).\n",
    "    Returns (test_pred, train_pred): test_pred averages the nfold fold\n",
    "    models; train_pred holds the out-of-fold predictions (for stacking).\n",
    "    \"\"\"\n",
    "    # Holders for the out-of-fold and averaged test predictions.\n",
    "    train_pred = pd.DataFrame({'true': train['target'],'pred': np.zeros(len(train))})\n",
    "    test_pred = pd.DataFrame({'pred': np.zeros(len(test))})\n",
    "    \n",
    "    kfolder = KFold(n_splits=nfold, shuffle=True, random_state=seed)\n",
    "    for fold_id, (trn_idx, val_idx) in enumerate(kfolder.split(train['target'])):\n",
    "        lgb_trn = lgb.Dataset(\n",
    "            data=train.iloc[trn_idx][feature_names],\n",
    "            label=train.iloc[trn_idx]['target'],\n",
    "            categorical_feature=cat_features,\n",
    "            feature_name=feature_names)\n",
    "        lgb_val = lgb.Dataset(\n",
    "            data=train.iloc[val_idx][feature_names],\n",
    "            label=train.iloc[val_idx]['target'],\n",
    "            categorical_feature=cat_features,\n",
    "            feature_name=feature_names)\n",
    "        \n",
    "        # Early stopping monitors the validation fold; best_iteration is reused below.\n",
    "        lgb_reg = lgb.train(params=params, train_set=lgb_trn, **fit_params,valid_sets=[lgb_trn, lgb_val])\n",
    "        val_pred = lgb_reg.predict(\n",
    "            train.iloc[val_idx][feature_names],\n",
    "            num_iteration=lgb_reg.best_iteration)\n",
    "        \n",
    "        # val_idx are positional KFold indices; .loc works here because the\n",
    "        # frame has a default RangeIndex — TODO confirm for other indexes.\n",
    "        train_pred.loc[val_idx, 'pred'] = val_pred\n",
    "       \n",
    "        print('='*50,fold_id,'='*50)\n",
    "        print('LOSS',mse(train.iloc[val_idx]['target'], val_pred))\n",
    "        # Average each fold model's test prediction into the final estimate.\n",
    "        test_pred['pred'] += lgb_reg.predict(\n",
    "            test[feature_names], num_iteration=lgb_reg.best_iteration) / nfold\n",
    "    print('\\nCV LOSS:', mse(train_pred['true'], train_pred['pred']))\n",
    "    return test_pred,train_pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ====== lgb ============================================================\n",
    "# NOTE(review): fit_params is reassigned by the XGB cell below — the two\n",
    "# cells share this name, so run order matters.\n",
    "fit_params = {'num_boost_round': 10000, 'verbose_eval': False,\n",
    "              'early_stopping_rounds': 500}\n",
    "# 'metric': 'mse' is LightGBM's alias for l2 (mean squared error).\n",
    "params_lgb = {'num_leaves': 120, 'max_depth': 7, 'learning_rate': 0.01,\n",
    "              'min_data_in_leaf': 12, # 'min_child_samples': 45,\n",
    "              'objective': 'regression', 'boosting': 'gbdt',\n",
    "              'feature_fraction': 0.8, 'bagging_freq': 5,\n",
    "              'bagging_fraction': 0.8, 'bagging_seed': 19950520,\n",
    "              'metric': 'mse', 'lambda_l1': 0.1, 'verbosity': -1}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================================================== 0 ==================================================\n",
      "LOSS 23.709206491955428\n",
      "================================================== 1 ==================================================\n",
      "LOSS 5.869069557844972\n",
      "================================================== 2 ==================================================\n",
      "LOSS 12.192113973144219\n",
      "================================================== 3 ==================================================\n",
      "LOSS 11.163614353518408\n",
      "================================================== 4 ==================================================\n",
      "LOSS 6.089440613104386\n",
      "\n",
      "CV LOSS: 11.828215712328586\n"
     ]
    }
   ],
   "source": [
    "# Run the LightGBM CV pipeline.\n",
    "cat_features = []\n",
    "# Use every feature column (all but the trailing 'target').  The original\n",
    "# slice [1:-1] silently dropped the first feature col_0 — off-by-one fix.\n",
    "feature_names = train.columns.tolist()[:-1]\n",
    "pred_lgb,oof_lgb= lgb_cv(train, test, params_lgb, fit_params,cat_features, feature_names)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### XGB"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "import xgboost as xgb\n",
    "from sklearn.model_selection import KFold, RepeatedKFold\n",
    "from sklearn.metrics import mean_squared_error\n",
    "\n",
    "import warnings\n",
    "warnings.simplefilter('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ==============================================================================\n",
    "# ====== Model Train ===========================================================\n",
    "# ==============================================================================\n",
    "def xgb_cv(train, test, params, fit_params, feature_names, nfold = 5, seed = 0):\n",
    "    \"\"\"Train XGBoost with nfold-CV and early stopping; return predictions.\n",
    "\n",
    "    Mirrors lgb_cv: returns (test_pred, train_pred) where test_pred averages\n",
    "    the nfold fold models and train_pred holds out-of-fold predictions.\n",
    "    \"\"\"\n",
    "    train_pred = pd.DataFrame({'true': train['target'],'pred': np.zeros(len(train))})\n",
    "    test_pred = pd.DataFrame({'pred': np.zeros(len(test))})\n",
    "    \n",
    "    kfolder = KFold(n_splits=nfold, shuffle=True, random_state=seed)\n",
    "    # Test DMatrix is fold-independent, so build it once outside the loop.\n",
    "    xgb_tst = xgb.DMatrix(data=test[feature_names])\n",
    "    \n",
    "    for fold_id, (trn_idx, val_idx) in enumerate(kfolder.split(train['target'])):\n",
    "        xgb_trn = xgb.DMatrix(\n",
    "            train.iloc[trn_idx][feature_names],\n",
    "            train.iloc[trn_idx]['target'])\n",
    "        xgb_val = xgb.DMatrix(\n",
    "            train.iloc[val_idx][feature_names],\n",
    "            train.iloc[val_idx]['target'])\n",
    "        \n",
    "        xgb_reg = xgb.train(params=params, dtrain=xgb_trn, **fit_params,\n",
    "                  evals=[(xgb_trn, 'train'), (xgb_val, 'valid')])\n",
    "        # NOTE(review): ntree_limit is deprecated in XGBoost >= 1.4 (use\n",
    "        # iteration_range instead) — confirm against the installed version.\n",
    "        val_pred = xgb_reg.predict(\n",
    "            xgb.DMatrix(train.iloc[val_idx][feature_names]),\n",
    "            ntree_limit=xgb_reg.best_ntree_limit)\n",
    "        train_pred.loc[val_idx, 'pred'] = val_pred\n",
    "        \n",
    "        print('='*50,fold_id,'='*50)\n",
    "        print('LOSS',mse(train.iloc[val_idx]['target'], val_pred))\n",
    "        # Average each fold model's test prediction into the final estimate.\n",
    "        test_pred['pred'] += xgb_reg.predict(\n",
    "            xgb_tst, ntree_limit=xgb_reg.best_ntree_limit) / nfold\n",
    "    print('CV LOSS:', mse(train_pred['true'], train_pred['pred']))\n",
    "    return test_pred,train_pred\n",
    "\n",
    "\n",
    "# ==============================================================================\n",
    "# ====== 模型训练参数 ============================================================\n",
    "# ==============================================================================\n",
    "fit_params = {'num_boost_round': 10000,\n",
    "              'verbose_eval': False, #500 每隔500轮输出日志\n",
    "              'early_stopping_rounds': 500}\n",
    "\n",
    "\n",
    "params_xgb = {'eta': 0.005, 'max_depth': 7, 'subsample': 0.8,\n",
    "              'booster': 'gbtree', 'colsample_bytree': 0.8,\n",
    "              'reg_lambda': 0.1,\n",
    "              #  'reg_alpha': 0.01, 'gamma':0.01,\n",
    "              'objective': 'reg:linear', 'silent': True, 'nthread': 4}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================================================== 0 ==================================================\n",
      "LOSS 18.528215744762566\n",
      "================================================== 1 ==================================================\n",
      "LOSS 6.724081225985191\n",
      "================================================== 2 ==================================================\n",
      "LOSS 9.570231486771021\n",
      "================================================== 3 ==================================================\n",
      "LOSS 21.82931497440645\n",
      "================================================== 4 ==================================================\n",
      "LOSS 6.897535312270568\n",
      "CV LOSS: 12.721374444483276\n"
     ]
    }
   ],
   "source": [
    "# Run the XGBoost CV pipeline.\n",
    "# Use every feature column (all but the trailing 'target').  The original\n",
    "# slice [1:-1] silently dropped the first feature col_0 — off-by-one fix.\n",
    "feature_names = train.columns.tolist()[:-1]\n",
    "pred_xgb,oof_xgb = xgb_cv(train, test, params_xgb, fit_params, feature_names)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Stacking \n",
    "- 集成学习-模型融合学习笔记（附Python代码）  https://blog.csdn.net/u012735708/article/details/82349731"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.linear_model import BayesianRidge"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "The Stacking Loss 12.263240421771934\n"
     ]
    }
   ],
   "source": [
    "'''\n",
    "根据模型构建汇总LGB/XGB的输出结果 ， 需要model*2 + 1个参数 \n",
    "模型1训练集预测结果，测试集预测结果:   oof_lgb、pred_lgb\n",
    "模型2训练集预测结果，测试集预测结果:   oof_xgb、pred_xgb\n",
    "训练集真实结果:                        train['target']\n",
    "'''\n",
    "# ======   Stacking   ======\n",
    "pred_tst = pred_lgb.copy()\n",
    "# Level-2 design matrices: one column per base model (OOF for train, averaged for test).\n",
    "stack_trn = np.vstack([oof_lgb['pred'], oof_xgb['pred']]).transpose()\n",
    "stack_tst = np.vstack([pred_lgb['pred'], pred_xgb['pred']]).transpose()\n",
    "\n",
    "# random_state is meaningless without shuffle=True (and raises a ValueError\n",
    "# in scikit-learn >= 0.24); the contiguous splits below are unchanged.\n",
    "stack_folds = KFold(n_splits=3)\n",
    "stack_oof = np.zeros(stack_trn.shape[0])\n",
    "pred_tst['pred'] = np.zeros(stack_tst.shape[0])\n",
    "\n",
    "for _fold, (trn_idx, val_idx) in enumerate(\n",
    "        stack_folds.split(stack_trn, train['target'])):\n",
    "    trn_x, trn_y = stack_trn[trn_idx], train['target'].iloc[trn_idx].values\n",
    "    val_x, val_y = stack_trn[val_idx], train['target'].iloc[val_idx].values\n",
    "\n",
    "    # Level-2 learner: Bayesian ridge over the two base-model columns.\n",
    "    clf_3 = BayesianRidge()\n",
    "    clf_3.fit(trn_x, trn_y)\n",
    "\n",
    "    stack_oof[val_idx] = clf_3.predict(val_x)\n",
    "    pred_tst['pred'] += clf_3.predict(stack_tst) / 3\n",
    "print('\\nThe Stacking Loss', mse(train['target'].values, stack_oof))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>pred</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>0</td>\n",
       "      <td>24.718267</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>21.696088</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>2</td>\n",
       "      <td>34.845866</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>3</td>\n",
       "      <td>34.453115</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4</td>\n",
       "      <td>35.155403</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>5</td>\n",
       "      <td>27.991400</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>6</td>\n",
       "      <td>21.884563</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>7</td>\n",
       "      <td>23.174746</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>8</td>\n",
       "      <td>16.296411</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9</td>\n",
       "      <td>18.970363</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "        pred\n",
       "0  24.718267\n",
       "1  21.696088\n",
       "2  34.845866\n",
       "3  34.453115\n",
       "4  35.155403\n",
       "5  27.991400\n",
       "6  21.884563\n",
       "7  23.174746\n",
       "8  16.296411\n",
       "9  18.970363"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pred_tst"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 模型融合 \n",
    "- 如果采用模型融合方式，则不能使用lgb_cv这种利用早停确定5个模型，分别预测并得到最终结果了（其实可行，但是需要改造）\n",
    "- 只能采用确定的模型，进行训练并得到最终结果"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Weight Average 权重平均"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.linear_model import Ridge\n",
    "from sklearn.linear_model import Lasso\n",
    "from sklearn.svm import SVR\n",
    "from sklearn.kernel_ridge import KernelRidge\n",
    "from sklearn.linear_model import BayesianRidge\n",
    "from sklearn.linear_model import ElasticNet\n",
    "from lightgbm import LGBMRegressor\n",
    "\n",
    "from sklearn.base import BaseEstimator,TransformerMixin,RegressorMixin,clone"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "class AverageWeight(BaseEstimator,RegressorMixin):\n",
    "    \"\"\"Weighted-average ensemble of sklearn-style regressors.\n",
    "\n",
    "    model: list of estimators (cloned at fit time).\n",
    "    weight: per-model weights, same order as `model`.\n",
    "    \"\"\"\n",
    "    def __init__(self,model,weight):\n",
    "        self.model = model\n",
    "        self.weight = weight\n",
    "        \n",
    "    def fit(self,X,y):\n",
    "        # Clone so repeated fits (e.g. inside cross_val_score) start fresh.\n",
    "        self.models_ = [clone(x) for x in self.model]\n",
    "        for model in self.models_:\n",
    "            model.fit(X,y)\n",
    "        return self\n",
    "    \n",
    "    def predict(self,X):\n",
    "        # Stack per-model predictions into shape (n_models, n_samples) and\n",
    "        # take the weighted sum — vectorized form of the original per-sample\n",
    "        # Python loop; returns the same list of weighted predictions.\n",
    "        pred = np.array([model.predict(X).ravel() for model in self.models_])\n",
    "        return list(np.asarray(self.weight) @ pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Configure the base models (hyper-parameters from earlier grid searches).\n",
    "lasso = Lasso(alpha=0.0005,max_iter=10000)\n",
    "ridge = Ridge(alpha=60)\n",
    "svr = SVR(gamma= 0.0004,kernel='rbf',C=13,epsilon=0.009)\n",
    "ker = KernelRidge(alpha=0.2 ,kernel='polynomial',degree=3 , coef0=0.8)\n",
    "ela = ElasticNet(alpha=0.005,l1_ratio=0.08,max_iter=10000)\n",
    "bay = BayesianRidge()\n",
    "lgbre = LGBMRegressor(learning_rate=0.005, max_depth=16, min_data_in_leaf= 7, num_boost_round= 1000, num_leaves= 32)\n",
    "# Weights chosen from each model's grid-search score (they sum to 1.0).\n",
    "w1 = 0.02\n",
    "w2 = 0.2\n",
    "w3 = 0.25\n",
    "w4 = 0.3\n",
    "w5 = 0.03\n",
    "w6 = 0.2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'weight': [0.2, 0.8]} -19.888901559152398\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[25.493693792555767,\n",
       " 22.52370569989302,\n",
       " 33.81084831182404,\n",
       " 32.475540550971616,\n",
       " 33.85851824170397,\n",
       " 27.65446478669144,\n",
       " 22.36438026228724,\n",
       " 22.910072191570887,\n",
       " 15.31607591435544,\n",
       " 18.990088293168267]"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Configure the ensemble.\n",
    "# NOTE(review): `grid` is defined in a cell further down this notebook and\n",
    "# cross_val_score is imported there too — this cell only works after running\n",
    "# those cells first (out-of-order execution; consider moving them up).\n",
    "weight_avg = AverageWeight(model = [lasso,lgbre],weight=[0.2,0.8])\n",
    "grid(weight_avg).grid_get(x_train,y_train,{'weight': [[0.5,0.5],[0.1,0.9],[0.2,0.8],[0,1],[0.3,0.7]]})\n",
    "\n",
    "# Fit on the full training data and predict the test set.\n",
    "weight_avg = AverageWeight(model = [lasso,lgbre],weight=[0.2,0.8])\n",
    "weight_avg.fit(x_train,y_train)\n",
    "pre = weight_avg.predict(test)\n",
    "pre"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "weight_avg: -8490.139179, 16275.6597,[-3.33548908e+01 -9.45653217e+02 -2.78560825e+02 -1.57776966e+02\n",
      " -4.10353500e+04]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[26.285965821634747,\n",
       " 24.13212173302493,\n",
       " 30.046751437920058,\n",
       " 30.82667472417655,\n",
       " 30.00886474727585,\n",
       " 27.04151018824699,\n",
       " 22.170853432310913,\n",
       " 19.45891170085843,\n",
       " 12.73781532055365,\n",
       " 19.36647547593597]"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Score the 6-model weighted average with 5-fold CV.\n",
    "# NOTE(review): cross_val_score is imported in a later cell — run it first.\n",
    "weight_avg = AverageWeight(model = [lasso,ridge,svr,ker,ela,bay],weight=[w1,w2,w3,w4,w5,w6])\n",
    "score = cross_val_score(weight_avg,x_train,y_train,scoring='neg_mean_squared_error',cv = 5) # 5 fits -> list of 5 scores\n",
    "print(\"{}: {:.6f}, {:.4f},{}\".format('weight_avg',score.mean(),score.std(),score))\n",
    "\n",
    "# Fit on the full training data and predict the test set.\n",
    "weight_avg = AverageWeight(model = [lasso,ridge,svr,ker,ela,bay],weight=[w1,w2,w3,w4,w5,w6])\n",
    "weight_avg.fit(x_train,y_train)\n",
    "pre = weight_avg.predict(test)\n",
    "pre"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Stacking"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "class stacking(BaseEstimator,RegressorMixin,TransformerMixin):\n",
    "    \"\"\"Two-level stacking: 5-fold OOF predictions from `mod` feed `meta_model`.\n",
    "\n",
    "    Assumes X and y are numpy arrays (callers pass .values) — indexing uses\n",
    "    X[train_index], which would select columns on a DataFrame; confirm.\n",
    "    \"\"\"\n",
    "    def __init__(self,mod,meta_model):\n",
    "        self.mod = mod\n",
    "        self.meta_model = meta_model\n",
    "        self.kf = KFold(n_splits=5,random_state=42,shuffle=True)\n",
    "    \n",
    "    def fit(self,X,y):\n",
    "        # saved_model[i] keeps the 5 fold-models of base learner i.\n",
    "        self.saved_model = [list() for i in self.mod]\n",
    "        oof_train  =  np.zeros((X.shape[0],len(self.mod)))\n",
    "        \n",
    "        for i , model in enumerate(self.mod):\n",
    "            for train_index,val_index in self.kf.split(X,y): \n",
    "                renew_model = clone(model)\n",
    "                renew_model.fit(X[train_index], y[train_index])\n",
    "                self.saved_model[i].append(renew_model)\n",
    "                oof_train[val_index,i] = renew_model.predict(X[val_index])\n",
    "        # The meta model learns from the out-of-fold base predictions.\n",
    "        self.meta_model.fit(oof_train,y)\n",
    "        return self\n",
    "    \n",
    "    def predict(self,X):\n",
    "        # For each base learner, average its 5 fold-models' predictions,\n",
    "        # then let the meta model combine the per-learner columns.\n",
    "        whole_test = np.column_stack([np.column_stack([model.predict(X) for model  in single_model]).mean(axis = 1) for single_model in self.saved_model])\n",
    "        return self.meta_model.predict(whole_test)\n",
    "        # Step-by-step decomposition of the line above (unreachable, kept as notes):\n",
    "        '''\n",
    "        saved_model = [ ['lasso','lasso','lasso','lasso','lasso'],\n",
    "               ['lasso','ridge','ridge','ridge','lasso'] ,\n",
    "              ['lasso','ridge','ridge','ridge','lasso'] ,\n",
    "              ['lasso','ridge','ridge','ridge','lasso'] ,\n",
    "              ['lasso','ridge','ridge','ridge','lasso'] ,\n",
    "              ['lasso','ridge','ridge','ridge','lasso'] ,]\n",
    "        X = np.arange(10).reshape(-1,1)\n",
    "        [X for single in model]\n",
    "        np.column_stack([X for single in model])()\n",
    "        np.column_stack([X for single in model]).mean(axis = 1)\n",
    "        [np.column_stack([X for single in model]).mean(axis = 1) for model in saved_model]\n",
    "        np.column_stack([np.column_stack([X for single in model]).mean(axis = 1) for model in saved_model])\n",
    "        \n",
    "        '''\n",
    "          \n",
    "    def get_oof(self,X,y,test_X):\n",
    "        \"\"\"Return (oof, test_mean): OOF train predictions and per-model\n",
    "        averaged test predictions, for external stacking.\"\"\"\n",
    "        oof = np.zeros((X.shape[0],len(self.mod)))\n",
    "        test_single = np.zeros((test_X.shape[0],5)) # 5-fold cross-validation\n",
    "        test_mean = np.zeros((test_X.shape[0],len(self.mod))) # one column per base model\n",
    "        for i , model in enumerate(self.mod):\n",
    "            for j , (train_index,val_index) in enumerate(self.kf.split(X,y)):\n",
    "                clone_model = clone(model)\n",
    "                clone_model.fit(X[train_index],y[train_index])\n",
    "                oof[val_index,i] = clone_model.predict(X[val_index])\n",
    "                test_single[:,j] = clone_model.predict(test_X)\n",
    "            test_mean[:,i] = test_single.mean(axis = 1)\n",
    "        return oof,test_mean        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "weight_avg: -33743517.422071, 67486601.2292,[-1.19212215e+01 -3.12912453e+02 -2.26921716e+02 -3.15474722e+02\n",
      " -1.68716720e+08]\n"
     ]
    }
   ],
   "source": [
    "# Score the stacking ensemble with 5-fold CV (arrays, not DataFrames).\n",
    "stack_model = stacking(mod = [lasso,ridge,svr,ker,ela,bay],meta_model=ker)\n",
    "score = cross_val_score(stack_model,x_train.values,y_train.values.ravel(),scoring='neg_mean_squared_error',cv = 5) # 5 fits -> list of 5 scores\n",
    "# Fixed label: the original printed 'weight_avg' while scoring stack_model.\n",
    "print(\"{}: {:.6f}, {:.4f},{}\".format('stack_model',score.mean(),score.std(),score))\n",
    "\n",
    "# print(rmse_cv(stack_model,X_scaled,y_log.values))\n",
    "# print(rmse_cv(stack_model,X_scaled,y_log.values).mean())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Collect each base model's 5-fold OOF and averaged test predictions.\n",
    "X_train_stack,X_test_stack = stack_model.get_oof(x_train.values,y_train.values.ravel(),test.values)\n",
    "X_test_stack"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 模型调参技巧\n",
    "- 只针对训练集 用cv策略进行调参"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.base import BaseEstimator,TransformerMixin,RegressorMixin,clone\n",
    "from sklearn.model_selection import cross_val_score,GridSearchCV,KFold\n",
    "from sklearn.linear_model import Lasso\n",
    "\n",
    "from lightgbm import LGBMRegressor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "class grid():\n",
    "    \"\"\"Small helper around GridSearchCV for quick notebook tuning.\"\"\"\n",
    "    def __init__(self,model):\n",
    "        self.model = model\n",
    "    def grid_get(self,X,y,param_grid):\n",
    "        \"\"\"Run a 5-fold grid search; print best params/score and return a\n",
    "        params / mean_test_score / std_test_score results table.\"\"\"\n",
    "        # refit=False: we only want the CV scores, not a refit final model.\n",
    "        grid_search = GridSearchCV(self.model,param_grid,cv=5,scoring='neg_mean_squared_error',refit = False)\n",
    "        grid_search.fit(X,y)\n",
    "        \n",
    "        print(grid_search.best_params_,grid_search.best_score_)\n",
    "        \n",
    "        # (Removed a leftover no-op that reassigned mean_test_score to itself.)\n",
    "        return pd.DataFrame(grid_search.cv_results_)[['params','mean_test_score','std_test_score']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'alpha': 0.0009, 'max_iter': 10000} -36.98746282930138\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>params</th>\n",
       "      <th>mean_test_score</th>\n",
       "      <th>std_test_score</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>0</td>\n",
       "      <td>{'alpha': 0.0004, 'max_iter': 10000}</td>\n",
       "      <td>-37.040276</td>\n",
       "      <td>23.107716</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>{'alpha': 0.0005, 'max_iter': 10000}</td>\n",
       "      <td>-37.029654</td>\n",
       "      <td>23.110846</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>2</td>\n",
       "      <td>{'alpha': 0.0007, 'max_iter': 10000}</td>\n",
       "      <td>-37.008501</td>\n",
       "      <td>23.117097</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>3</td>\n",
       "      <td>{'alpha': 0.0006, 'max_iter': 10000}</td>\n",
       "      <td>-37.019063</td>\n",
       "      <td>23.113974</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4</td>\n",
       "      <td>{'alpha': 0.0009, 'max_iter': 10000}</td>\n",
       "      <td>-36.987463</td>\n",
       "      <td>23.123324</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>5</td>\n",
       "      <td>{'alpha': 0.0008, 'max_iter': 10000}</td>\n",
       "      <td>-36.997969</td>\n",
       "      <td>23.120215</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                 params  mean_test_score  std_test_score\n",
       "0  {'alpha': 0.0004, 'max_iter': 10000}       -37.040276       23.107716\n",
       "1  {'alpha': 0.0005, 'max_iter': 10000}       -37.029654       23.110846\n",
       "2  {'alpha': 0.0007, 'max_iter': 10000}       -37.008501       23.117097\n",
       "3  {'alpha': 0.0006, 'max_iter': 10000}       -37.019063       23.113974\n",
       "4  {'alpha': 0.0009, 'max_iter': 10000}       -36.987463       23.123324\n",
       "5  {'alpha': 0.0008, 'max_iter': 10000}       -36.997969       23.120215"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity-check the helper with a LASSO grid search.\n",
    "grid(Lasso()).grid_get(x_train,y_train,{'alpha': [0.0004,0.0005,0.0007,0.0006,0.0009,0.0008],'max_iter':[10000]})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'learning_rate': 0.005, 'max_depth': 16, 'min_data_in_leaf': 7, 'n_estimators': 1000, 'num_leaves': 32} -20.584455976949044\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>params</th>\n",
       "      <th>mean_test_score</th>\n",
       "      <th>std_test_score</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>0</td>\n",
       "      <td>{'learning_rate': 0.005, 'max_depth': 16, 'min...</td>\n",
       "      <td>-20.584456</td>\n",
       "      <td>12.958467</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>{'learning_rate': 0.005, 'max_depth': 16, 'min...</td>\n",
       "      <td>-21.132628</td>\n",
       "      <td>11.811723</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                              params  mean_test_score  \\\n",
       "0  {'learning_rate': 0.005, 'max_depth': 16, 'min...       -20.584456   \n",
       "1  {'learning_rate': 0.005, 'max_depth': 16, 'min...       -21.132628   \n",
       "\n",
       "   std_test_score  \n",
       "0       12.958467  \n",
       "1       11.811723  "
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Tune LGBMRegressor with the same helper (see LGBMRegressor() docs for params).\n",
    "grid(LGBMRegressor()).grid_get(x_train,y_train,\n",
    "                               {'learning_rate': [0.005],\n",
    "                                'n_estimators':[1000],\n",
    "                                'num_leaves':[2**5],\n",
    "                                'max_depth':[2**4],\n",
    "                                #'subsample':[0.7],\n",
    "                                #'bagging_fraction':[0.7],\n",
    "                                #'lambda_l1':[0],\n",
    "                                'min_data_in_leaf':[7,10],\n",
    "                                #'min_child_samples':[5,10],\n",
    "                               }\n",
    "                              )"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": true
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
