{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "\n",
    "from utils import *\n",
    "\n",
    "\n",
    "\n",
    "%matplotlib inline\n",
    "data_path = 'data/'\n",
    "seed=1204\n",
    "\n",
    "submission_path=data_path+'submission/'\n",
    "fold_path = 'fold_data/'\n",
    "\n",
    "\n",
    "cv_loss_list=[]\n",
    "n_iteration_list=[]\n",
    "def score(params):\n",
    "    print(\"Training with params: \")\n",
    "    print(params)\n",
    "    cv_losses=[]\n",
    "    cv_iteration=[]\n",
    "    for (train_idx,val_idx) in cv:\n",
    "        cv_train = X.iloc[train_idx]\n",
    "        cv_val = X.iloc[val_idx]\n",
    "        cv_y_train = y[train_idx]\n",
    "        cv_y_val = y[val_idx]\n",
    "        \n",
    "        dtrain = xgb.DMatrix(cv_train,cv_y_train)\n",
    "        dval = xgb.DMatrix(cv_val,cv_y_val)\n",
    "        watchlist = [(dtrain, 'train'), (dval, 'valid')]\n",
    "        \n",
    "        xgb_model = xgb.train(params, dtrain, 2000, watchlist,\n",
    "                          verbose_eval=False, \n",
    "                          early_stopping_rounds=200)\n",
    "       \n",
    "        train_pred = xgb_model.predict(dtrain,ntree_limit=xgb_model.best_ntree_limit)\n",
    "        val_pred = xgb_model.predict(dval,ntree_limit=xgb_model.best_ntree_limit+1)\n",
    "        train_loss = root_mean_squared_error(cv_y_train,train_pred)\n",
    "        val_loss = root_mean_squared_error(cv_y_val,val_pred)\n",
    "        print('Train RMSE: {},Val RMSE: {}'.format(train_loss,val_loss))\n",
    "        print('Best iteration: {}'.format(xgb_model.best_ntree_limit))\n",
    "        cv_losses.append(val_loss)\n",
    "        cv_iteration.append(xgb_model.best_iteration)\n",
    "        \n",
    "        xgb_model.__del__()\n",
    "    print('6 fold results: {}'.format(cv_losses))\n",
    "    \n",
    "    cv_loss_list.append(cv_losses)\n",
    "    n_iteration_list.append(cv_iteration)\n",
    "    \n",
    "    mean_cv_loss = np.mean(cv_losses)\n",
    "    print('Average iterations: {}'.format(np.mean(cv_iteration)))\n",
    "    print(\"Mean Cross Validation RMSE: {}\\n\".format(mean_cv_loss))\n",
    "    return {'loss': mean_cv_loss, 'status': STATUS_OK}\n",
    "\n",
    "def optimize(space,seed=seed,max_evals=5):\n",
    "    \n",
    "    best = fmin(score, space, algo=tpe.suggest, \n",
    "        # trials=trials, \n",
    "        max_evals=max_evals)\n",
    "    return best\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Load the lag-feature dataset and build the design matrix / target up\n",
     "# to date_block_num 33; the month index column is then dropped so the\n",
     "# model cannot key on the absolute month.\n",
     "all_data = get_all_data(data_path,'new_sales_lag_after12.pickle')\n",
     "\n",
     "X,y = get_X_y(all_data,33)\n",
     "X.drop('date_block_num',axis=1,inplace=True)\n",
     "\n",
     "# CV fold indices over months 28..33 — presumably one validation month\n",
     "# per fold (the run below reports 6 folds); verify against get_cv_idxs.\n",
     "cv = get_cv_idxs(all_data,28,33)\n",
     "\n",
     "# vanila_params = {'eval_metric': 'rmse','objective': \"reg:linear\" , 'max_depth':11,'seed': 1204,'tree_method':'gpu_hist'}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Hyperopt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Reload with the month-level lag-feature pickle. This overwrites the\n",
     "# X / y / cv built in the earlier load cell, so only this dataset is\n",
     "# actually tuned below.\n",
     "# NOTE(review): duplicate of the earlier load cell with a different\n",
     "# pickle — consider removing one so Restart-&-Run-All is unambiguous.\n",
     "all_data = get_all_data(data_path,'new_sales_lag_after12_month.pickle')\n",
     "\n",
     "X,y = get_X_y(all_data,33)\n",
     "X.drop('date_block_num',axis=1,inplace=True)\n",
     "\n",
     "cv = get_cv_idxs(all_data,28,33)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'gamma': 0.05, 'learning_rate': 0.03, 'max_leaf_nodes': 126, 'min_child_weight': 139, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7692943150150477,Val RMSE: 0.8298628584037769\n",
      "Best iteration: 1586\n",
      "Train RMSE: 0.8150147987262512,Val RMSE: 0.7748194976537419\n",
      "Best iteration: 504\n",
      "Train RMSE: 0.7595488722059013,Val RMSE: 0.7014695069801841\n",
      "Best iteration: 1985\n",
      "Train RMSE: 0.7555471559093475,Val RMSE: 0.7630134729718234\n",
      "Best iteration: 2000\n",
      "Train RMSE: 0.7586298595127628,Val RMSE: 0.878838464454166\n",
      "Best iteration: 1861\n",
      "Train RMSE: 0.865694702867586,Val RMSE: 0.9411882330092844\n",
      "Best iteration: 98\n",
      "6 fold results: [0.8298628584037769, 0.7748194976537419, 0.7014695069801841, 0.7630134729718234, 0.878838464454166, 0.9411882330092844]\n",
      "Average iterations: 1338.0\n",
      "Mean Cross Validation RMSE: 0.8148653389121628\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'gamma': 0.7000000000000001, 'learning_rate': 0.03, 'max_leaf_nodes': 111, 'min_child_weight': 137, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7754027320051141,Val RMSE: 0.8312497475989395\n",
      "Best iteration: 1628\n",
      "Train RMSE: 0.799860690325553,Val RMSE: 0.7716902832292488\n",
      "Best iteration: 820\n",
      "Train RMSE: 0.7743710750083191,Val RMSE: 0.7020497896084247\n",
      "Best iteration: 1609\n",
      "Train RMSE: 0.7782787278384727,Val RMSE: 0.7680910837679781\n",
      "Best iteration: 1318\n",
      "Train RMSE: 0.7799620120261355,Val RMSE: 0.8812422163599282\n",
      "Best iteration: 1222\n",
      "Train RMSE: 0.8268904210366085,Val RMSE: 0.9344103446067762\n",
      "Best iteration: 299\n",
      "6 fold results: [0.8312497475989395, 0.7716902832292488, 0.7020497896084247, 0.7680910837679781, 0.8812422163599282, 0.9344103446067762]\n",
      "Average iterations: 1148.3333333333333\n",
      "Mean Cross Validation RMSE: 0.8147889108618825\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'gamma': 0.25, 'learning_rate': 0.03, 'max_leaf_nodes': 105, 'min_child_weight': 138, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7793539308393655,Val RMSE: 0.8337359806695113\n",
      "Best iteration: 1481\n",
      "Train RMSE: 0.7986661938085838,Val RMSE: 0.771673097343372\n",
      "Best iteration: 881\n",
      "Train RMSE: 0.7679562789820248,Val RMSE: 0.6997111840134858\n",
      "Best iteration: 1962\n",
      "Train RMSE: 0.7656141397134119,Val RMSE: 0.7656405311099103\n",
      "Best iteration: 2000\n",
      "Train RMSE: 0.7890819131710374,Val RMSE: 0.8828148293253458\n",
      "Best iteration: 965\n",
      "Train RMSE: 0.8304137505076403,Val RMSE: 0.9320156853935837\n",
      "Best iteration: 277\n",
      "6 fold results: [0.8337359806695113, 0.771673097343372, 0.6997111840134858, 0.7656405311099103, 0.8828148293253458, 0.9320156853935837]\n",
      "Average iterations: 1260.0\n",
      "Mean Cross Validation RMSE: 0.8142652179758681\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'gamma': 0.55, 'learning_rate': 0.03, 'max_leaf_nodes': 128, 'min_child_weight': 129, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7687773521333519,Val RMSE: 0.83378480761388\n",
      "Best iteration: 1605\n",
      "Train RMSE: 0.8119088736876008,Val RMSE: 0.7781343513141341\n",
      "Best iteration: 535\n",
      "Train RMSE: 0.7594233038338865,Val RMSE: 0.6999571114412168\n",
      "Best iteration: 1920\n",
      "Train RMSE: 0.7553799313679845,Val RMSE: 0.7627124271597285\n",
      "Best iteration: 1968\n",
      "Train RMSE: 0.8083299953024721,Val RMSE: 0.8847708459830607\n",
      "Best iteration: 473\n",
      "Train RMSE: 0.8715109061490528,Val RMSE: 0.9412395915434948\n",
      "Best iteration: 87\n",
      "6 fold results: [0.83378480761388, 0.7781343513141341, 0.6999571114412168, 0.7627124271597285, 0.8847708459830607, 0.9412395915434948]\n",
      "Average iterations: 1097.0\n",
      "Mean Cross Validation RMSE: 0.8167665225092525\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.7000000000000001, 'eval_metric': 'rmse', 'gamma': 0.9500000000000001, 'learning_rate': 0.03, 'max_leaf_nodes': 127, 'min_child_weight': 108, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7861587046274701,Val RMSE: 0.8342405189407521\n",
      "Best iteration: 1168\n",
      "Train RMSE: 0.8057503948772906,Val RMSE: 0.7750817363459936\n",
      "Best iteration: 659\n",
      "Train RMSE: 0.7822961288169118,Val RMSE: 0.7026393484738402\n",
      "Best iteration: 1237\n",
      "Train RMSE: 0.7708878154230633,Val RMSE: 0.7637862488156305\n",
      "Best iteration: 1579\n",
      "Train RMSE: 0.8050009478095499,Val RMSE: 0.8899364486785644\n",
      "Best iteration: 556\n",
      "Train RMSE: 0.8654089206966625,Val RMSE: 0.9422867642014437\n",
      "Best iteration: 99\n",
      "6 fold results: [0.8342405189407521, 0.7750817363459936, 0.7026393484738402, 0.7637862488156305, 0.8899364486785644, 0.9422867642014437]\n",
      "Average iterations: 882.0\n",
      "Mean Cross Validation RMSE: 0.8179951775760373\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'gamma': 0.25, 'learning_rate': 0.03, 'max_leaf_nodes': 120, 'min_child_weight': 139, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7748814215236325,Val RMSE: 0.8387752034000747\n",
      "Best iteration: 1455\n",
      "Train RMSE: 0.812518926546763,Val RMSE: 0.7767116718050296\n",
      "Best iteration: 563\n",
      "Train RMSE: 0.760476689168897,Val RMSE: 0.6967613255798841\n",
      "Best iteration: 1956\n",
      "Train RMSE: 0.756742134433949,Val RMSE: 0.7616019526215173\n",
      "Best iteration: 2000\n",
      "Train RMSE: 0.801554831901294,Val RMSE: 0.8851027362266158\n",
      "Best iteration: 589\n",
      "Train RMSE: 0.8693207971938123,Val RMSE: 0.9374518064191463\n",
      "Best iteration: 90\n",
      "6 fold results: [0.8387752034000747, 0.7767116718050296, 0.6967613255798841, 0.7616019526215173, 0.8851027362266158, 0.9374518064191463]\n",
      "Average iterations: 1107.8333333333333\n",
      "Mean Cross Validation RMSE: 0.8160674493420447\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'gamma': 0.30000000000000004, 'learning_rate': 0.03, 'max_leaf_nodes': 137, 'min_child_weight': 115, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7687465326494626,Val RMSE: 0.8311648088555169\n",
      "Best iteration: 1617\n",
      "Train RMSE: 0.7911782556703868,Val RMSE: 0.7721363644435956\n",
      "Best iteration: 911\n",
      "Train RMSE: 0.757899957703014,Val RMSE: 0.6987054315182555\n",
      "Best iteration: 1999\n",
      "Train RMSE: 0.7586103741978129,Val RMSE: 0.7633786236002539\n",
      "Best iteration: 1896\n",
      "Train RMSE: 0.7936314411680023,Val RMSE: 0.8838472631691017\n",
      "Best iteration: 722\n",
      "Train RMSE: 0.8043093116691725,Val RMSE: 0.9370864273829851\n",
      "Best iteration: 578\n",
      "6 fold results: [0.8311648088555169, 0.7721363644435956, 0.6987054315182555, 0.7633786236002539, 0.8838472631691017, 0.9370864273829851]\n",
      "Average iterations: 1286.1666666666667\n",
      "Mean Cross Validation RMSE: 0.8143864864949514\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'gamma': 0.6000000000000001, 'learning_rate': 0.03, 'max_leaf_nodes': 113, 'min_child_weight': 122, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.784309879958552,Val RMSE: 0.8293230998653883\n",
      "Best iteration: 1200\n",
      "Train RMSE: 0.8126402513791826,Val RMSE: 0.772096145085777\n",
      "Best iteration: 556\n",
      "Train RMSE: 0.7768235502753048,Val RMSE: 0.6998989909387022\n",
      "Best iteration: 1444\n",
      "Train RMSE: 0.7630775265685362,Val RMSE: 0.7626597143405979\n",
      "Best iteration: 1993\n",
      "Train RMSE: 0.7756271193826569,Val RMSE: 0.8793894477964191\n",
      "Best iteration: 1361\n",
      "Train RMSE: 0.8627966992438452,Val RMSE: 0.9368021593031524\n",
      "Best iteration: 105\n",
      "6 fold results: [0.8293230998653883, 0.772096145085777, 0.6998989909387022, 0.7626597143405979, 0.8793894477964191, 0.9368021593031524]\n",
      "Average iterations: 1108.8333333333333\n",
      "Mean Cross Validation RMSE: 0.8133615928883394\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'gamma': 0.15000000000000002, 'learning_rate': 0.03, 'max_leaf_nodes': 113, 'min_child_weight': 118, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7647656269328734,Val RMSE: 0.8304538370194816\n",
      "Best iteration: 1987\n",
      "Train RMSE: 0.7977376270536112,Val RMSE: 0.775360837119202\n",
      "Best iteration: 819\n",
      "Train RMSE: 0.7735123646271265,Val RMSE: 0.6990630759730074\n",
      "Best iteration: 1516\n",
      "Train RMSE: 0.7612515704134072,Val RMSE: 0.7633207640869342\n",
      "Best iteration: 1908\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train RMSE: 0.7820473988453849,Val RMSE: 0.8812068076404532\n",
      "Best iteration: 1050\n",
      "Train RMSE: 0.8499738941671597,Val RMSE: 0.9340865937204799\n",
      "Best iteration: 154\n",
      "6 fold results: [0.8304538370194816, 0.775360837119202, 0.6990630759730074, 0.7633207640869342, 0.8812068076404532, 0.9340865937204799]\n",
      "Average iterations: 1238.0\n",
      "Mean Cross Validation RMSE: 0.8139153192599266\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'gamma': 0.25, 'learning_rate': 0.03, 'max_leaf_nodes': 103, 'min_child_weight': 129, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7689951079477138,Val RMSE: 0.8338287708641317\n",
      "Best iteration: 1958\n",
      "Train RMSE: 0.8101831562279217,Val RMSE: 0.7762536691649575\n",
      "Best iteration: 619\n",
      "Train RMSE: 0.7699212572162072,Val RMSE: 0.7009607491313181\n",
      "Best iteration: 1835\n",
      "Train RMSE: 0.7632987825689573,Val RMSE: 0.7671243816445966\n",
      "Best iteration: 1999\n",
      "Train RMSE: 0.7936126650249558,Val RMSE: 0.8850010101937601\n",
      "Best iteration: 806\n",
      "Train RMSE: 0.8722936807674032,Val RMSE: 0.94444563030343\n",
      "Best iteration: 87\n",
      "6 fold results: [0.8338287708641317, 0.7762536691649575, 0.7009607491313181, 0.7671243816445966, 0.8850010101937601, 0.94444563030343]\n",
      "Average iterations: 1216.3333333333333\n",
      "Mean Cross Validation RMSE: 0.817935701883699\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'gamma': 0.1, 'learning_rate': 0.03, 'max_leaf_nodes': 113, 'min_child_weight': 127, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.8061861693080499,Val RMSE: 0.8407390701441039\n",
      "Best iteration: 659\n",
      "Train RMSE: 0.8039267934949123,Val RMSE: 0.772017784657467\n",
      "Best iteration: 709\n",
      "Train RMSE: 0.7668505610494606,Val RMSE: 0.6994463800475561\n",
      "Best iteration: 1765\n",
      "Train RMSE: 0.7661451402389189,Val RMSE: 0.76483483314187\n",
      "Best iteration: 1655\n",
      "Train RMSE: 0.7702302420687412,Val RMSE: 0.8763978555310963\n",
      "Best iteration: 1494\n",
      "Train RMSE: 0.8506361446169078,Val RMSE: 0.9288647320759559\n",
      "Best iteration: 149\n",
      "6 fold results: [0.8407390701441039, 0.772017784657467, 0.6994463800475561, 0.76483483314187, 0.8763978555310963, 0.9288647320759559]\n",
      "Average iterations: 1070.8333333333333\n",
      "Mean Cross Validation RMSE: 0.8137167759330083\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'gamma': 0.15000000000000002, 'learning_rate': 0.03, 'max_leaf_nodes': 137, 'min_child_weight': 134, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7869231427337013,Val RMSE: 0.8348197594575684\n",
      "Best iteration: 1099\n",
      "Train RMSE: 0.8181162179310896,Val RMSE: 0.7760318823758704\n",
      "Best iteration: 479\n",
      "Train RMSE: 0.7618263755471713,Val RMSE: 0.699624801457145\n",
      "Best iteration: 1991\n",
      "Train RMSE: 0.7587767689616058,Val RMSE: 0.7637343515269567\n",
      "Best iteration: 1996\n",
      "Train RMSE: 0.7809857112173313,Val RMSE: 0.8793171917988581\n",
      "Best iteration: 1075\n",
      "Train RMSE: 0.8533921490705283,Val RMSE: 0.9366861307479433\n",
      "Best iteration: 140\n",
      "6 fold results: [0.8348197594575684, 0.7760318823758704, 0.699624801457145, 0.7637343515269567, 0.8793171917988581, 0.9366861307479433]\n",
      "Average iterations: 1129.0\n",
      "Mean Cross Validation RMSE: 0.8150356862273903\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'gamma': 0.75, 'learning_rate': 0.03, 'max_leaf_nodes': 134, 'min_child_weight': 139, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7731024661901018,Val RMSE: 0.8360589554158245\n",
      "Best iteration: 1683\n",
      "Train RMSE: 0.8153585619944239,Val RMSE: 0.7735752792171986\n",
      "Best iteration: 511\n",
      "Train RMSE: 0.7647278648233263,Val RMSE: 0.6983196082242054\n",
      "Best iteration: 1999\n",
      "Train RMSE: 0.762278854632208,Val RMSE: 0.7669387756043586\n",
      "Best iteration: 1995\n",
      "Train RMSE: 0.7869639676484261,Val RMSE: 0.8833756465194278\n",
      "Best iteration: 977\n",
      "Train RMSE: 0.8169514783857433,Val RMSE: 0.9290769479541113\n",
      "Best iteration: 408\n",
      "6 fold results: [0.8360589554158245, 0.7735752792171986, 0.6983196082242054, 0.7669387756043586, 0.8833756465194278, 0.9290769479541113]\n",
      "Average iterations: 1261.1666666666667\n",
      "Mean Cross Validation RMSE: 0.8145575354891877\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.7000000000000001, 'eval_metric': 'rmse', 'gamma': 0.1, 'learning_rate': 0.03, 'max_leaf_nodes': 114, 'min_child_weight': 105, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.7000000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7864728677850781,Val RMSE: 0.8355207028064088\n",
      "Best iteration: 1188\n",
      "Train RMSE: 0.7934911350843937,Val RMSE: 0.7732584196903249\n",
      "Best iteration: 1004\n",
      "Train RMSE: 0.7856843225847144,Val RMSE: 0.7031073250138479\n",
      "Best iteration: 1165\n",
      "Train RMSE: 0.763773489433229,Val RMSE: 0.7617669114757355\n",
      "Best iteration: 2000\n",
      "Train RMSE: 0.8007219618272221,Val RMSE: 0.8901489390896108\n",
      "Best iteration: 643\n",
      "Train RMSE: 0.860748961309271,Val RMSE: 0.9406261139923292\n",
      "Best iteration: 110\n",
      "6 fold results: [0.8355207028064088, 0.7732584196903249, 0.7031073250138479, 0.7617669114757355, 0.8901489390896108, 0.9406261139923292]\n",
      "Average iterations: 1017.3333333333334\n",
      "Mean Cross Validation RMSE: 0.8174047353447095\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'gamma': 0.55, 'learning_rate': 0.03, 'max_leaf_nodes': 129, 'min_child_weight': 132, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7840974035358483,Val RMSE: 0.8310258548813768\n",
      "Best iteration: 1083\n",
      "Train RMSE: 0.8125174227093647,Val RMSE: 0.7744829452685849\n",
      "Best iteration: 527\n",
      "Train RMSE: 0.7586361057073896,Val RMSE: 0.6991866333474377\n",
      "Best iteration: 1994\n",
      "Train RMSE: 0.7548719555862418,Val RMSE: 0.7608430964444814\n",
      "Best iteration: 1995\n",
      "Train RMSE: 0.7728315734977613,Val RMSE: 0.8784455172601392\n",
      "Best iteration: 1286\n",
      "Train RMSE: 0.8719974500198877,Val RMSE: 0.941865516164276\n",
      "Best iteration: 85\n",
      "6 fold results: [0.8310258548813768, 0.7744829452685849, 0.6991866333474377, 0.7608430964444814, 0.8784455172601392, 0.941865516164276]\n",
      "Average iterations: 1160.6666666666667\n",
      "Mean Cross Validation RMSE: 0.8143082605610493\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'gamma': 0.9500000000000001, 'learning_rate': 0.03, 'max_leaf_nodes': 135, 'min_child_weight': 107, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7873616914546444,Val RMSE: 0.8339336299421912\n",
      "Best iteration: 971\n",
      "Train RMSE: 0.7960012170624174,Val RMSE: 0.7771644193736921\n",
      "Best iteration: 802\n",
      "Train RMSE: 0.7570250834714823,Val RMSE: 0.6968812711288128\n",
      "Best iteration: 1999\n",
      "Train RMSE: 0.7560091092151255,Val RMSE: 0.7622564129958435\n",
      "Best iteration: 1915\n",
      "Train RMSE: 0.8014701299259462,Val RMSE: 0.8864286847354363\n",
      "Best iteration: 571\n",
      "Train RMSE: 0.8626068726268261,Val RMSE: 0.9357513331740858\n",
      "Best iteration: 104\n",
      "6 fold results: [0.8339336299421912, 0.7771644193736921, 0.6968812711288128, 0.7622564129958435, 0.8864286847354363, 0.9357513331740858]\n",
      "Average iterations: 1059.3333333333333\n",
      "Mean Cross Validation RMSE: 0.8154026252250103\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'gamma': 0.9500000000000001, 'learning_rate': 0.03, 'max_leaf_nodes': 126, 'min_child_weight': 137, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7966495269729933,Val RMSE: 0.8343163573269995\n",
      "Best iteration: 887\n",
      "Train RMSE: 0.8133034401687065,Val RMSE: 0.7736644989743724\n",
      "Best iteration: 584\n",
      "Train RMSE: 0.7798028894938853,Val RMSE: 0.700796808008432\n",
      "Best iteration: 1460\n",
      "Train RMSE: 0.7656955297253165,Val RMSE: 0.7646324964788498\n",
      "Best iteration: 1991\n",
      "Train RMSE: 0.7811721762996031,Val RMSE: 0.884477614795002\n",
      "Best iteration: 1239\n",
      "Train RMSE: 0.8521058685071102,Val RMSE: 0.9310755905580098\n",
      "Best iteration: 141\n",
      "6 fold results: [0.8343163573269995, 0.7736644989743724, 0.700796808008432, 0.7646324964788498, 0.884477614795002, 0.9310755905580098]\n",
      "Average iterations: 1049.3333333333333\n",
      "Mean Cross Validation RMSE: 0.8148272276902776\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'gamma': 0.1, 'learning_rate': 0.03, 'max_leaf_nodes': 121, 'min_child_weight': 126, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train RMSE: 0.7786129888201233,Val RMSE: 0.8328703110545863\n",
      "Best iteration: 1390\n",
      "Train RMSE: 0.8005282490837061,Val RMSE: 0.7727447644207214\n",
      "Best iteration: 763\n",
      "Train RMSE: 0.7720638754489715,Val RMSE: 0.7020154675888078\n",
      "Best iteration: 1563\n",
      "Train RMSE: 0.7721313853852835,Val RMSE: 0.7658928383203124\n",
      "Best iteration: 1462\n",
      "Train RMSE: 0.7779602213352039,Val RMSE: 0.8842784222280834\n",
      "Best iteration: 1191\n",
      "Train RMSE: 0.869065219308384,Val RMSE: 0.9402616198165826\n",
      "Best iteration: 90\n",
      "6 fold results: [0.8328703110545863, 0.7727447644207214, 0.7020154675888078, 0.7658928383203124, 0.8842784222280834, 0.9402616198165826]\n",
      "Average iterations: 1075.5\n",
      "Mean Cross Validation RMSE: 0.816343903904849\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.7000000000000001, 'eval_metric': 'rmse', 'gamma': 0.75, 'learning_rate': 0.03, 'max_leaf_nodes': 139, 'min_child_weight': 136, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7696735618404156,Val RMSE: 0.8313612411753102\n",
      "Best iteration: 1922\n",
      "Train RMSE: 0.8056450858915694,Val RMSE: 0.7710724708844396\n",
      "Best iteration: 684\n",
      "Train RMSE: 0.767357093106237,Val RMSE: 0.700752536691517\n",
      "Best iteration: 1916\n",
      "Train RMSE: 0.7674911876880826,Val RMSE: 0.7646470733669385\n",
      "Best iteration: 1771\n",
      "Train RMSE: 0.7808970607806305,Val RMSE: 0.882440032704621\n",
      "Best iteration: 1180\n",
      "Train RMSE: 0.8656728077160105,Val RMSE: 0.9423751909807576\n",
      "Best iteration: 99\n",
      "6 fold results: [0.8313612411753102, 0.7710724708844396, 0.700752536691517, 0.7646470733669385, 0.882440032704621, 0.9423751909807576]\n",
      "Average iterations: 1261.0\n",
      "Mean Cross Validation RMSE: 0.8154414243005973\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'gamma': 0.35000000000000003, 'learning_rate': 0.03, 'max_leaf_nodes': 127, 'min_child_weight': 134, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7695330702087253,Val RMSE: 0.8320184267873716\n",
      "Best iteration: 1627\n",
      "Train RMSE: 0.8026119240879563,Val RMSE: 0.7771089668920973\n",
      "Best iteration: 712\n",
      "Train RMSE: 0.7605660740290877,Val RMSE: 0.6999882986589346\n",
      "Best iteration: 1949\n",
      "Train RMSE: 0.7670019962702589,Val RMSE: 0.7657819703788926\n",
      "Best iteration: 1505\n",
      "Train RMSE: 0.7616764154651997,Val RMSE: 0.8766356235406012\n",
      "Best iteration: 1709\n",
      "Train RMSE: 0.8698109645568185,Val RMSE: 0.9379221601479258\n",
      "Best iteration: 88\n",
      "6 fold results: [0.8320184267873716, 0.7771089668920973, 0.6999882986589346, 0.7657819703788926, 0.8766356235406012, 0.9379221601479258]\n",
      "Average iterations: 1264.0\n",
      "Mean Cross Validation RMSE: 0.8149092410676372\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'gamma': 0.45, 'learning_rate': 0.03, 'max_leaf_nodes': 113, 'min_child_weight': 122, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7738382861622273,Val RMSE: 0.833605892589906\n",
      "Best iteration: 1505\n",
      "Train RMSE: 0.8032432112918921,Val RMSE: 0.7739858898803759\n",
      "Best iteration: 708\n",
      "Train RMSE: 0.766759149183993,Val RMSE: 0.6996400299257161\n",
      "Best iteration: 1723\n",
      "Train RMSE: 0.7624898675339158,Val RMSE: 0.7647800064410374\n",
      "Best iteration: 1802\n",
      "Train RMSE: 0.7624941278450404,Val RMSE: 0.8778873556452518\n",
      "Best iteration: 1748\n",
      "Train RMSE: 0.8497252637377734,Val RMSE: 0.9318738921252905\n",
      "Best iteration: 152\n",
      "6 fold results: [0.833605892589906, 0.7739858898803759, 0.6996400299257161, 0.7647800064410374, 0.8778873556452518, 0.9318738921252905]\n",
      "Average iterations: 1272.0\n",
      "Mean Cross Validation RMSE: 0.8136288444345962\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'gamma': 0.4, 'learning_rate': 0.03, 'max_leaf_nodes': 132, 'min_child_weight': 117, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.7000000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7705080058940069,Val RMSE: 0.8292920508224174\n",
      "Best iteration: 1995\n",
      "Train RMSE: 0.815799980832619,Val RMSE: 0.7737186190723189\n",
      "Best iteration: 545\n",
      "Train RMSE: 0.7794487979581842,Val RMSE: 0.699822894750379\n",
      "Best iteration: 1435\n",
      "Train RMSE: 0.7667185700165449,Val RMSE: 0.768076029024272\n",
      "Best iteration: 1967\n",
      "Train RMSE: 0.7986314899958862,Val RMSE: 0.8880779922898421\n",
      "Best iteration: 706\n",
      "Train RMSE: 0.8490567218582503,Val RMSE: 0.9346412942651409\n",
      "Best iteration: 159\n",
      "6 fold results: [0.8292920508224174, 0.7737186190723189, 0.699822894750379, 0.768076029024272, 0.8880779922898421, 0.9346412942651409]\n",
      "Average iterations: 1133.5\n",
      "Mean Cross Validation RMSE: 0.8156048133707284\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'gamma': 0.45, 'learning_rate': 0.03, 'max_leaf_nodes': 122, 'min_child_weight': 122, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7861478626398064,Val RMSE: 0.8377849805695419\n",
      "Best iteration: 1103\n",
      "Train RMSE: 0.8272327078415245,Val RMSE: 0.7782897943235615\n",
      "Best iteration: 359\n",
      "Train RMSE: 0.7614239244388271,Val RMSE: 0.6977892920755736\n",
      "Best iteration: 1985\n",
      "Train RMSE: 0.7746839786524911,Val RMSE: 0.7661812766093341\n",
      "Best iteration: 1324\n",
      "Train RMSE: 0.7858595473837955,Val RMSE: 0.8809868494063372\n",
      "Best iteration: 951\n",
      "Train RMSE: 0.8523108317786916,Val RMSE: 0.9332416141663609\n",
      "Best iteration: 141\n",
      "6 fold results: [0.8377849805695419, 0.7782897943235615, 0.6977892920755736, 0.7661812766093341, 0.8809868494063372, 0.9332416141663609]\n",
      "Average iterations: 976.1666666666666\n",
      "Mean Cross Validation RMSE: 0.8157123011917848\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'gamma': 0.65, 'learning_rate': 0.03, 'max_leaf_nodes': 113, 'min_child_weight': 122, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.7000000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.769825757983405,Val RMSE: 0.8319253630263994\n",
      "Best iteration: 1995\n",
      "Train RMSE: 0.8086097089141647,Val RMSE: 0.7762527477443057\n",
      "Best iteration: 637\n",
      "Train RMSE: 0.7731999119058471,Val RMSE: 0.703962357261095\n",
      "Best iteration: 1786\n",
      "Train RMSE: 0.7650934047647557,Val RMSE: 0.765992329509268\n",
      "Best iteration: 1998\n",
      "Train RMSE: 0.7768033320052975,Val RMSE: 0.8799090958373234\n",
      "Best iteration: 1378\n",
      "Train RMSE: 0.8460198705652819,Val RMSE: 0.9338181038549149\n",
      "Best iteration: 176\n",
      "6 fold results: [0.8319253630263994, 0.7762527477443057, 0.703962357261095, 0.765992329509268, 0.8799090958373234, 0.9338181038549149]\n",
      "Average iterations: 1327.3333333333333\n",
      "Mean Cross Validation RMSE: 0.8153099995388845\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'gamma': 0.8500000000000001, 'learning_rate': 0.03, 'max_leaf_nodes': 124, 'min_child_weight': 119, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7903048866179785,Val RMSE: 0.8359123297493815\n",
      "Best iteration: 975\n",
      "Train RMSE: 0.8131202384463324,Val RMSE: 0.7732995420779416\n",
      "Best iteration: 537\n",
      "Train RMSE: 0.7685707410519604,Val RMSE: 0.6996970006273029\n",
      "Best iteration: 1663\n",
      "Train RMSE: 0.776907678716282,Val RMSE: 0.767875400275465\n",
      "Best iteration: 1198\n",
      "Train RMSE: 0.769476409352204,Val RMSE: 0.8783743032699955\n",
      "Best iteration: 1455\n",
      "Train RMSE: 0.8183182954151056,Val RMSE: 0.9308360402219569\n",
      "Best iteration: 387\n",
      "6 fold results: [0.8359123297493815, 0.7732995420779416, 0.6996970006273029, 0.767875400275465, 0.8783743032699955, 0.9308360402219569]\n",
      "Average iterations: 1034.8333333333333\n",
      "Mean Cross Validation RMSE: 0.8143324360370071\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.7000000000000001, 'eval_metric': 'rmse', 'gamma': 0.55, 'learning_rate': 0.03, 'max_leaf_nodes': 119, 'min_child_weight': 122, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7775969739829552,Val RMSE: 0.8320349035048528\n",
      "Best iteration: 1507\n",
      "Train RMSE: 0.8050680279272098,Val RMSE: 0.7727527862814534\n",
      "Best iteration: 685\n",
      "Train RMSE: 0.7808050414394583,Val RMSE: 0.7008795381170845\n",
      "Best iteration: 1241\n",
      "Train RMSE: 0.7752552442770005,Val RMSE: 0.7674197356287346\n",
      "Best iteration: 1372\n",
      "Train RMSE: 0.7799826069262003,Val RMSE: 0.8828323159483533\n",
      "Best iteration: 1168\n",
      "Train RMSE: 0.8656468151111719,Val RMSE: 0.9385355951675215\n",
      "Best iteration: 99\n",
      "6 fold results: [0.8320349035048528, 0.7727527862814534, 0.7008795381170845, 0.7674197356287346, 0.8828323159483533, 0.9385355951675215]\n",
      "Average iterations: 1011.0\n",
      "Mean Cross Validation RMSE: 0.8157424791080001\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'gamma': 0.65, 'learning_rate': 0.03, 'max_leaf_nodes': 100, 'min_child_weight': 109, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.7000000000000001, 'tree_method': 'gpu_hist'}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train RMSE: 0.7753412341474909,Val RMSE: 0.8334675958875802\n",
      "Best iteration: 1665\n",
      "Train RMSE: 0.7965870131158769,Val RMSE: 0.7710846070401707\n",
      "Best iteration: 856\n",
      "Train RMSE: 0.7821053590824806,Val RMSE: 0.7028347475933053\n",
      "Best iteration: 1254\n",
      "Train RMSE: 0.7686692652913436,Val RMSE: 0.7647028059649402\n",
      "Best iteration: 1788\n",
      "Train RMSE: 0.7742598814292161,Val RMSE: 0.8813715627709033\n",
      "Best iteration: 1456\n",
      "Train RMSE: 0.8408162009463208,Val RMSE: 0.9402300819898033\n",
      "Best iteration: 194\n",
      "6 fold results: [0.8334675958875802, 0.7710846070401707, 0.7028347475933053, 0.7647028059649402, 0.8813715627709033, 0.9402300819898033]\n",
      "Average iterations: 1201.1666666666667\n",
      "Mean Cross Validation RMSE: 0.8156152335411172\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'gamma': 0.5, 'learning_rate': 0.03, 'max_leaf_nodes': 107, 'min_child_weight': 104, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7920756831059718,Val RMSE: 0.8391938644807643\n",
      "Best iteration: 934\n",
      "Train RMSE: 0.8137892251588363,Val RMSE: 0.7777817608716185\n",
      "Best iteration: 535\n",
      "Train RMSE: 0.7626934759726008,Val RMSE: 0.6997129090003038\n",
      "Best iteration: 1985\n",
      "Train RMSE: 0.7702367037300606,Val RMSE: 0.765314311613008\n",
      "Best iteration: 1543\n",
      "Train RMSE: 0.7811437534566225,Val RMSE: 0.8819976016334224\n",
      "Best iteration: 1108\n",
      "Train RMSE: 0.8256991862624414,Val RMSE: 0.9282026213480901\n",
      "Best iteration: 308\n",
      "6 fold results: [0.8391938644807643, 0.7777817608716185, 0.6997129090003038, 0.765314311613008, 0.8819976016334224, 0.9282026213480901]\n",
      "Average iterations: 1067.8333333333333\n",
      "Mean Cross Validation RMSE: 0.8153671781578679\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'gamma': 0.8500000000000001, 'learning_rate': 0.03, 'max_leaf_nodes': 108, 'min_child_weight': 116, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7700984820020174,Val RMSE: 0.8302860134323653\n",
      "Best iteration: 1587\n",
      "Train RMSE: 0.7984103577063333,Val RMSE: 0.7755690208478792\n",
      "Best iteration: 756\n",
      "Train RMSE: 0.7595230539048794,Val RMSE: 0.697696242942935\n",
      "Best iteration: 1981\n",
      "Train RMSE: 0.7615896262251762,Val RMSE: 0.7649099163546217\n",
      "Best iteration: 1820\n",
      "Train RMSE: 0.8074614848703948,Val RMSE: 0.8880363791140741\n",
      "Best iteration: 487\n",
      "Train RMSE: 0.8629889360118669,Val RMSE: 0.9378229538647275\n",
      "Best iteration: 105\n",
      "6 fold results: [0.8302860134323653, 0.7755690208478792, 0.697696242942935, 0.7649099163546217, 0.8880363791140741, 0.9378229538647275]\n",
      "Average iterations: 1121.6666666666667\n",
      "Mean Cross Validation RMSE: 0.8157200877594337\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'gamma': 0.45, 'learning_rate': 0.03, 'max_leaf_nodes': 136, 'min_child_weight': 133, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7658613579706975,Val RMSE: 0.8353015227333273\n",
      "Best iteration: 1980\n",
      "Train RMSE: 0.8046615744553155,Val RMSE: 0.7731556619717711\n",
      "Best iteration: 719\n",
      "Train RMSE: 0.766537025796611,Val RMSE: 0.6980902446472856\n",
      "Best iteration: 1942\n",
      "Train RMSE: 0.7619673885565936,Val RMSE: 0.7635095526241926\n",
      "Best iteration: 1999\n",
      "Train RMSE: 0.8082358263792433,Val RMSE: 0.8890287583959278\n",
      "Best iteration: 525\n",
      "Train RMSE: 0.8511136123023149,Val RMSE: 0.9322310841800564\n",
      "Best iteration: 147\n",
      "6 fold results: [0.8353015227333273, 0.7731556619717711, 0.6980902446472856, 0.7635095526241926, 0.8890287583959278, 0.9322310841800564]\n",
      "Average iterations: 1217.6666666666667\n",
      "Mean Cross Validation RMSE: 0.8152194707587602\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'gamma': 0.6000000000000001, 'learning_rate': 0.03, 'max_leaf_nodes': 125, 'min_child_weight': 111, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7679788257019408,Val RMSE: 0.8351805637913436\n",
      "Best iteration: 1666\n",
      "Train RMSE: 0.7908112815365773,Val RMSE: 0.7745312365173499\n",
      "Best iteration: 916\n",
      "Train RMSE: 0.7591789746115527,Val RMSE: 0.6981268087174115\n",
      "Best iteration: 1945\n",
      "Train RMSE: 0.7571462474988723,Val RMSE: 0.762678431911464\n",
      "Best iteration: 1997\n",
      "Train RMSE: 0.8088330638037949,Val RMSE: 0.8898514182117879\n",
      "Best iteration: 455\n",
      "Train RMSE: 0.863095397275838,Val RMSE: 0.9394667658142671\n",
      "Best iteration: 104\n",
      "6 fold results: [0.8351805637913436, 0.7745312365173499, 0.6981268087174115, 0.762678431911464, 0.8898514182117879, 0.9394667658142671]\n",
      "Average iterations: 1179.5\n",
      "Mean Cross Validation RMSE: 0.816639204160604\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.7000000000000001, 'eval_metric': 'rmse', 'gamma': 0.0, 'learning_rate': 0.03, 'max_leaf_nodes': 131, 'min_child_weight': 123, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7737787052801502,Val RMSE: 0.8304990172589922\n",
      "Best iteration: 1742\n",
      "Train RMSE: 0.8081483213005434,Val RMSE: 0.7727344284389278\n",
      "Best iteration: 648\n",
      "Train RMSE: 0.7727983704666269,Val RMSE: 0.7009267564512291\n",
      "Best iteration: 1631\n",
      "Train RMSE: 0.7651263578939144,Val RMSE: 0.763107090957783\n",
      "Best iteration: 1999\n",
      "Train RMSE: 0.8102044174898809,Val RMSE: 0.8895512516273011\n",
      "Best iteration: 484\n",
      "Train RMSE: 0.8782916823783669,Val RMSE: 0.9434109312386962\n",
      "Best iteration: 76\n",
      "6 fold results: [0.8304990172589922, 0.7727344284389278, 0.7009267564512291, 0.763107090957783, 0.8895512516273011, 0.9434109312386962]\n",
      "Average iterations: 1095.6666666666667\n",
      "Mean Cross Validation RMSE: 0.816704912662155\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'gamma': 0.8500000000000001, 'learning_rate': 0.03, 'max_leaf_nodes': 112, 'min_child_weight': 112, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7813746925027524,Val RMSE: 0.8360684016047163\n",
      "Best iteration: 1285\n",
      "Train RMSE: 0.8089053894120919,Val RMSE: 0.7795118451298176\n",
      "Best iteration: 610\n",
      "Train RMSE: 0.7669758460060059,Val RMSE: 0.7006335516415387\n",
      "Best iteration: 1893\n",
      "Train RMSE: 0.7625325086573939,Val RMSE: 0.7695438364616498\n",
      "Best iteration: 1999\n",
      "Train RMSE: 0.8003951840063738,Val RMSE: 0.8831440463377866\n",
      "Best iteration: 636\n",
      "Train RMSE: 0.8609828819318862,Val RMSE: 0.9423562476016163\n",
      "Best iteration: 111\n",
      "6 fold results: [0.8360684016047163, 0.7795118451298176, 0.7006335516415387, 0.7695438364616498, 0.8831440463377866, 0.9423562476016163]\n",
      "Average iterations: 1088.0\n",
      "Mean Cross Validation RMSE: 0.8185429881295209\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'gamma': 0.75, 'learning_rate': 0.03, 'max_leaf_nodes': 116, 'min_child_weight': 121, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.7000000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7741136774078642,Val RMSE: 0.8310759168484126\n",
      "Best iteration: 1828\n",
      "Train RMSE: 0.7995480974698733,Val RMSE: 0.7717303306512387\n",
      "Best iteration: 847\n",
      "Train RMSE: 0.7900108077722369,Val RMSE: 0.7034129294972665\n",
      "Best iteration: 1104\n",
      "Train RMSE: 0.7667152271924463,Val RMSE: 0.765205852450171\n",
      "Best iteration: 1974\n",
      "Train RMSE: 0.8045686059911367,Val RMSE: 0.8885090102306146\n",
      "Best iteration: 593\n",
      "Train RMSE: 0.852931051600409,Val RMSE: 0.9354830662383903\n",
      "Best iteration: 142\n",
      "6 fold results: [0.8310759168484126, 0.7717303306512387, 0.7034129294972665, 0.765205852450171, 0.8885090102306146, 0.9354830662383903]\n",
      "Average iterations: 1080.3333333333333\n",
      "Mean Cross Validation RMSE: 0.8159028509860157\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'gamma': 0.35000000000000003, 'learning_rate': 0.03, 'max_leaf_nodes': 130, 'min_child_weight': 131, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7813092781466127,Val RMSE: 0.8323272751569997\n",
      "Best iteration: 1392\n",
      "Train RMSE: 0.8017303053194705,Val RMSE: 0.7727296460722108\n",
      "Best iteration: 765\n",
      "Train RMSE: 0.7767480455858329,Val RMSE: 0.7022026157831849\n",
      "Best iteration: 1485\n",
      "Train RMSE: 0.7677143548723265,Val RMSE: 0.7665986079262331\n",
      "Best iteration: 1754\n",
      "Train RMSE: 0.7775357646627854,Val RMSE: 0.8823974781623731\n",
      "Best iteration: 1281\n",
      "Train RMSE: 0.865855422492378,Val RMSE: 0.9342526779993887\n",
      "Best iteration: 98\n",
      "6 fold results: [0.8323272751569997, 0.7727296460722108, 0.7022026157831849, 0.7665986079262331, 0.8823974781623731, 0.9342526779993887]\n",
      "Average iterations: 1128.1666666666667\n",
      "Mean Cross Validation RMSE: 0.8150847168500651\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'gamma': 0.65, 'learning_rate': 0.03, 'max_leaf_nodes': 106, 'min_child_weight': 125, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-3-bfc92be04407>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     12\u001b[0m     \u001b[0;34m'seed'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;36m1204\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m'tree_method'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m'gpu_hist'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     13\u001b[0m }\n\u001b[0;32m---> 14\u001b[0;31m \u001b[0mbest_hyperparams\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moptimize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mspace\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mmax_evals\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m200\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     15\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"The best hyperparameters are: \"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     16\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbest_hyperparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-1-870c4b98b6f9>\u001b[0m in \u001b[0;36moptimize\u001b[0;34m(space, seed, max_evals)\u001b[0m\n\u001b[1;32m     59\u001b[0m     best = fmin(score, space, algo=tpe.suggest, \n\u001b[1;32m     60\u001b[0m         \u001b[0;31m# trials=trials,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 61\u001b[0;31m         max_evals=max_evals)\n\u001b[0m\u001b[1;32m     62\u001b[0m     \u001b[0;32mreturn\u001b[0m \u001b[0mbest\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/xgb/lib/python3.6/site-packages/hyperopt/fmin.py\u001b[0m in \u001b[0;36mfmin\u001b[0;34m(fn, space, algo, max_evals, trials, rstate, allow_trials_fmin, pass_expr_memo_ctrl, catch_eval_exceptions, verbose, return_argmin)\u001b[0m\n\u001b[1;32m    318\u001b[0m                     verbose=verbose)\n\u001b[1;32m    319\u001b[0m     \u001b[0mrval\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcatch_eval_exceptions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcatch_eval_exceptions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 320\u001b[0;31m     \u001b[0mrval\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexhaust\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    321\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mreturn_argmin\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    322\u001b[0m         \u001b[0;32mreturn\u001b[0m \u001b[0mtrials\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margmin\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/xgb/lib/python3.6/site-packages/hyperopt/fmin.py\u001b[0m in \u001b[0;36mexhaust\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    197\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mexhaust\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    198\u001b[0m         \u001b[0mn_done\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 199\u001b[0;31m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmax_evals\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mn_done\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mblock_until_done\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0masync\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    200\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrefresh\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    201\u001b[0m         \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/xgb/lib/python3.6/site-packages/hyperopt/fmin.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, N, block_until_done)\u001b[0m\n\u001b[1;32m    171\u001b[0m             \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    172\u001b[0m                 \u001b[0;31m# -- loop over trials and do the jobs directly\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 173\u001b[0;31m                 \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mserial_evaluate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    174\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    175\u001b[0m             \u001b[0;32mif\u001b[0m \u001b[0mstopped\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/xgb/lib/python3.6/site-packages/hyperopt/fmin.py\u001b[0m in \u001b[0;36mserial_evaluate\u001b[0;34m(self, N)\u001b[0m\n\u001b[1;32m     90\u001b[0m                 \u001b[0mctrl\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbase\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCtrl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcurrent_trial\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtrial\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     91\u001b[0m                 \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 92\u001b[0;31m                     \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdomain\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevaluate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mspec\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mctrl\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     93\u001b[0m                 \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     94\u001b[0m                     \u001b[0mlogger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'job exception: %s'\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/xgb/lib/python3.6/site-packages/hyperopt/base.py\u001b[0m in \u001b[0;36mevaluate\u001b[0;34m(self, config, ctrl, attach_attachments)\u001b[0m\n\u001b[1;32m    838\u001b[0m                 \u001b[0mmemo\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmemo\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    839\u001b[0m                 print_node_on_error=self.rec_eval_print_node_on_error)\n\u001b[0;32m--> 840\u001b[0;31m             \u001b[0mrval\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpyll_rval\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    841\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    842\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrval\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mfloat\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mint\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumber\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-1-870c4b98b6f9>\u001b[0m in \u001b[0;36mscore\u001b[0;34m(params)\u001b[0m\n\u001b[1;32m     33\u001b[0m         xgb_model = xgb.train(params, dtrain, 2000, watchlist,\n\u001b[1;32m     34\u001b[0m                           \u001b[0mverbose_eval\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 35\u001b[0;31m                           early_stopping_rounds=200)\n\u001b[0m\u001b[1;32m     36\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     37\u001b[0m         \u001b[0mtrain_pred\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mxgb_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtrain\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mntree_limit\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mxgb_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbest_ntree_limit\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/xgboost/python-package/xgboost/training.py\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(params, dtrain, num_boost_round, evals, obj, feval, maximize, early_stopping_rounds, evals_result, verbose_eval, xgb_model, callbacks, learning_rates)\u001b[0m\n\u001b[1;32m    202\u001b[0m                            \u001b[0mevals\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mevals\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    203\u001b[0m                            \u001b[0mobj\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mobj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeval\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfeval\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 204\u001b[0;31m                            xgb_model=xgb_model, callbacks=callbacks)\n\u001b[0m\u001b[1;32m    205\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    206\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/xgboost/python-package/xgboost/training.py\u001b[0m in \u001b[0;36m_train_internal\u001b[0;34m(params, dtrain, num_boost_round, evals, obj, feval, xgb_model, callbacks)\u001b[0m\n\u001b[1;32m     82\u001b[0m         \u001b[0;31m# check evaluation result.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     83\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mevals\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 84\u001b[0;31m             \u001b[0mbst_eval_set\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbst\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0meval_set\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mevals\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeval\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     85\u001b[0m             \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbst_eval_set\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mSTRING_TYPES\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     86\u001b[0m                 \u001b[0mmsg\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbst_eval_set\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/xgboost/python-package/xgboost/core.py\u001b[0m in \u001b[0;36meval_set\u001b[0;34m(self, evals, iteration, feval)\u001b[0m\n\u001b[1;32m    957\u001b[0m                                               \u001b[0mdmats\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mevnames\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    958\u001b[0m                                               \u001b[0mc_bst_ulong\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mevals\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 959\u001b[0;31m                                               ctypes.byref(msg)))\n\u001b[0m\u001b[1;32m    960\u001b[0m         \u001b[0mres\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmsg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdecode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    961\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mfeval\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "space = {\n",
    "    #'n_estimators': hp.quniform('n_estimators', 50, 500, 5),\n",
    "#     'max_depth': hp.choice('max_depth', np.arange(5, 10, dtype=int)),\n",
    "    'subsample': hp.quniform('subsample', 0.7, 0.9, 0.05),\n",
    "    'colsample_bytree': hp.quniform('colsample_bytree', 0.7, 0.9, 0.05),\n",
    "    'gamma': hp.quniform('gamma', 0, 1, 0.05),\n",
    "    'max_leaf_nodes': hp.choice('max_leaf_nodes', np.arange(100,140, dtype=int)),\n",
    "    'min_child_weight': hp.choice('min_child_weight', np.arange(100,140, dtype=int)),\n",
    "    'learning_rate': 0.03,\n",
    "    'eval_metric': 'rmse',\n",
    "    'objective': 'reg:linear' , \n",
    "    'seed': 1204,'tree_method':'gpu_hist'\n",
    "}\n",
    "best_hyperparams = optimize(space,max_evals=200)\n",
    "print(\"The best hyperparameters are: \")\n",
    "print(best_hyperparams)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7965656876977267,Val RMSE: 0.8381787496082568\n",
      "Best iteration: 875\n",
      "Train RMSE: 0.8181629172704371,Val RMSE: 0.7767925512705117\n",
      "Best iteration: 481\n",
      "Train RMSE: 0.7665396306964919,Val RMSE: 0.6994470830865399\n",
      "Best iteration: 1995\n",
      "Train RMSE: 0.763078971618149,Val RMSE: 0.7622859701350537\n",
      "Best iteration: 1999\n",
      "Train RMSE: 0.7889513749536918,Val RMSE: 0.8840298656536524\n",
      "Best iteration: 901\n",
      "Train RMSE: 0.8530734245723749,Val RMSE: 0.9329739672084194\n",
      "Best iteration: 139\n",
      "6 fold results: [0.8381787496082568, 0.7767925512705117, 0.6994470830865399, 0.7622859701350537, 0.8840298656536524, 0.9329739672084194]\n",
      "Average iterations: 1064.0\n",
      "Mean Cross Validation RMSE: 0.8156180311604055\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.7000000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7833798558711778,Val RMSE: 0.8333407560653712\n",
      "Best iteration: 1391\n",
      "Train RMSE: 0.8057382630367155,Val RMSE: 0.7764955426522351\n",
      "Best iteration: 681\n",
      "Train RMSE: 0.7778408435578574,Val RMSE: 0.7025085714095595\n",
      "Best iteration: 1491\n",
      "Train RMSE: 0.7670512247350191,Val RMSE: 0.7644764985812756\n",
      "Best iteration: 1999\n",
      "Train RMSE: 0.8025990764267543,Val RMSE: 0.8861406096833645\n",
      "Best iteration: 628\n",
      "Train RMSE: 0.8536662791520738,Val RMSE: 0.9311621374886435\n",
      "Best iteration: 136\n",
      "6 fold results: [0.8333407560653712, 0.7764955426522351, 0.7025085714095595, 0.7644764985812756, 0.8861406096833645, 0.9311621374886435]\n",
      "Average iterations: 1053.3333333333333\n",
      "Mean Cross Validation RMSE: 0.8156873526467415\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.762774630633693,Val RMSE: 0.8335737874895094\n",
      "Best iteration: 1851\n",
      "Train RMSE: 0.8169843096516294,Val RMSE: 0.7730975319031139\n",
      "Best iteration: 469\n",
      "Train RMSE: 0.7579442725694894,Val RMSE: 0.6972170687312351\n",
      "Best iteration: 1987\n",
      "Train RMSE: 0.7546683694061485,Val RMSE: 0.7626597143405979\n",
      "Best iteration: 2000\n",
      "Train RMSE: 0.768437106336793,Val RMSE: 0.8815357123858392\n",
      "Best iteration: 1429\n",
      "Train RMSE: 0.8476579024478961,Val RMSE: 0.9363412371542104\n",
      "Best iteration: 164\n",
      "6 fold results: [0.8335737874895094, 0.7730975319031139, 0.6972170687312351, 0.7626597143405979, 0.8815357123858392, 0.9363412371542104]\n",
      "Average iterations: 1315.6666666666667\n",
      "Mean Cross Validation RMSE: 0.814070842000751\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.7000000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7833798558711778,Val RMSE: 0.8333407560653712\n",
      "Best iteration: 1391\n",
      "Train RMSE: 0.8057382630367155,Val RMSE: 0.7764955426522351\n",
      "Best iteration: 681\n",
      "Train RMSE: 0.7778408435578574,Val RMSE: 0.7025085714095595\n",
      "Best iteration: 1491\n",
      "Train RMSE: 0.7670512247350191,Val RMSE: 0.7644764985812756\n",
      "Best iteration: 1999\n",
      "Train RMSE: 0.8025990764267543,Val RMSE: 0.8861406096833645\n",
      "Best iteration: 628\n",
      "Train RMSE: 0.8536662791520738,Val RMSE: 0.9311621374886435\n",
      "Best iteration: 136\n",
      "6 fold results: [0.8333407560653712, 0.7764955426522351, 0.7025085714095595, 0.7644764985812756, 0.8861406096833645, 0.9311621374886435]\n",
      "Average iterations: 1053.3333333333333\n",
      "Mean Cross Validation RMSE: 0.8156873526467415\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7952936975920829,Val RMSE: 0.8433251900964542\n",
      "Best iteration: 909\n",
      "Train RMSE: 0.8021302188859857,Val RMSE: 0.7751181481656346\n",
      "Best iteration: 751\n",
      "Train RMSE: 0.7646221677669388,Val RMSE: 0.6980723781441783\n",
      "Best iteration: 1938\n",
      "Train RMSE: 0.7617468413039277,Val RMSE: 0.7649930564714852\n",
      "Best iteration: 1999\n",
      "Train RMSE: 0.7664474429571487,Val RMSE: 0.8791945599173328\n",
      "Best iteration: 1735\n",
      "Train RMSE: 0.8551936992054523,Val RMSE: 0.9336907563827195\n",
      "Best iteration: 129\n",
      "6 fold results: [0.8433251900964542, 0.7751181481656346, 0.6980723781441783, 0.7649930564714852, 0.8791945599173328, 0.9336907563827195]\n",
      "Average iterations: 1242.5\n",
      "Mean Cross Validation RMSE: 0.8157323481963008\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.762774630633693,Val RMSE: 0.8335737874895094\n",
      "Best iteration: 1851\n",
      "Train RMSE: 0.8169843096516294,Val RMSE: 0.7730975319031139\n",
      "Best iteration: 469\n",
      "Train RMSE: 0.7579442725694894,Val RMSE: 0.6972170687312351\n",
      "Best iteration: 1987\n",
      "Train RMSE: 0.7546683694061485,Val RMSE: 0.7626597143405979\n",
      "Best iteration: 2000\n",
      "Train RMSE: 0.768437106336793,Val RMSE: 0.8815357123858392\n",
      "Best iteration: 1429\n",
      "Train RMSE: 0.8476579024478961,Val RMSE: 0.9363412371542104\n",
      "Best iteration: 164\n",
      "6 fold results: [0.8335737874895094, 0.7730975319031139, 0.6972170687312351, 0.7626597143405979, 0.8815357123858392, 0.9363412371542104]\n",
      "Average iterations: 1315.6666666666667\n",
      "Mean Cross Validation RMSE: 0.814070842000751\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.7000000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7752279115695954,Val RMSE: 0.8296558338164096\n",
      "Best iteration: 1651\n",
      "Train RMSE: 0.7971366004141628,Val RMSE: 0.7700344705916438\n",
      "Best iteration: 853\n",
      "Train RMSE: 0.7686196363990588,Val RMSE: 0.7011494965023802\n",
      "Best iteration: 1920\n",
      "Train RMSE: 0.7694360509553114,Val RMSE: 0.7650747073227778\n",
      "Best iteration: 1645\n",
      "Train RMSE: 0.7757006585094139,Val RMSE: 0.8813078895114421\n",
      "Best iteration: 1328\n",
      "Train RMSE: 0.8114369847856425,Val RMSE: 0.9380835941943848\n",
      "Best iteration: 489\n",
      "6 fold results: [0.8296558338164096, 0.7700344705916438, 0.7011494965023802, 0.7650747073227778, 0.8813078895114421, 0.9380835941943848]\n",
      "Average iterations: 1313.3333333333333\n",
      "Mean Cross Validation RMSE: 0.8142176653231731\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7735555924705648,Val RMSE: 0.8292124464026535\n",
      "Best iteration: 1543\n",
      "Train RMSE: 0.823676233592166,Val RMSE: 0.7748464601457404\n",
      "Best iteration: 413\n",
      "Train RMSE: 0.7824065993022905,Val RMSE: 0.7030409020887516\n",
      "Best iteration: 1220\n",
      "Train RMSE: 0.7612355582585233,Val RMSE: 0.7641839069417077\n",
      "Best iteration: 1980\n",
      "Train RMSE: 0.7754347858296903,Val RMSE: 0.882535772922362\n",
      "Best iteration: 1298\n",
      "Train RMSE: 0.8620511780745328,Val RMSE: 0.9384548728572061\n",
      "Best iteration: 108\n",
      "6 fold results: [0.8292124464026535, 0.7748464601457404, 0.7030409020887516, 0.7641839069417077, 0.882535772922362, 0.9384548728572061]\n",
      "Average iterations: 1092.6666666666667\n",
      "Mean Cross Validation RMSE: 0.8153790602264035\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.772909851421186,Val RMSE: 0.8332878259313147\n",
      "Best iteration: 1493\n",
      "Train RMSE: 0.7911843955758572,Val RMSE: 0.7751579031516227\n",
      "Best iteration: 910\n",
      "Train RMSE: 0.7710140677948616,Val RMSE: 0.6992242269461322\n",
      "Best iteration: 1517\n",
      "Train RMSE: 0.7578212698202887,Val RMSE: 0.7644148624534306\n",
      "Best iteration: 1999\n",
      "Train RMSE: 0.7811002587819675,Val RMSE: 0.884716040967118\n",
      "Best iteration: 1058\n",
      "Train RMSE: 0.8684296518186007,Val RMSE: 0.9405036176260082\n",
      "Best iteration: 92\n",
      "6 fold results: [0.8332878259313147, 0.7751579031516227, 0.6992242269461322, 0.7644148624534306, 0.884716040967118, 0.9405036176260082]\n",
      "Average iterations: 1177.1666666666667\n",
      "Mean Cross Validation RMSE: 0.8162174128459375\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train RMSE: 0.7759319116865128,Val RMSE: 0.8320097226505544\n",
      "Best iteration: 1451\n",
      "Train RMSE: 0.8026895626581902,Val RMSE: 0.7749414944869222\n",
      "Best iteration: 714\n",
      "Train RMSE: 0.7623087627547279,Val RMSE: 0.6989193067348048\n",
      "Best iteration: 1958\n",
      "Train RMSE: 0.7700826924991517,Val RMSE: 0.76431676460316\n",
      "Best iteration: 1494\n",
      "Train RMSE: 0.7752535143873757,Val RMSE: 0.8770550029344238\n",
      "Best iteration: 1281\n",
      "Train RMSE: 0.8644767182272546,Val RMSE: 0.9350729679765752\n",
      "Best iteration: 99\n",
      "6 fold results: [0.8320097226505544, 0.7749414944869222, 0.6989193067348048, 0.76431676460316, 0.8770550029344238, 0.9350729679765752]\n",
      "Average iterations: 1165.1666666666667\n",
      "Mean Cross Validation RMSE: 0.8137192098977399\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.7000000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.78970012773955,Val RMSE: 0.8345347248027192\n",
      "Best iteration: 1113\n",
      "Train RMSE: 0.8028610018240826,Val RMSE: 0.7756917066268698\n",
      "Best iteration: 741\n",
      "Train RMSE: 0.7698956706224477,Val RMSE: 0.7016269195478732\n",
      "Best iteration: 1909\n",
      "Train RMSE: 0.7653747071622755,Val RMSE: 0.7633499676822899\n",
      "Best iteration: 2000\n",
      "Train RMSE: 0.7797985326566781,Val RMSE: 0.8824128790908105\n",
      "Best iteration: 1255\n",
      "Train RMSE: 0.8642460881531727,Val RMSE: 0.941704160793446\n",
      "Best iteration: 102\n",
      "6 fold results: [0.8345347248027192, 0.7756917066268698, 0.7016269195478732, 0.7633499676822899, 0.8824128790908105, 0.941704160793446]\n",
      "Average iterations: 1185.6666666666667\n",
      "Mean Cross Validation RMSE: 0.816553393090668\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7643981757904276,Val RMSE: 0.8295411291520355\n",
      "Best iteration: 1987\n",
      "Train RMSE: 0.797270620536947,Val RMSE: 0.7744059423956574\n",
      "Best iteration: 809\n",
      "Train RMSE: 0.7641045400837341,Val RMSE: 0.7008494748998257\n",
      "Best iteration: 1930\n",
      "Train RMSE: 0.7672894738323341,Val RMSE: 0.767048815838677\n",
      "Best iteration: 1631\n",
      "Train RMSE: 0.7967480203025217,Val RMSE: 0.8850856985205364\n",
      "Best iteration: 692\n",
      "Train RMSE: 0.8553786905340921,Val RMSE: 0.9347069460045807\n",
      "Best iteration: 128\n",
      "6 fold results: [0.8295411291520355, 0.7744059423956574, 0.7008494748998257, 0.767048815838677, 0.8850856985205364, 0.9347069460045807]\n",
      "Average iterations: 1195.1666666666667\n",
      "Mean Cross Validation RMSE: 0.8152730011352188\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7643981757904276,Val RMSE: 0.8295411291520355\n",
      "Best iteration: 1987\n",
      "Train RMSE: 0.797270620536947,Val RMSE: 0.7744059423956574\n",
      "Best iteration: 809\n",
      "Train RMSE: 0.7641045400837341,Val RMSE: 0.7008494748998257\n",
      "Best iteration: 1930\n",
      "Train RMSE: 0.7672894738323341,Val RMSE: 0.767048815838677\n",
      "Best iteration: 1631\n",
      "Train RMSE: 0.7967480203025217,Val RMSE: 0.8850856985205364\n",
      "Best iteration: 692\n",
      "Train RMSE: 0.8553786905340921,Val RMSE: 0.9347069460045807\n",
      "Best iteration: 128\n",
      "6 fold results: [0.8295411291520355, 0.7744059423956574, 0.7008494748998257, 0.767048815838677, 0.8850856985205364, 0.9347069460045807]\n",
      "Average iterations: 1195.1666666666667\n",
      "Mean Cross Validation RMSE: 0.8152730011352188\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7705489269740964,Val RMSE: 0.8301300391624561\n",
      "Best iteration: 1856\n",
      "Train RMSE: 0.8116707237802843,Val RMSE: 0.7728140659216145\n",
      "Best iteration: 574\n",
      "Train RMSE: 0.7693810098445885,Val RMSE: 0.6998271958747547\n",
      "Best iteration: 1889\n",
      "Train RMSE: 0.776044824239635,Val RMSE: 0.7657714625808993\n",
      "Best iteration: 1385\n",
      "Train RMSE: 0.7764452625099424,Val RMSE: 0.8801545158652219\n",
      "Best iteration: 1315\n",
      "Train RMSE: 0.8616767921504599,Val RMSE: 0.9377309514721895\n",
      "Best iteration: 108\n",
      "6 fold results: [0.8301300391624561, 0.7728140659216145, 0.6998271958747547, 0.7657714625808993, 0.8801545158652219, 0.9377309514721895]\n",
      "Average iterations: 1186.8333333333333\n",
      "Mean Cross Validation RMSE: 0.8144047051461892\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7735555924705648,Val RMSE: 0.8292124464026535\n",
      "Best iteration: 1543\n",
      "Train RMSE: 0.823676233592166,Val RMSE: 0.7748464601457404\n",
      "Best iteration: 413\n",
      "Train RMSE: 0.7824065993022905,Val RMSE: 0.7030409020887516\n",
      "Best iteration: 1220\n",
      "Train RMSE: 0.7612355582585233,Val RMSE: 0.7641839069417077\n",
      "Best iteration: 1980\n",
      "Train RMSE: 0.7754347858296903,Val RMSE: 0.882535772922362\n",
      "Best iteration: 1298\n",
      "Train RMSE: 0.8620511780745328,Val RMSE: 0.9384548728572061\n",
      "Best iteration: 108\n",
      "6 fold results: [0.8292124464026535, 0.7748464601457404, 0.7030409020887516, 0.7641839069417077, 0.882535772922362, 0.9384548728572061]\n",
      "Average iterations: 1092.6666666666667\n",
      "Mean Cross Validation RMSE: 0.8153790602264035\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7734302173618428,Val RMSE: 0.8306181463758218\n",
      "Best iteration: 1710\n",
      "Train RMSE: 0.8051078587297388,Val RMSE: 0.7728084356476451\n",
      "Best iteration: 689\n",
      "Train RMSE: 0.7674805867973729,Val RMSE: 0.6973193065163753\n",
      "Best iteration: 1967\n",
      "Train RMSE: 0.7653225671208087,Val RMSE: 0.7621931506108167\n",
      "Best iteration: 1936\n",
      "Train RMSE: 0.7967272228771227,Val RMSE: 0.8879811044048687\n",
      "Best iteration: 734\n",
      "Train RMSE: 0.8098663046667834,Val RMSE: 0.9327203983097156\n",
      "Best iteration: 536\n",
      "6 fold results: [0.8306181463758218, 0.7728084356476451, 0.6973193065163753, 0.7621931506108167, 0.8879811044048687, 0.9327203983097156]\n",
      "Average iterations: 1261.0\n",
      "Mean Cross Validation RMSE: 0.8139400903108739\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7657282623408836,Val RMSE: 0.830595541862944\n",
      "Best iteration: 1874\n",
      "Train RMSE: 0.8131706331131164,Val RMSE: 0.7767208805181628\n",
      "Best iteration: 525\n",
      "Train RMSE: 0.7601978474886357,Val RMSE: 0.7010838232831816\n",
      "Best iteration: 1979\n",
      "Train RMSE: 0.756673093935127,Val RMSE: 0.7649150593044594\n",
      "Best iteration: 2000\n",
      "Train RMSE: 0.7989808102214229,Val RMSE: 0.8845129262713917\n",
      "Best iteration: 639\n",
      "Train RMSE: 0.8593529091509949,Val RMSE: 0.9334608805448182\n",
      "Best iteration: 111\n",
      "6 fold results: [0.830595541862944, 0.7767208805181628, 0.7010838232831816, 0.7649150593044594, 0.8845129262713917, 0.9334608805448182]\n",
      "Average iterations: 1187.0\n",
      "Mean Cross Validation RMSE: 0.8152148519641597\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.7000000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.776730511180518,Val RMSE: 0.8285432874781852\n",
      "Best iteration: 1664\n",
      "Train RMSE: 0.8127395203477115,Val RMSE: 0.7747874568391637\n",
      "Best iteration: 580\n",
      "Train RMSE: 0.7694498396667873,Val RMSE: 0.7007684211024492\n",
      "Best iteration: 1960\n",
      "Train RMSE: 0.7685069514067547,Val RMSE: 0.7646837092384754\n",
      "Best iteration: 1880\n",
      "Train RMSE: 0.7813868974906726,Val RMSE: 0.8827741496440554\n",
      "Best iteration: 1195\n",
      "Train RMSE: 0.8700132639739819,Val RMSE: 0.9368069630272876\n",
      "Best iteration: 89\n",
      "6 fold results: [0.8285432874781852, 0.7747874568391637, 0.7007684211024492, 0.7646837092384754, 0.8827741496440554, 0.9368069630272876]\n",
      "Average iterations: 1227.0\n",
      "Mean Cross Validation RMSE: 0.8147273312216027\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train RMSE: 0.7666825367141921,Val RMSE: 0.8311973655500142\n",
      "Best iteration: 1948\n",
      "Train RMSE: 0.8185005885537466,Val RMSE: 0.7753848981285482\n",
      "Best iteration: 480\n",
      "Train RMSE: 0.7713644200033555,Val RMSE: 0.699542711107383\n",
      "Best iteration: 1675\n",
      "Train RMSE: 0.7606152096747527,Val RMSE: 0.7641360538081902\n",
      "Best iteration: 1994\n",
      "Train RMSE: 0.7606396587655077,Val RMSE: 0.876602714554476\n",
      "Best iteration: 2000\n",
      "Train RMSE: 0.8547715433423312,Val RMSE: 0.9345408148978462\n",
      "Best iteration: 133\n",
      "6 fold results: [0.8311973655500142, 0.7753848981285482, 0.699542711107383, 0.7641360538081902, 0.876602714554476, 0.9345408148978462]\n",
      "Average iterations: 1370.6666666666667\n",
      "Mean Cross Validation RMSE: 0.8135674263410763\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7735555924705648,Val RMSE: 0.8292124464026535\n",
      "Best iteration: 1543\n",
      "Train RMSE: 0.823676233592166,Val RMSE: 0.7748464601457404\n",
      "Best iteration: 413\n",
      "Train RMSE: 0.7824065993022905,Val RMSE: 0.7030409020887516\n",
      "Best iteration: 1220\n",
      "Train RMSE: 0.7612355582585233,Val RMSE: 0.7641839069417077\n",
      "Best iteration: 1980\n",
      "Train RMSE: 0.7754347858296903,Val RMSE: 0.882535772922362\n",
      "Best iteration: 1298\n",
      "Train RMSE: 0.8620511780745328,Val RMSE: 0.9384548728572061\n",
      "Best iteration: 108\n",
      "6 fold results: [0.8292124464026535, 0.7748464601457404, 0.7030409020887516, 0.7641839069417077, 0.882535772922362, 0.9384548728572061]\n",
      "Average iterations: 1092.6666666666667\n",
      "Mean Cross Validation RMSE: 0.8153790602264035\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.764747077357683,Val RMSE: 0.8306087099635003\n",
      "Best iteration: 1744\n",
      "Train RMSE: 0.7972777227879102,Val RMSE: 0.7748373060810431\n",
      "Best iteration: 802\n",
      "Train RMSE: 0.7591451744515649,Val RMSE: 0.6963601952127545\n",
      "Best iteration: 1974\n",
      "Train RMSE: 0.7574797617534709,Val RMSE: 0.7614220848448778\n",
      "Best iteration: 1902\n",
      "Train RMSE: 0.7611206836825515,Val RMSE: 0.8805296422419289\n",
      "Best iteration: 1775\n",
      "Train RMSE: 0.8683768012820366,Val RMSE: 0.9375586173487852\n",
      "Best iteration: 92\n",
      "6 fold results: [0.8306087099635003, 0.7748373060810431, 0.6963601952127545, 0.7614220848448778, 0.8805296422419289, 0.9375586173487852]\n",
      "Average iterations: 1380.5\n",
      "Mean Cross Validation RMSE: 0.8135527592821483\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.764747077357683,Val RMSE: 0.8306087099635003\n",
      "Best iteration: 1744\n",
      "Train RMSE: 0.7972777227879102,Val RMSE: 0.7748373060810431\n",
      "Best iteration: 802\n",
      "Train RMSE: 0.7591451744515649,Val RMSE: 0.6963601952127545\n",
      "Best iteration: 1974\n",
      "Train RMSE: 0.7574797617534709,Val RMSE: 0.7614220848448778\n",
      "Best iteration: 1902\n",
      "Train RMSE: 0.7611206836825515,Val RMSE: 0.8805296422419289\n",
      "Best iteration: 1775\n",
      "Train RMSE: 0.8683768012820366,Val RMSE: 0.9375586173487852\n",
      "Best iteration: 92\n",
      "6 fold results: [0.8306087099635003, 0.7748373060810431, 0.6963601952127545, 0.7614220848448778, 0.8805296422419289, 0.9375586173487852]\n",
      "Average iterations: 1380.5\n",
      "Mean Cross Validation RMSE: 0.8135527592821483\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7595355315520353,Val RMSE: 0.828493072476199\n",
      "Best iteration: 1931\n",
      "Train RMSE: 0.8252906157406219,Val RMSE: 0.7790653200247688\n",
      "Best iteration: 383\n",
      "Train RMSE: 0.7582618325099767,Val RMSE: 0.699776496287434\n",
      "Best iteration: 1949\n",
      "Train RMSE: 0.7612410783792996,Val RMSE: 0.7637885119254212\n",
      "Best iteration: 1725\n",
      "Train RMSE: 0.8053099437787036,Val RMSE: 0.8857863981911892\n",
      "Best iteration: 520\n",
      "Train RMSE: 0.8571809330214815,Val RMSE: 0.9366342679854123\n",
      "Best iteration: 120\n",
      "6 fold results: [0.828493072476199, 0.7790653200247688, 0.699776496287434, 0.7637885119254212, 0.8857863981911892, 0.9366342679854123]\n",
      "Average iterations: 1103.6666666666667\n",
      "Mean Cross Validation RMSE: 0.8155906778150707\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7995708342611098,Val RMSE: 0.8332289908570641\n",
      "Best iteration: 763\n",
      "Train RMSE: 0.8058310225912875,Val RMSE: 0.7731361187294942\n",
      "Best iteration: 642\n",
      "Train RMSE: 0.773512788441239,Val RMSE: 0.7006685155648441\n",
      "Best iteration: 1423\n",
      "Train RMSE: 0.7567563907289172,Val RMSE: 0.7648910196131984\n",
      "Best iteration: 1959\n",
      "Train RMSE: 0.7674477347555075,Val RMSE: 0.8788204914215562\n",
      "Best iteration: 1471\n",
      "Train RMSE: 0.8528689940049753,Val RMSE: 0.9348931945650198\n",
      "Best iteration: 136\n",
      "6 fold results: [0.8332289908570641, 0.7731361187294942, 0.7006685155648441, 0.7648910196131984, 0.8788204914215562, 0.9348931945650198]\n",
      "Average iterations: 1064.6666666666667\n",
      "Mean Cross Validation RMSE: 0.8142730551251961\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.764747077357683,Val RMSE: 0.8306087099635003\n",
      "Best iteration: 1744\n",
      "Train RMSE: 0.7972777227879102,Val RMSE: 0.7748373060810431\n",
      "Best iteration: 802\n",
      "Train RMSE: 0.7591451744515649,Val RMSE: 0.6963601952127545\n",
      "Best iteration: 1974\n",
      "Train RMSE: 0.7574797617534709,Val RMSE: 0.7614220848448778\n",
      "Best iteration: 1902\n",
      "Train RMSE: 0.7611206836825515,Val RMSE: 0.8805296422419289\n",
      "Best iteration: 1775\n",
      "Train RMSE: 0.8683768012820366,Val RMSE: 0.9375586173487852\n",
      "Best iteration: 92\n",
      "6 fold results: [0.8306087099635003, 0.7748373060810431, 0.6963601952127545, 0.7614220848448778, 0.8805296422419289, 0.9375586173487852]\n",
      "Average iterations: 1380.5\n",
      "Mean Cross Validation RMSE: 0.8135527592821483\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.764747077357683,Val RMSE: 0.8306087099635003\n",
      "Best iteration: 1744\n",
      "Train RMSE: 0.7972777227879102,Val RMSE: 0.7748373060810431\n",
      "Best iteration: 802\n",
      "Train RMSE: 0.7591451744515649,Val RMSE: 0.6963601952127545\n",
      "Best iteration: 1974\n",
      "Train RMSE: 0.7574797617534709,Val RMSE: 0.7614220848448778\n",
      "Best iteration: 1902\n",
      "Train RMSE: 0.7611206836825515,Val RMSE: 0.8805296422419289\n",
      "Best iteration: 1775\n",
      "Train RMSE: 0.8683768012820366,Val RMSE: 0.9375586173487852\n",
      "Best iteration: 92\n",
      "6 fold results: [0.8306087099635003, 0.7748373060810431, 0.6963601952127545, 0.7614220848448778, 0.8805296422419289, 0.9375586173487852]\n",
      "Average iterations: 1380.5\n",
      "Mean Cross Validation RMSE: 0.8135527592821483\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7643981757904276,Val RMSE: 0.8295411291520355\n",
      "Best iteration: 1987\n",
      "Train RMSE: 0.797270620536947,Val RMSE: 0.7744059423956574\n",
      "Best iteration: 809\n",
      "Train RMSE: 0.7641045400837341,Val RMSE: 0.7008494748998257\n",
      "Best iteration: 1930\n",
      "Train RMSE: 0.7672894738323341,Val RMSE: 0.767048815838677\n",
      "Best iteration: 1631\n",
      "Train RMSE: 0.7967480203025217,Val RMSE: 0.8850856985205364\n",
      "Best iteration: 692\n",
      "Train RMSE: 0.8553786905340921,Val RMSE: 0.9347069460045807\n",
      "Best iteration: 128\n",
      "6 fold results: [0.8295411291520355, 0.7744059423956574, 0.7008494748998257, 0.767048815838677, 0.8850856985205364, 0.9347069460045807]\n",
      "Average iterations: 1195.1666666666667\n",
      "Mean Cross Validation RMSE: 0.8152730011352188\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.7000000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.03, 'min_child_weight': 128, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train RMSE: 0.7900460411489715,Val RMSE: 0.8314961965133284\n",
      "Best iteration: 1001\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-3-a2552faec9b1>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     10\u001b[0m     \u001b[0;34m'seed'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;36m1204\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m'tree_method'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m'gpu_hist'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     11\u001b[0m }\n\u001b[0;32m---> 12\u001b[0;31m \u001b[0mbest_hyperparams\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moptimize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mspace\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mmax_evals\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m200\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     13\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"The best hyperparameters are: \"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     14\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbest_hyperparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-1-870c4b98b6f9>\u001b[0m in \u001b[0;36moptimize\u001b[0;34m(space, seed, max_evals)\u001b[0m\n\u001b[1;32m     59\u001b[0m     best = fmin(score, space, algo=tpe.suggest, \n\u001b[1;32m     60\u001b[0m         \u001b[0;31m# trials=trials,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 61\u001b[0;31m         max_evals=max_evals)\n\u001b[0m\u001b[1;32m     62\u001b[0m     \u001b[0;32mreturn\u001b[0m \u001b[0mbest\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/xgb/lib/python3.6/site-packages/hyperopt/fmin.py\u001b[0m in \u001b[0;36mfmin\u001b[0;34m(fn, space, algo, max_evals, trials, rstate, allow_trials_fmin, pass_expr_memo_ctrl, catch_eval_exceptions, verbose, return_argmin)\u001b[0m\n\u001b[1;32m    318\u001b[0m                     verbose=verbose)\n\u001b[1;32m    319\u001b[0m     \u001b[0mrval\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcatch_eval_exceptions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcatch_eval_exceptions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 320\u001b[0;31m     \u001b[0mrval\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexhaust\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    321\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mreturn_argmin\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    322\u001b[0m         \u001b[0;32mreturn\u001b[0m \u001b[0mtrials\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margmin\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/xgb/lib/python3.6/site-packages/hyperopt/fmin.py\u001b[0m in \u001b[0;36mexhaust\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    197\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mexhaust\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    198\u001b[0m         \u001b[0mn_done\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 199\u001b[0;31m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmax_evals\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mn_done\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mblock_until_done\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0masync\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    200\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrefresh\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    201\u001b[0m         \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/xgb/lib/python3.6/site-packages/hyperopt/fmin.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, N, block_until_done)\u001b[0m\n\u001b[1;32m    171\u001b[0m             \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    172\u001b[0m                 \u001b[0;31m# -- loop over trials and do the jobs directly\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 173\u001b[0;31m                 \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mserial_evaluate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    174\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    175\u001b[0m             \u001b[0;32mif\u001b[0m \u001b[0mstopped\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/xgb/lib/python3.6/site-packages/hyperopt/fmin.py\u001b[0m in \u001b[0;36mserial_evaluate\u001b[0;34m(self, N)\u001b[0m\n\u001b[1;32m     90\u001b[0m                 \u001b[0mctrl\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbase\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCtrl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcurrent_trial\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtrial\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     91\u001b[0m                 \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 92\u001b[0;31m                     \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdomain\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevaluate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mspec\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mctrl\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     93\u001b[0m                 \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     94\u001b[0m                     \u001b[0mlogger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'job exception: %s'\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/xgb/lib/python3.6/site-packages/hyperopt/base.py\u001b[0m in \u001b[0;36mevaluate\u001b[0;34m(self, config, ctrl, attach_attachments)\u001b[0m\n\u001b[1;32m    838\u001b[0m                 \u001b[0mmemo\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmemo\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    839\u001b[0m                 print_node_on_error=self.rec_eval_print_node_on_error)\n\u001b[0;32m--> 840\u001b[0;31m             \u001b[0mrval\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpyll_rval\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    841\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    842\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrval\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mfloat\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mint\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumber\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-1-870c4b98b6f9>\u001b[0m in \u001b[0;36mscore\u001b[0;34m(params)\u001b[0m\n\u001b[1;32m     33\u001b[0m         xgb_model = xgb.train(params, dtrain, 2000, watchlist,\n\u001b[1;32m     34\u001b[0m                           \u001b[0mverbose_eval\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 35\u001b[0;31m                           early_stopping_rounds=200)\n\u001b[0m\u001b[1;32m     36\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     37\u001b[0m         \u001b[0mtrain_pred\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mxgb_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtrain\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mntree_limit\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mxgb_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbest_ntree_limit\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/xgboost/python-package/xgboost/training.py\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(params, dtrain, num_boost_round, evals, obj, feval, maximize, early_stopping_rounds, evals_result, verbose_eval, xgb_model, callbacks, learning_rates)\u001b[0m\n\u001b[1;32m    202\u001b[0m                            \u001b[0mevals\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mevals\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    203\u001b[0m                            \u001b[0mobj\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mobj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeval\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfeval\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 204\u001b[0;31m                            xgb_model=xgb_model, callbacks=callbacks)\n\u001b[0m\u001b[1;32m    205\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    206\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/xgboost/python-package/xgboost/training.py\u001b[0m in \u001b[0;36m_train_internal\u001b[0;34m(params, dtrain, num_boost_round, evals, obj, feval, xgb_model, callbacks)\u001b[0m\n\u001b[1;32m     72\u001b[0m         \u001b[0;31m# Skip the first update if it is a recovery step.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     73\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mversion\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0;36m2\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 74\u001b[0;31m             \u001b[0mbst\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupdate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtrain\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobj\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     75\u001b[0m             \u001b[0mbst\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msave_rabit_checkpoint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     76\u001b[0m             \u001b[0mversion\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/xgboost/python-package/xgboost/core.py\u001b[0m in \u001b[0;36mupdate\u001b[0;34m(self, dtrain, iteration, fobj)\u001b[0m\n\u001b[1;32m    896\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mfobj\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    897\u001b[0m             _check_call(_LIB.XGBoosterUpdateOneIter(self.handle, ctypes.c_int(iteration),\n\u001b[0;32m--> 898\u001b[0;31m                                                     dtrain.handle))\n\u001b[0m\u001b[1;32m    899\u001b[0m         \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    900\u001b[0m             \u001b[0mpred\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtrain\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# space = {\n",
    "#     #'n_estimators': hp.quniform('n_estimators', 50, 500, 5),\n",
    "# #     'max_depth': hp.choice('max_depth', np.arange(5, 10, dtype=int)),\n",
    "#     'subsample': hp.quniform('subsample', 0.7, 0.9, 0.05),\n",
    "#     'colsample_bytree': hp.quniform('colsample_bytree', 0.7, 0.9, 0.05),\n",
    "#     'min_child_weight': 128,\n",
    "#     'learning_rate': 0.03,\n",
    "#     'eval_metric': 'rmse',\n",
    "#     'objective': 'reg:linear' , \n",
    "#     'seed': 1204,'tree_method':'gpu_hist'\n",
    "# }\n",
    "# best_hyperparams = optimize(space,max_evals=200)\n",
    "# print(\"The best hyperparameters are: \")\n",
    "# print(best_hyperparams)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'learning_rate': 0.2, 'max_depth': 8, 'min_child_weight': 14, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 1.0, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7582230782966445,Val RMSE: 0.8403623177602558\n",
      "Best iteration: 73\n",
      "Train RMSE: 0.7959162985078891,Val RMSE: 0.7740933498600058\n",
      "Best iteration: 33\n",
      "Train RMSE: 0.7423294734686047,Val RMSE: 0.7076492559214119\n",
      "Best iteration: 82\n",
      "Train RMSE: 0.7278143240421381,Val RMSE: 0.7711094198596203\n",
      "Best iteration: 106\n",
      "Train RMSE: 0.7364223430064114,Val RMSE: 0.9060710204079657\n",
      "Best iteration: 94\n",
      "Train RMSE: 0.8119570311701314,Val RMSE: 0.9139115298544712\n",
      "Best iteration: 21\n",
      "6 fold results: [0.8403623177602558, 0.7740933498600058, 0.7076492559214119, 0.7711094198596203, 0.9060710204079657, 0.9139115298544712]\n",
      "Mean Cross Validation RMSE: 0.8188661489439553\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'learning_rate': 0.30000000000000004, 'max_depth': 5, 'min_child_weight': 2, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.788624708136001,Val RMSE: 0.8355307257805048\n",
      "Best iteration: 156\n",
      "Train RMSE: 0.8595680713516001,Val RMSE: 0.7869206810524011\n",
      "Best iteration: 23\n",
      "Train RMSE: 0.8252245294037229,Val RMSE: 0.7120589329969967\n",
      "Best iteration: 58\n",
      "Train RMSE: 0.7799885292921502,Val RMSE: 0.7758648092670937\n",
      "Best iteration: 182\n",
      "Train RMSE: 0.8114536590506837,Val RMSE: 0.907159414773195\n",
      "Best iteration: 81\n",
      "Train RMSE: 0.8817357273552802,Val RMSE: 0.9763293789134154\n",
      "Best iteration: 10\n",
      "6 fold results: [0.8355307257805048, 0.7869206810524011, 0.7120589329969967, 0.7758648092670937, 0.907159414773195, 0.9763293789134154]\n",
      "Mean Cross Validation RMSE: 0.832310657130601\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 1.0, 'eval_metric': 'rmse', 'learning_rate': 0.30000000000000004, 'max_depth': 7, 'min_child_weight': 16, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.7000000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.848855010377979,Val RMSE: 0.8795927630240845\n",
      "Best iteration: 12\n",
      "Train RMSE: 0.7855906257605206,Val RMSE: 0.7864776802630647\n",
      "Best iteration: 56\n",
      "Train RMSE: 0.791056916842786,Val RMSE: 0.7093173839680942\n",
      "Best iteration: 44\n",
      "Train RMSE: 0.7783925634374912,Val RMSE: 0.7730536230717033\n",
      "Best iteration: 58\n",
      "Train RMSE: 0.8070066051283752,Val RMSE: 0.8978546905960981\n",
      "Best iteration: 26\n",
      "Train RMSE: 0.7714209035525124,Val RMSE: 0.9429090542611713\n",
      "Best iteration: 68\n",
      "6 fold results: [0.8795927630240845, 0.7864776802630647, 0.7093173839680942, 0.7730536230717033, 0.8978546905960981, 0.9429090542611713]\n",
      "Mean Cross Validation RMSE: 0.8315341991973693\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'learning_rate': 0.2, 'max_depth': 8, 'min_child_weight': 16, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 1.0, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7730591744576445,Val RMSE: 0.8511377377643089\n",
      "Best iteration: 50\n",
      "Train RMSE: 0.822197519413132,Val RMSE: 0.7788524459881764\n",
      "Best iteration: 18\n",
      "Train RMSE: 0.7847785990408261,Val RMSE: 0.7056517447107381\n",
      "Best iteration: 36\n",
      "Train RMSE: 0.6855437449848418,Val RMSE: 0.7721581715687992\n",
      "Best iteration: 210\n",
      "Train RMSE: 0.832035440783625,Val RMSE: 0.9525999905825111\n",
      "Best iteration: 13\n",
      "Train RMSE: 0.793048460012458,Val RMSE: 0.9106229314211127\n",
      "Best iteration: 31\n",
      "6 fold results: [0.8511377377643089, 0.7788524459881764, 0.7056517447107381, 0.7721581715687992, 0.9525999905825111, 0.9106229314211127]\n",
      "Mean Cross Validation RMSE: 0.8285038370059411\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'learning_rate': 0.15000000000000002, 'max_depth': 8, 'min_child_weight': 11, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7769387882035717,Val RMSE: 0.8407795859651629\n",
      "Best iteration: 63\n",
      "Train RMSE: 0.7782648274975672,Val RMSE: 0.7727071608384254\n",
      "Best iteration: 64\n",
      "Train RMSE: 0.7094134669020191,Val RMSE: 0.7021411370122036\n",
      "Best iteration: 221\n",
      "Train RMSE: 0.7031049937474957,Val RMSE: 0.7678035570057788\n",
      "Best iteration: 222\n",
      "Train RMSE: 0.6574373856526353,Val RMSE: 0.87235469816028\n",
      "Best iteration: 526\n",
      "Train RMSE: 0.8284455524687812,Val RMSE: 0.9301342892341211\n",
      "Best iteration: 19\n",
      "6 fold results: [0.8407795859651629, 0.7727071608384254, 0.7021411370122036, 0.7678035570057788, 0.87235469816028, 0.9301342892341211]\n",
      "Mean Cross Validation RMSE: 0.8143200713693286\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.7000000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.2, 'max_depth': 5, 'min_child_weight': 13, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.7000000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7970672824225287,Val RMSE: 0.8435926290618746\n",
      "Best iteration: 229\n",
      "Train RMSE: 0.8466193189735916,Val RMSE: 0.7881824760443266\n",
      "Best iteration: 55\n",
      "Train RMSE: 0.8192100018913292,Val RMSE: 0.7063066341682144\n",
      "Best iteration: 111\n",
      "Train RMSE: 0.7264051779781973,Val RMSE: 0.7679562401747073\n",
      "Best iteration: 1135\n",
      "Train RMSE: 0.7808401558644813,Val RMSE: 0.8935049908268601\n",
      "Best iteration: 286\n",
      "Train RMSE: 0.8199387925262348,Val RMSE: 0.9520338723143277\n",
      "Best iteration: 99\n",
      "6 fold results: [0.8435926290618746, 0.7881824760443266, 0.7063066341682144, 0.7679562401747073, 0.8935049908268601, 0.9520338723143277]\n",
      "Mean Cross Validation RMSE: 0.8252628070983853\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.30000000000000004, 'max_depth': 8, 'min_child_weight': 5, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.8515889574892926,Val RMSE: 0.8545643454465998\n",
      "Best iteration: 7\n",
      "Train RMSE: 0.8064900550985926,Val RMSE: 0.7809625478256061\n",
      "Best iteration: 17\n",
      "Train RMSE: 0.7946998229817066,Val RMSE: 0.7129369851283819\n",
      "Best iteration: 24\n",
      "Train RMSE: 0.7685412317283216,Val RMSE: 0.7797762512577594\n",
      "Best iteration: 37\n",
      "Train RMSE: 0.7775570370767618,Val RMSE: 0.8950298202597528\n",
      "Best iteration: 32\n",
      "Train RMSE: 0.8428342936480604,Val RMSE: 0.9309177113069202\n",
      "Best iteration: 7\n",
      "6 fold results: [0.8545643454465998, 0.7809625478256061, 0.7129369851283819, 0.7797762512577594, 0.8950298202597528, 0.9309177113069202]\n",
      "Mean Cross Validation RMSE: 0.8256979435375035\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.7000000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.25, 'max_depth': 8, 'min_child_weight': 3, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 1.0, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.755687605391771,Val RMSE: 0.8443035499843311\n",
      "Best iteration: 49\n",
      "Train RMSE: 0.7725336592788566,Val RMSE: 0.7824506307756006\n",
      "Best iteration: 40\n",
      "Train RMSE: 0.7822209999753453,Val RMSE: 0.7153130330241673\n",
      "Best iteration: 33\n",
      "Train RMSE: 0.7605484408463874,Val RMSE: 0.7768702382734332\n",
      "Best iteration: 50\n",
      "Train RMSE: 0.8039416588046462,Val RMSE: 0.9216084822407595\n",
      "Best iteration: 19\n",
      "Train RMSE: 0.7897232612751817,Val RMSE: 0.9230584452610611\n",
      "Best iteration: 26\n",
      "6 fold results: [0.8443035499843311, 0.7824506307756006, 0.7153130330241673, 0.7768702382734332, 0.9216084822407595, 0.9230584452610611]\n",
      "Mean Cross Validation RMSE: 0.8272673965932255\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.25, 'max_depth': 7, 'min_child_weight': 7, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 1.0, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.8028167905058052,Val RMSE: 0.8444344252450695\n",
      "Best iteration: 40\n",
      "Train RMSE: 0.8153761429253077,Val RMSE: 0.783969988878638\n",
      "Best iteration: 30\n",
      "Train RMSE: 0.7488235545130839,Val RMSE: 0.7128159157262673\n",
      "Best iteration: 114\n",
      "Train RMSE: 0.751544157506239,Val RMSE: 0.7743493686895383\n",
      "Best iteration: 101\n",
      "Train RMSE: 0.8255122371831723,Val RMSE: 0.8946933516172116\n",
      "Best iteration: 19\n",
      "Train RMSE: 0.8481869420685078,Val RMSE: 0.9391293354338011\n",
      "Best iteration: 11\n",
      "6 fold results: [0.8444344252450695, 0.783969988878638, 0.7128159157262673, 0.7743493686895383, 0.8946933516172116, 0.9391293354338011]\n",
      "Mean Cross Validation RMSE: 0.8248987309317544\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.25, 'max_depth': 9, 'min_child_weight': 3, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train RMSE: 0.7595040231363349,Val RMSE: 0.842674488257171\n",
      "Best iteration: 31\n",
      "Train RMSE: 0.8183653838089552,Val RMSE: 0.7895589720726386\n",
      "Best iteration: 10\n",
      "Train RMSE: 0.7598488573314129,Val RMSE: 0.711099878568482\n",
      "Best iteration: 31\n",
      "Train RMSE: 0.706029741155187,Val RMSE: 0.7729019468676093\n",
      "Best iteration: 72\n",
      "Train RMSE: 0.6913824562245896,Val RMSE: 0.8845160934532448\n",
      "Best iteration: 96\n",
      "Train RMSE: 0.7763840007760797,Val RMSE: 0.9250501490221336\n",
      "Best iteration: 23\n",
      "6 fold results: [0.842674488257171, 0.7895589720726386, 0.711099878568482, 0.7729019468676093, 0.8845160934532448, 0.9250501490221336]\n",
      "Mean Cross Validation RMSE: 0.8209669213735467\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.15000000000000002, 'max_depth': 6, 'min_child_weight': 2, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7582270088345027,Val RMSE: 0.8440419151284569\n",
      "Best iteration: 277\n",
      "Train RMSE: 0.8157996885812983,Val RMSE: 0.7742013723384409\n",
      "Best iteration: 78\n",
      "Train RMSE: 0.7556157865506867,Val RMSE: 0.7058118769376844\n",
      "Best iteration: 293\n",
      "Train RMSE: 0.779161063767414,Val RMSE: 0.7728093611749505\n",
      "Best iteration: 162\n",
      "Train RMSE: 0.7212575211950951,Val RMSE: 0.8787581255389242\n",
      "Best iteration: 529\n",
      "Train RMSE: 0.8755417577151996,Val RMSE: 0.9534335027990382\n",
      "Best iteration: 15\n",
      "6 fold results: [0.8440419151284569, 0.7742013723384409, 0.7058118769376844, 0.7728093611749505, 0.8787581255389242, 0.9534335027990382]\n",
      "Mean Cross Validation RMSE: 0.8215093589862491\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.7000000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.2, 'max_depth': 8, 'min_child_weight': 1, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.8296302933736005,Val RMSE: 0.844967352445231\n",
      "Best iteration: 16\n",
      "Train RMSE: 0.774714177246681,Val RMSE: 0.7760694784689524\n",
      "Best iteration: 49\n",
      "Train RMSE: 0.7692615016886953,Val RMSE: 0.7073874235058213\n",
      "Best iteration: 54\n",
      "Train RMSE: 0.7364059933233593,Val RMSE: 0.7827469788806453\n",
      "Best iteration: 95\n",
      "Train RMSE: 0.8022768893515833,Val RMSE: 0.8957843582309659\n",
      "Best iteration: 26\n",
      "Train RMSE: 0.8101917270113721,Val RMSE: 0.9158069234574235\n",
      "Best iteration: 22\n",
      "6 fold results: [0.844967352445231, 0.7760694784689524, 0.7073874235058213, 0.7827469788806453, 0.8957843582309659, 0.9158069234574235]\n",
      "Mean Cross Validation RMSE: 0.8204604191648399\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'learning_rate': 0.25, 'max_depth': 5, 'min_child_weight': 5, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.8004522624748072,Val RMSE: 0.8567764877240969\n",
      "Best iteration: 159\n",
      "Train RMSE: 0.8074083346489719,Val RMSE: 0.7836798456117721\n",
      "Best iteration: 114\n",
      "Train RMSE: 0.8589820657085829,Val RMSE: 0.7166064682525124\n",
      "Best iteration: 30\n",
      "Train RMSE: 0.761253606164468,Val RMSE: 0.7794178268914499\n",
      "Best iteration: 354\n",
      "Train RMSE: 0.7752844211670813,Val RMSE: 0.9027941351818801\n",
      "Best iteration: 261\n",
      "Train RMSE: 0.8798003671444602,Val RMSE: 0.9493522393196044\n",
      "Best iteration: 13\n",
      "6 fold results: [0.8567764877240969, 0.7836798456117721, 0.7166064682525124, 0.7794178268914499, 0.9027941351818801, 0.9493522393196044]\n",
      "Mean Cross Validation RMSE: 0.8314378338302193\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.7000000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.15000000000000002, 'max_depth': 9, 'min_child_weight': 5, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7611315689292109,Val RMSE: 0.8402673052475836\n",
      "Best iteration: 51\n",
      "Train RMSE: 0.7979709344508693,Val RMSE: 0.7753455007188242\n",
      "Best iteration: 24\n",
      "Train RMSE: 0.7211697109629227,Val RMSE: 0.706764212914431\n",
      "Best iteration: 95\n",
      "Train RMSE: 0.6804118845464597,Val RMSE: 0.7723949225727524\n",
      "Best iteration: 170\n",
      "Train RMSE: 0.7317586515415003,Val RMSE: 0.8852742735113073\n",
      "Best iteration: 75\n",
      "Train RMSE: 0.7865665731828517,Val RMSE: 0.9297843049242506\n",
      "Best iteration: 28\n",
      "6 fold results: [0.8402673052475836, 0.7753455007188242, 0.706764212914431, 0.7723949225727524, 0.8852742735113073, 0.9297843049242506]\n",
      "Mean Cross Validation RMSE: 0.8183050866481915\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.25, 'max_depth': 6, 'min_child_weight': 6, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7903287189187509,Val RMSE: 0.8506346380961639\n",
      "Best iteration: 92\n",
      "Train RMSE: 0.8303483949367455,Val RMSE: 0.7763455753797092\n",
      "Best iteration: 30\n",
      "Train RMSE: 0.8046050909120727,Val RMSE: 0.7090889509436329\n",
      "Best iteration: 61\n",
      "Train RMSE: 0.7749560312903885,Val RMSE: 0.7746200768543773\n",
      "Best iteration: 109\n",
      "Train RMSE: 0.8393408755753109,Val RMSE: 0.9341338442895863\n",
      "Best iteration: 24\n",
      "Train RMSE: 0.8130630596240189,Val RMSE: 0.959564064854976\n",
      "Best iteration: 47\n",
      "6 fold results: [0.8506346380961639, 0.7763455753797092, 0.7090889509436329, 0.7746200768543773, 0.9341338442895863, 0.959564064854976]\n",
      "Mean Cross Validation RMSE: 0.8340645250697408\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.7000000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.25, 'max_depth': 6, 'min_child_weight': 2, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.802288813507097,Val RMSE: 0.8541489614807186\n",
      "Best iteration: 71\n",
      "Train RMSE: 0.801362510554077,Val RMSE: 0.7771789912918791\n",
      "Best iteration: 81\n",
      "Train RMSE: 0.7597948867929049,Val RMSE: 0.7113487395683153\n",
      "Best iteration: 167\n",
      "Train RMSE: 0.7705492750646998,Val RMSE: 0.7759878323377754\n",
      "Best iteration: 124\n",
      "Train RMSE: 0.7453577295810611,Val RMSE: 0.8816074485097221\n",
      "Best iteration: 214\n",
      "Train RMSE: 0.8693176432158535,Val RMSE: 0.9399045624881276\n",
      "Best iteration: 11\n",
      "6 fold results: [0.8541489614807186, 0.7771789912918791, 0.7113487395683153, 0.7759878323377754, 0.8816074485097221, 0.9399045624881276]\n",
      "Mean Cross Validation RMSE: 0.8233627559460897\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.2, 'max_depth': 6, 'min_child_weight': 3, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7771760769300994,Val RMSE: 0.8426272728547953\n",
      "Best iteration: 145\n",
      "Train RMSE: 0.8051636407514075,Val RMSE: 0.7732132095183432\n",
      "Best iteration: 79\n",
      "Train RMSE: 0.728110192930099,Val RMSE: 0.7096743418432444\n",
      "Best iteration: 407\n",
      "Train RMSE: 0.7148783941159998,Val RMSE: 0.7735119022841932\n",
      "Best iteration: 511\n",
      "Train RMSE: 0.8103501782114705,Val RMSE: 0.8982080581262804\n",
      "Best iteration: 59\n",
      "Train RMSE: 0.8287094141370521,Val RMSE: 0.9363410780117752\n",
      "Best iteration: 39\n",
      "6 fold results: [0.8426272728547953, 0.7732132095183432, 0.7096743418432444, 0.7735119022841932, 0.8982080581262804, 0.9363410780117752]\n",
      "Mean Cross Validation RMSE: 0.8222626437731053\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.7000000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.1, 'max_depth': 7, 'min_child_weight': 3, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7849432053643008,Val RMSE: 0.8385321022262225\n",
      "Best iteration: 137\n",
      "Train RMSE: 0.8178071045396926,Val RMSE: 0.778602922122586\n",
      "Best iteration: 65\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-5-e543421e4d2b>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     10\u001b[0m     \u001b[0;34m'seed'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;36m1204\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m'tree_method'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m'gpu_hist'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     11\u001b[0m }\n\u001b[0;32m---> 12\u001b[0;31m \u001b[0mbest_hyperparams\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moptimize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mspace\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mmax_evals\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m200\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     13\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"The best hyperparameters are: \"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     14\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbest_hyperparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-4-405c76f0d8c8>\u001b[0m in \u001b[0;36moptimize\u001b[0;34m(space, seed, max_evals)\u001b[0m\n\u001b[1;32m     58\u001b[0m     best = fmin(score, space, algo=tpe.suggest, \n\u001b[1;32m     59\u001b[0m         \u001b[0;31m# trials=trials,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 60\u001b[0;31m         max_evals=max_evals)\n\u001b[0m\u001b[1;32m     61\u001b[0m     \u001b[0;32mreturn\u001b[0m \u001b[0mbest\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/xgb/lib/python3.6/site-packages/hyperopt/fmin.py\u001b[0m in \u001b[0;36mfmin\u001b[0;34m(fn, space, algo, max_evals, trials, rstate, allow_trials_fmin, pass_expr_memo_ctrl, catch_eval_exceptions, verbose, return_argmin)\u001b[0m\n\u001b[1;32m    318\u001b[0m                     verbose=verbose)\n\u001b[1;32m    319\u001b[0m     \u001b[0mrval\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcatch_eval_exceptions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcatch_eval_exceptions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 320\u001b[0;31m     \u001b[0mrval\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexhaust\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    321\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mreturn_argmin\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    322\u001b[0m         \u001b[0;32mreturn\u001b[0m \u001b[0mtrials\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margmin\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/xgb/lib/python3.6/site-packages/hyperopt/fmin.py\u001b[0m in \u001b[0;36mexhaust\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    197\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mexhaust\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    198\u001b[0m         \u001b[0mn_done\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 199\u001b[0;31m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmax_evals\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mn_done\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mblock_until_done\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0masync\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    200\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrefresh\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    201\u001b[0m         \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/xgb/lib/python3.6/site-packages/hyperopt/fmin.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, N, block_until_done)\u001b[0m\n\u001b[1;32m    171\u001b[0m             \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    172\u001b[0m                 \u001b[0;31m# -- loop over trials and do the jobs directly\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 173\u001b[0;31m                 \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mserial_evaluate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    174\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    175\u001b[0m             \u001b[0;32mif\u001b[0m \u001b[0mstopped\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/xgb/lib/python3.6/site-packages/hyperopt/fmin.py\u001b[0m in \u001b[0;36mserial_evaluate\u001b[0;34m(self, N)\u001b[0m\n\u001b[1;32m     90\u001b[0m                 \u001b[0mctrl\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbase\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCtrl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcurrent_trial\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtrial\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     91\u001b[0m                 \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 92\u001b[0;31m                     \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdomain\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevaluate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mspec\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mctrl\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     93\u001b[0m                 \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     94\u001b[0m                     \u001b[0mlogger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'job exception: %s'\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/xgb/lib/python3.6/site-packages/hyperopt/base.py\u001b[0m in \u001b[0;36mevaluate\u001b[0;34m(self, config, ctrl, attach_attachments)\u001b[0m\n\u001b[1;32m    838\u001b[0m                 \u001b[0mmemo\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmemo\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    839\u001b[0m                 print_node_on_error=self.rec_eval_print_node_on_error)\n\u001b[0;32m--> 840\u001b[0;31m             \u001b[0mrval\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpyll_rval\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    841\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    842\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrval\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mfloat\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mint\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumber\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-4-405c76f0d8c8>\u001b[0m in \u001b[0;36mscore\u001b[0;34m(params)\u001b[0m\n\u001b[1;32m     33\u001b[0m         xgb_model = xgb.train(params, dtrain, 2000, watchlist,\n\u001b[1;32m     34\u001b[0m                           \u001b[0mverbose_eval\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 35\u001b[0;31m                           early_stopping_rounds=250)\n\u001b[0m\u001b[1;32m     36\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     37\u001b[0m         \u001b[0mtrain_pred\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mxgb_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtrain\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mntree_limit\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mxgb_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbest_ntree_limit\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/xgboost/python-package/xgboost/training.py\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(params, dtrain, num_boost_round, evals, obj, feval, maximize, early_stopping_rounds, evals_result, verbose_eval, xgb_model, callbacks, learning_rates)\u001b[0m\n\u001b[1;32m    202\u001b[0m                            \u001b[0mevals\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mevals\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    203\u001b[0m                            \u001b[0mobj\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mobj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeval\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfeval\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 204\u001b[0;31m                            xgb_model=xgb_model, callbacks=callbacks)\n\u001b[0m\u001b[1;32m    205\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    206\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/xgboost/python-package/xgboost/training.py\u001b[0m in \u001b[0;36m_train_internal\u001b[0;34m(params, dtrain, num_boost_round, evals, obj, feval, xgb_model, callbacks)\u001b[0m\n\u001b[1;32m     72\u001b[0m         \u001b[0;31m# Skip the first update if it is a recovery step.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     73\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mversion\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0;36m2\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 74\u001b[0;31m             \u001b[0mbst\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupdate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtrain\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobj\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     75\u001b[0m             \u001b[0mbst\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msave_rabit_checkpoint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     76\u001b[0m             \u001b[0mversion\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/xgboost/python-package/xgboost/core.py\u001b[0m in \u001b[0;36mupdate\u001b[0;34m(self, dtrain, iteration, fobj)\u001b[0m\n\u001b[1;32m    896\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mfobj\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    897\u001b[0m             _check_call(_LIB.XGBoosterUpdateOneIter(self.handle, ctypes.c_int(iteration),\n\u001b[0;32m--> 898\u001b[0;31m                                                     dtrain.handle))\n\u001b[0m\u001b[1;32m    899\u001b[0m         \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    900\u001b[0m             \u001b[0mpred\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtrain\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# space = {\n",
    "#     #'n_estimators': hp.quniform('n_estimators', 50, 500, 5),\n",
    "#     'max_depth': hp.choice('max_depth', np.arange(5, 10, dtype=int)),\n",
    "#     'subsample': hp.quniform('subsample', 0.7, 1, 0.1),\n",
    "#     'colsample_bytree': hp.quniform('colsample_bytree', 0.7, 1, 0.1),\n",
    "#     'min_child_weight': hp.choice('min_child_weight',np.arange(1,17,1, dtype=int)),\n",
    "#     'learning_rate': hp.quniform('learning_rate', 0.1, 0.3, 0.05),\n",
    "#     'eval_metric': 'rmse',\n",
    "#     'objective': 'reg:linear' , \n",
    "#     'seed': 1204,'tree_method':'gpu_hist'\n",
    "# }\n",
    "# best_hyperparams = optimize(space,max_evals=200)\n",
    "# print(\"The best hyperparameters are: \")\n",
    "# print(best_hyperparams)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'learning_rate': 0.1, 'max_depth': 6, 'min_child_weight': 3, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.8104449104611019,Val RMSE: 0.8348437846128616\n",
      "Best iteration: 136\n",
      "Train RMSE: 0.7775579186237014,Val RMSE: 0.7757464152310767\n",
      "Best iteration: 274\n",
      "Train RMSE: 0.7779337498594463,Val RMSE: 0.7051185321588994\n",
      "Best iteration: 257\n",
      "Train RMSE: 0.7262385476763841,Val RMSE: 0.7640047250446783\n",
      "Best iteration: 652\n",
      "Train RMSE: 0.7100954623586775,Val RMSE: 0.8654629166546479\n",
      "Best iteration: 924\n",
      "Train RMSE: 0.8471344427241345,Val RMSE: 0.9305673811210164\n",
      "Best iteration: 45\n",
      "6 fold results: [0.8348437846128616, 0.7757464152310767, 0.7051185321588994, 0.7640047250446783, 0.8654629166546479, 0.9305673811210164]\n",
      "Mean Cross Validation RMSE: 0.8126239591371968\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'learning_rate': 0.2, 'max_depth': 3, 'min_child_weight': 9, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.8210252481009683,Val RMSE: 0.8510075782165017\n",
      "Best iteration: 827\n",
      "Train RMSE: 0.8344425845545215,Val RMSE: 0.7826485894350528\n",
      "Best iteration: 502\n",
      "Train RMSE: 0.8535984793456421,Val RMSE: 0.7136212511069392\n",
      "Best iteration: 228\n",
      "Train RMSE: 0.7977963151795504,Val RMSE: 0.7773460119780021\n",
      "Best iteration: 1504\n",
      "Train RMSE: 0.8220184027011028,Val RMSE: 0.896596132846264\n",
      "Best iteration: 553\n",
      "Train RMSE: 0.9083183625646994,Val RMSE: 0.9627423278633653\n",
      "Best iteration: 16\n",
      "6 fold results: [0.8510075782165017, 0.7826485894350528, 0.7136212511069392, 0.7773460119780021, 0.896596132846264, 0.9627423278633653]\n",
      "Mean Cross Validation RMSE: 0.8306603152410209\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'learning_rate': 0.35000000000000003, 'max_depth': 7, 'min_child_weight': 9, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.8577525341982695,Val RMSE: 0.8619099768829848\n",
      "Best iteration: 8\n",
      "Train RMSE: 0.8462664906009298,Val RMSE: 0.7878030602548957\n",
      "Best iteration: 10\n",
      "Train RMSE: 0.8108885630870946,Val RMSE: 0.7152354933658609\n",
      "Best iteration: 25\n",
      "Train RMSE: 0.7644797342432326,Val RMSE: 0.7763760164346646\n",
      "Best iteration: 63\n",
      "Train RMSE: 0.8028211337910087,Val RMSE: 0.8955996267660402\n",
      "Best iteration: 24\n",
      "Train RMSE: 0.8209688376554104,Val RMSE: 0.9187368872089527\n",
      "Best iteration: 17\n",
      "6 fold results: [0.8619099768829848, 0.7878030602548957, 0.7152354933658609, 0.7763760164346646, 0.8955996267660402, 0.9187368872089527]\n",
      "Mean Cross Validation RMSE: 0.8259435101522331\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.30000000000000004, 'max_depth': 7, 'min_child_weight': 11, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.7000000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.8164841829182351,Val RMSE: 0.8640363678938419\n",
      "Best iteration: 27\n",
      "Train RMSE: 0.7922187603382493,Val RMSE: 0.7735001509751984\n",
      "Best iteration: 48\n",
      "Train RMSE: 0.7984844111865863,Val RMSE: 0.7113057953683044\n",
      "Best iteration: 35\n",
      "Train RMSE: 0.7552247845841953,Val RMSE: 0.7758442202977761\n",
      "Best iteration: 101\n",
      "Train RMSE: 0.7970484750453469,Val RMSE: 0.8939673299580462\n",
      "Best iteration: 37\n",
      "Train RMSE: 0.8532505976967963,Val RMSE: 0.9356903093639953\n",
      "Best iteration: 8\n",
      "6 fold results: [0.8640363678938419, 0.7735001509751984, 0.7113057953683044, 0.7758442202977761, 0.8939673299580462, 0.9356903093639953]\n",
      "Mean Cross Validation RMSE: 0.8257240289761937\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.9500000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.15000000000000002, 'max_depth': 13, 'min_child_weight': 6, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7239977370774748,Val RMSE: 0.8537933456453786\n",
      "Best iteration: 14\n",
      "Train RMSE: 0.6784002018133124,Val RMSE: 0.7821170954122706\n",
      "Best iteration: 24\n",
      "Train RMSE: 0.6675636446182014,Val RMSE: 0.7114403591612966\n",
      "Best iteration: 30\n",
      "Train RMSE: 0.6633006718715978,Val RMSE: 0.7738918934812669\n",
      "Best iteration: 32\n",
      "Train RMSE: 0.6672493256057755,Val RMSE: 0.9029361382183253\n",
      "Best iteration: 29\n",
      "Train RMSE: 0.7684065834828756,Val RMSE: 0.9460519467985148\n",
      "Best iteration: 10\n",
      "6 fold results: [0.8537933456453786, 0.7821170954122706, 0.7114403591612966, 0.7738918934812669, 0.9029361382183253, 0.9460519467985148]\n",
      "Mean Cross Validation RMSE: 0.8283717964528421\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.4, 'max_depth': 5, 'min_child_weight': 1, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.7000000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.8795951347577348,Val RMSE: 0.8710401030986201\n",
      "Best iteration: 11\n",
      "Train RMSE: 0.8495069069292501,Val RMSE: 0.7838660880330266\n",
      "Best iteration: 27\n",
      "Train RMSE: 0.8315727510791655,Val RMSE: 0.7278754973346807\n",
      "Best iteration: 45\n",
      "Train RMSE: 0.8244716382882836,Val RMSE: 0.7952786331610373\n",
      "Best iteration: 47\n",
      "Train RMSE: 0.806670403104068,Val RMSE: 0.8928912926001427\n",
      "Best iteration: 74\n",
      "Train RMSE: 0.8730930597775055,Val RMSE: 0.943069982270082\n",
      "Best iteration: 11\n",
      "6 fold results: [0.8710401030986201, 0.7838660880330266, 0.7278754973346807, 0.7952786331610373, 0.8928912926001427, 0.943069982270082]\n",
      "Mean Cross Validation RMSE: 0.8356702660829316\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.4, 'max_depth': 11, 'min_child_weight': 17, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.8184513231576136,Val RMSE: 0.8469063742481195\n",
      "Best iteration: 4\n",
      "Train RMSE: 0.7826020557529619,Val RMSE: 0.7948563757302907\n",
      "Best iteration: 7\n",
      "Train RMSE: 0.7820670242587217,Val RMSE: 0.7322200649702755\n",
      "Best iteration: 7\n",
      "Train RMSE: 0.8006670986783686,Val RMSE: 0.8473753573603593\n",
      "Best iteration: 5\n",
      "Train RMSE: 0.7988033524008802,Val RMSE: 0.9181652751525907\n",
      "Best iteration: 5\n",
      "Train RMSE: 0.8007196542264086,Val RMSE: 0.9351153563160389\n",
      "Best iteration: 5\n",
      "6 fold results: [0.8469063742481195, 0.7948563757302907, 0.7322200649702755, 0.8473753573603593, 0.9181652751525907, 0.9351153563160389]\n",
      "Mean Cross Validation RMSE: 0.8457731339629456\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8, 'eval_metric': 'rmse', 'learning_rate': 0.15000000000000002, 'max_depth': 11, 'min_child_weight': 14, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7823159384655342,Val RMSE: 0.842792426787172\n",
      "Best iteration: 16\n",
      "Train RMSE: 0.7075309884122382,Val RMSE: 0.7751256071868668\n",
      "Best iteration: 62\n",
      "Train RMSE: 0.7349876526684259,Val RMSE: 0.7120853422109423\n",
      "Best iteration: 37\n",
      "Train RMSE: 0.7320377410633454,Val RMSE: 0.7811155203471294\n",
      "Best iteration: 36\n",
      "Train RMSE: 0.7412253316856208,Val RMSE: 0.8900984161054291\n",
      "Best iteration: 29\n",
      "Train RMSE: 0.7269325954484005,Val RMSE: 0.9236596822913964\n",
      "Best iteration: 44\n",
      "6 fold results: [0.842792426787172, 0.7751256071868668, 0.7120853422109423, 0.7811155203471294, 0.8900984161054291, 0.9236596822913964]\n",
      "Mean Cross Validation RMSE: 0.8208128324881562\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'learning_rate': 0.30000000000000004, 'max_depth': 11, 'min_child_weight': 13, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.7000000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7841348789662449,Val RMSE: 0.8397272428385689\n",
      "Best iteration: 8\n",
      "Train RMSE: 0.76541006224193,Val RMSE: 0.7857489555004823\n",
      "Best iteration: 11\n",
      "Train RMSE: 0.7749697602432849,Val RMSE: 0.709989691444566\n",
      "Best iteration: 9\n",
      "Train RMSE: 0.7451481843952994,Val RMSE: 0.777227153372359\n",
      "Best iteration: 15\n",
      "Train RMSE: 0.8154517256448399,Val RMSE: 0.9247876400368283\n",
      "Best iteration: 5\n",
      "Train RMSE: 0.8005439592808876,Val RMSE: 0.9747691663729864\n",
      "Best iteration: 6\n",
      "6 fold results: [0.8397272428385689, 0.7857489555004823, 0.709989691444566, 0.777227153372359, 0.9247876400368283, 0.9747691663729864]\n",
      "Mean Cross Validation RMSE: 0.8353749749276319\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'learning_rate': 0.35000000000000003, 'max_depth': 5, 'min_child_weight': 5, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train RMSE: 0.7950262052787478,Val RMSE: 0.844506489746291\n",
      "Best iteration: 131\n",
      "Train RMSE: 0.8659788764861783,Val RMSE: 0.7988794958483234\n",
      "Best iteration: 19\n",
      "Train RMSE: 0.7782046664103704,Val RMSE: 0.7151547781937673\n",
      "Best iteration: 171\n",
      "Train RMSE: 0.7974259584908526,Val RMSE: 0.7711188887157481\n",
      "Best iteration: 108\n",
      "Train RMSE: 0.7752094201197631,Val RMSE: 0.8976584996393632\n",
      "Best iteration: 185\n",
      "Train RMSE: 0.9188485008428039,Val RMSE: 0.9889926385538049\n",
      "Best iteration: 4\n",
      "6 fold results: [0.844506489746291, 0.7988794958483234, 0.7151547781937673, 0.7711188887157481, 0.8976584996393632, 0.9889926385538049]\n",
      "Mean Cross Validation RMSE: 0.8360517984495496\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'learning_rate': 0.30000000000000004, 'max_depth': 7, 'min_child_weight': 4, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.7000000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.862410818547969,Val RMSE: 0.851106504089474\n",
      "Best iteration: 8\n",
      "Train RMSE: 0.8455283532805535,Val RMSE: 0.7806329579249307\n",
      "Best iteration: 12\n",
      "Train RMSE: 0.8023875060217437,Val RMSE: 0.7169493219483208\n",
      "Best iteration: 35\n",
      "Train RMSE: 0.7651237092314952,Val RMSE: 0.7718155162300411\n",
      "Best iteration: 73\n",
      "Train RMSE: 0.8126860185571675,Val RMSE: 0.8947582373334497\n",
      "Best iteration: 24\n",
      "Train RMSE: 0.8188871079587556,Val RMSE: 0.9248317243255333\n",
      "Best iteration: 21\n",
      "6 fold results: [0.851106504089474, 0.7806329579249307, 0.7169493219483208, 0.7718155162300411, 0.8947582373334497, 0.9248317243255333]\n",
      "Mean Cross Validation RMSE: 0.8233490436419583\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 1.0, 'eval_metric': 'rmse', 'learning_rate': 0.2, 'max_depth': 4, 'min_child_weight': 15, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.7000000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.8189253204401906,Val RMSE: 0.8440434687254463\n",
      "Best iteration: 288\n",
      "Train RMSE: 0.8325824255132882,Val RMSE: 0.7754653395009566\n",
      "Best iteration: 178\n",
      "Train RMSE: 0.8330544283167682,Val RMSE: 0.7116429947066001\n",
      "Best iteration: 160\n",
      "Train RMSE: 0.8046303145449573,Val RMSE: 0.7745133440950352\n",
      "Best iteration: 336\n",
      "Train RMSE: 0.8187426122666979,Val RMSE: 0.8950903532037461\n",
      "Best iteration: 194\n",
      "Train RMSE: 0.8878664495933708,Val RMSE: 0.9514151072234726\n",
      "Best iteration: 20\n",
      "6 fold results: [0.8440434687254463, 0.7754653395009566, 0.7116429947066001, 0.7745133440950352, 0.8950903532037461, 0.9514151072234726]\n",
      "Mean Cross Validation RMSE: 0.8253617679092096\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.75, 'eval_metric': 'rmse', 'learning_rate': 0.15000000000000002, 'max_depth': 14, 'min_child_weight': 6, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.6922213604225025,Val RMSE: 0.8484849526639561\n",
      "Best iteration: 16\n",
      "Train RMSE: 0.6738953708873482,Val RMSE: 0.7844023619966869\n",
      "Best iteration: 19\n",
      "Train RMSE: 0.6211008275426518,Val RMSE: 0.7026841795097356\n",
      "Best iteration: 38\n",
      "Train RMSE: 0.6395031480092821,Val RMSE: 0.7937028991060874\n",
      "Best iteration: 29\n",
      "Train RMSE: 0.6096684165169598,Val RMSE: 0.9022344217496115\n",
      "Best iteration: 48\n",
      "Train RMSE: 0.7289687478440019,Val RMSE: 0.9325224024354624\n",
      "Best iteration: 12\n",
      "6 fold results: [0.8484849526639561, 0.7844023619966869, 0.7026841795097356, 0.7937028991060874, 0.9022344217496115, 0.9325224024354624]\n",
      "Mean Cross Validation RMSE: 0.82733853624359\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'learning_rate': 0.1, 'max_depth': 7, 'min_child_weight': 14, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.8, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7788448695999092,Val RMSE: 0.83957202862748\n",
      "Best iteration: 165\n",
      "Train RMSE: 0.8461224791491638,Val RMSE: 0.7816167352124442\n",
      "Best iteration: 33\n",
      "Train RMSE: 0.7376979158217691,Val RMSE: 0.7014564425498949\n",
      "Best iteration: 346\n",
      "Train RMSE: 0.7218772194568154,Val RMSE: 0.7915174234825681\n",
      "Best iteration: 476\n",
      "Train RMSE: 0.7857752016221201,Val RMSE: 0.8880346675626757\n",
      "Best iteration: 125\n",
      "Train RMSE: 0.8472801815768114,Val RMSE: 0.9392833278694069\n",
      "Best iteration: 28\n",
      "6 fold results: [0.83957202862748, 0.7816167352124442, 0.7014564425498949, 0.7915174234825681, 0.8880346675626757, 0.9392833278694069]\n",
      "Mean Cross Validation RMSE: 0.8235801042174117\n",
      "\n",
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'learning_rate': 0.15000000000000002, 'max_depth': 11, 'min_child_weight': 6, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7609369422863556,Val RMSE: 0.8417980787087174\n",
      "Best iteration: 18\n",
      "Train RMSE: 0.736192604768146,Val RMSE: 0.776373751630564\n",
      "Best iteration: 26\n",
      "Train RMSE: 0.6924941561207892,Val RMSE: 0.7118310029564738\n",
      "Best iteration: 53\n"
     ]
    }
   ],
   "source": [
    "# space = {\n",
    "#     #'n_estimators': hp.quniform('n_estimators', 50, 500, 5),\n",
    "#     'max_depth': hp.choice('max_depth', np.arange(3, 15, dtype=int)),\n",
    "#     'subsample': hp.quniform('subsample', 0.7, 1, 0.05),\n",
    "#     'colsample_bytree': hp.quniform('colsample_bytree', 0.7, 1, 0.05),\n",
    "#     'min_child_weight': hp.choice('min_child_weight',np.arange(1,20,1, dtype=int)),\n",
    "#     'learning_rate': hp.quniform('learning_rate', 0.1, 0.4, 0.05),\n",
    "#     'eval_metric': 'rmse',\n",
    "#     'objective': 'reg:linear' , \n",
    "#     'seed': 1204,'tree_method':'gpu_hist'\n",
    "# }\n",
    "# best_hyperparams = optimize(space,max_evals=200)\n",
    "# print(\"The best hyperparameters are: \")\n",
    "# print(best_hyperparams)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Get OOF (out-of-fold) predictions + test predictions\n",
    "# Method: grow trees by tuning max depth"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Load the pre-built lag-feature dataset (pickled upstream; loader lives in utils).\n",
    "all_data = get_all_data(data_path,'new_sales_lag_after12.pickle')\n",
    "\n",
    "# Build the training matrix/target up to date block 33 -- presumably the\n",
    "# last training month; TODO confirm against get_X_y's contract in utils.\n",
    "X,y = get_X_y(all_data,33)\n",
    "# Drop the time index so it is not used as a model feature.\n",
    "X.drop('date_block_num',axis=1,inplace=True)\n",
    "\n",
    "# Time-series CV fold indices: validation months 28..33 (matches the\n",
    "# '6 fold results' reported in the logs above).\n",
    "cv = get_cv_idxs(all_data,28,33)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "xgb_params1={'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'learning_rate': 0.1, 'max_depth': 6, 'min_child_weight': 3, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9500000000000001, 'tree_method': 'gpu_hist'}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training with params: \n",
      "{'colsample_bytree': 0.9, 'eval_metric': 'rmse', 'learning_rate': 0.1, 'max_depth': 6, 'min_child_weight': 3, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.9500000000000001, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7639927884929891. Val RMSE: 0.8402815985964945\n",
      "Best iteration: 366\n",
      "Train RMSE: 0.7727458057246543. Val RMSE: 0.7779882624949396\n",
      "Best iteration: 293\n",
      "Train RMSE: 0.7593112165248824. Val RMSE: 0.7016614941801245\n",
      "Best iteration: 365\n",
      "Train RMSE: 0.7544848341735331. Val RMSE: 0.7671823427605784\n",
      "Best iteration: 392\n",
      "Train RMSE: 0.707509000606371. Val RMSE: 0.8822958793374427\n",
      "Best iteration: 993\n",
      "Train RMSE: 0.7987938759324034. Val RMSE: 0.9338645063609026\n",
      "Best iteration: 154\n",
      "n validation fold results: [0.8402815985964945, 0.7779882624949396, 0.7016614941801245, 0.7671823427605784, 0.8822958793374427, 0.9338645063609026]\n",
      "Average iterations: 427\n",
      "Mean Cross Validation RMSE: 0.8172123472884136\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Run time-series CV to collect out-of-fold (OOF) predictions.\n",
    "# 300 is presumably the early-stopping rounds and True the verbosity flag --\n",
    "# TODO confirm against timeseries_cv's signature in utils.\n",
    "oof_train,_=timeseries_cv('xgb',X,y,xgb_params1,cv,root_mean_squared_error,300,True)\n",
    "# Persist OOF predictions for later stacking/ensembling.\n",
    "oof_df = pd.Series(oof_train)\n",
    "oof_df.to_pickle(data_path+'oof/xgb3.pickle')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0]\ttrain-rmse:1.18035\tvalid-rmse:1.18035\n",
      "[10]\ttrain-rmse:1.01782\tvalid-rmse:1.01782\n",
      "[20]\ttrain-rmse:0.941191\tvalid-rmse:0.941191\n",
      "[30]\ttrain-rmse:0.904478\tvalid-rmse:0.904478\n",
      "[40]\ttrain-rmse:0.885363\tvalid-rmse:0.885363\n",
      "[50]\ttrain-rmse:0.874585\tvalid-rmse:0.874585\n",
      "[60]\ttrain-rmse:0.865649\tvalid-rmse:0.865649\n",
      "[70]\ttrain-rmse:0.858993\tvalid-rmse:0.858993\n",
      "[80]\ttrain-rmse:0.854599\tvalid-rmse:0.854599\n",
      "[90]\ttrain-rmse:0.851668\tvalid-rmse:0.851668\n",
      "[100]\ttrain-rmse:0.847219\tvalid-rmse:0.847219\n",
      "[110]\ttrain-rmse:0.843129\tvalid-rmse:0.843129\n",
      "[120]\ttrain-rmse:0.839317\tvalid-rmse:0.839317\n",
      "[130]\ttrain-rmse:0.836417\tvalid-rmse:0.836417\n",
      "[140]\ttrain-rmse:0.833421\tvalid-rmse:0.833421\n",
      "[150]\ttrain-rmse:0.830579\tvalid-rmse:0.830579\n",
      "[160]\ttrain-rmse:0.827843\tvalid-rmse:0.827843\n",
      "[170]\ttrain-rmse:0.825729\tvalid-rmse:0.825729\n",
      "[180]\ttrain-rmse:0.823402\tvalid-rmse:0.823402\n",
      "[190]\ttrain-rmse:0.821494\tvalid-rmse:0.821494\n",
      "[200]\ttrain-rmse:0.819402\tvalid-rmse:0.819402\n",
      "[210]\ttrain-rmse:0.81683\tvalid-rmse:0.81683\n",
      "[220]\ttrain-rmse:0.814929\tvalid-rmse:0.814929\n",
      "[230]\ttrain-rmse:0.813407\tvalid-rmse:0.813407\n",
      "[240]\ttrain-rmse:0.811569\tvalid-rmse:0.811569\n",
      "[250]\ttrain-rmse:0.809974\tvalid-rmse:0.809974\n",
      "[260]\ttrain-rmse:0.8084\tvalid-rmse:0.8084\n",
      "[270]\ttrain-rmse:0.806908\tvalid-rmse:0.806908\n",
      "[280]\ttrain-rmse:0.805805\tvalid-rmse:0.805805\n",
      "[290]\ttrain-rmse:0.804675\tvalid-rmse:0.804675\n",
      "[300]\ttrain-rmse:0.803595\tvalid-rmse:0.803595\n",
      "[310]\ttrain-rmse:0.802444\tvalid-rmse:0.802444\n",
      "[320]\ttrain-rmse:0.800766\tvalid-rmse:0.800766\n",
      "[330]\ttrain-rmse:0.799827\tvalid-rmse:0.799827\n",
      "[340]\ttrain-rmse:0.798743\tvalid-rmse:0.798743\n",
      "[350]\ttrain-rmse:0.79787\tvalid-rmse:0.79787\n",
      "[360]\ttrain-rmse:0.7967\tvalid-rmse:0.7967\n",
      "[370]\ttrain-rmse:0.79552\tvalid-rmse:0.79552\n",
      "[380]\ttrain-rmse:0.794443\tvalid-rmse:0.794443\n",
      "[390]\ttrain-rmse:0.793599\tvalid-rmse:0.793599\n",
      "[400]\ttrain-rmse:0.792897\tvalid-rmse:0.792897\n",
      "[410]\ttrain-rmse:0.791992\tvalid-rmse:0.791992\n",
      "[420]\ttrain-rmse:0.790997\tvalid-rmse:0.790997\n",
      "[430]\ttrain-rmse:0.789527\tvalid-rmse:0.789527\n",
      "[440]\ttrain-rmse:0.788682\tvalid-rmse:0.788682\n",
      "[450]\ttrain-rmse:0.787487\tvalid-rmse:0.787487\n",
      "[460]\ttrain-rmse:0.786733\tvalid-rmse:0.786733\n",
      "[470]\ttrain-rmse:0.785711\tvalid-rmse:0.785711\n",
      "[480]\ttrain-rmse:0.784787\tvalid-rmse:0.784787\n",
      "[490]\ttrain-rmse:0.783236\tvalid-rmse:0.783236\n",
      "[500]\ttrain-rmse:0.782287\tvalid-rmse:0.782287\n",
      "[510]\ttrain-rmse:0.781558\tvalid-rmse:0.781558\n",
      "[520]\ttrain-rmse:0.780575\tvalid-rmse:0.780575\n",
      "[530]\ttrain-rmse:0.779765\tvalid-rmse:0.779765\n",
      "[540]\ttrain-rmse:0.778976\tvalid-rmse:0.778976\n",
      "[550]\ttrain-rmse:0.778254\tvalid-rmse:0.778254\n",
      "[560]\ttrain-rmse:0.777621\tvalid-rmse:0.777621\n",
      "[570]\ttrain-rmse:0.776476\tvalid-rmse:0.776476\n",
      "[580]\ttrain-rmse:0.775895\tvalid-rmse:0.775895\n",
      "[590]\ttrain-rmse:0.775057\tvalid-rmse:0.775057\n",
      "[600]\ttrain-rmse:0.774138\tvalid-rmse:0.774138\n",
      "[610]\ttrain-rmse:0.773334\tvalid-rmse:0.773334\n",
      "[620]\ttrain-rmse:0.772684\tvalid-rmse:0.772684\n",
      "[630]\ttrain-rmse:0.771994\tvalid-rmse:0.771994\n",
      "[640]\ttrain-rmse:0.771328\tvalid-rmse:0.771328\n",
      "[650]\ttrain-rmse:0.770825\tvalid-rmse:0.770825\n",
      "[660]\ttrain-rmse:0.770118\tvalid-rmse:0.770118\n",
      "[670]\ttrain-rmse:0.768989\tvalid-rmse:0.768989\n",
      "[680]\ttrain-rmse:0.768309\tvalid-rmse:0.768309\n",
      "[690]\ttrain-rmse:0.767518\tvalid-rmse:0.767518\n",
      "[700]\ttrain-rmse:0.766554\tvalid-rmse:0.766554\n",
      "[710]\ttrain-rmse:0.766104\tvalid-rmse:0.766104\n",
      "[720]\ttrain-rmse:0.765541\tvalid-rmse:0.765541\n",
      "[730]\ttrain-rmse:0.765004\tvalid-rmse:0.765004\n",
      "[740]\ttrain-rmse:0.764359\tvalid-rmse:0.764359\n",
      "[750]\ttrain-rmse:0.763718\tvalid-rmse:0.763718\n",
      "[760]\ttrain-rmse:0.763397\tvalid-rmse:0.763397\n",
      "[761]\ttrain-rmse:0.763205\tvalid-rmse:0.763205\n",
      "CPU times: user 8min 5s, sys: 48.3 s, total: 8min 53s\n",
      "Wall time: 2min 23s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# Refit on the full training set (no holdout): both watchlist entries are\n",
    "# dtrain, so the 'valid' column in the log simply mirrors 'train'.\n",
    "# 381*2 = 762 boost rounds with no early stopping -- presumably a ~2x\n",
    "# inflation of a CV best-iteration estimate to compensate for the larger\n",
    "# training set; NOTE(review): 381 does not match the logged average of 427,\n",
    "# confirm the intended heuristic.\n",
    "dtrain = xgb.DMatrix(X,y)\n",
    "xgb_full = xgb.train(xgb_params1, dtrain,381*2, [(dtrain, 'train'), (dtrain, 'valid')],\n",
    "                      verbose_eval=10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>ID</th>\n",
       "      <th>item_cnt_month</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0</td>\n",
       "      <td>0.635656</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td>0.277100</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>2</td>\n",
       "      <td>1.193923</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>3</td>\n",
       "      <td>0.362728</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>4</td>\n",
       "      <td>2.800084</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>5</td>\n",
       "      <td>0.511515</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>6</td>\n",
       "      <td>0.471175</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>7</td>\n",
       "      <td>0.187598</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>8</td>\n",
       "      <td>0.877072</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>9</td>\n",
       "      <td>0.254353</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>10</th>\n",
       "      <td>10</td>\n",
       "      <td>2.595156</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11</th>\n",
       "      <td>11</td>\n",
       "      <td>0.266830</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12</th>\n",
       "      <td>12</td>\n",
       "      <td>0.096741</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13</th>\n",
       "      <td>13</td>\n",
       "      <td>0.541164</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14</th>\n",
       "      <td>14</td>\n",
       "      <td>1.591287</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>15</th>\n",
       "      <td>15</td>\n",
       "      <td>2.449679</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>16</th>\n",
       "      <td>16</td>\n",
       "      <td>0.028337</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>17</th>\n",
       "      <td>17</td>\n",
       "      <td>0.201622</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>18</th>\n",
       "      <td>18</td>\n",
       "      <td>1.064272</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>19</th>\n",
       "      <td>19</td>\n",
       "      <td>0.104389</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>20</th>\n",
       "      <td>20</td>\n",
       "      <td>0.453105</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>21</th>\n",
       "      <td>21</td>\n",
       "      <td>0.249691</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>22</th>\n",
       "      <td>22</td>\n",
       "      <td>1.299425</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>23</th>\n",
       "      <td>23</td>\n",
       "      <td>0.522313</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>24</th>\n",
       "      <td>24</td>\n",
       "      <td>1.443442</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25</th>\n",
       "      <td>25</td>\n",
       "      <td>0.667443</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>26</th>\n",
       "      <td>26</td>\n",
       "      <td>0.457253</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>27</th>\n",
       "      <td>27</td>\n",
       "      <td>0.532207</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>28</th>\n",
       "      <td>28</td>\n",
       "      <td>0.918968</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>29</th>\n",
       "      <td>29</td>\n",
       "      <td>4.414254</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214170</th>\n",
       "      <td>214170</td>\n",
       "      <td>0.004861</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214171</th>\n",
       "      <td>214171</td>\n",
       "      <td>0.176108</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214172</th>\n",
       "      <td>214172</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214173</th>\n",
       "      <td>214173</td>\n",
       "      <td>0.029360</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214174</th>\n",
       "      <td>214174</td>\n",
       "      <td>0.033133</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214175</th>\n",
       "      <td>214175</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214176</th>\n",
       "      <td>214176</td>\n",
       "      <td>0.109115</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214177</th>\n",
       "      <td>214177</td>\n",
       "      <td>0.018310</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214178</th>\n",
       "      <td>214178</td>\n",
       "      <td>0.058974</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214179</th>\n",
       "      <td>214179</td>\n",
       "      <td>0.867412</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214180</th>\n",
       "      <td>214180</td>\n",
       "      <td>0.177878</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214181</th>\n",
       "      <td>214181</td>\n",
       "      <td>0.092265</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214182</th>\n",
       "      <td>214182</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214183</th>\n",
       "      <td>214183</td>\n",
       "      <td>0.116432</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214184</th>\n",
       "      <td>214184</td>\n",
       "      <td>0.013891</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214185</th>\n",
       "      <td>214185</td>\n",
       "      <td>0.024176</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214186</th>\n",
       "      <td>214186</td>\n",
       "      <td>0.043624</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214187</th>\n",
       "      <td>214187</td>\n",
       "      <td>0.103891</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214188</th>\n",
       "      <td>214188</td>\n",
       "      <td>0.021398</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214189</th>\n",
       "      <td>214189</td>\n",
       "      <td>0.061439</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214190</th>\n",
       "      <td>214190</td>\n",
       "      <td>0.027627</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214191</th>\n",
       "      <td>214191</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214192</th>\n",
       "      <td>214192</td>\n",
       "      <td>0.002852</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214193</th>\n",
       "      <td>214193</td>\n",
       "      <td>0.055628</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214194</th>\n",
       "      <td>214194</td>\n",
       "      <td>0.026542</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214195</th>\n",
       "      <td>214195</td>\n",
       "      <td>0.083775</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214196</th>\n",
       "      <td>214196</td>\n",
       "      <td>0.028974</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214197</th>\n",
       "      <td>214197</td>\n",
       "      <td>0.037754</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214198</th>\n",
       "      <td>214198</td>\n",
       "      <td>0.014154</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>214199</th>\n",
       "      <td>214199</td>\n",
       "      <td>0.015572</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>214200 rows × 2 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "            ID  item_cnt_month\n",
       "0            0        0.635656\n",
       "1            1        0.277100\n",
       "2            2        1.193923\n",
       "3            3        0.362728\n",
       "4            4        2.800084\n",
       "5            5        0.511515\n",
       "6            6        0.471175\n",
       "7            7        0.187598\n",
       "8            8        0.877072\n",
       "9            9        0.254353\n",
       "10          10        2.595156\n",
       "11          11        0.266830\n",
       "12          12        0.096741\n",
       "13          13        0.541164\n",
       "14          14        1.591287\n",
       "15          15        2.449679\n",
       "16          16        0.028337\n",
       "17          17        0.201622\n",
       "18          18        1.064272\n",
       "19          19        0.104389\n",
       "20          20        0.453105\n",
       "21          21        0.249691\n",
       "22          22        1.299425\n",
       "23          23        0.522313\n",
       "24          24        1.443442\n",
       "25          25        0.667443\n",
       "26          26        0.457253\n",
       "27          27        0.532207\n",
       "28          28        0.918968\n",
       "29          29        4.414254\n",
       "...        ...             ...\n",
       "214170  214170        0.004861\n",
       "214171  214171        0.176108\n",
       "214172  214172        0.000000\n",
       "214173  214173        0.029360\n",
       "214174  214174        0.033133\n",
       "214175  214175        0.000000\n",
       "214176  214176        0.109115\n",
       "214177  214177        0.018310\n",
       "214178  214178        0.058974\n",
       "214179  214179        0.867412\n",
       "214180  214180        0.177878\n",
       "214181  214181        0.092265\n",
       "214182  214182        0.000000\n",
       "214183  214183        0.116432\n",
       "214184  214184        0.013891\n",
       "214185  214185        0.024176\n",
       "214186  214186        0.043624\n",
       "214187  214187        0.103891\n",
       "214188  214188        0.021398\n",
       "214189  214189        0.061439\n",
       "214190  214190        0.027627\n",
       "214191  214191        0.000000\n",
       "214192  214192        0.002852\n",
       "214193  214193        0.055628\n",
       "214194  214194        0.026542\n",
       "214195  214195        0.083775\n",
       "214196  214196        0.028974\n",
       "214197  214197        0.037754\n",
       "214198  214198        0.014154\n",
       "214199  214199        0.015572\n",
       "\n",
       "[214200 rows x 2 columns]"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "test = pd.read_csv(os.path.join(data_path, 'test_lag.csv'))\n",
    "\n",
    "test.drop(['ID','item_name','date_block_num'],axis=1,inplace=True)\n",
    "dtest= xgb.DMatrix(test)\n",
    "test_pred = xgb_full.predict(dtest,381*2)\n",
    "get_submission(test_pred,'tuned_xgb_basicfeatures_6folds_8126');\n",
    "xgb_full.__del__()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "# Get OOF predictions + test predictions\n",
     "\n",
     "## Grow trees by `max_leaf_nodes` instead of `max_depth`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Load engineered sales data (lag features beyond month 12) and build the\n",
     "# training matrices plus the time-series CV folds.\n",
     "all_data = get_all_data(data_path,'new_sales_lag_after12.pickle')\n",
     "\n",
     "# Month 33 is the final validation horizon.  # NOTE(review): inferred from the arg -- confirm in utils.get_X_y\n",
     "X,y = get_X_y(all_data,33)\n",
     "X.drop('date_block_num',axis=1,inplace=True)\n",
     "\n",
     "# 6 folds: one validation month per block from 28 to 33.  # NOTE(review): assumed -- verify utils.get_cv_idxs\n",
     "cv = get_cv_idxs(all_data,28,33)\n",
     "# Tuned hyper-parameters. NOTE(review): XGBoost's native parameter is\n",
     "# 'max_leaves' (with grow_policy='lossguide'); 'max_leaf_nodes' is the sklearn\n",
     "# name and may be silently ignored by xgb.train -- verify before relying on it.\n",
     "xgb_params2={'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'gamma': 0.6000000000000001, 'learning_rate': 0.03, 'max_leaf_nodes': 113, 'min_child_weight': 122, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training with params: \n",
      "{'colsample_bytree': 0.8500000000000001, 'eval_metric': 'rmse', 'gamma': 0.6000000000000001, 'learning_rate': 0.03, 'max_leaf_nodes': 113, 'min_child_weight': 122, 'objective': 'reg:linear', 'seed': 1204, 'subsample': 0.75, 'tree_method': 'gpu_hist'}\n",
      "Train RMSE: 0.7855304187054425. Val RMSE: 0.8357879290953042\n",
      "Best iteration: 1217\n",
      "Train RMSE: 0.8043950114098816. Val RMSE: 0.7744203738019145\n",
      "Best iteration: 727\n",
      "Train RMSE: 0.7686397209773999. Val RMSE: 0.7013283345129842\n",
      "Best iteration: 1904\n",
      "Train RMSE: 0.7641108195310308. Val RMSE: 0.7641784470779217\n",
      "Best iteration: 1962\n",
      "Train RMSE: 0.7842155628857831. Val RMSE: 0.8876253106745638\n",
      "Best iteration: 1041\n",
      "Train RMSE: 0.8177810847159686. Val RMSE: 0.9364247831983158\n",
      "Best iteration: 404\n",
      "n validation fold results: [0.8357879290953042, 0.7744203738019145, 0.7013283345129842, 0.7641784470779217, 0.8876253106745638, 0.9364247831983158]\n",
      "Average iterations: 1209\n",
      "Mean Cross Validation RMSE: 0.8166275297268341\n",
      "\n"
     ]
    }
   ],
   "source": [
     "# Cross-validated out-of-fold predictions (early stopping after 200 rounds).\n",
     "oof_train,_=timeseries_cv('xgb',X,y,xgb_params2,cv,root_mean_squared_error,200,True)\n",
     "oof_df = pd.Series(oof_train)\n",
     "# Persist OOF predictions for later stacking/ensembling.\n",
     "oof_df.to_pickle(data_path+'oof/xgb2.pickle')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "\n",
    "%%time\n",
    "dtrain = xgb.DMatrix(X,y)\n",
    "xgb_full = xgb.train(xgb_params2, dtrain,1109, [(dtrain, 'train'), (dtrain, 'valid')],\n",
    "                      verbose_eval=10)\n",
    "\n",
    "test = pd.read_csv(os.path.join(data_path, 'test_lag.csv'))\n",
    "\n",
    "test.drop(['ID','item_name','date_block_num'],axis=1,inplace=True)\n",
    "dtest= xgb.DMatrix(test)\n",
    "test_pred = xgb_full.predict(dtest,1109)\n",
    "get_submission(test_pred,'tuned_xgb_basicfeatures_6folds_8136');\n",
    "xgb_full.__del__()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
