{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import torch\n",
    "import math\n",
    "import matplotlib.pyplot as plt\n",
    "from torch import nn\n",
    "from sklearn.ensemble import VotingRegressor\n",
    "import lightgbm as lgb\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "from sklearn.base import BaseEstimator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# List of turbine data file groups; append new turbines here.\n",
    "# Each inner list holds one turbine's files for consecutive days.\n",
    "# Turbine 9 is skipped (presumably its data files are missing -- confirm).\n",
    "filenames = [\n",
    "    [\n",
    "        f'../data/WuXingLing_WuXingLing{i:03d}_20231105000000_20231106000000_SCADA温数据.csv',\n",
    "        f'../data/WuXingLing_WuXingLing{i:03d}_20231106000000_20231107000000_SCADA温数据.csv',\n",
    "    ]\n",
    "    for i in range(1, 16)\n",
    "    if i != 9\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One DataFrame per turbine: read each day's CSV, then concatenate once.\n",
    "# (Concatenating inside the loop grows the frame quadratically.)\n",
    "df_data = []\n",
    "for turbines in filenames:\n",
    "    frames = [pd.read_csv(f) for f in turbines]\n",
    "    df_data.append(pd.concat(frames, ignore_index=True, axis=0))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def wind_uv(series):\n",
    "    \"\"\"Decompose one row's wind speed into (u, v) vector components.\n",
    "\n",
    "    Kept for backward compatibility; the loop below now uses the\n",
    "    vectorized NumPy equivalent instead of the slow row-wise apply().\n",
    "    \"\"\"\n",
    "    alpha = math.radians(series['风向2'])\n",
    "    x = series['短滤波风速']\n",
    "    return x * math.cos(alpha), x * math.sin(alpha)\n",
    "\n",
    "\n",
    "for i in range(len(df_data)):\n",
    "    df = df_data[i]\n",
    "    df['time'] = df['时间']\n",
    "    turbine_id = df['设备'].iloc[0]\n",
    "    # Wind vector components named '<turbine>_u' / '<turbine>_v',\n",
    "    # computed vectorized (same math as wind_uv, per column).\n",
    "    alpha = np.radians(df['风向2'])\n",
    "    df[turbine_id + '_u'] = df['短滤波风速'] * np.cos(alpha)\n",
    "    df[turbine_id + '_v'] = df['短滤波风速'] * np.sin(alpha)\n",
    "    df[turbine_id + '_speed'] = df['短滤波风速']\n",
    "    df[turbine_id + '_direct'] = df['风向2']\n",
    "    df_data[i] = df.drop(['设备', '时间', '风向1', '风向2', '短滤波风速'], axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inner-join every turbine frame on the shared timestamp column, so only\n",
    "# timestamps present for all turbines survive.\n",
    "data = df_data[0]\n",
    "for frame in df_data[1:]:\n",
    "    data = data.merge(frame, on=['time'], how='inner')\n",
    "\n",
    "# Target: turbine 001 wind speed; features: every other column.\n",
    "target_data = data['WuXingLing001_speed'].to_numpy().astype(np.float32)\n",
    "train_data = data.drop(['time', 'WuXingLing001_speed'], axis=1).to_numpy().astype(np.float32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Input feature count fed to the GRU (number of columns in train_data).\n",
    "features = 55\n",
    "# Fall back to CPU when CUDA is unavailable so the notebook still runs\n",
    "# (slowly) on machines without a GPU instead of crashing on .to(device).\n",
    "device = 'cuda' if torch.cuda.is_available() else 'cpu'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class GRU_Estimator(BaseEstimator):\n",
    "    \"\"\"Sliding-window GRU regressor with a scikit-learn estimator interface.\n",
    "\n",
    "    A window of `seqlen` consecutive feature rows predicts the target value\n",
    "    immediately following the window. Hyper-parameters are stored verbatim\n",
    "    in __init__ (required for sklearn clone()); the network is built in fit().\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, seqlen=None, hidden_size=None, num_layers=None, epoch=None, lr=None):\n",
    "        super(GRU_Estimator, self).__init__()\n",
    "        self.seqlen = seqlen            # input window length\n",
    "        self.hidden_size = hidden_size  # GRU hidden state width\n",
    "        self.num_layers = num_layers    # number of stacked GRU layers\n",
    "        self.epoch = epoch              # training epochs in fit()\n",
    "        self.lr = lr                    # Adam learning rate\n",
    "        self.gru = None\n",
    "        self.fc = None\n",
    "        self.batch = 128                # fixed mini-batch size (not searched)\n",
    "\n",
    "    def fit(self, X, y):\n",
    "        \"\"\"Build the network and train it for `self.epoch` passes over (X, y).\"\"\"\n",
    "        self.gru = nn.GRU(\n",
    "            input_size=features,\n",
    "            hidden_size=self.hidden_size,\n",
    "            num_layers=self.num_layers,\n",
    "            batch_first=True\n",
    "        ).to(device)\n",
    "        self.fc = nn.Linear(self.hidden_size, 1).to(device)\n",
    "        # BUG FIX: the optimizer previously received only self.gru.parameters(),\n",
    "        # so the final linear layer was never updated during training.\n",
    "        params = list(self.gru.parameters()) + list(self.fc.parameters())\n",
    "        self.optimizer = torch.optim.Adam(params, self.lr)\n",
    "        self.criterion = nn.MSELoss()\n",
    "\n",
    "        for i in range(self.epoch):\n",
    "            loss = self.train_epoch(X, y)\n",
    "            print(f'training {i} epoch, loss is {loss}')\n",
    "        return self\n",
    "\n",
    "    def _make_batch(self, X, y, start):\n",
    "        \"\"\"Stack `self.batch` sliding windows and their targets beginning at `start`.\"\"\"\n",
    "        rows = range(start, start + self.batch)\n",
    "        inputs = torch.stack(\n",
    "            [torch.as_tensor(X[j:j + self.seqlen], dtype=torch.float32) for j in rows]\n",
    "        ).to(device)\n",
    "        targets = torch.stack(\n",
    "            [torch.as_tensor(y[j + self.seqlen:j + self.seqlen + 1], dtype=torch.float32) for j in rows]\n",
    "        ).to(device)\n",
    "        return inputs, targets\n",
    "\n",
    "    def train_epoch(self, X, y):\n",
    "        \"\"\"Run one pass over (X, y) in mini-batches; return the mean batch loss.\"\"\"\n",
    "        total_loss = 0.0\n",
    "        # Drop the tail so that every batch is full.\n",
    "        trainlen = (len(X) - self.seqlen) // self.batch * self.batch\n",
    "        if trainlen <= 0:\n",
    "            # Explicit error instead of a ZeroDivisionError in the mean below.\n",
    "            raise ValueError('not enough samples for a single batch')\n",
    "        for i in range(0, trainlen, self.batch):\n",
    "            inputs, targets = self._make_batch(X, y, i)\n",
    "            total_loss += self.train_step(inputs, targets)\n",
    "        return total_loss / (trainlen // self.batch)\n",
    "\n",
    "    def train_step(self, inputs, targets):\n",
    "        \"\"\"Single Adam step on one batch; return the batch loss as a float.\"\"\"\n",
    "        self.gru.train()\n",
    "        self.fc.train()\n",
    "        self.optimizer.zero_grad()\n",
    "        # Hidden output of the last time step -> one scalar per sequence.\n",
    "        outputs = self.fc(self.gru(inputs)[0][:, -1, :])\n",
    "        loss = self.criterion(outputs, targets)\n",
    "        loss.backward()\n",
    "        self.optimizer.step()\n",
    "        return loss.item()\n",
    "\n",
    "    def predict(self, x):\n",
    "        \"\"\"Predict targets for a tensor of sequences shaped (batch, seqlen, features).\"\"\"\n",
    "        self.gru.eval()\n",
    "        self.fc.eval()\n",
    "        with torch.no_grad():\n",
    "            return self.fc(self.gru(x)[0][:, -1, :])\n",
    "\n",
    "    def score(self, X, y):\n",
    "        \"\"\"Return the NEGATIVE mean MSE over (X, y).\n",
    "\n",
    "        BUG FIX: GridSearchCV maximizes the score, so returning the raw loss\n",
    "        (as the original did) made the search select the WORST model. The\n",
    "        evaluation now also runs under torch.no_grad() and accumulates plain\n",
    "        floats instead of device tensors.\n",
    "        \"\"\"\n",
    "        self.gru.eval()\n",
    "        self.fc.eval()\n",
    "        total = 0.0\n",
    "        testlen = (len(X) - self.seqlen) // self.batch * self.batch\n",
    "        if testlen <= 0:\n",
    "            raise ValueError('not enough samples for a single batch')\n",
    "        with torch.no_grad():\n",
    "            for i in range(0, testlen, self.batch):\n",
    "                x_batch, y_true = self._make_batch(X, y, i)\n",
    "                y_hat = self.fc(self.gru(x_batch)[0][:, -1, :])\n",
    "                total += self.criterion(y_hat, y_true).item()\n",
    "        return -total / (testlen // self.batch)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyper-parameter search space for the GRU estimator.\n",
    "# NOTE(review): 4 * 3 * 3 * 1 * 3 = 108 combinations, each fit cv times --\n",
    "# this search is very expensive at 10 epochs per fit.\n",
    "param_grid = {\n",
    "    'seqlen': [64, 96, 128, 144], \n",
    "    'hidden_size': [128, 256, 512], \n",
    "    'num_layers': [3, 5, 8], \n",
    "    'epoch': [10], \n",
    "    'lr': [0.1, 0.03, 0.001], \n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Wrap the estimator in an exhaustive grid search over param_grid.\n",
    "# NOTE(review): cv=2 uses sklearn's default (non-chronological) splitting;\n",
    "# for sequential SCADA data a time-ordered split may be preferable -- confirm.\n",
    "model = GRU_Estimator()\n",
    "gridsearch = GridSearchCV(estimator=model, param_grid=param_grid, cv=2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "training 0 epoch, loss is 40.09054361593299\n",
      "training 1 epoch, loss is 37.845312087136136\n",
      "training 2 epoch, loss is 50.03971739416218\n",
      "training 3 epoch, loss is 44.85464716178727\n",
      "training 4 epoch, loss is 43.252356386768355\n",
      "training 5 epoch, loss is 43.8420915273336\n",
      "training 6 epoch, loss is 42.90032862785662\n",
      "training 7 epoch, loss is 42.903182076063636\n",
      "training 8 epoch, loss is 50.8339903479337\n",
      "training 9 epoch, loss is 55.07689156529192\n",
      "training 0 epoch, loss is 44.48158091916882\n",
      "training 1 epoch, loss is 85.81298199605517\n",
      "training 2 epoch, loss is 48.10644662159133\n",
      "training 3 epoch, loss is 29.52238175206616\n",
      "training 4 epoch, loss is 40.41278259212964\n",
      "training 5 epoch, loss is 31.82145579022187\n",
      "training 6 epoch, loss is 25.10481657238141\n",
      "training 7 epoch, loss is 21.74890771174378\n",
      "training 8 epoch, loss is 20.10343468133968\n",
      "training 9 epoch, loss is 18.66844389720292\n",
      "training 0 epoch, loss is 38.169027348200984\n",
      "training 1 epoch, loss is 45.8356801091475\n",
      "training 2 epoch, loss is 50.14110055081657\n",
      "training 3 epoch, loss is 54.310780951150804\n",
      "training 4 epoch, loss is 55.691710211093415\n",
      "training 5 epoch, loss is 55.37957669973639\n",
      "training 6 epoch, loss is 56.7152833574692\n",
      "training 7 epoch, loss is 56.339659865470246\n",
      "training 8 epoch, loss is 56.24095058624663\n",
      "training 9 epoch, loss is 56.35264305976007\n",
      "training 0 epoch, loss is 9.841822917432212\n",
      "training 1 epoch, loss is 11.9073843535268\n",
      "training 2 epoch, loss is 16.44933211018583\n",
      "training 3 epoch, loss is 14.69756808850461\n",
      "training 4 epoch, loss is 13.813096586990692\n",
      "training 5 epoch, loss is 13.435162012329883\n",
      "training 6 epoch, loss is 12.876513358459805\n",
      "training 7 epoch, loss is 12.753262708897672\n",
      "training 8 epoch, loss is 12.751576865782813\n",
      "training 9 epoch, loss is 12.93807732959441\n",
      "training 0 epoch, loss is 34.53865911484384\n",
      "training 1 epoch, loss is 35.68642893216907\n",
      "training 2 epoch, loss is 45.20461447492728\n",
      "training 3 epoch, loss is 49.95939076285069\n",
      "training 4 epoch, loss is 53.19474245541527\n",
      "training 5 epoch, loss is 49.37456149534938\n",
      "training 6 epoch, loss is 41.15012583043703\n",
      "training 7 epoch, loss is 43.84696137367883\n",
      "training 8 epoch, loss is 47.67200914257447\n",
      "training 9 epoch, loss is 48.361102638815915\n",
      "training 0 epoch, loss is 14.4956928216341\n",
      "training 1 epoch, loss is 21.671346858740737\n",
      "training 2 epoch, loss is 33.261647501755895\n",
      "training 3 epoch, loss is 26.389102315557817\n",
      "training 4 epoch, loss is 24.202370885306365\n",
      "training 5 epoch, loss is 23.570871859921546\n",
      "training 6 epoch, loss is 22.35219989847447\n",
      "training 7 epoch, loss is 21.13916170964078\n",
      "training 8 epoch, loss is 20.52419623102473\n",
      "training 9 epoch, loss is 18.393819548101206\n",
      "training 0 epoch, loss is 33.68916125013251\n",
      "training 1 epoch, loss is 49.02451579429951\n",
      "training 2 epoch, loss is 48.832921918730676\n",
      "training 3 epoch, loss is 47.699856039374666\n",
      "training 4 epoch, loss is 46.12427987973102\n",
      "training 5 epoch, loss is 41.74801783321251\n",
      "training 6 epoch, loss is 43.34582952001474\n",
      "training 7 epoch, loss is 42.793895268909644\n",
      "training 8 epoch, loss is 42.45516039079047\n",
      "training 9 epoch, loss is 43.03763157465837\n",
      "training 0 epoch, loss is 17.59206179523893\n",
      "training 1 epoch, loss is 50.52868907412661\n",
      "training 2 epoch, loss is 51.437341454124734\n",
      "training 3 epoch, loss is 58.78992939420476\n",
      "training 4 epoch, loss is 58.71371605421069\n",
      "training 5 epoch, loss is 58.49708601978523\n",
      "training 6 epoch, loss is 58.36491702322088\n",
      "training 7 epoch, loss is 58.306915434827424\n",
      "training 8 epoch, loss is 58.38976886120098\n",
      "training 9 epoch, loss is 58.401524359545775\n",
      "training 0 epoch, loss is 87.39870859644975\n",
      "training 1 epoch, loss is 74.61160354031034\n",
      "training 2 epoch, loss is 74.29473256159474\n",
      "training 3 epoch, loss is 73.28244078240029\n",
      "training 4 epoch, loss is 69.54330333694816\n",
      "training 5 epoch, loss is 55.349601881440385\n",
      "training 6 epoch, loss is 55.51056930652712\n",
      "training 7 epoch, loss is 55.76141986407848\n",
      "training 8 epoch, loss is 55.62516300999269\n",
      "training 9 epoch, loss is 54.02389411243558\n",
      "training 0 epoch, loss is 14.801103124278942\n",
      "training 1 epoch, loss is 30.11405335017411\n",
      "training 2 epoch, loss is 50.27028728734138\n",
      "training 3 epoch, loss is 59.236889093495265\n",
      "training 4 epoch, loss is 56.264175984201515\n",
      "training 5 epoch, loss is 53.73444523125091\n",
      "training 6 epoch, loss is 48.93418221834505\n",
      "training 7 epoch, loss is 46.38446492115892\n",
      "training 8 epoch, loss is 44.660375254798005\n",
      "training 9 epoch, loss is 43.23779347494731\n",
      "training 0 epoch, loss is 32.88796930264914\n",
      "training 1 epoch, loss is 37.164791688787304\n",
      "training 2 epoch, loss is 37.407617434041256\n",
      "training 3 epoch, loss is 36.67937089633252\n",
      "training 4 epoch, loss is 36.90429583587615\n",
      "training 5 epoch, loss is 37.79220531794632\n",
      "training 6 epoch, loss is 38.027011934325145\n",
      "training 7 epoch, loss is 38.369208703077334\n",
      "training 8 epoch, loss is 38.5331805332036\n",
      "training 9 epoch, loss is 45.95818174320884\n",
      "training 0 epoch, loss is 34.30042822555105\n",
      "training 1 epoch, loss is 51.885363374336535\n",
      "training 2 epoch, loss is 44.997759517293304\n",
      "training 3 epoch, loss is 33.068175610873396\n",
      "training 4 epoch, loss is 27.04597549516299\n",
      "training 5 epoch, loss is 25.853756104405274\n",
      "training 6 epoch, loss is 24.885401056985472\n",
      "training 7 epoch, loss is 23.834409214445675\n",
      "training 8 epoch, loss is 22.279490022303797\n",
      "training 9 epoch, loss is 21.483596006380697\n",
      "training 0 epoch, loss is 89.00904426907219\n",
      "training 1 epoch, loss is nan\n",
      "training 2 epoch, loss is nan\n",
      "training 3 epoch, loss is nan\n",
      "training 4 epoch, loss is nan\n",
      "training 5 epoch, loss is nan\n",
      "training 6 epoch, loss is nan\n",
      "training 7 epoch, loss is nan\n",
      "training 8 epoch, loss is nan\n",
      "training 9 epoch, loss is nan\n",
      "training 0 epoch, loss is 12.146616539577968\n",
      "training 1 epoch, loss is 12.356364883349805\n",
      "training 2 epoch, loss is 12.34454366760964\n",
      "training 3 epoch, loss is 12.43570636955673\n",
      "training 4 epoch, loss is 13.059536239436278\n",
      "training 5 epoch, loss is 12.877531196325133\n",
      "training 6 epoch, loss is 12.856764134130156\n",
      "training 7 epoch, loss is 12.927725961249465\n",
      "training 8 epoch, loss is 13.12193307812251\n",
      "training 9 epoch, loss is 12.862969768883035\n",
      "training 0 epoch, loss is 98.48216948874106\n",
      "training 1 epoch, loss is 63.1654295125121\n",
      "training 2 epoch, loss is 46.639558032066475\n",
      "training 3 epoch, loss is 45.73065973353333\n",
      "training 4 epoch, loss is 42.46689652249682\n",
      "training 5 epoch, loss is 43.308421627307006\n",
      "training 6 epoch, loss is 46.79581759276663\n",
      "training 7 epoch, loss is 47.13421820544736\n",
      "training 8 epoch, loss is 47.809857759508475\n",
      "training 9 epoch, loss is 48.00566328997775\n",
      "training 0 epoch, loss is 19.37635629348468\n",
      "training 1 epoch, loss is 47.11150517647901\n",
      "training 2 epoch, loss is 47.850788239738385\n",
      "training 3 epoch, loss is 46.63048836841271\n",
      "training 4 epoch, loss is 43.559801133213924\n",
      "training 5 epoch, loss is 45.216164768893506\n",
      "training 6 epoch, loss is 51.906983734771366\n",
      "training 7 epoch, loss is 47.83699107736925\n",
      "training 8 epoch, loss is 47.16357353821198\n",
      "training 9 epoch, loss is 51.37107768668108\n",
      "training 0 epoch, loss is 73.21976698857652\n",
      "training 1 epoch, loss is 58.92422620673562\n",
      "training 2 epoch, loss is 57.72658589077721\n",
      "training 3 epoch, loss is 57.51843849607144\n",
      "training 4 epoch, loss is 57.518436313161864\n",
      "training 5 epoch, loss is 57.518432476599244\n",
      "training 6 epoch, loss is 57.518432080281414\n",
      "training 7 epoch, loss is 57.51843145410102\n",
      "training 8 epoch, loss is 57.51843316817673\n",
      "training 9 epoch, loss is 57.51843365129271\n",
      "training 0 epoch, loss is 15.768454784919848\n",
      "training 1 epoch, loss is 40.32026979229924\n",
      "training 2 epoch, loss is 55.30793874341585\n",
      "training 3 epoch, loss is 55.46128581610207\n",
      "training 4 epoch, loss is 56.639104666271265\n",
      "training 5 epoch, loss is 56.12432324072727\n",
      "training 6 epoch, loss is 55.91667158879583\n",
      "training 7 epoch, loss is 58.3464650789425\n",
      "training 8 epoch, loss is 61.1869845920925\n",
      "training 9 epoch, loss is 60.633309132975\n",
      "training 0 epoch, loss is 45.82740752558149\n",
      "training 1 epoch, loss is 83.56684420246752\n",
      "training 2 epoch, loss is 83.56678778569847\n",
      "training 3 epoch, loss is 83.56678778499099\n",
      "training 4 epoch, loss is 83.56678778499099\n",
      "training 5 epoch, loss is 83.56678778499099\n",
      "training 6 epoch, loss is 83.56678778499099\n",
      "training 7 epoch, loss is 83.56678778499099\n",
      "training 8 epoch, loss is 83.56678778499099\n",
      "training 9 epoch, loss is 83.56678778499099\n",
      "training 0 epoch, loss is 58.53767562867626\n",
      "training 1 epoch, loss is 59.53091164088037\n",
      "training 2 epoch, loss is 66.67822901502207\n",
      "training 3 epoch, loss is 72.22711503682575\n",
      "training 4 epoch, loss is 70.12542142075672\n",
      "training 5 epoch, loss is 67.27125008240651\n",
      "training 6 epoch, loss is 67.22659020890822\n",
      "training 7 epoch, loss is 67.22353383412347\n",
      "training 8 epoch, loss is 67.22083974096824\n",
      "training 9 epoch, loss is 67.21694210658087\n",
      "training 0 epoch, loss is 40.00732119713056\n",
      "training 1 epoch, loss is 43.921619048325525\n",
      "training 2 epoch, loss is 50.10813585049763\n",
      "training 3 epoch, loss is 45.53009664207608\n",
      "training 4 epoch, loss is 45.469123868321454\n",
      "training 5 epoch, loss is 45.46935781031758\n",
      "training 6 epoch, loss is 45.51449220446256\n",
      "training 7 epoch, loss is 45.79944018732193\n",
      "training 8 epoch, loss is 46.26075480984474\n",
      "training 9 epoch, loss is 47.58792039316468\n",
      "training 0 epoch, loss is 28.49157475573992\n",
      "training 1 epoch, loss is 72.79807093447677\n",
      "training 2 epoch, loss is 65.6739021451254\n",
      "training 3 epoch, loss is 68.0148884648739\n",
      "training 4 epoch, loss is 71.42658485503155\n",
      "training 5 epoch, loss is 66.83818642604952\n",
      "training 6 epoch, loss is 64.48673307011319\n",
      "training 7 epoch, loss is 67.81407991573435\n",
      "training 8 epoch, loss is 70.6555330576458\n",
      "training 9 epoch, loss is 63.62126753521249\n",
      "training 0 epoch, loss is 38.84666633342474\n",
      "training 1 epoch, loss is 59.322516531095374\n",
      "training 2 epoch, loss is 63.07943713276427\n",
      "training 3 epoch, loss is 70.59604128056039\n",
      "training 4 epoch, loss is 70.50722420207858\n",
      "training 5 epoch, loss is 70.50699016492797\n",
      "training 6 epoch, loss is 70.50680286145007\n",
      "training 7 epoch, loss is 70.5066219578659\n",
      "training 8 epoch, loss is 70.5065427029403\n",
      "training 9 epoch, loss is 70.56456151264781\n",
      "training 0 epoch, loss is 33.525728050171\n",
      "training 1 epoch, loss is 56.70272376880958\n",
      "training 2 epoch, loss is 54.901793399147614\n",
      "training 3 epoch, loss is 53.32611184098923\n",
      "training 4 epoch, loss is 51.074407671039914\n",
      "training 5 epoch, loss is 56.40447227667912\n",
      "training 6 epoch, loss is 56.058372086614234\n",
      "training 7 epoch, loss is 55.3291322281598\n",
      "training 8 epoch, loss is 55.07384945131163\n",
      "training 9 epoch, loss is 54.776816258990216\n",
      "training 0 epoch, loss is 36.16471710423068\n",
      "training 1 epoch, loss is 42.391869017539285\n",
      "training 2 epoch, loss is 44.93272916503936\n",
      "training 3 epoch, loss is 50.71899144004378\n",
      "training 4 epoch, loss is 47.12041376951186\n",
      "training 5 epoch, loss is 44.67061914289121\n",
      "training 6 epoch, loss is 45.44630344822251\n",
      "training 7 epoch, loss is 48.483185178912535\n",
      "training 8 epoch, loss is 48.28362636842961\n",
      "training 9 epoch, loss is 48.071190108973475\n",
      "training 0 epoch, loss is 11.61575128988139\n",
      "training 1 epoch, loss is 11.527144604337675\n",
      "training 2 epoch, loss is 11.93881310526978\n",
      "training 3 epoch, loss is 11.628595158070857\n",
      "training 4 epoch, loss is 10.84985872309445\n",
      "training 5 epoch, loss is 10.76923611645175\n",
      "training 6 epoch, loss is 10.928215492771004\n",
      "training 7 epoch, loss is 11.215066810544768\n",
      "training 8 epoch, loss is 10.927734278429863\n",
      "training 9 epoch, loss is 11.35920460527702\n",
      "training 0 epoch, loss is 33.29273773703271\n",
      "training 1 epoch, loss is 40.436098890196675\n",
      "training 2 epoch, loss is 44.50744841077561\n",
      "training 3 epoch, loss is 51.56204305080703\n",
      "training 4 epoch, loss is 51.11496499832996\n",
      "training 5 epoch, loss is 50.98304459121001\n",
      "training 6 epoch, loss is 48.04836065974363\n",
      "training 7 epoch, loss is 45.73418285282058\n",
      "training 8 epoch, loss is 49.54772147668929\n",
      "training 9 epoch, loss is 46.079322835325485\n",
      "training 0 epoch, loss is 11.571219168028598\n",
      "training 1 epoch, loss is 11.18217403890471\n",
      "training 2 epoch, loss is 12.020246392187804\n",
      "training 3 epoch, loss is 11.904196234149111\n",
      "training 4 epoch, loss is 11.689579188713157\n",
      "training 5 epoch, loss is 11.567361577505174\n",
      "training 6 epoch, loss is 11.747696327360607\n",
      "training 7 epoch, loss is 11.97826946904822\n",
      "training 8 epoch, loss is 11.799856385066883\n",
      "training 9 epoch, loss is 11.896158945810281\n",
      "training 0 epoch, loss is 34.03026052597546\n",
      "training 1 epoch, loss is 35.504407765139284\n",
      "training 2 epoch, loss is 37.88692897981591\n",
      "training 3 epoch, loss is 36.06188975219291\n",
      "training 4 epoch, loss is 37.365484850917795\n",
      "training 5 epoch, loss is 42.564174729479525\n",
      "training 6 epoch, loss is 35.522989164916275\n",
      "training 7 epoch, loss is 37.35419915837598\n",
      "training 8 epoch, loss is 43.19988914050759\n",
      "training 9 epoch, loss is 45.7728697209666\n",
      "training 0 epoch, loss is 11.892830405660659\n",
      "training 1 epoch, loss is 12.45778977500623\n",
      "training 2 epoch, loss is 11.420490214573523\n",
      "training 3 epoch, loss is 12.786762869775915\n",
      "training 4 epoch, loss is 11.277624066288995\n",
      "training 5 epoch, loss is 10.898309314043125\n",
      "training 6 epoch, loss is 10.760442773316841\n",
      "training 7 epoch, loss is 10.687889028506392\n",
      "training 8 epoch, loss is 10.68409663012102\n",
      "training 9 epoch, loss is 10.595367573710153\n",
      "training 0 epoch, loss is 35.718762754452975\n",
      "training 1 epoch, loss is 38.35531353877434\n",
      "training 2 epoch, loss is 38.10594818115589\n",
      "training 3 epoch, loss is 41.474992012552484\n",
      "training 4 epoch, loss is 46.85899734875688\n",
      "training 5 epoch, loss is 48.18780622825049\n",
      "training 6 epoch, loss is 48.18887334711086\n",
      "training 7 epoch, loss is 41.22322856947049\n",
      "training 8 epoch, loss is 45.65375798280845\n",
      "training 9 epoch, loss is 46.63395408615138\n",
      "training 0 epoch, loss is 9.753202872215196\n",
      "training 1 epoch, loss is 10.992190796499974\n",
      "training 2 epoch, loss is 15.598077377838596\n",
      "training 3 epoch, loss is 25.04943886981588\n",
      "training 4 epoch, loss is 39.56153733769285\n",
      "training 5 epoch, loss is 44.35277809962121\n",
      "training 6 epoch, loss is 46.64705830484762\n",
      "training 7 epoch, loss is 46.7378724453885\n",
      "training 8 epoch, loss is 46.36726388676205\n",
      "training 9 epoch, loss is 45.93931939662123\n",
      "training 0 epoch, loss is 35.288230038096\n",
      "training 1 epoch, loss is 38.44172753712013\n",
      "training 2 epoch, loss is 41.15209570593402\n",
      "training 3 epoch, loss is 43.62359730300192\n",
      "training 4 epoch, loss is 42.61575214956122\n",
      "training 5 epoch, loss is 40.91248057596938\n",
      "training 6 epoch, loss is 52.551562507512486\n",
      "training 7 epoch, loss is 54.24436910271379\n",
      "training 8 epoch, loss is 48.02458077322039\n",
      "training 9 epoch, loss is 42.4318029273423\n",
      "training 0 epoch, loss is 9.51878019782662\n",
      "training 1 epoch, loss is 9.295083961739738\n",
      "training 2 epoch, loss is 10.05852609841866\n",
      "training 3 epoch, loss is 11.686444354926177\n",
      "training 4 epoch, loss is 12.173183180579711\n",
      "training 5 epoch, loss is 11.99174084810634\n",
      "training 6 epoch, loss is 11.967507349266407\n",
      "training 7 epoch, loss is 13.346807478808243\n",
      "training 8 epoch, loss is 13.327998481348201\n",
      "training 9 epoch, loss is 13.874759598284164\n",
      "training 0 epoch, loss is 33.384200988519794\n",
      "training 1 epoch, loss is 33.334916943833456\n",
      "training 2 epoch, loss is 33.33478636993233\n",
      "training 3 epoch, loss is 33.334749711572414\n",
      "training 4 epoch, loss is 33.33470697855737\n",
      "training 5 epoch, loss is 35.66864546131929\n",
      "training 6 epoch, loss is 46.91032222890536\n",
      "training 7 epoch, loss is 44.70933253647489\n",
      "training 8 epoch, loss is 42.820116489042334\n",
      "training 9 epoch, loss is 36.13095005855893\n",
      "training 0 epoch, loss is 9.972413123914148\n",
      "training 1 epoch, loss is 10.669635371025928\n",
      "training 2 epoch, loss is 12.340201552072955\n",
      "training 3 epoch, loss is 12.451625641236408\n",
      "training 4 epoch, loss is 11.897648924757181\n",
      "training 5 epoch, loss is 11.788095497678224\n",
      "training 6 epoch, loss is 11.584053746836387\n",
      "training 7 epoch, loss is 11.791016483309095\n",
      "training 8 epoch, loss is 12.00085776668451\n",
      "training 9 epoch, loss is 12.239743570888344\n",
      "training 0 epoch, loss is 35.762449658810205\n",
      "training 1 epoch, loss is 49.48908169947852\n",
      "training 2 epoch, loss is 42.632578524863156\n",
      "training 3 epoch, loss is 33.14631850661527\n",
      "training 4 epoch, loss is 33.14627214503483\n",
      "training 5 epoch, loss is 37.716794763810555\n",
      "training 6 epoch, loss is 57.82661805057605\n",
      "training 7 epoch, loss is 57.768298243877176\n",
      "training 8 epoch, loss is 57.768298243877176\n",
      "training 9 epoch, loss is 57.768298243877176\n",
      "training 0 epoch, loss is 10.912745087719813\n",
      "training 1 epoch, loss is 11.043318338338452\n",
      "training 2 epoch, loss is 10.971949752630218\n",
      "training 3 epoch, loss is 11.268570451259349\n",
      "training 4 epoch, loss is 11.275531239288055\n",
      "training 5 epoch, loss is 11.27560842841952\n",
      "training 6 epoch, loss is 12.301924574224815\n",
      "training 7 epoch, loss is 15.202561945673796\n",
      "training 8 epoch, loss is 15.026539105280836\n",
      "training 9 epoch, loss is 14.418237531364229\n",
      "training 0 epoch, loss is 33.126047782378514\n",
      "training 1 epoch, loss is 36.33535646858665\n",
      "training 2 epoch, loss is 37.06787685163175\n",
      "training 3 epoch, loss is 37.067813824812426\n",
      "training 4 epoch, loss is 37.0910417425597\n",
      "training 5 epoch, loss is 37.37251027692303\n",
      "training 6 epoch, loss is 37.37254458404579\n",
      "training 7 epoch, loss is 37.289835879797835\n",
      "training 8 epoch, loss is 37.253239585122685\n",
      "training 9 epoch, loss is 39.112655528941445\n",
      "training 0 epoch, loss is 10.789626773186637\n",
      "training 1 epoch, loss is 10.842855908214647\n",
      "training 2 epoch, loss is 10.16369904671833\n",
      "training 3 epoch, loss is 10.46821195596793\n",
      "training 4 epoch, loss is 10.432696126578556\n",
      "training 5 epoch, loss is 10.541761950344776\n",
      "training 6 epoch, loss is 10.775404076098864\n",
      "training 7 epoch, loss is 10.96864868062832\n",
      "training 8 epoch, loss is 12.028366609289689\n",
      "training 9 epoch, loss is 12.143827165262483\n",
      "training 0 epoch, loss is 122.61017832062718\n",
      "training 1 epoch, loss is 123.0370975969807\n",
      "training 2 epoch, loss is 123.03709721635994\n",
      "training 3 epoch, loss is 123.03709720504037\n",
      "training 4 epoch, loss is 123.03709720504037\n",
      "training 5 epoch, loss is 123.03709720504037\n",
      "training 6 epoch, loss is 123.03709720504037\n",
      "training 7 epoch, loss is 123.03709720504037\n",
      "training 8 epoch, loss is 123.03709720504037\n",
      "training 9 epoch, loss is 123.03709720504037\n",
      "training 0 epoch, loss is 80.33893578696322\n",
      "training 1 epoch, loss is 12.114637531008494\n",
      "training 2 epoch, loss is 12.21036617750805\n",
      "training 3 epoch, loss is 12.210549880836147\n",
      "training 4 epoch, loss is 12.21026799426544\n",
      "training 5 epoch, loss is 12.210255140145737\n",
      "training 6 epoch, loss is 12.210245770863768\n",
      "training 7 epoch, loss is 12.210238649037278\n",
      "training 8 epoch, loss is 12.21023454318149\n",
      "training 9 epoch, loss is 12.210232031020372\n",
      "training 0 epoch, loss is 32.44719740909798\n",
      "training 1 epoch, loss is 41.11648792846444\n",
      "training 2 epoch, loss is 39.19608776622869\n",
      "training 3 epoch, loss is 39.055587135122686\n",
      "training 4 epoch, loss is 39.07808006548209\n",
      "training 5 epoch, loss is 39.222361601645936\n",
      "training 6 epoch, loss is 39.2223615224089\n",
      "training 7 epoch, loss is 39.2223615224089\n",
      "training 8 epoch, loss is 39.22236152099396\n",
      "training 9 epoch, loss is 39.22236148986512\n",
      "training 0 epoch, loss is 69.96364374276395\n",
      "training 1 epoch, loss is 11.688713884417753\n",
      "training 2 epoch, loss is 11.63563167998009\n",
      "training 3 epoch, loss is 11.635625807153895\n",
      "training 4 epoch, loss is 11.579657761905155\n",
      "training 5 epoch, loss is 11.565860623853055\n",
      "training 6 epoch, loss is 11.565860621111595\n",
      "training 7 epoch, loss is 11.565860621200029\n",
      "training 8 epoch, loss is 11.565860621597983\n",
      "training 9 epoch, loss is 11.565859189096738\n",
      "training 0 epoch, loss is 34.74686272964102\n",
      "training 1 epoch, loss is 34.58284936429662\n",
      "training 2 epoch, loss is 34.58283828977131\n",
      "training 3 epoch, loss is 34.5828379175518\n",
      "training 4 epoch, loss is 34.582836770029736\n",
      "training 5 epoch, loss is 34.58283676719984\n",
      "training 6 epoch, loss is 34.58283676578489\n",
      "training 7 epoch, loss is 34.582836764369944\n",
      "training 8 epoch, loss is 34.58283600706375\n",
      "training 9 epoch, loss is 34.58275280271599\n",
      "training 0 epoch, loss is 11.354280382284248\n",
      "training 1 epoch, loss is 12.730434863886158\n",
      "training 2 epoch, loss is 13.175470619741551\n",
      "training 3 epoch, loss is 13.17547671064646\n",
      "training 4 epoch, loss is 13.175517372145851\n",
      "training 5 epoch, loss is 13.175426740740987\n",
      "training 6 epoch, loss is 13.175483367331216\n",
      "training 7 epoch, loss is 12.81996035532755\n",
      "training 8 epoch, loss is 11.392839249676696\n",
      "training 9 epoch, loss is 14.134713558551997\n",
      "training 0 epoch, loss is 37.40956180946788\n",
      "training 1 epoch, loss is 45.13929805179496\n",
      "training 2 epoch, loss is 46.85844616100391\n",
      "training 3 epoch, loss is 46.67974146496488\n",
      "training 4 epoch, loss is 49.75371897552733\n",
      "training 5 epoch, loss is 46.84286515551992\n",
      "training 6 epoch, loss is 44.74814892076454\n",
      "training 7 epoch, loss is 44.057043345332325\n",
      "training 8 epoch, loss is 44.18852805298646\n",
      "training 9 epoch, loss is 44.457797407041156\n",
      "training 0 epoch, loss is 8.30076469762187\n",
      "training 1 epoch, loss is 9.231999857818922\n",
      "training 2 epoch, loss is 9.136682967573714\n",
      "training 3 epoch, loss is 9.12084475920202\n",
      "training 4 epoch, loss is 9.12079059314896\n",
      "training 5 epoch, loss is 9.140382413546185\n",
      "training 6 epoch, loss is 9.107206356671554\n",
      "training 7 epoch, loss is 8.991018782243863\n",
      "training 8 epoch, loss is 8.98076262474503\n",
      "training 9 epoch, loss is 8.837925801892249\n",
      "training 0 epoch, loss is 32.9633761418414\n",
      "training 1 epoch, loss is 32.10602015747691\n",
      "training 2 epoch, loss is 29.896755462473685\n",
      "training 3 epoch, loss is 27.96604254435007\n",
      "training 4 epoch, loss is 27.682253746498233\n",
      "training 5 epoch, loss is 27.518229645021947\n",
      "training 6 epoch, loss is 27.307768416333978\n",
      "training 7 epoch, loss is 27.347932877986295\n",
      "training 8 epoch, loss is 27.19275227474264\n",
      "training 9 epoch, loss is 27.094452045902123\n",
      "training 0 epoch, loss is 10.273531954912386\n",
      "training 1 epoch, loss is 9.689194479819044\n",
      "training 2 epoch, loss is 9.775579049431606\n",
      "training 3 epoch, loss is 9.785713979460365\n",
      "training 4 epoch, loss is 9.683785211283835\n",
      "training 5 epoch, loss is 9.811349289751549\n",
      "training 6 epoch, loss is 9.68222004063538\n",
      "training 7 epoch, loss is 9.682064449751767\n",
      "training 8 epoch, loss is 9.682025711817628\n",
      "training 9 epoch, loss is 9.682007460503975\n",
      "training 0 epoch, loss is 35.30268423643947\n",
      "training 1 epoch, loss is 32.86205656585513\n",
      "training 2 epoch, loss is 31.059647291213363\n",
      "training 3 epoch, loss is 30.666558816807914\n",
      "training 4 epoch, loss is 30.44303062025358\n",
      "training 5 epoch, loss is 30.3591233086869\n",
      "training 6 epoch, loss is 30.30123923407199\n",
      "training 7 epoch, loss is 30.245927429572852\n",
      "training 8 epoch, loss is 30.200465648924208\n",
      "training 9 epoch, loss is 30.13071241794646\n",
      "training 0 epoch, loss is 11.10092384067505\n",
      "training 1 epoch, loss is 10.361196547923527\n",
      "training 2 epoch, loss is 10.360703028849041\n",
      "training 3 epoch, loss is 10.360515496718424\n",
      "training 4 epoch, loss is 10.360419117489094\n",
      "training 5 epoch, loss is 10.360362268674798\n",
      "training 6 epoch, loss is 10.36032622566209\n",
      "training 7 epoch, loss is 10.36030214447915\n",
      "training 8 epoch, loss is 10.36028582441347\n",
      "training 9 epoch, loss is 10.360275127347393\n",
      "training 0 epoch, loss is 34.191751301354934\n",
      "training 1 epoch, loss is 32.78994699746636\n",
      "training 2 epoch, loss is 31.33564573232339\n",
      "training 3 epoch, loss is 32.76780667649002\n",
      "training 4 epoch, loss is 31.595886736312323\n",
      "training 5 epoch, loss is 30.891223979633004\n",
      "training 6 epoch, loss is 28.972755910579988\n",
      "training 7 epoch, loss is 28.86928508525786\n",
      "training 8 epoch, loss is 28.74046753466306\n",
      "training 9 epoch, loss is 28.63197887523061\n",
      "training 0 epoch, loss is 11.293781098895858\n",
      "training 1 epoch, loss is 10.681748221929597\n",
      "training 2 epoch, loss is 10.681564613778797\n",
      "training 3 epoch, loss is 10.68150203137794\n",
      "training 4 epoch, loss is 10.681467406694896\n",
      "training 5 epoch, loss is 10.681444027546782\n",
      "training 6 epoch, loss is 10.681426929812934\n",
      "training 7 epoch, loss is 10.681413277873123\n",
      "training 8 epoch, loss is 10.681402683258057\n",
      "training 9 epoch, loss is 10.68139360814137\n",
      "training 0 epoch, loss is 34.448923202450175\n",
      "training 1 epoch, loss is 31.791650707342207\n",
      "training 2 epoch, loss is 31.414055718975295\n",
      "training 3 epoch, loss is 29.490848381531716\n",
      "training 4 epoch, loss is 29.207700465267514\n",
      "training 5 epoch, loss is 29.14478691694995\n",
      "training 6 epoch, loss is 29.070660972500008\n",
      "training 7 epoch, loss is 29.03461909351696\n",
      "training 8 epoch, loss is 28.97697808853673\n",
      "training 9 epoch, loss is 28.918709184515\n",
      "training 0 epoch, loss is 10.973097201453363\n",
      "training 1 epoch, loss is 10.406365476139056\n",
      "training 2 epoch, loss is 10.405953239854343\n",
      "training 3 epoch, loss is 10.40578687007236\n",
      "training 4 epoch, loss is 10.405697316794834\n",
      "training 5 epoch, loss is 10.405644426794852\n",
      "training 6 epoch, loss is 10.405610374752072\n",
      "training 7 epoch, loss is 10.4055878130434\n",
      "training 8 epoch, loss is 10.405571937029642\n",
      "training 9 epoch, loss is 10.405561707614966\n",
      "training 0 epoch, loss is 33.33060952879555\n",
      "training 1 epoch, loss is 37.20214837210292\n",
      "training 2 epoch, loss is 37.38149517610979\n",
      "training 3 epoch, loss is 36.826482845520054\n",
      "training 4 epoch, loss is 36.39294646034786\n",
      "training 5 epoch, loss is 35.78613129426181\n",
      "training 6 epoch, loss is 35.647840521500086\n",
      "training 7 epoch, loss is 35.57049015845318\n",
      "training 8 epoch, loss is 34.69889511059671\n",
      "training 9 epoch, loss is 34.33645161292143\n",
      "training 0 epoch, loss is 9.883541436495607\n",
      "training 1 epoch, loss is 8.695489636492482\n",
      "training 2 epoch, loss is 8.658551385344312\n",
      "training 3 epoch, loss is 8.559779379505786\n",
      "training 4 epoch, loss is 8.527497184028965\n",
      "training 5 epoch, loss is 8.515992431041926\n",
      "training 6 epoch, loss is 8.534608000751241\n",
      "training 7 epoch, loss is 8.52522475820709\n",
      "training 8 epoch, loss is 9.093500877047948\n",
      "training 9 epoch, loss is 10.741645940366679\n",
      "training 0 epoch, loss is 35.38586749936778\n",
      "training 1 epoch, loss is 49.20397773214812\n",
      "training 2 epoch, loss is 36.472233149701125\n",
      "training 3 epoch, loss is 34.84231761734634\n",
      "training 4 epoch, loss is 34.19282783307025\n",
      "training 5 epoch, loss is 34.23356122839699\n",
      "training 6 epoch, loss is 34.211493117917186\n",
      "training 7 epoch, loss is 34.07190998106399\n",
      "training 8 epoch, loss is 34.03839385078374\n",
      "training 9 epoch, loss is 33.449740308284404\n",
      "training 0 epoch, loss is 10.95270015622105\n",
      "training 1 epoch, loss is 10.370380738700355\n",
      "training 2 epoch, loss is 10.369692566243584\n",
      "training 3 epoch, loss is 10.36947343153575\n",
      "training 4 epoch, loss is 10.369372398037054\n",
      "training 5 epoch, loss is 10.369317753268103\n",
      "training 6 epoch, loss is 10.369285343043527\n",
      "training 7 epoch, loss is 10.369264381951501\n",
      "training 8 epoch, loss is 10.369251565228407\n",
      "training 9 epoch, loss is 10.369242096955416\n",
      "training 0 epoch, loss is 33.9618702991382\n",
      "training 1 epoch, loss is 33.24242844134127\n",
      "training 2 epoch, loss is 38.50988935031417\n",
      "training 3 epoch, loss is 32.665207755362246\n",
      "training 4 epoch, loss is 34.014885263467754\n",
      "training 5 epoch, loss is 32.470977313551245\n",
      "training 6 epoch, loss is 32.9550353380401\n",
      "training 7 epoch, loss is 39.25812462339238\n",
      "training 8 epoch, loss is 34.18359291288195\n",
      "training 9 epoch, loss is 33.73537317758498\n",
      "training 0 epoch, loss is 9.988935799887873\n",
      "training 1 epoch, loss is 9.670194312510045\n",
      "training 2 epoch, loss is 9.468050566775508\n",
      "training 3 epoch, loss is 9.467909982212396\n",
      "training 4 epoch, loss is 9.507450885838589\n",
      "training 5 epoch, loss is 9.517708133364467\n",
      "training 6 epoch, loss is 9.467766438972879\n",
      "training 7 epoch, loss is 9.467760187935758\n",
      "training 8 epoch, loss is 9.46775581913196\n",
      "training 9 epoch, loss is 9.467752065244255\n",
      "training 0 epoch, loss is 35.03130818724101\n",
      "training 1 epoch, loss is 33.57884574056026\n",
      "training 2 epoch, loss is 33.3422445595973\n",
      "training 3 epoch, loss is 33.3437115648349\n",
      "training 4 epoch, loss is 33.651207853966625\n",
      "training 5 epoch, loss is 33.94310563897927\n",
      "training 6 epoch, loss is 35.123506669480946\n",
      "training 7 epoch, loss is 37.38580922217815\n",
      "training 8 epoch, loss is 33.66666543335387\n",
      "training 9 epoch, loss is 36.359397978022315\n",
      "training 0 epoch, loss is 8.588496867561766\n",
      "training 1 epoch, loss is 8.060822216243128\n",
      "training 2 epoch, loss is 8.501847517902217\n",
      "training 3 epoch, loss is 8.038132067221767\n",
      "training 4 epoch, loss is 8.037975145041678\n",
      "training 5 epoch, loss is 8.037904482650367\n",
      "training 6 epoch, loss is 8.037862879516112\n",
      "training 7 epoch, loss is 8.037835799700083\n",
      "training 8 epoch, loss is 8.037817221561124\n",
      "training 9 epoch, loss is 8.037804453147999\n",
      "training 0 epoch, loss is 36.29026658302422\n",
      "training 1 epoch, loss is 34.82933390224546\n",
      "training 2 epoch, loss is 34.5530355028388\n",
      "training 3 epoch, loss is 38.5745999710054\n",
      "training 4 epoch, loss is 36.85006451038561\n",
      "training 5 epoch, loss is 34.80730086242233\n",
      "training 6 epoch, loss is 34.807278690820986\n",
      "training 7 epoch, loss is 34.8072693128393\n",
      "training 8 epoch, loss is 34.80726354421069\n",
      "training 9 epoch, loss is 34.80726097210225\n",
      "training 0 epoch, loss is 10.393345880623388\n",
      "training 1 epoch, loss is 8.3718890026875\n",
      "training 2 epoch, loss is 8.27410163927149\n",
      "training 3 epoch, loss is 8.27396828444669\n",
      "training 4 epoch, loss is 8.273888095638519\n",
      "training 5 epoch, loss is 8.273834447721876\n",
      "training 6 epoch, loss is 8.273797456429337\n",
      "training 7 epoch, loss is 8.273771798862725\n",
      "training 8 epoch, loss is 8.273754138355793\n",
      "training 9 epoch, loss is 8.273741197183684\n",
      "training 0 epoch, loss is 33.71476442815642\n",
      "training 1 epoch, loss is 36.507943744232286\n",
      "training 2 epoch, loss is 32.641769915730734\n",
      "training 3 epoch, loss is 32.371625703900435\n",
      "training 4 epoch, loss is 32.371291707151016\n",
      "training 5 epoch, loss is 32.37115433935949\n",
      "training 6 epoch, loss is 32.37106341097051\n",
      "training 7 epoch, loss is 32.37101134175894\n",
      "training 8 epoch, loss is 32.37097614928417\n",
      "training 9 epoch, loss is 32.37095115804708\n",
      "training 0 epoch, loss is 11.017338809797689\n",
      "training 1 epoch, loss is 9.761242057862816\n",
      "training 2 epoch, loss is 9.758553536625397\n",
      "training 3 epoch, loss is 9.758384011321057\n",
      "training 4 epoch, loss is 9.75830053418698\n",
      "training 5 epoch, loss is 9.758236290173997\n",
      "training 6 epoch, loss is 9.738669117984504\n",
      "training 7 epoch, loss is 9.737451708325647\n",
      "training 8 epoch, loss is 9.737436306870125\n",
      "training 9 epoch, loss is 9.737426776870068\n",
      "training 0 epoch, loss is 35.578027786593054\n",
      "training 1 epoch, loss is 34.64786902616127\n",
      "training 2 epoch, loss is 35.45276148108771\n",
      "training 3 epoch, loss is 33.89326559292368\n",
      "training 4 epoch, loss is 33.8906119843663\n",
      "training 5 epoch, loss is 33.89031471776325\n",
      "training 6 epoch, loss is 33.890169822011224\n",
      "training 7 epoch, loss is 33.890088321414474\n",
      "training 8 epoch, loss is 33.890039027179384\n",
      "training 9 epoch, loss is 33.89000697993295\n",
      "training 0 epoch, loss is 10.757578151864317\n",
      "training 1 epoch, loss is 8.993383931034751\n",
      "training 2 epoch, loss is 9.392199188448732\n",
      "training 3 epoch, loss is 9.10744809573765\n",
      "training 4 epoch, loss is 9.029573480656664\n",
      "training 5 epoch, loss is 9.1069841701261\n",
      "training 6 epoch, loss is 9.141513257444966\n",
      "training 7 epoch, loss is 8.988106258125086\n",
      "training 8 epoch, loss is 8.98808513822736\n",
      "training 9 epoch, loss is 8.988073189541122\n",
      "training 0 epoch, loss is 34.04667490579044\n",
      "training 1 epoch, loss is 33.86904075312225\n",
      "training 2 epoch, loss is 35.77444954369256\n",
      "training 3 epoch, loss is 34.00797813431115\n",
      "training 4 epoch, loss is 33.92911869756888\n",
      "training 5 epoch, loss is 33.92877658887214\n",
      "training 6 epoch, loss is 33.928669839831734\n",
      "training 7 epoch, loss is 33.92861314345965\n",
      "training 8 epoch, loss is 33.928574683009074\n",
      "training 9 epoch, loss is 33.92855087865869\n",
      "training 0 epoch, loss is 10.329171501531821\n",
      "training 1 epoch, loss is 9.533020120252472\n",
      "training 2 epoch, loss is 9.531708251989349\n",
      "training 3 epoch, loss is 9.531482087489755\n",
      "training 4 epoch, loss is 9.531381999135549\n",
      "training 5 epoch, loss is 9.531328781210384\n",
      "training 6 epoch, loss is 9.531297163016552\n",
      "training 7 epoch, loss is 9.531277547652707\n",
      "training 8 epoch, loss is 9.531265197712015\n",
      "training 9 epoch, loss is 9.531256676163043\n",
      "training 0 epoch, loss is 30.08673649495716\n",
      "training 1 epoch, loss is 55.71186958324131\n",
      "training 2 epoch, loss is 48.27570418208749\n",
      "training 3 epoch, loss is 42.65791945358412\n",
      "training 4 epoch, loss is 41.02562833318724\n",
      "training 5 epoch, loss is 39.69270345455903\n",
      "training 6 epoch, loss is 39.854004697227516\n",
      "training 7 epoch, loss is 54.96735650128533\n",
      "training 8 epoch, loss is 53.56370993018239\n",
      "training 9 epoch, loss is 53.174673303652234\n",
      "training 0 epoch, loss is 7.626193192397805\n",
      "training 1 epoch, loss is 7.498849623878031\n",
      "training 2 epoch, loss is 8.106487576482204\n",
      "training 3 epoch, loss is 8.458769501438391\n",
      "training 4 epoch, loss is 8.269548288339235\n",
      "training 5 epoch, loss is 8.727419293864191\n",
      "training 6 epoch, loss is 9.577085712830637\n",
      "training 7 epoch, loss is 9.549847440490117\n",
      "training 8 epoch, loss is 9.43916091442285\n",
      "training 9 epoch, loss is 10.637202311981502\n",
      "training 0 epoch, loss is 26.476116553634316\n",
      "training 1 epoch, loss is 38.06975516290003\n",
      "training 2 epoch, loss is 44.09211354430776\n",
      "training 3 epoch, loss is 42.886984715741185\n",
      "training 4 epoch, loss is 43.77472691655778\n",
      "training 5 epoch, loss is 41.685615278868895\n",
      "training 6 epoch, loss is 40.99349355834705\n",
      "training 7 epoch, loss is 49.822441437853726\n",
      "training 8 epoch, loss is 45.27718737031657\n",
      "training 9 epoch, loss is 44.89474770325051\n",
      "training 0 epoch, loss is 13.595859779680607\n",
      "training 1 epoch, loss is 50.53992173438256\n",
      "training 2 epoch, loss is 50.27757067425669\n",
      "training 3 epoch, loss is 49.80505272188951\n",
      "training 4 epoch, loss is 48.38844856557931\n",
      "training 5 epoch, loss is 45.02025811891527\n",
      "training 6 epoch, loss is 41.86816047773163\n",
      "training 7 epoch, loss is 37.69994109568327\n",
      "training 8 epoch, loss is 37.37274632804118\n",
      "training 9 epoch, loss is 36.50218925030366\n",
      "training 0 epoch, loss is 31.856688712776062\n",
      "training 1 epoch, loss is 31.651602931519644\n",
      "training 2 epoch, loss is 46.13771545241424\n",
      "training 3 epoch, loss is 44.83931450161099\n",
      "training 4 epoch, loss is 45.16446150574026\n",
      "training 5 epoch, loss is 44.210955283342024\n",
      "training 6 epoch, loss is 41.793863729394154\n",
      "training 7 epoch, loss is 41.54016027546513\n",
      "training 8 epoch, loss is 42.773650920223055\n",
      "training 9 epoch, loss is 53.14428813165214\n",
      "training 0 epoch, loss is 13.41802143597373\n",
      "training 1 epoch, loss is 46.4498220699831\n",
      "training 2 epoch, loss is 45.98867708285414\n",
      "training 3 epoch, loss is 46.23033160413408\n",
      "training 4 epoch, loss is 50.26304420802289\n",
      "training 5 epoch, loss is 53.09397584783571\n",
      "training 6 epoch, loss is 52.584606331604526\n",
      "training 7 epoch, loss is 55.353247764202536\n",
      "training 8 epoch, loss is 58.50130351360898\n",
      "training 9 epoch, loss is 52.115869360437024\n",
      "training 0 epoch, loss is 29.626534200872655\n",
      "training 1 epoch, loss is 45.32975902203553\n",
      "training 2 epoch, loss is 43.52721689504199\n",
      "training 3 epoch, loss is 49.45936311420502\n",
      "training 4 epoch, loss is 51.024229189328416\n",
      "training 5 epoch, loss is 50.130070109601185\n",
      "training 6 epoch, loss is 48.16776423551485\n",
      "training 7 epoch, loss is 48.302208895403425\n",
      "training 8 epoch, loss is 49.18948600057064\n",
      "training 9 epoch, loss is 49.818565287858384\n",
      "training 0 epoch, loss is 9.683938445742028\n",
      "training 1 epoch, loss is 18.96436068247367\n",
      "training 2 epoch, loss is 25.716189189948263\n",
      "training 3 epoch, loss is 20.03392650926538\n",
      "training 4 epoch, loss is 18.085011922121932\n",
      "training 5 epoch, loss is 16.41917137941138\n",
      "training 6 epoch, loss is 15.729332996114403\n",
      "training 7 epoch, loss is 14.480303914329095\n",
      "training 8 epoch, loss is 13.70670211963898\n",
      "training 9 epoch, loss is 12.741889350226959\n",
      "training 0 epoch, loss is 63.157832535354075\n",
      "training 1 epoch, loss is 61.147227189639025\n",
      "training 2 epoch, loss is 60.13330361799776\n",
      "training 3 epoch, loss is 58.75495420668263\n",
      "training 4 epoch, loss is 51.564314024672136\n",
      "training 5 epoch, loss is 48.585082993265075\n",
      "training 6 epoch, loss is 49.52450391040004\n",
      "training 7 epoch, loss is 49.385975690685676\n",
      "training 8 epoch, loss is 51.65906592314781\n",
      "training 9 epoch, loss is 55.32424237444245\n",
      "training 0 epoch, loss is 9.153597358094656\n",
      "training 1 epoch, loss is 13.05038837006299\n",
      "training 2 epoch, loss is 14.030074063511648\n",
      "training 3 epoch, loss is 11.672410936998507\n",
      "training 4 epoch, loss is 10.590080154888353\n",
      "training 5 epoch, loss is 11.095537005616933\n",
      "training 6 epoch, loss is 11.271175486231858\n",
      "training 7 epoch, loss is 10.895537599390622\n",
      "training 8 epoch, loss is 10.944723376324427\n",
      "training 9 epoch, loss is 11.304402238426825\n",
      "training 0 epoch, loss is 28.85588149770608\n",
      "training 1 epoch, loss is 40.06612911683339\n",
      "training 2 epoch, loss is 41.17940570911509\n",
      "training 3 epoch, loss is 51.36536879786663\n",
      "training 4 epoch, loss is 50.92180286774561\n",
      "training 5 epoch, loss is 51.383103447847446\n",
      "training 6 epoch, loss is 51.75036690454897\n",
      "training 7 epoch, loss is 52.90520167151792\n",
      "training 8 epoch, loss is 52.87063392815498\n",
      "training 9 epoch, loss is 54.51306652812735\n",
      "training 0 epoch, loss is 11.650041133961267\n",
      "training 1 epoch, loss is 18.669684803516823\n",
      "training 2 epoch, loss is 12.027076679229383\n",
      "training 3 epoch, loss is 10.870426229845169\n",
      "training 4 epoch, loss is 10.737334098434767\n",
      "training 5 epoch, loss is 10.831972996917075\n",
      "training 6 epoch, loss is 11.06817160741335\n",
      "training 7 epoch, loss is 12.00618717875829\n",
      "training 8 epoch, loss is 12.319027031730386\n",
      "training 9 epoch, loss is 12.773011919033634\n",
      "training 0 epoch, loss is 62.46279150183459\n",
      "training 1 epoch, loss is 68.6000376351465\n",
      "training 2 epoch, loss is 68.60004436808364\n",
      "training 3 epoch, loss is 68.60001811126484\n",
      "training 4 epoch, loss is 68.60016083927377\n",
      "training 5 epoch, loss is 68.60011297157062\n",
      "training 6 epoch, loss is 68.5161226950807\n",
      "training 7 epoch, loss is 68.26011403242143\n",
      "training 8 epoch, loss is 68.26037585392639\n",
      "training 9 epoch, loss is 68.27354954057083\n",
      "training 0 epoch, loss is 24.457012641412213\n",
      "training 1 epoch, loss is 7.528572327393806\n",
      "training 2 epoch, loss is 7.795855794643613\n",
      "training 3 epoch, loss is 7.788022367642966\n",
      "training 4 epoch, loss is 7.662314530384647\n",
      "training 5 epoch, loss is 7.659907420638407\n",
      "training 6 epoch, loss is 8.211270363962702\n",
      "training 7 epoch, loss is 8.361924158359493\n",
      "training 8 epoch, loss is 8.300528685951976\n",
      "training 9 epoch, loss is 8.217869267796196\n",
      "training 0 epoch, loss is 30.40908264500514\n",
      "training 1 epoch, loss is 39.63348443175141\n",
      "training 2 epoch, loss is 35.90511912337738\n",
      "training 3 epoch, loss is 35.90511912337738\n",
      "training 4 epoch, loss is 35.905119114875085\n",
      "training 5 epoch, loss is 35.9051180705096\n",
      "training 6 epoch, loss is 35.90532227172717\n",
      "training 7 epoch, loss is 35.92315340498227\n",
      "training 8 epoch, loss is 35.92317974226436\n",
      "training 9 epoch, loss is 35.92317974226436\n",
      "training 0 epoch, loss is 10.112598761849737\n",
      "training 1 epoch, loss is 19.227614362894336\n",
      "training 2 epoch, loss is 18.807356216068786\n",
      "training 3 epoch, loss is 20.290018614364946\n",
      "training 4 epoch, loss is 19.163476529017778\n",
      "training 5 epoch, loss is 19.565382386387547\n",
      "training 6 epoch, loss is 20.789494268474396\n",
      "training 7 epoch, loss is 20.473223207366235\n",
      "training 8 epoch, loss is 20.028825767526477\n",
      "training 9 epoch, loss is 23.528058601169672\n",
      "training 0 epoch, loss is 73.21654635488721\n",
      "training 1 epoch, loss is 72.21478213694841\n",
      "training 2 epoch, loss is 69.21847331877632\n",
      "training 3 epoch, loss is 56.89255283487745\n",
      "training 4 epoch, loss is 57.093251247080566\n",
      "training 5 epoch, loss is 57.010398951500036\n",
      "training 6 epoch, loss is 57.06161068574434\n",
      "training 7 epoch, loss is 57.061592084938766\n",
      "training 8 epoch, loss is 56.600820102527166\n",
      "training 9 epoch, loss is 54.95312253617922\n",
      "training 0 epoch, loss is 50.549022552521365\n",
      "training 1 epoch, loss is 33.966162219776244\n",
      "training 2 epoch, loss is 46.626725322768664\n",
      "training 3 epoch, loss is 45.97964740896084\n",
      "training 4 epoch, loss is 44.72914530182097\n",
      "training 5 epoch, loss is 60.62438711638974\n",
      "training 6 epoch, loss is 58.67287509589945\n",
      "training 7 epoch, loss is 58.45808246510673\n",
      "training 8 epoch, loss is 58.263926787616946\n",
      "training 9 epoch, loss is 58.12454042519589\n",
      "training 0 epoch, loss is 110.49644543915899\n",
      "training 1 epoch, loss is 87.3342442317957\n",
      "training 2 epoch, loss is 85.11562385315004\n",
      "training 3 epoch, loss is 79.2220806709383\n",
      "training 4 epoch, loss is 79.08273221799988\n",
      "training 5 epoch, loss is 78.80812007113627\n",
      "training 6 epoch, loss is 78.30852080448268\n",
      "training 7 epoch, loss is 76.9664541267146\n",
      "training 8 epoch, loss is 76.73039577861567\n",
      "training 9 epoch, loss is 75.70340675433948\n",
      "training 0 epoch, loss is 7.661972477921569\n",
      "training 1 epoch, loss is 12.17419323790542\n",
      "training 2 epoch, loss is 20.58153014557121\n",
      "training 3 epoch, loss is 15.840017596397892\n",
      "training 4 epoch, loss is 15.634658141199843\n",
      "training 5 epoch, loss is 15.37869559742444\n",
      "training 6 epoch, loss is 14.56688325314918\n",
      "training 7 epoch, loss is 14.421297027256882\n",
      "training 8 epoch, loss is 14.32744748098914\n",
      "training 9 epoch, loss is 13.692025061033512\n",
      "training 0 epoch, loss is 35.17274896269913\n",
      "training 1 epoch, loss is 30.259037017424312\n",
      "training 2 epoch, loss is 42.332111950488404\n",
      "training 3 epoch, loss is 41.568304580467746\n",
      "training 4 epoch, loss is 43.592704842016325\n",
      "training 5 epoch, loss is 51.857827261134496\n",
      "training 6 epoch, loss is 50.78427653122992\n",
      "training 7 epoch, loss is 50.784275833439935\n",
      "training 8 epoch, loss is 50.78429469061623\n",
      "training 9 epoch, loss is 50.89024098737038\n",
      "training 0 epoch, loss is nan\n",
      "training 1 epoch, loss is nan\n",
      "training 2 epoch, loss is nan\n",
      "training 3 epoch, loss is nan\n",
      "training 4 epoch, loss is nan\n",
      "training 5 epoch, loss is nan\n",
      "training 6 epoch, loss is nan\n",
      "training 7 epoch, loss is nan\n",
      "training 8 epoch, loss is nan\n",
      "training 9 epoch, loss is nan\n",
      "training 0 epoch, loss is 30.690677786209964\n",
      "training 1 epoch, loss is 30.77518629965775\n",
      "training 2 epoch, loss is 30.76578383043652\n",
      "training 3 epoch, loss is 30.76578383043652\n",
      "training 4 epoch, loss is 30.76578383043652\n",
      "training 5 epoch, loss is 30.76578383043652\n",
      "training 6 epoch, loss is 30.76578383043652\n",
      "training 7 epoch, loss is 30.76578383043652\n",
      "training 8 epoch, loss is 30.76578383043652\n",
      "training 9 epoch, loss is 30.76578383043652\n",
      "training 0 epoch, loss is 11.536247256185643\n",
      "training 1 epoch, loss is 29.859466920857265\n",
      "training 2 epoch, loss is 47.33777919531223\n",
      "training 3 epoch, loss is 47.26454694026646\n",
      "training 4 epoch, loss is 47.038896082060475\n",
      "training 5 epoch, loss is 46.84417968023054\n",
      "training 6 epoch, loss is 46.35396572278832\n",
      "training 7 epoch, loss is 43.92387467755605\n",
      "training 8 epoch, loss is 41.485572045837785\n",
      "training 9 epoch, loss is 40.33649014098152\n",
      "training 0 epoch, loss is 27.312650904103627\n",
      "training 1 epoch, loss is 39.266996844453345\n",
      "training 2 epoch, loss is 44.696282799679025\n",
      "training 3 epoch, loss is 40.42043208509949\n",
      "training 4 epoch, loss is 36.38287304646891\n",
      "training 5 epoch, loss is 45.209244373246186\n",
      "training 6 epoch, loss is 47.10781982103335\n",
      "training 7 epoch, loss is 44.33873268283174\n",
      "training 8 epoch, loss is 43.16908677132353\n",
      "training 9 epoch, loss is 45.970320143800464\n",
      "training 0 epoch, loss is 5.421846212493118\n",
      "training 1 epoch, loss is 10.091382741917046\n",
      "training 2 epoch, loss is 29.446091368396488\n",
      "training 3 epoch, loss is 28.309298393413293\n",
      "training 4 epoch, loss is 26.291601349232106\n",
      "training 5 epoch, loss is 21.834830716097567\n",
      "training 6 epoch, loss is 18.897121676749574\n",
      "training 7 epoch, loss is 17.036192955775324\n",
      "training 8 epoch, loss is 15.39468970409531\n",
      "training 9 epoch, loss is 13.934330054036499\n",
      "training 0 epoch, loss is 28.374842722311573\n",
      "training 1 epoch, loss is 41.65298748889645\n",
      "training 2 epoch, loss is 37.083915558541385\n",
      "training 3 epoch, loss is 41.5020406928234\n",
      "training 4 epoch, loss is 41.30229448916208\n",
      "training 5 epoch, loss is 43.2136427268209\n",
      "training 6 epoch, loss is 35.53465605942803\n",
      "training 7 epoch, loss is 35.68480204983175\n",
      "training 8 epoch, loss is 41.46234819962911\n",
      "training 9 epoch, loss is 36.013049312709704\n",
      "training 0 epoch, loss is 5.623892206245431\n",
      "training 1 epoch, loss is 6.7940954147818715\n",
      "training 2 epoch, loss is 8.409453247903185\n",
      "training 3 epoch, loss is 7.384486125655008\n",
      "training 4 epoch, loss is 9.603132312426492\n",
      "training 5 epoch, loss is 9.61299959972545\n",
      "training 6 epoch, loss is 9.5806572863097\n",
      "training 7 epoch, loss is 10.420089857227193\n",
      "training 8 epoch, loss is 10.057902315250136\n",
      "training 9 epoch, loss is 9.561417610470194\n",
      "training 0 epoch, loss is 27.949109919789283\n",
      "training 1 epoch, loss is 35.81170367206328\n",
      "training 2 epoch, loss is 50.683478475634\n",
      "training 3 epoch, loss is 51.604059268308674\n",
      "training 4 epoch, loss is 34.91312293611047\n",
      "training 5 epoch, loss is 46.1952122492876\n",
      "training 6 epoch, loss is 33.57015824074738\n",
      "training 7 epoch, loss is 34.78060942092352\n",
      "training 8 epoch, loss is 40.20416249770351\n",
      "training 9 epoch, loss is 52.48055754650594\n",
      "training 0 epoch, loss is 5.75269066930392\n",
      "training 1 epoch, loss is 6.840339365373821\n",
      "training 2 epoch, loss is 7.977057545835523\n",
      "training 3 epoch, loss is 8.74006043618838\n",
      "training 4 epoch, loss is 7.461928144196817\n",
      "training 5 epoch, loss is 7.516195253046224\n",
      "training 6 epoch, loss is 9.240991384299466\n",
      "training 7 epoch, loss is 9.636951361525837\n",
      "training 8 epoch, loss is 10.084121551552583\n",
      "training 9 epoch, loss is 9.766410247530533\n",
      "training 0 epoch, loss is 29.196081435941657\n",
      "training 1 epoch, loss is 35.466443177074595\n",
      "training 2 epoch, loss is 36.13485012828949\n",
      "training 3 epoch, loss is 40.27665779530382\n",
      "training 4 epoch, loss is 33.10999695677725\n",
      "training 5 epoch, loss is 32.29544674750157\n",
      "training 6 epoch, loss is 44.353366667258086\n",
      "training 7 epoch, loss is 39.71822777950658\n",
      "training 8 epoch, loss is 35.09604992178362\n",
      "training 9 epoch, loss is 31.57393170396118\n",
      "training 0 epoch, loss is 5.615692263394373\n",
      "training 1 epoch, loss is 7.1334546990083725\n",
      "training 2 epoch, loss is 9.64877055644059\n",
      "training 3 epoch, loss is 9.884603357631686\n",
      "training 4 epoch, loss is 8.286377879022933\n",
      "training 5 epoch, loss is 9.404071679729055\n",
      "training 6 epoch, loss is 8.969635689395853\n",
      "training 7 epoch, loss is 9.26733874811903\n",
      "training 8 epoch, loss is 8.935653064748685\n",
      "training 9 epoch, loss is 8.927827163733664\n",
      "training 0 epoch, loss is 28.46902034641727\n",
      "training 1 epoch, loss is 38.589434314596545\n",
      "training 2 epoch, loss is 38.97112447611479\n",
      "training 3 epoch, loss is 39.190247238257164\n",
      "training 4 epoch, loss is 38.50495888096023\n",
      "training 5 epoch, loss is 36.294261452949186\n",
      "training 6 epoch, loss is 33.78619570912964\n",
      "training 7 epoch, loss is 33.51384112488711\n",
      "training 8 epoch, loss is 34.651879790429724\n",
      "training 9 epoch, loss is 34.63550900013493\n",
      "training 0 epoch, loss is 4.72961171352766\n",
      "training 1 epoch, loss is 4.8872250505123365\n",
      "training 2 epoch, loss is 7.153044534720016\n",
      "training 3 epoch, loss is 11.722427230825764\n",
      "training 4 epoch, loss is 10.971847111674286\n",
      "training 5 epoch, loss is 10.882220422954076\n",
      "training 6 epoch, loss is 9.622043258557511\n",
      "training 7 epoch, loss is 8.08929079205063\n",
      "training 8 epoch, loss is 11.570707218517004\n",
      "training 9 epoch, loss is 11.747538522958136\n",
      "training 0 epoch, loss is 29.49285979847526\n",
      "training 1 epoch, loss is 50.33053543889558\n",
      "training 2 epoch, loss is 51.23935559208474\n",
      "training 3 epoch, loss is 51.10311379230606\n",
      "training 4 epoch, loss is 51.09080235264333\n",
      "training 5 epoch, loss is 51.086952543378054\n",
      "training 6 epoch, loss is 51.08793839838809\n",
      "training 7 epoch, loss is 52.052151626423665\n",
      "training 8 epoch, loss is 55.28283605081214\n",
      "training 9 epoch, loss is 51.619255274191545\n",
      "training 0 epoch, loss is 6.506887300984619\n",
      "training 1 epoch, loss is 6.796959296498967\n",
      "training 2 epoch, loss is 8.85669409715811\n",
      "training 3 epoch, loss is 13.733314793501657\n",
      "training 4 epoch, loss is 11.131222007392687\n",
      "training 5 epoch, loss is 13.170950122553446\n",
      "training 6 epoch, loss is 12.954587059311061\n",
      "training 7 epoch, loss is 12.897384877175712\n",
      "training 8 epoch, loss is 12.912409731239434\n",
      "training 9 epoch, loss is 12.9448778794896\n",
      "training 0 epoch, loss is 41.364399694083176\n",
      "training 1 epoch, loss is 29.32546371259127\n",
      "training 2 epoch, loss is 38.465989802353285\n",
      "training 3 epoch, loss is 47.330770592948625\n",
      "training 4 epoch, loss is 46.9269106822525\n",
      "training 5 epoch, loss is 46.726600287509775\n",
      "training 6 epoch, loss is 46.61867084164029\n",
      "training 7 epoch, loss is 46.618651210288974\n",
      "training 8 epoch, loss is 46.61863937591498\n",
      "training 9 epoch, loss is 48.27728080921958\n",
      "training 0 epoch, loss is 34.17432320200857\n",
      "training 1 epoch, loss is 6.411084901596743\n",
      "training 2 epoch, loss is 8.394735405989854\n",
      "training 3 epoch, loss is 11.705640464048358\n",
      "training 4 epoch, loss is 14.55278970463649\n",
      "training 5 epoch, loss is 11.277740017806652\n",
      "training 6 epoch, loss is 6.177139969946103\n",
      "training 7 epoch, loss is 5.4685195137467515\n",
      "training 8 epoch, loss is 7.664592792509395\n",
      "training 9 epoch, loss is 10.442812183391004\n",
      "training 0 epoch, loss is 26.317322480244588\n",
      "training 1 epoch, loss is 37.52365716080698\n",
      "training 2 epoch, loss is 33.629078499408216\n",
      "training 3 epoch, loss is 29.87750321817079\n",
      "training 4 epoch, loss is 54.92764449004433\n",
      "training 5 epoch, loss is 54.8916440178124\n",
      "training 6 epoch, loss is 54.78229855144892\n",
      "training 7 epoch, loss is 55.21564748639692\n",
      "training 8 epoch, loss is 55.20188499506657\n",
      "training 9 epoch, loss is 55.069894758256545\n",
      "training 0 epoch, loss is 7.292744964206201\n",
      "training 1 epoch, loss is 8.101343108714733\n",
      "training 2 epoch, loss is 8.533920838033994\n",
      "training 3 epoch, loss is 9.002727688908754\n",
      "training 4 epoch, loss is 9.147487832225007\n",
      "training 5 epoch, loss is 9.005908506741141\n",
      "training 6 epoch, loss is 9.346479742479183\n",
      "training 7 epoch, loss is 9.719468604889526\n",
      "training 8 epoch, loss is 9.725165636963688\n",
      "training 9 epoch, loss is 9.79617843741515\n",
      "training 0 epoch, loss is 28.992770418745827\n",
      "training 1 epoch, loss is 30.673135449695305\n",
      "training 2 epoch, loss is 29.888989531896588\n",
      "training 3 epoch, loss is 32.9238419491654\n",
      "training 4 epoch, loss is 33.138481469775165\n",
      "training 5 epoch, loss is 33.13847994446224\n",
      "training 6 epoch, loss is 33.20221428520071\n",
      "training 7 epoch, loss is 33.01749314641917\n",
      "training 8 epoch, loss is 33.017493075671815\n",
      "training 9 epoch, loss is 33.05634356858292\n",
      "training 0 epoch, loss is 60.34293760376974\n",
      "training 1 epoch, loss is 7.1006651448544655\n",
      "training 2 epoch, loss is 6.640857801780945\n",
      "training 3 epoch, loss is 7.710426375004587\n",
      "training 4 epoch, loss is 7.88148743002634\n",
      "training 5 epoch, loss is 7.881431870361464\n",
      "training 6 epoch, loss is 7.890903243833727\n",
      "training 7 epoch, loss is 7.921861721805189\n",
      "training 8 epoch, loss is 7.968981138676494\n",
      "training 9 epoch, loss is 8.09741515414209\n",
      "training 0 epoch, loss is 49.10347121766041\n",
      "training 1 epoch, loss is 47.41445716504748\n",
      "training 2 epoch, loss is 46.60075377900629\n",
      "training 3 epoch, loss is 46.52633246118396\n",
      "training 4 epoch, loss is 44.58843163955813\n",
      "training 5 epoch, loss is 42.76273011847314\n",
      "training 6 epoch, loss is 30.92421637528551\n",
      "training 7 epoch, loss is 30.947465517953347\n",
      "training 8 epoch, loss is 30.822791139653244\n",
      "training 9 epoch, loss is 30.838850232764237\n",
      "training 0 epoch, loss is 6.355632617439555\n",
      "training 1 epoch, loss is 8.502890874208877\n",
      "training 2 epoch, loss is 12.053581055795403\n",
      "training 3 epoch, loss is 10.594075069914762\n",
      "training 4 epoch, loss is 10.775514793778473\n",
      "training 5 epoch, loss is 11.924797581602718\n",
      "training 6 epoch, loss is 14.704095967180297\n",
      "training 7 epoch, loss is 13.602410519976468\n",
      "training 8 epoch, loss is 17.751680348842275\n",
      "training 9 epoch, loss is 17.55090466351732\n",
      "training 0 epoch, loss is 125.9453095951137\n",
      "training 1 epoch, loss is 126.20068275893124\n",
      "training 2 epoch, loss is 126.20068275893124\n",
      "training 3 epoch, loss is 126.20068275893124\n",
      "training 4 epoch, loss is 126.20068275893124\n",
      "training 5 epoch, loss is 126.20068275893124\n",
      "training 6 epoch, loss is 126.20068275893124\n",
      "training 7 epoch, loss is 126.20068275893124\n",
      "training 8 epoch, loss is 126.20068275893124\n",
      "training 9 epoch, loss is 126.20068275893124\n",
      "training 0 epoch, loss is 32.99498280047132\n",
      "training 1 epoch, loss is 6.480331239320935\n",
      "training 2 epoch, loss is 8.54942359199377\n",
      "training 3 epoch, loss is 11.7137053767081\n",
      "training 4 epoch, loss is 11.659976157782605\n",
      "training 5 epoch, loss is 11.78137412660642\n",
      "training 6 epoch, loss is 11.170594845475623\n",
      "training 7 epoch, loss is 10.78701071007846\n",
      "training 8 epoch, loss is 10.817515440848974\n",
      "training 9 epoch, loss is 11.150523035159717\n",
      "training 0 epoch, loss is 30.382163549603185\n",
      "training 1 epoch, loss is 41.974324199942565\n",
      "training 2 epoch, loss is 40.0421142650306\n",
      "training 3 epoch, loss is 39.67623996522062\n",
      "training 4 epoch, loss is 39.53042462857836\n",
      "training 5 epoch, loss is 39.3276961753131\n",
      "training 6 epoch, loss is 39.152886626270515\n",
      "training 7 epoch, loss is 39.152885512779584\n",
      "training 8 epoch, loss is 39.152885467434\n",
      "training 9 epoch, loss is 39.152885467434\n",
      "training 0 epoch, loss is 6.101512932294722\n",
      "training 1 epoch, loss is 7.060003139742859\n",
      "training 2 epoch, loss is 7.571865606405406\n",
      "training 3 epoch, loss is 7.558352274313724\n",
      "training 4 epoch, loss is 7.558351544433593\n",
      "training 5 epoch, loss is 7.563968422109978\n",
      "training 6 epoch, loss is 7.567334959590683\n",
      "training 7 epoch, loss is 7.567334958705027\n",
      "training 8 epoch, loss is 7.567334577264084\n",
      "training 9 epoch, loss is 7.5673342289798855\n",
      "training 0 epoch, loss is 28.81704127324264\n",
      "training 1 epoch, loss is 34.21953195603205\n",
      "training 2 epoch, loss is 30.350857022652285\n",
      "training 3 epoch, loss is 31.524117812027917\n",
      "training 4 epoch, loss is 26.372705727749302\n",
      "training 5 epoch, loss is 22.93479491060583\n",
      "training 6 epoch, loss is 21.367950160730665\n",
      "training 7 epoch, loss is 33.202866346329365\n",
      "training 8 epoch, loss is 31.00580179198294\n",
      "training 9 epoch, loss is 30.394164424909153\n",
      "training 0 epoch, loss is 8.394225269732914\n",
      "training 1 epoch, loss is 8.378854579715064\n",
      "training 2 epoch, loss is 5.298218476798839\n",
      "training 3 epoch, loss is 5.501883013776395\n",
      "training 4 epoch, loss is 5.776613061435145\n",
      "training 5 epoch, loss is 5.851713832744151\n",
      "training 6 epoch, loss is 5.789797850638539\n",
      "training 7 epoch, loss is 5.375204238830582\n",
      "training 8 epoch, loss is 5.737052756441807\n",
      "training 9 epoch, loss is 5.6783819914041\n",
      "training 0 epoch, loss is 28.004135943772354\n",
      "training 1 epoch, loss is 30.001997326453733\n",
      "training 2 epoch, loss is 32.67066893297682\n",
      "training 3 epoch, loss is 24.08639911701488\n",
      "training 4 epoch, loss is 21.801776532758062\n",
      "training 5 epoch, loss is 20.828176848435614\n",
      "training 6 epoch, loss is 19.999197953311555\n",
      "training 7 epoch, loss is 19.454078716079454\n",
      "training 8 epoch, loss is 19.201614306637193\n",
      "training 9 epoch, loss is 19.425950445157394\n",
      "training 0 epoch, loss is 5.398278660541649\n",
      "training 1 epoch, loss is 8.311216760990527\n",
      "training 2 epoch, loss is 6.00064137496209\n",
      "training 3 epoch, loss is 4.175346869455778\n",
      "training 4 epoch, loss is 4.002670880539746\n",
      "training 5 epoch, loss is 4.369010119436402\n",
      "training 6 epoch, loss is 4.375450318132027\n",
      "training 7 epoch, loss is 4.37807592310741\n",
      "training 8 epoch, loss is 4.593089814974929\n",
      "training 9 epoch, loss is 4.455371341272879\n",
      "training 0 epoch, loss is 27.639586632877855\n",
      "training 1 epoch, loss is 28.29250542501137\n",
      "training 2 epoch, loss is 27.48959431052208\n",
      "training 3 epoch, loss is 25.745544643448085\n",
      "training 4 epoch, loss is 21.385828745648134\n",
      "training 5 epoch, loss is 19.09450224145849\n",
      "training 6 epoch, loss is 18.6418631987906\n",
      "training 7 epoch, loss is 17.94343897527863\n",
      "training 8 epoch, loss is 18.199788566269753\n",
      "training 9 epoch, loss is 17.563137364957914\n",
      "training 0 epoch, loss is 8.736279977358413\n",
      "training 1 epoch, loss is 9.426874880311397\n",
      "training 2 epoch, loss is 5.753262251659741\n",
      "training 3 epoch, loss is 4.778292403774729\n",
      "training 4 epoch, loss is 4.637475691481496\n",
      "training 5 epoch, loss is 4.827216425905065\n",
      "training 6 epoch, loss is 4.463503228362307\n",
      "training 7 epoch, loss is 4.381041424708214\n",
      "training 8 epoch, loss is 4.320895779734196\n",
      "training 9 epoch, loss is 4.424315174317502\n",
      "training 0 epoch, loss is 28.292879374827884\n",
      "training 1 epoch, loss is 42.0101500023793\n",
      "training 2 epoch, loss is 33.690757923924195\n",
      "training 3 epoch, loss is 31.186603076169252\n",
      "training 4 epoch, loss is 31.02811493539704\n",
      "training 5 epoch, loss is 31.507480983925433\n",
      "training 6 epoch, loss is 29.80401701874267\n",
      "training 7 epoch, loss is 31.85844385336094\n",
      "training 8 epoch, loss is 30.995125473099158\n",
      "training 9 epoch, loss is 25.888383559982277\n",
      "training 0 epoch, loss is 7.956846817309399\n",
      "training 1 epoch, loss is 10.552088163925935\n",
      "training 2 epoch, loss is 9.089053689935321\n",
      "training 3 epoch, loss is 7.419164674840923\n",
      "training 4 epoch, loss is 7.913431868632427\n",
      "training 5 epoch, loss is 6.390151586831944\n",
      "training 6 epoch, loss is 4.969857309982292\n",
      "training 7 epoch, loss is 4.804773116348668\n",
      "training 8 epoch, loss is 4.327523875942822\n",
      "training 9 epoch, loss is 4.398022641531603\n",
      "training 0 epoch, loss is 24.586606145126176\n",
      "training 1 epoch, loss is 35.1950761733759\n",
      "training 2 epoch, loss is 36.23894683450726\n",
      "training 3 epoch, loss is 35.33214092435929\n",
      "training 4 epoch, loss is 25.943009555472464\n",
      "training 5 epoch, loss is 30.374364425989217\n",
      "training 6 epoch, loss is 30.51965639552306\n",
      "training 7 epoch, loss is 30.611430987671504\n",
      "training 8 epoch, loss is 28.564690256331016\n",
      "training 9 epoch, loss is 31.04272516230978\n",
      "training 0 epoch, loss is 6.5301782603013585\n",
      "training 1 epoch, loss is 7.4724267584103155\n",
      "training 2 epoch, loss is 6.117265670507792\n",
      "training 3 epoch, loss is 5.213743957627332\n",
      "training 4 epoch, loss is 4.520627357916856\n",
      "training 5 epoch, loss is 4.555918851497\n",
      "training 6 epoch, loss is 4.271095159388968\n",
      "training 7 epoch, loss is 4.211568666229616\n",
      "training 8 epoch, loss is 4.026742380919371\n",
      "training 9 epoch, loss is 3.958164096851377\n",
      "training 0 epoch, loss is 26.5606046428003\n",
      "training 1 epoch, loss is 41.41431551389064\n",
      "training 2 epoch, loss is 35.98053762542211\n",
      "training 3 epoch, loss is 32.77139948940772\n",
      "training 4 epoch, loss is 35.64732741721483\n",
      "training 5 epoch, loss is 44.1776882517497\n",
      "training 6 epoch, loss is 43.61597141685401\n",
      "training 7 epoch, loss is 40.17121792140276\n",
      "training 8 epoch, loss is 37.413890250314594\n",
      "training 9 epoch, loss is 37.40419223637096\n",
      "training 0 epoch, loss is 7.218706305650027\n",
      "training 1 epoch, loss is 7.925985466003507\n",
      "training 2 epoch, loss is 5.093537754824148\n",
      "training 3 epoch, loss is 6.676474138563748\n",
      "training 4 epoch, loss is 7.464830354209823\n",
      "training 5 epoch, loss is 8.003825443434787\n",
      "training 6 epoch, loss is 8.27648660512945\n",
      "training 7 epoch, loss is 8.180787383426722\n",
      "training 8 epoch, loss is 7.554746470239997\n",
      "training 9 epoch, loss is 7.895770865705318\n",
      "training 0 epoch, loss is 28.81964196140936\n",
      "training 1 epoch, loss is 38.63578522161136\n",
      "training 2 epoch, loss is 39.26595207133526\n",
      "training 3 epoch, loss is 37.59485476806716\n",
      "training 4 epoch, loss is 36.93157136970882\n",
      "training 5 epoch, loss is 36.37792912912652\n",
      "training 6 epoch, loss is 35.28981892008456\n",
      "training 7 epoch, loss is 34.34982857249611\n",
      "training 8 epoch, loss is 32.447382543490264\n",
      "training 9 epoch, loss is 31.874042781241215\n",
      "training 0 epoch, loss is 8.711994158761792\n",
      "training 1 epoch, loss is 11.172485881172761\n",
      "training 2 epoch, loss is 5.261135237523595\n",
      "training 3 epoch, loss is 5.136789896390027\n",
      "training 4 epoch, loss is 5.362227335494863\n",
      "training 5 epoch, loss is 5.61723785786627\n",
      "training 6 epoch, loss is 5.573098581886742\n",
      "training 7 epoch, loss is 5.110983080830348\n",
      "training 8 epoch, loss is 6.212471484386249\n",
      "training 9 epoch, loss is 5.978461308344005\n",
      "training 0 epoch, loss is 26.65985084416962\n",
      "training 1 epoch, loss is 37.53494827493604\n",
      "training 2 epoch, loss is 44.09874201914708\n",
      "training 3 epoch, loss is 41.923488632597326\n",
      "training 4 epoch, loss is 31.827445671604433\n",
      "training 5 epoch, loss is 37.22445394840496\n",
      "training 6 epoch, loss is 32.366746408136954\n",
      "training 7 epoch, loss is 31.318768869670325\n",
      "training 8 epoch, loss is 31.295342839778268\n",
      "training 9 epoch, loss is 29.754594252565198\n",
      "training 0 epoch, loss is 8.0558901512428\n",
      "training 1 epoch, loss is 11.131952814458915\n",
      "training 2 epoch, loss is 7.005869720192048\n",
      "training 3 epoch, loss is 10.307632056666874\n",
      "training 4 epoch, loss is 8.566530718358807\n",
      "training 5 epoch, loss is 8.29760445889091\n",
      "training 6 epoch, loss is 8.029406138306918\n",
      "training 7 epoch, loss is 7.967609113826795\n",
      "training 8 epoch, loss is 7.949701753559828\n",
      "training 9 epoch, loss is 7.868939895952262\n",
      "training 0 epoch, loss is 28.5865929076332\n",
      "training 1 epoch, loss is 38.2899236200206\n",
      "training 2 epoch, loss is 32.04869732183324\n",
      "training 3 epoch, loss is 31.64813560667484\n",
      "training 4 epoch, loss is 44.93929619386306\n",
      "training 5 epoch, loss is 31.08909413130064\n",
      "training 6 epoch, loss is 27.648528693956862\n",
      "training 7 epoch, loss is 30.849885838996144\n",
      "training 8 epoch, loss is 31.515348671319224\n",
      "training 9 epoch, loss is 30.807762923332632\n",
      "training 0 epoch, loss is 7.328808171514322\n",
      "training 1 epoch, loss is 5.450863722590205\n",
      "training 2 epoch, loss is 7.940348160538812\n",
      "training 3 epoch, loss is 11.201398393387965\n",
      "training 4 epoch, loss is 8.220628129235449\n",
      "training 5 epoch, loss is 8.09551119702683\n",
      "training 6 epoch, loss is 6.6785430700272235\n",
      "training 7 epoch, loss is 6.247003838198829\n",
      "training 8 epoch, loss is 5.988615404029629\n",
      "training 9 epoch, loss is 5.852393379926858\n",
      "training 0 epoch, loss is 23.4577761034893\n",
      "training 1 epoch, loss is 38.53780644559453\n",
      "training 2 epoch, loss is 33.90942066792918\n",
      "training 3 epoch, loss is 36.69304506511646\n",
      "training 4 epoch, loss is 34.829354919913264\n",
      "training 5 epoch, loss is 33.500247947672705\n",
      "training 6 epoch, loss is 32.467122179597\n",
      "training 7 epoch, loss is 31.994014294679864\n",
      "training 8 epoch, loss is 33.769773546329944\n",
      "training 9 epoch, loss is 38.832317822212104\n",
      "training 0 epoch, loss is 6.38482524525341\n",
      "training 1 epoch, loss is 10.857268725508044\n",
      "training 2 epoch, loss is 10.128316661635917\n",
      "training 3 epoch, loss is 7.656217002063901\n",
      "training 4 epoch, loss is 7.700170794872219\n",
      "training 5 epoch, loss is 7.674799685463177\n",
      "training 6 epoch, loss is 7.58488606436051\n",
      "training 7 epoch, loss is 7.441533197458137\n",
      "training 8 epoch, loss is 7.408651099935484\n",
      "training 9 epoch, loss is 7.473500233220948\n",
      "training 0 epoch, loss is 28.991274033305555\n",
      "training 1 epoch, loss is 31.693431470841613\n",
      "training 2 epoch, loss is 31.694915704539692\n",
      "training 3 epoch, loss is 31.59368427133171\n",
      "training 4 epoch, loss is 33.25085213853362\n",
      "training 5 epoch, loss is 45.33826940745469\n",
      "training 6 epoch, loss is 42.099934197984396\n",
      "training 7 epoch, loss is 42.158793741831616\n",
      "training 8 epoch, loss is 34.02038242281014\n",
      "training 9 epoch, loss is 34.133048184790844\n",
      "training 0 epoch, loss is 5.8185189757772475\n",
      "training 1 epoch, loss is 7.853554096780385\n",
      "training 2 epoch, loss is 8.656636423986814\n",
      "training 3 epoch, loss is 9.051675406536114\n",
      "training 4 epoch, loss is 9.24003543751176\n",
      "training 5 epoch, loss is 9.257599332654511\n",
      "training 6 epoch, loss is 9.184346224725777\n",
      "training 7 epoch, loss is 9.994552195072174\n",
      "training 8 epoch, loss is 9.581683693227056\n",
      "training 9 epoch, loss is 9.491982200710815\n",
      "training 0 epoch, loss is 26.508972812482718\n",
      "training 1 epoch, loss is 38.42829890241418\n",
      "training 2 epoch, loss is 37.45208438252269\n",
      "training 3 epoch, loss is 37.17843152779704\n",
      "training 4 epoch, loss is 29.735158361040643\n",
      "training 5 epoch, loss is 45.324689042027956\n",
      "training 6 epoch, loss is 30.881953998713755\n",
      "training 7 epoch, loss is 30.056794678341227\n",
      "training 8 epoch, loss is 32.860500311426385\n",
      "training 9 epoch, loss is 33.1553155016846\n",
      "training 0 epoch, loss is 5.627765442904799\n",
      "training 1 epoch, loss is 6.27285362255343\n",
      "training 2 epoch, loss is 8.11951540051828\n",
      "training 3 epoch, loss is 6.742499829379269\n",
      "training 4 epoch, loss is 6.193870631205734\n",
      "training 5 epoch, loss is 6.473891392815166\n",
      "training 6 epoch, loss is 6.464852658945951\n",
      "training 7 epoch, loss is 6.336199858611972\n",
      "training 8 epoch, loss is 6.610761738926128\n",
      "training 9 epoch, loss is 4.913361385984991\n",
      "training 0 epoch, loss is 40.16595107173265\n",
      "training 1 epoch, loss is 45.48426146208356\n",
      "training 2 epoch, loss is 41.77595923491685\n",
      "training 3 epoch, loss is 36.14756857601577\n",
      "training 4 epoch, loss is 39.25940882580394\n",
      "training 5 epoch, loss is 49.02314885416087\n",
      "training 6 epoch, loss is 53.99676053520038\n",
      "training 7 epoch, loss is 58.30735067348894\n",
      "training 8 epoch, loss is 58.94273708785058\n",
      "training 9 epoch, loss is 59.58881479147988\n",
      "training 0 epoch, loss is 16.975507845201196\n",
      "training 1 epoch, loss is 19.36124864669423\n",
      "training 2 epoch, loss is 65.78074642458726\n",
      "training 3 epoch, loss is 65.17863465911911\n",
      "training 4 epoch, loss is 60.70274506407605\n",
      "training 5 epoch, loss is 60.82101749595619\n",
      "training 6 epoch, loss is 60.9298888285719\n",
      "training 7 epoch, loss is 63.48986724151702\n",
      "training 8 epoch, loss is 62.22972202867004\n",
      "training 9 epoch, loss is 60.24629366362484\n",
      "training 0 epoch, loss is 28.862216262970374\n",
      "training 1 epoch, loss is 35.00106881473777\n",
      "training 2 epoch, loss is 31.82124661041827\n",
      "training 3 epoch, loss is 41.779081118323504\n",
      "training 4 epoch, loss is 37.479207506961565\n",
      "training 5 epoch, loss is 34.25539119508482\n",
      "training 6 epoch, loss is 40.261565929814864\n",
      "training 7 epoch, loss is 40.5070935096647\n",
      "training 8 epoch, loss is 42.10371161382701\n",
      "training 9 epoch, loss is 45.78227608841056\n",
      "training 0 epoch, loss is 11.537276750281231\n",
      "training 1 epoch, loss is 19.367223356505\n",
      "training 2 epoch, loss is 11.113811786156646\n",
      "training 3 epoch, loss is 8.80384355898427\n",
      "training 4 epoch, loss is 7.826623006991888\n",
      "training 5 epoch, loss is 7.566806723084577\n",
      "training 6 epoch, loss is 7.4216833645229165\n",
      "training 7 epoch, loss is 7.207464562303412\n",
      "training 8 epoch, loss is 7.1086298374774195\n",
      "training 9 epoch, loss is 7.06192842486262\n",
      "training 0 epoch, loss is 27.80598406506929\n",
      "training 1 epoch, loss is 37.54379155046862\n",
      "training 2 epoch, loss is 44.71332338332953\n",
      "training 3 epoch, loss is 47.53382957463625\n",
      "training 4 epoch, loss is 45.910814503843426\n",
      "training 5 epoch, loss is 53.79784838218541\n",
      "training 6 epoch, loss is 56.66835947193272\n",
      "training 7 epoch, loss is 40.438671001948926\n",
      "training 8 epoch, loss is 41.58692148199598\n",
      "training 9 epoch, loss is 40.60066631400974\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/model_selection/_validation.py:824: UserWarning: Scoring failed. The score on this train-test partition for these parameters will be set to nan. Details: \n",
      "Traceback (most recent call last):\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/model_selection/_validation.py\", line 813, in _score\n",
      "    scores = scorer(estimator, X_test, y_test)\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/metrics/_scorer.py\", line 527, in __call__\n",
      "    return estimator.score(*args, **kwargs)\n",
      "  File \"/tmp/ipykernel_2266868/2548952913.py\", line 64, in score\n",
      "    y_hat = self.fc(self.gru(x)[0][:, -1:, :].squeeze(1))\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n",
      "    return self._call_impl(*args, **kwargs)\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n",
      "    return forward_call(*args, **kwargs)\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/torch/nn/modules/rnn.py\", line 1102, in forward\n",
      "    result = _VF.gru(input, hx, self._flat_weights, self.bias, self.num_layers,\n",
      "torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 114.00 MiB. GPU 0 has a total capacty of 23.68 GiB of which 16.94 MiB is free. Including non-PyTorch memory, this process has 23.65 GiB memory in use. Of the allocated memory 21.07 GiB is allocated by PyTorch, and 2.25 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation.  See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n",
      "\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "training 0 epoch, loss is 16.75664266368004\n",
      "training 1 epoch, loss is 52.955193108196426\n",
      "training 2 epoch, loss is 47.57979163465585\n",
      "training 3 epoch, loss is 47.47228533966959\n",
      "training 4 epoch, loss is 47.486719318950215\n",
      "training 5 epoch, loss is 47.203122270920865\n",
      "training 6 epoch, loss is 48.857043958205495\n",
      "training 7 epoch, loss is 51.19503007303006\n",
      "training 8 epoch, loss is 50.78017503536066\n",
      "training 9 epoch, loss is 50.09117979253435\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/model_selection/_validation.py:824: UserWarning: Scoring failed. The score on this train-test partition for these parameters will be set to nan. Details: \n",
      "Traceback (most recent call last):\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/model_selection/_validation.py\", line 813, in _score\n",
      "    scores = scorer(estimator, X_test, y_test)\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/metrics/_scorer.py\", line 527, in __call__\n",
      "    return estimator.score(*args, **kwargs)\n",
      "  File \"/tmp/ipykernel_2266868/2548952913.py\", line 64, in score\n",
      "    y_hat = self.fc(self.gru(x)[0][:, -1:, :].squeeze(1))\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n",
      "    return self._call_impl(*args, **kwargs)\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n",
      "    return forward_call(*args, **kwargs)\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/torch/nn/modules/rnn.py\", line 1102, in forward\n",
      "    result = _VF.gru(input, hx, self._flat_weights, self.bias, self.num_layers,\n",
      "torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 114.00 MiB. GPU 0 has a total capacty of 23.68 GiB of which 18.94 MiB is free. Including non-PyTorch memory, this process has 23.65 GiB memory in use. Of the allocated memory 21.05 GiB is allocated by PyTorch, and 2.27 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation.  See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n",
      "\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "training 0 epoch, loss is 20.37492224212077\n",
      "training 1 epoch, loss is 42.73420308587898\n",
      "training 2 epoch, loss is 52.25611118722104\n",
      "training 3 epoch, loss is 47.1339475157578\n",
      "training 4 epoch, loss is 47.690738461657055\n",
      "training 5 epoch, loss is 49.01155981309657\n",
      "training 6 epoch, loss is 49.534231818935666\n",
      "training 7 epoch, loss is 52.53993277024771\n",
      "training 8 epoch, loss is 52.833731112561644\n",
      "training 9 epoch, loss is 50.41819934318891\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/model_selection/_validation.py:824: UserWarning: Scoring failed. The score on this train-test partition for these parameters will be set to nan. Details: \n",
      "Traceback (most recent call last):\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/model_selection/_validation.py\", line 813, in _score\n",
      "    scores = scorer(estimator, X_test, y_test)\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/metrics/_scorer.py\", line 527, in __call__\n",
      "    return estimator.score(*args, **kwargs)\n",
      "  File \"/tmp/ipykernel_2266868/2548952913.py\", line 64, in score\n",
      "    y_hat = self.fc(self.gru(x)[0][:, -1:, :].squeeze(1))\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n",
      "    return self._call_impl(*args, **kwargs)\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n",
      "    return forward_call(*args, **kwargs)\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/torch/nn/modules/rnn.py\", line 1102, in forward\n",
      "    result = _VF.gru(input, hx, self._flat_weights, self.bias, self.num_layers,\n",
      "torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 126.00 MiB. GPU 0 has a total capacty of 23.68 GiB of which 80.94 MiB is free. Including non-PyTorch memory, this process has 23.59 GiB memory in use. Of the allocated memory 21.13 GiB is allocated by PyTorch, and 2.13 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation.  See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n",
      "\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "training 0 epoch, loss is 12.154194646425003\n",
      "training 1 epoch, loss is 35.97467530358245\n",
      "training 2 epoch, loss is 50.253897835560046\n",
      "training 3 epoch, loss is 48.47284263310426\n",
      "training 4 epoch, loss is 48.43199655488908\n",
      "training 5 epoch, loss is 47.07775771600176\n",
      "training 6 epoch, loss is 46.33859790381138\n",
      "training 7 epoch, loss is 47.075593519706786\n",
      "training 8 epoch, loss is 44.57797982894648\n",
      "training 9 epoch, loss is 42.63308858481771\n",
      "training 0 epoch, loss is 29.252041264837857\n",
      "training 1 epoch, loss is 52.364606212028676\n",
      "training 2 epoch, loss is 66.41600061327397\n",
      "training 3 epoch, loss is 63.696875432677544\n",
      "training 4 epoch, loss is 63.67411181383741\n",
      "training 5 epoch, loss is 63.67386368261954\n",
      "training 6 epoch, loss is 64.55789303865272\n",
      "training 7 epoch, loss is 67.71400765893367\n",
      "training 8 epoch, loss is 70.23667695145579\n",
      "training 9 epoch, loss is 69.40756368413788\n",
      "training 0 epoch, loss is 14.752495157070879\n",
      "training 1 epoch, loss is 62.17336085712874\n",
      "training 2 epoch, loss is 61.40449204572231\n",
      "training 3 epoch, loss is 61.13891212310565\n",
      "training 4 epoch, loss is 60.5407503203044\n",
      "training 5 epoch, loss is 53.860692948191385\n",
      "training 6 epoch, loss is 18.46898906454665\n",
      "training 7 epoch, loss is 11.184751320044436\n",
      "training 8 epoch, loss is 10.904280333373508\n",
      "training 9 epoch, loss is 11.247560187809457\n",
      "training 0 epoch, loss is 116.14674212905172\n",
      "training 1 epoch, loss is 44.00452957096722\n",
      "training 2 epoch, loss is 42.68981288842171\n",
      "training 3 epoch, loss is 47.62182170926464\n",
      "training 4 epoch, loss is 50.179591317890484\n",
      "training 5 epoch, loss is 51.21514932119263\n",
      "training 6 epoch, loss is 52.57751682254787\n",
      "training 7 epoch, loss is 52.92081412894082\n",
      "training 8 epoch, loss is 51.81910465558222\n",
      "training 9 epoch, loss is 54.33752623629322\n",
      "training 0 epoch, loss is 14.514742118701648\n",
      "training 1 epoch, loss is 38.108949351381476\n",
      "training 2 epoch, loss is 42.2516481883448\n",
      "training 3 epoch, loss is 41.47994633916578\n",
      "training 4 epoch, loss is 44.189622151391795\n",
      "training 5 epoch, loss is 44.29476012214355\n",
      "training 6 epoch, loss is 43.78614472776917\n",
      "training 7 epoch, loss is 43.49522947416107\n",
      "training 8 epoch, loss is 42.51891834003635\n",
      "training 9 epoch, loss is 41.951172426475615\n",
      "training 0 epoch, loss is 187.84102868750824\n",
      "training 1 epoch, loss is 115.03529489500232\n",
      "training 2 epoch, loss is 110.38879427004285\n",
      "training 3 epoch, loss is 108.10702608459427\n",
      "training 4 epoch, loss is 108.10700203686157\n",
      "training 5 epoch, loss is 108.1069979575692\n",
      "training 6 epoch, loss is 108.10699742201174\n",
      "training 7 epoch, loss is 108.10699740786227\n",
      "training 8 epoch, loss is 108.10699740786227\n",
      "training 9 epoch, loss is 108.10699740786227\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/model_selection/_validation.py:824: UserWarning: Scoring failed. The score on this train-test partition for these parameters will be set to nan. Details: \n",
      "Traceback (most recent call last):\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/model_selection/_validation.py\", line 813, in _score\n",
      "    scores = scorer(estimator, X_test, y_test)\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/metrics/_scorer.py\", line 527, in __call__\n",
      "    return estimator.score(*args, **kwargs)\n",
      "  File \"/tmp/ipykernel_2266868/2548952913.py\", line 64, in score\n",
      "    y_hat = self.fc(self.gru(x)[0][:, -1:, :].squeeze(1))\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n",
      "    return self._call_impl(*args, **kwargs)\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n",
      "    return forward_call(*args, **kwargs)\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/torch/nn/modules/rnn.py\", line 1102, in forward\n",
      "    result = _VF.gru(input, hx, self._flat_weights, self.bias, self.num_layers,\n",
      "torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 128.00 MiB. GPU 0 has a total capacty of 23.68 GiB of which 114.94 MiB is free. Including non-PyTorch memory, this process has 23.55 GiB memory in use. Of the allocated memory 23.10 GiB is allocated by PyTorch, and 127.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation.  See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n",
      "\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "training 0 epoch, loss is 60.107937331026754\n",
      "training 1 epoch, loss is 30.92481616077508\n",
      "training 2 epoch, loss is 19.889117012679222\n",
      "training 3 epoch, loss is 13.93094612534216\n",
      "training 4 epoch, loss is 12.685301787919125\n",
      "training 5 epoch, loss is 10.587866255411054\n",
      "training 6 epoch, loss is 9.776011150009731\n",
      "training 7 epoch, loss is 9.39277836304567\n",
      "training 8 epoch, loss is 9.193916492650523\n",
      "training 9 epoch, loss is 9.183955994078154\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/model_selection/_validation.py:824: UserWarning: Scoring failed. The score on this train-test partition for these parameters will be set to nan. Details: \n",
      "Traceback (most recent call last):\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/model_selection/_validation.py\", line 813, in _score\n",
      "    scores = scorer(estimator, X_test, y_test)\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/metrics/_scorer.py\", line 527, in __call__\n",
      "    return estimator.score(*args, **kwargs)\n",
      "  File \"/tmp/ipykernel_2266868/2548952913.py\", line 64, in score\n",
      "    y_hat = self.fc(self.gru(x)[0][:, -1:, :].squeeze(1))\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n",
      "    return self._call_impl(*args, **kwargs)\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n",
      "    return forward_call(*args, **kwargs)\n",
      "  File \"/home/xiangyu/miniconda3/envs/top1hik/lib/python3.9/site-packages/torch/nn/modules/rnn.py\", line 1102, in forward\n",
      "    result = _VF.gru(input, hx, self._flat_weights, self.bias, self.num_layers,\n",
      "torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 128.00 MiB. GPU 0 has a total capacty of 23.68 GiB of which 114.94 MiB is free. Including non-PyTorch memory, this process has 23.55 GiB memory in use. Of the allocated memory 23.10 GiB is allocated by PyTorch, and 128.41 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation.  See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n",
      "\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "training 0 epoch, loss is 41.42911683295315\n",
      "training 1 epoch, loss is 35.88155953322619\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[20], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mgridsearch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfit\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain_data\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtarget_data\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/base.py:1151\u001b[0m, in \u001b[0;36m_fit_context.<locals>.decorator.<locals>.wrapper\u001b[0;34m(estimator, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1144\u001b[0m     estimator\u001b[38;5;241m.\u001b[39m_validate_params()\n\u001b[1;32m   1146\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m config_context(\n\u001b[1;32m   1147\u001b[0m     skip_parameter_validation\u001b[38;5;241m=\u001b[39m(\n\u001b[1;32m   1148\u001b[0m         prefer_skip_nested_validation \u001b[38;5;129;01mor\u001b[39;00m global_skip_validation\n\u001b[1;32m   1149\u001b[0m     )\n\u001b[1;32m   1150\u001b[0m ):\n\u001b[0;32m-> 1151\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfit_method\u001b[49m\u001b[43m(\u001b[49m\u001b[43mestimator\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/model_selection/_search.py:898\u001b[0m, in \u001b[0;36mBaseSearchCV.fit\u001b[0;34m(self, X, y, groups, **fit_params)\u001b[0m\n\u001b[1;32m    892\u001b[0m     results \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_format_results(\n\u001b[1;32m    893\u001b[0m         all_candidate_params, n_splits, all_out, all_more_results\n\u001b[1;32m    894\u001b[0m     )\n\u001b[1;32m    896\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m results\n\u001b[0;32m--> 898\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_run_search\u001b[49m\u001b[43m(\u001b[49m\u001b[43mevaluate_candidates\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    900\u001b[0m \u001b[38;5;66;03m# multimetric is determined here because in the case of a callable\u001b[39;00m\n\u001b[1;32m    901\u001b[0m \u001b[38;5;66;03m# self.scoring the return type is only known after calling\u001b[39;00m\n\u001b[1;32m    902\u001b[0m first_test_score \u001b[38;5;241m=\u001b[39m all_out[\u001b[38;5;241m0\u001b[39m][\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtest_scores\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n",
      "File \u001b[0;32m~/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/model_selection/_search.py:1419\u001b[0m, in \u001b[0;36mGridSearchCV._run_search\u001b[0;34m(self, evaluate_candidates)\u001b[0m\n\u001b[1;32m   1417\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_run_search\u001b[39m(\u001b[38;5;28mself\u001b[39m, evaluate_candidates):\n\u001b[1;32m   1418\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124;03m\"\"\"Search all candidates in param_grid\"\"\"\u001b[39;00m\n\u001b[0;32m-> 1419\u001b[0m     \u001b[43mevaluate_candidates\u001b[49m\u001b[43m(\u001b[49m\u001b[43mParameterGrid\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparam_grid\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/model_selection/_search.py:845\u001b[0m, in \u001b[0;36mBaseSearchCV.fit.<locals>.evaluate_candidates\u001b[0;34m(candidate_params, cv, more_results)\u001b[0m\n\u001b[1;32m    837\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mverbose \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[1;32m    838\u001b[0m     \u001b[38;5;28mprint\u001b[39m(\n\u001b[1;32m    839\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFitting \u001b[39m\u001b[38;5;132;01m{0}\u001b[39;00m\u001b[38;5;124m folds for each of \u001b[39m\u001b[38;5;132;01m{1}\u001b[39;00m\u001b[38;5;124m candidates,\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m    840\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m totalling \u001b[39m\u001b[38;5;132;01m{2}\u001b[39;00m\u001b[38;5;124m fits\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mformat(\n\u001b[1;32m    841\u001b[0m             n_splits, n_candidates, n_candidates \u001b[38;5;241m*\u001b[39m n_splits\n\u001b[1;32m    842\u001b[0m         )\n\u001b[1;32m    843\u001b[0m     )\n\u001b[0;32m--> 845\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[43mparallel\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    846\u001b[0m \u001b[43m    \u001b[49m\u001b[43mdelayed\u001b[49m\u001b[43m(\u001b[49m\u001b[43m_fit_and_score\u001b[49m\u001b[43m)\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    847\u001b[0m \u001b[43m        \u001b[49m\u001b[43mclone\u001b[49m\u001b[43m(\u001b[49m\u001b[43mbase_estimator\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    848\u001b[0m \u001b[43m        \u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    849\u001b[0m \u001b[43m        \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    850\u001b[0m \u001b[43m        
\u001b[49m\u001b[43mtrain\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtrain\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    851\u001b[0m \u001b[43m        \u001b[49m\u001b[43mtest\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtest\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    852\u001b[0m \u001b[43m        \u001b[49m\u001b[43mparameters\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mparameters\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    853\u001b[0m \u001b[43m        \u001b[49m\u001b[43msplit_progress\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43msplit_idx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mn_splits\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    854\u001b[0m \u001b[43m        \u001b[49m\u001b[43mcandidate_progress\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mcand_idx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mn_candidates\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    855\u001b[0m \u001b[43m        \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mfit_and_score_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    856\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    857\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43m(\u001b[49m\u001b[43mcand_idx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mparameters\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m(\u001b[49m\u001b[43msplit_idx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtest\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mproduct\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    858\u001b[0m 
\u001b[43m        \u001b[49m\u001b[38;5;28;43menumerate\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mcandidate_params\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43menumerate\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mcv\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msplit\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgroups\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    859\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    860\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    862\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(out) \u001b[38;5;241m<\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[1;32m    863\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m    864\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mNo fits were performed. \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m    865\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mWas the CV iterator empty? \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m    866\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mWere there no candidates?\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m    867\u001b[0m     )\n",
      "File \u001b[0;32m~/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/utils/parallel.py:65\u001b[0m, in \u001b[0;36mParallel.__call__\u001b[0;34m(self, iterable)\u001b[0m\n\u001b[1;32m     60\u001b[0m config \u001b[38;5;241m=\u001b[39m get_config()\n\u001b[1;32m     61\u001b[0m iterable_with_config \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m     62\u001b[0m     (_with_config(delayed_func, config), args, kwargs)\n\u001b[1;32m     63\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m delayed_func, args, kwargs \u001b[38;5;129;01min\u001b[39;00m iterable\n\u001b[1;32m     64\u001b[0m )\n\u001b[0;32m---> 65\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;21;43m__call__\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43miterable_with_config\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/miniconda3/envs/top1hik/lib/python3.9/site-packages/joblib/parallel.py:1088\u001b[0m, in \u001b[0;36mParallel.__call__\u001b[0;34m(self, iterable)\u001b[0m\n\u001b[1;32m   1085\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdispatch_one_batch(iterator):\n\u001b[1;32m   1086\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_iterating \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_original_iterator \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m-> 1088\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdispatch_one_batch\u001b[49m\u001b[43m(\u001b[49m\u001b[43miterator\u001b[49m\u001b[43m)\u001b[49m:\n\u001b[1;32m   1089\u001b[0m     \u001b[38;5;28;01mpass\u001b[39;00m\n\u001b[1;32m   1091\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m pre_dispatch \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mall\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01mor\u001b[39;00m n_jobs \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[1;32m   1092\u001b[0m     \u001b[38;5;66;03m# The iterable was consumed all at once by the above for loop.\u001b[39;00m\n\u001b[1;32m   1093\u001b[0m     \u001b[38;5;66;03m# No need to wait for async callbacks to trigger to\u001b[39;00m\n\u001b[1;32m   1094\u001b[0m     \u001b[38;5;66;03m# consumption.\u001b[39;00m\n",
      "File \u001b[0;32m~/miniconda3/envs/top1hik/lib/python3.9/site-packages/joblib/parallel.py:901\u001b[0m, in \u001b[0;36mParallel.dispatch_one_batch\u001b[0;34m(self, iterator)\u001b[0m\n\u001b[1;32m    899\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[1;32m    900\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 901\u001b[0m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_dispatch\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtasks\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    902\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m\n",
      "File \u001b[0;32m~/miniconda3/envs/top1hik/lib/python3.9/site-packages/joblib/parallel.py:819\u001b[0m, in \u001b[0;36mParallel._dispatch\u001b[0;34m(self, batch)\u001b[0m\n\u001b[1;32m    817\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_lock:\n\u001b[1;32m    818\u001b[0m     job_idx \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_jobs)\n\u001b[0;32m--> 819\u001b[0m     job \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_backend\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mapply_async\u001b[49m\u001b[43m(\u001b[49m\u001b[43mbatch\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallback\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcb\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    820\u001b[0m     \u001b[38;5;66;03m# A job can complete so quickly than its callback is\u001b[39;00m\n\u001b[1;32m    821\u001b[0m     \u001b[38;5;66;03m# called before we get here, causing self._jobs to\u001b[39;00m\n\u001b[1;32m    822\u001b[0m     \u001b[38;5;66;03m# grow. To ensure correct results ordering, .insert is\u001b[39;00m\n\u001b[1;32m    823\u001b[0m     \u001b[38;5;66;03m# used (rather than .append) in the following line\u001b[39;00m\n\u001b[1;32m    824\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_jobs\u001b[38;5;241m.\u001b[39minsert(job_idx, job)\n",
      "File \u001b[0;32m~/miniconda3/envs/top1hik/lib/python3.9/site-packages/joblib/_parallel_backends.py:208\u001b[0m, in \u001b[0;36mSequentialBackend.apply_async\u001b[0;34m(self, func, callback)\u001b[0m\n\u001b[1;32m    206\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mapply_async\u001b[39m(\u001b[38;5;28mself\u001b[39m, func, callback\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m):\n\u001b[1;32m    207\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124;03m\"\"\"Schedule a func to be run\"\"\"\u001b[39;00m\n\u001b[0;32m--> 208\u001b[0m     result \u001b[38;5;241m=\u001b[39m \u001b[43mImmediateResult\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfunc\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    209\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m callback:\n\u001b[1;32m    210\u001b[0m         callback(result)\n",
      "File \u001b[0;32m~/miniconda3/envs/top1hik/lib/python3.9/site-packages/joblib/_parallel_backends.py:597\u001b[0m, in \u001b[0;36mImmediateResult.__init__\u001b[0;34m(self, batch)\u001b[0m\n\u001b[1;32m    594\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m, batch):\n\u001b[1;32m    595\u001b[0m     \u001b[38;5;66;03m# Don't delay the application, to avoid keeping the input\u001b[39;00m\n\u001b[1;32m    596\u001b[0m     \u001b[38;5;66;03m# arguments in memory\u001b[39;00m\n\u001b[0;32m--> 597\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mresults \u001b[38;5;241m=\u001b[39m \u001b[43mbatch\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/miniconda3/envs/top1hik/lib/python3.9/site-packages/joblib/parallel.py:288\u001b[0m, in \u001b[0;36mBatchedCalls.__call__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    284\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__call__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m    285\u001b[0m     \u001b[38;5;66;03m# Set the default nested backend to self._backend but do not set the\u001b[39;00m\n\u001b[1;32m    286\u001b[0m     \u001b[38;5;66;03m# change the default number of processes to -1\u001b[39;00m\n\u001b[1;32m    287\u001b[0m     \u001b[38;5;28;01mwith\u001b[39;00m parallel_backend(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backend, n_jobs\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_n_jobs):\n\u001b[0;32m--> 288\u001b[0m         \u001b[38;5;28;01mreturn\u001b[39;00m [func(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m    289\u001b[0m                 \u001b[38;5;28;01mfor\u001b[39;00m func, args, kwargs \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mitems]\n",
      "File \u001b[0;32m~/miniconda3/envs/top1hik/lib/python3.9/site-packages/joblib/parallel.py:288\u001b[0m, in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m    284\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__call__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m    285\u001b[0m     \u001b[38;5;66;03m# Set the default nested backend to self._backend but do not set the\u001b[39;00m\n\u001b[1;32m    286\u001b[0m     \u001b[38;5;66;03m# change the default number of processes to -1\u001b[39;00m\n\u001b[1;32m    287\u001b[0m     \u001b[38;5;28;01mwith\u001b[39;00m parallel_backend(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backend, n_jobs\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_n_jobs):\n\u001b[0;32m--> 288\u001b[0m         \u001b[38;5;28;01mreturn\u001b[39;00m [\u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    289\u001b[0m                 \u001b[38;5;28;01mfor\u001b[39;00m func, args, kwargs \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mitems]\n",
      "File \u001b[0;32m~/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/utils/parallel.py:127\u001b[0m, in \u001b[0;36m_FuncWrapper.__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m    125\u001b[0m     config \u001b[38;5;241m=\u001b[39m {}\n\u001b[1;32m    126\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m config_context(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mconfig):\n\u001b[0;32m--> 127\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunction\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/miniconda3/envs/top1hik/lib/python3.9/site-packages/sklearn/model_selection/_validation.py:732\u001b[0m, in \u001b[0;36m_fit_and_score\u001b[0;34m(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score, return_parameters, return_n_test_samples, return_times, return_estimator, split_progress, candidate_progress, error_score)\u001b[0m\n\u001b[1;32m    730\u001b[0m         estimator\u001b[38;5;241m.\u001b[39mfit(X_train, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mfit_params)\n\u001b[1;32m    731\u001b[0m     \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 732\u001b[0m         \u001b[43mestimator\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfit\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mfit_params\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    734\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m:\n\u001b[1;32m    735\u001b[0m     \u001b[38;5;66;03m# Note fit time as time until error\u001b[39;00m\n\u001b[1;32m    736\u001b[0m     fit_time \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mtime() \u001b[38;5;241m-\u001b[39m start_time\n",
      "Cell \u001b[0;32mIn[7], line 25\u001b[0m, in \u001b[0;36mGRU_Estimator.fit\u001b[0;34m(self, X, y)\u001b[0m\n\u001b[1;32m     22\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcriterion \u001b[38;5;241m=\u001b[39m nn\u001b[38;5;241m.\u001b[39mMSELoss()\n\u001b[1;32m     24\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mepoch):\n\u001b[0;32m---> 25\u001b[0m     loss \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrain_epoch\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     26\u001b[0m     \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtraining \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mi\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m epoch, loss is \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mloss\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m     27\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\n",
      "Cell \u001b[0;32mIn[7], line 35\u001b[0m, in \u001b[0;36mGRU_Estimator.train_epoch\u001b[0;34m(self, X, y)\u001b[0m\n\u001b[1;32m     32\u001b[0m trainlen \u001b[38;5;241m=\u001b[39m trainlen \u001b[38;5;241m/\u001b[39m\u001b[38;5;241m/\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbatch \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbatch\n\u001b[1;32m     33\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;241m0\u001b[39m, trainlen, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbatch):\n\u001b[1;32m     34\u001b[0m     inputs \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mstack(\n\u001b[0;32m---> 35\u001b[0m         [torch\u001b[38;5;241m.\u001b[39mtensor(X[j:j \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mseqlen], dtype\u001b[38;5;241m=\u001b[39mtorch\u001b[38;5;241m.\u001b[39mfloat32)\u001b[38;5;241m.\u001b[39mto(device) \u001b[38;5;28;01mfor\u001b[39;00m j \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(i, i \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbatch)]\n\u001b[1;32m     36\u001b[0m     )\n\u001b[1;32m     37\u001b[0m     targets \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mstack(\n\u001b[1;32m     38\u001b[0m         [torch\u001b[38;5;241m.\u001b[39mtensor(y[j \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mseqlen:j \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mseqlen \u001b[38;5;241m+\u001b[39m \u001b[38;5;241m1\u001b[39m], dtype\u001b[38;5;241m=\u001b[39mtorch\u001b[38;5;241m.\u001b[39mfloat32)\u001b[38;5;241m.\u001b[39mto(device) \u001b[38;5;28;01mfor\u001b[39;00m j \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(i, i \u001b[38;5;241m+\u001b[39m 
\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbatch)]\n\u001b[1;32m     39\u001b[0m     )\n\u001b[1;32m     40\u001b[0m     total_loss \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtrain_step(inputs, targets)\n",
      "Cell \u001b[0;32mIn[7], line 35\u001b[0m, in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m     32\u001b[0m trainlen \u001b[38;5;241m=\u001b[39m trainlen \u001b[38;5;241m/\u001b[39m\u001b[38;5;241m/\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbatch \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbatch\n\u001b[1;32m     33\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;241m0\u001b[39m, trainlen, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbatch):\n\u001b[1;32m     34\u001b[0m     inputs \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mstack(\n\u001b[0;32m---> 35\u001b[0m         [\u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtensor\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX\u001b[49m\u001b[43m[\u001b[49m\u001b[43mj\u001b[49m\u001b[43m:\u001b[49m\u001b[43mj\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mseqlen\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfloat32\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mto\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mfor\u001b[39;00m j \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(i, i \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbatch)]\n\u001b[1;32m     36\u001b[0m     )\n\u001b[1;32m     37\u001b[0m     targets \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mstack(\n\u001b[1;32m     38\u001b[0m         [torch\u001b[38;5;241m.\u001b[39mtensor(y[j \u001b[38;5;241m+\u001b[39m 
\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mseqlen:j \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mseqlen \u001b[38;5;241m+\u001b[39m \u001b[38;5;241m1\u001b[39m], dtype\u001b[38;5;241m=\u001b[39mtorch\u001b[38;5;241m.\u001b[39mfloat32)\u001b[38;5;241m.\u001b[39mto(device) \u001b[38;5;28;01mfor\u001b[39;00m j \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(i, i \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbatch)]\n\u001b[1;32m     39\u001b[0m     )\n\u001b[1;32m     40\u001b[0m     total_loss \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtrain_step(inputs, targets)\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# Run the hyper-parameter grid search over the GRU estimator.\n",
    "# NOTE(review): the traceback recorded above shows this run was interrupted\n",
    "# manually (KeyboardInterrupt inside GRU_Estimator.train_epoch).\n",
    "gridsearch.fit(train_data, target_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "training 0 epoch, loss is 0.4195036465377016\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<style>#sk-container-id-2 {color: black;}#sk-container-id-2 pre{padding: 0;}#sk-container-id-2 div.sk-toggleable {background-color: white;}#sk-container-id-2 label.sk-toggleable__label {cursor: pointer;display: block;width: 100%;margin-bottom: 0;padding: 0.3em;box-sizing: border-box;text-align: center;}#sk-container-id-2 label.sk-toggleable__label-arrow:before {content: \"▸\";float: left;margin-right: 0.25em;color: #696969;}#sk-container-id-2 label.sk-toggleable__label-arrow:hover:before {color: black;}#sk-container-id-2 div.sk-estimator:hover label.sk-toggleable__label-arrow:before {color: black;}#sk-container-id-2 div.sk-toggleable__content {max-height: 0;max-width: 0;overflow: hidden;text-align: left;background-color: #f0f8ff;}#sk-container-id-2 div.sk-toggleable__content pre {margin: 0.2em;color: black;border-radius: 0.25em;background-color: #f0f8ff;}#sk-container-id-2 input.sk-toggleable__control:checked~div.sk-toggleable__content {max-height: 200px;max-width: 100%;overflow: auto;}#sk-container-id-2 input.sk-toggleable__control:checked~label.sk-toggleable__label-arrow:before {content: \"▾\";}#sk-container-id-2 div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-2 div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-2 input.sk-hidden--visually {border: 0;clip: rect(1px 1px 1px 1px);clip: rect(1px, 1px, 1px, 1px);height: 1px;margin: -1px;overflow: hidden;padding: 0;position: absolute;width: 1px;}#sk-container-id-2 div.sk-estimator {font-family: monospace;background-color: #f0f8ff;border: 1px dotted black;border-radius: 0.25em;box-sizing: border-box;margin-bottom: 0.5em;}#sk-container-id-2 div.sk-estimator:hover {background-color: #d4ebff;}#sk-container-id-2 div.sk-parallel-item::after {content: \"\";width: 100%;border-bottom: 1px solid gray;flex-grow: 1;}#sk-container-id-2 div.sk-label:hover 
label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-2 div.sk-serial::before {content: \"\";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: 0;}#sk-container-id-2 div.sk-serial {display: flex;flex-direction: column;align-items: center;background-color: white;padding-right: 0.2em;padding-left: 0.2em;position: relative;}#sk-container-id-2 div.sk-item {position: relative;z-index: 1;}#sk-container-id-2 div.sk-parallel {display: flex;align-items: stretch;justify-content: center;background-color: white;position: relative;}#sk-container-id-2 div.sk-item::before, #sk-container-id-2 div.sk-parallel-item::before {content: \"\";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: -1;}#sk-container-id-2 div.sk-parallel-item {display: flex;flex-direction: column;z-index: 1;position: relative;background-color: white;}#sk-container-id-2 div.sk-parallel-item:first-child::after {align-self: flex-end;width: 50%;}#sk-container-id-2 div.sk-parallel-item:last-child::after {align-self: flex-start;width: 50%;}#sk-container-id-2 div.sk-parallel-item:only-child::after {width: 0;}#sk-container-id-2 div.sk-dashed-wrapped {border: 1px dashed gray;margin: 0 0.4em 0.5em 0.4em;box-sizing: border-box;padding-bottom: 0.4em;background-color: white;}#sk-container-id-2 div.sk-label label {font-family: monospace;font-weight: bold;display: inline-block;line-height: 1.2em;}#sk-container-id-2 div.sk-label-container {text-align: center;}#sk-container-id-2 div.sk-container {/* jupyter's `normalize.less` sets `[hidden] { display: none; }` but bootstrap.min.css set `[hidden] { display: none !important; }` so we also need the `!important` here to be able to override the default hidden behavior on the sphinx rendered scikit-learn.org. 
See: https://github.com/scikit-learn/scikit-learn/issues/21755 */display: inline-block !important;position: relative;}#sk-container-id-2 div.sk-text-repr-fallback {display: none;}</style><div id=\"sk-container-id-2\" class=\"sk-top-container\"><div class=\"sk-text-repr-fallback\"><pre>GRU_Estimator(epoch=1, hidden_size=8, lr=0.001, num_layers=1, seqlen=60)</pre><b>In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook. <br />On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.</b></div><div class=\"sk-container\" hidden><div class=\"sk-item\"><div class=\"sk-estimator sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"sk-estimator-id-2\" type=\"checkbox\" checked><label for=\"sk-estimator-id-2\" class=\"sk-toggleable__label sk-toggleable__label-arrow\">GRU_Estimator</label><div class=\"sk-toggleable__content\"><pre>GRU_Estimator(epoch=1, hidden_size=8, lr=0.001, num_layers=1, seqlen=60)</pre></div></div></div></div></div>"
      ],
      "text/plain": [
       "GRU_Estimator(epoch=1, hidden_size=8, lr=0.001, num_layers=1, seqlen=60)"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Train the single GRU_Estimator configured earlier; fit() prints per-epoch loss\n",
    "# (stdout above shows one epoch with loss ~0.4195).\n",
    "model.fit(train_data, target_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<style>#sk-container-id-2 {color: black;}#sk-container-id-2 pre{padding: 0;}#sk-container-id-2 div.sk-toggleable {background-color: white;}#sk-container-id-2 label.sk-toggleable__label {cursor: pointer;display: block;width: 100%;margin-bottom: 0;padding: 0.3em;box-sizing: border-box;text-align: center;}#sk-container-id-2 label.sk-toggleable__label-arrow:before {content: \"▸\";float: left;margin-right: 0.25em;color: #696969;}#sk-container-id-2 label.sk-toggleable__label-arrow:hover:before {color: black;}#sk-container-id-2 div.sk-estimator:hover label.sk-toggleable__label-arrow:before {color: black;}#sk-container-id-2 div.sk-toggleable__content {max-height: 0;max-width: 0;overflow: hidden;text-align: left;background-color: #f0f8ff;}#sk-container-id-2 div.sk-toggleable__content pre {margin: 0.2em;color: black;border-radius: 0.25em;background-color: #f0f8ff;}#sk-container-id-2 input.sk-toggleable__control:checked~div.sk-toggleable__content {max-height: 200px;max-width: 100%;overflow: auto;}#sk-container-id-2 input.sk-toggleable__control:checked~label.sk-toggleable__label-arrow:before {content: \"▾\";}#sk-container-id-2 div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-2 div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-2 input.sk-hidden--visually {border: 0;clip: rect(1px 1px 1px 1px);clip: rect(1px, 1px, 1px, 1px);height: 1px;margin: -1px;overflow: hidden;padding: 0;position: absolute;width: 1px;}#sk-container-id-2 div.sk-estimator {font-family: monospace;background-color: #f0f8ff;border: 1px dotted black;border-radius: 0.25em;box-sizing: border-box;margin-bottom: 0.5em;}#sk-container-id-2 div.sk-estimator:hover {background-color: #d4ebff;}#sk-container-id-2 div.sk-parallel-item::after {content: \"\";width: 100%;border-bottom: 1px solid gray;flex-grow: 1;}#sk-container-id-2 div.sk-label:hover 
label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-2 div.sk-serial::before {content: \"\";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: 0;}#sk-container-id-2 div.sk-serial {display: flex;flex-direction: column;align-items: center;background-color: white;padding-right: 0.2em;padding-left: 0.2em;position: relative;}#sk-container-id-2 div.sk-item {position: relative;z-index: 1;}#sk-container-id-2 div.sk-parallel {display: flex;align-items: stretch;justify-content: center;background-color: white;position: relative;}#sk-container-id-2 div.sk-item::before, #sk-container-id-2 div.sk-parallel-item::before {content: \"\";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: -1;}#sk-container-id-2 div.sk-parallel-item {display: flex;flex-direction: column;z-index: 1;position: relative;background-color: white;}#sk-container-id-2 div.sk-parallel-item:first-child::after {align-self: flex-end;width: 50%;}#sk-container-id-2 div.sk-parallel-item:last-child::after {align-self: flex-start;width: 50%;}#sk-container-id-2 div.sk-parallel-item:only-child::after {width: 0;}#sk-container-id-2 div.sk-dashed-wrapped {border: 1px dashed gray;margin: 0 0.4em 0.5em 0.4em;box-sizing: border-box;padding-bottom: 0.4em;background-color: white;}#sk-container-id-2 div.sk-label label {font-family: monospace;font-weight: bold;display: inline-block;line-height: 1.2em;}#sk-container-id-2 div.sk-label-container {text-align: center;}#sk-container-id-2 div.sk-container {/* jupyter's `normalize.less` sets `[hidden] { display: none; }` but bootstrap.min.css set `[hidden] { display: none !important; }` so we also need the `!important` here to be able to override the default hidden behavior on the sphinx rendered scikit-learn.org. 
See: https://github.com/scikit-learn/scikit-learn/issues/21755 */display: inline-block !important;position: relative;}#sk-container-id-2 div.sk-text-repr-fallback {display: none;}</style><div id=\"sk-container-id-2\" class=\"sk-top-container\"><div class=\"sk-text-repr-fallback\"><pre>GRU_Estimator(epoch=1, hidden_size=8, lr=0.001, num_layers=1, seqlen=60)</pre><b>In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook. <br />On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.</b></div><div class=\"sk-container\" hidden><div class=\"sk-item\"><div class=\"sk-estimator sk-toggleable\"><input class=\"sk-toggleable__control sk-hidden--visually\" id=\"sk-estimator-id-4\" type=\"checkbox\" checked><label for=\"sk-estimator-id-4\" class=\"sk-toggleable__label sk-toggleable__label-arrow\">GRU_Estimator</label><div class=\"sk-toggleable__content\"><pre>GRU_Estimator(epoch=1, hidden_size=8, lr=0.001, num_layers=1, seqlen=60)</pre></div></div></div></div></div>"
      ],
      "text/plain": [
       "GRU_Estimator(epoch=1, hidden_size=8, lr=0.001, num_layers=1, seqlen=60)"
      ]
     },
     "execution_count": 117,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Best model found by the grid search -- only meaningful once gridsearch.fit\n",
    "# has run to completion (the run above was interrupted).\n",
    "gridsearch.best_estimator_"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a batch containing a single input window: the first 60 consecutive\n",
    "# rows of train_data, cast to float32 and moved to `device`.\n",
    "# (assumes train_data supports integer slicing -- same pattern as train_epoch)\n",
    "test = torch.stack(\n",
    "    [torch.tensor(train_data[start:start + 60], dtype=torch.float32).to(device)\n",
    "     for start in range(0, 1)]\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1.4490]], device='cuda:0', grad_fn=<AddmmBackward0>)"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# One-step prediction for the single 60-row window built in `test` above;\n",
    "# output is a 1x1 tensor on the GPU (see recorded result below).\n",
    "model.predict(test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "7.92"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Ground-truth target for the window above: training pairs X[j:j+seqlen] ->\n",
    "# y[j+seqlen] with seqlen=60 (see train_epoch in the traceback), so index 60\n",
    "# is the value test[0] should predict.\n",
    "target_data[60]"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "top1hik",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
