{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "35 35\n"
     ]
    }
   ],
   "source": [
    "# pip install openpyxl -i https://pypi.tuna.tsinghua.edu.cn/simple/\n",
    "# Imports deduplicated: torch/math/nn were each imported twice, and the\n",
    "# 'import torch.utils.data as data' alias was removed because the name 'data'\n",
    "# is immediately shadowed by the DataFrame loaded below.\n",
    "import math\n",
    "import warnings\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from tqdm import tqdm\n",
    "import torch\n",
    "from torch import nn, tensor\n",
    "import torch.nn.functional as F\n",
    "import torch.utils.data as Data\n",
    "from matplotlib import pyplot\n",
    "import matplotlib.pyplot as plt\n",
    "from datetime import datetime, timedelta\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese axis labels correctly\n",
    "plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly\n",
    "\n",
    "# device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "device = torch.device(\"cpu\")\n",
    "# print(torch.cuda.is_available())\n",
    "# Load the weekly sales data. (Original note: columns 1 / 3 / 7 are prediction columns.)\n",
    "data = pd.read_csv(\"1.csv\")\n",
    "# BUG FIX: dropna() is not in-place; the result must be assigned back,\n",
    "# otherwise rows with missing values are silently kept.\n",
    "data = data.dropna(axis=0, how='any')\n",
    "# data = data.fillna(0)\n",
    "# print(data.head())\n",
    "# print(data.columns)\n",
    "data_x = data[\n",
    "    ['Weekly_Sales', 'Holiday_Flag', 'Temperature', 'Fuel_Price', 'CPI', 'Unemployment', ]].values\n",
    "data_y = data[['Weekly_Sales']].values\n",
    "\n",
    "# print(len(data_y))\n",
    "# Group rows in blocks of four: the first three rows predict the fourth.\n",
    "data_4_x = []\n",
    "data_4_y = []\n",
    "for i in range(0, len(data_y) - 4, 4):\n",
    "    data_4_x.append(data_x[i:i + 3])\n",
    "    # BUG FIX: the 4th element of the group is at index i + 3, not i + 4\n",
    "    # (i + 4 is the first row of the NEXT group).\n",
    "    data_4_y.append(data_y[i + 3])\n",
    "print(len(data_4_x), len(data_4_y))\n",
    "# random_state pinned so the split (and downstream results) are reproducible.\n",
    "x_train, x_test, y_train, y_test = train_test_split(np.array(data_4_x), np.array(data_4_y), test_size=0.2, random_state=42)\n",
    "\n",
    "class DataSet(Data.Dataset):\n",
    "    def __init__(self, data_inputs, data_targets):\n",
    "        self.inputs = torch.FloatTensor(data_inputs)\n",
    "        self.label = torch.FloatTensor(data_targets)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        return self.inputs[index], self.label[index]\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.inputs)\n",
    "\n",
    "\n",
    "Batch_Size = 8  # mini-batch size for both loaders\n",
    "# NOTE(review): this rebinds the name `DataSet` from the class to an instance,\n",
    "# shadowing the class for the rest of the notebook — a distinct variable name\n",
    "# would be safer, but later cells may reference `DataSet`, so left as-is.\n",
    "DataSet = DataSet(np.array(x_train), list(y_train))\n",
    "# Re-split the training portion 80/20 into train/validation subsets.\n",
    "# (x_test / y_test from the earlier split are presumably used in a later cell.)\n",
    "train_size = int(len(x_train) * 0.8)\n",
    "test_size = len(y_train) - train_size\n",
    "train_dataset, test_dataset = torch.utils.data.random_split(DataSet, [train_size, test_size])\n",
    "# drop_last=True: partial final batches are discarded by both loaders.\n",
    "TrainDataLoader = Data.DataLoader(train_dataset, batch_size=Batch_Size, shuffle=True, drop_last=True)\n",
    "TestDataLoader = Data.DataLoader(test_dataset, batch_size=Batch_Size, shuffle=True, drop_last=True)\n",
    "\n",
    "\n",
    "class PositionalEncoding(nn.Module):\n",
    "    \"\"\"Sinusoidal positional encoding (Vaswani et al., 2017).\n",
    "\n",
    "    NOTE(review): defined but not used by the Transformer below, which uses\n",
    "    learned position embeddings instead.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, d_model, max_len=5000):\n",
    "        super(PositionalEncoding, self).__init__()\n",
    "        # pe[pos, 2i] = sin(pos * div_term[i]); pe[pos, 2i+1] = cos(pos * div_term[i])\n",
    "        pe = torch.zeros(max_len, d_model)\n",
    "        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n",
    "        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n",
    "        pe[:, 0::2] = torch.sin(position * div_term)\n",
    "        pe[:, 1::2] = torch.cos(position * div_term)\n",
    "        # Reshape to (max_len, 1, d_model) so it broadcasts over the batch dim.\n",
    "        pe = pe.unsqueeze(0).transpose(0, 1)\n",
    "        # pe.requires_grad = False\n",
    "        # register_buffer: saved with the module but not a trainable parameter.\n",
    "        self.register_buffer('pe', pe)\n",
    "\n",
    "    def forward(self, x: torch.Tensor):\n",
    "        # Splits x into single-feature slices along the last dim, adds the\n",
    "        # positional buffer to each slice, and concatenates back on dim 2.\n",
    "        # NOTE(review): assuming x is (seq, batch, features), each (S, B, 1)\n",
    "        # slice broadcasts against the (S, 1, d_model) buffer, so the output's\n",
    "        # last dim becomes features * d_model — confirm this widening is\n",
    "        # intended before using this module.\n",
    "        chunk = x.chunk(x.size(-1), dim=2)\n",
    "        out = torch.Tensor([]).to(x.device)\n",
    "        for i in range(len(chunk)):\n",
    "            out = torch.cat((out, chunk[i] + self.pe[:chunk[i].size(0), ...]), dim=2)\n",
    "        return out\n",
    "\n",
    "\n",
    "def transformer_generate_tgt_mask(length, device):\n",
    "    mask = torch.tril(torch.ones(length, length, device=device)) == 1\n",
    "    mask = (\n",
    "        mask.float()\n",
    "        .masked_fill(mask == 0, float(\"-inf\"))\n",
    "        .masked_fill(mask == 1, float(0.0))\n",
    "    )\n",
    "    return mask\n",
    "\n",
    "\n",
    "class Transformer(nn.Module):\n",
    "    \"\"\"Standard Transformer encoder-decoder adapted for single-value regression.\"\"\"\n",
    "\n",
    "    def __init__(self, n_encoder_inputs, n_decoder_inputs, Sequence_length, d_model=512, dropout=0.1, num_layer=8):\n",
    "        \"\"\"\n",
    "        Initialize the model.\n",
    "        :param n_encoder_inputs:    feature dimension of the encoder input\n",
    "        :param n_decoder_inputs:    feature dimension of the decoder input (equals the encoder output feature dimension)\n",
    "        :param Sequence_length:     length of the input sequence fed to the transformer\n",
    "        :param d_model:             embedding feature dimension\n",
    "        :param dropout:             dropout probability\n",
    "        :param num_layer:           number of transformer blocks (NOTE(review): unused — layer counts are hard-coded to 2/4 below)\n",
    "        \"\"\"\n",
    "        super(Transformer, self).__init__()\n",
    "\n",
    "        # Learned positional embeddings (up to 5000 positions) for both sides.\n",
    "        self.input_pos_embedding = torch.nn.Embedding(5000, embedding_dim=d_model)\n",
    "        self.target_pos_embedding = torch.nn.Embedding(5000, embedding_dim=d_model)\n",
    "\n",
    "        encoder_layer = torch.nn.TransformerEncoderLayer(d_model=d_model, nhead=8, dropout=dropout,\n",
    "                                                         dim_feedforward=4 * d_model)\n",
    "        decoder_layer = torch.nn.TransformerDecoderLayer(d_model=d_model, nhead=8, dropout=dropout,\n",
    "                                                         dim_feedforward=4 * d_model)\n",
    "\n",
    "        self.encoder = torch.nn.TransformerEncoder(encoder_layer, num_layers=2)\n",
    "        self.decoder = torch.nn.TransformerDecoder(decoder_layer, num_layers=4)\n",
    "\n",
    "        # Project raw features into the d_model embedding space.\n",
    "        self.input_projection = torch.nn.Linear(n_encoder_inputs, d_model)\n",
    "        self.output_projection = torch.nn.Linear(n_decoder_inputs, d_model)\n",
    "\n",
    "        # Per-timestep head (d_model -> 1) and sequence head (Sequence_length -> 1).\n",
    "        self.linear = torch.nn.Linear(d_model, 1)\n",
    "        self.ziji_add_linear = torch.nn.Linear(Sequence_length, 1)\n",
    "\n",
    "    def encode_in(self, src):\n",
    "        # Project, then permute to the (seq, batch, d_model) layout nn.Transformer expects.\n",
    "        src_start = self.input_projection(src).permute(1, 0, 2)\n",
    "        in_sequence_len, batch_size = src_start.size(0), src_start.size(1)\n",
    "        # Position ids 0..seq-1, one row per batch element, embedded and permuted to match.\n",
    "        pos_encoder = (torch.arange(0, in_sequence_len, device=src.device).unsqueeze(0).repeat(batch_size, 1))\n",
    "        pos_encoder = self.input_pos_embedding(pos_encoder).permute(1, 0, 2)\n",
    "        src = src_start + pos_encoder\n",
    "        # Residual connection around the encoder stack.\n",
    "        src = self.encoder(src) + src_start\n",
    "        return src\n",
    "\n",
    "    def decode_out(self, tgt, memory):\n",
    "        tgt_start = self.output_projection(tgt).permute(1, 0, 2)\n",
    "        out_sequence_len, batch_size = tgt_start.size(0), tgt_start.size(1)\n",
    "        pos_decoder = (torch.arange(0, out_sequence_len, device=tgt.device).unsqueeze(0).repeat(batch_size, 1))\n",
    "        pos_decoder = self.target_pos_embedding(pos_decoder).permute(1, 0, 2)\n",
    "        tgt = tgt_start + pos_decoder\n",
    "        # Causal mask keeps each position from attending to future positions.\n",
    "        tgt_mask = transformer_generate_tgt_mask(out_sequence_len, tgt.device)\n",
    "        out = self.decoder(tgt=tgt, memory=memory, tgt_mask=tgt_mask) + tgt_start\n",
    "        out = out.permute(1, 0, 2)  # [batch_size, seq_len, d_model]\n",
    "        out = self.linear(out)\n",
    "        return out\n",
    "\n",
    "    def forward(self, src, target_in):\n",
    "        src = self.encode_in(src)\n",
    "        out = self.decode_out(tgt=target_in, memory=src)\n",
    "        # print(\"out.shape:\",out.shape)# torch.Size([batch, 3, 1]) # shape of the original decoder output\n",
    "        # The decoder output above can feed many task heads; adapt it as needed.\n",
    "        # Custom addition below: a fully connected layer squeezes it to\n",
    "        # [batch, 1], giving a transformer-based single-value regression.\n",
    "        out = out.squeeze(2)\n",
    "        out = self.ziji_add_linear(out)\n",
    "        return out\n",
    "\n",
    "\n",
    "model = Transformer(n_encoder_inputs=6, n_decoder_inputs=6, Sequence_length=3).to(device)  # 3 is Sequence_length, the length of the input sequence to the transformer\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "start_time": "2023-08-16T23:18:33.965459Z",
     "end_time": "2023-08-16T23:18:37.379545Z"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [
    {
     "data": {
      "text/plain": "Transformer(\n  (input_pos_embedding): Embedding(5000, 512)\n  (target_pos_embedding): Embedding(5000, 512)\n  (encoder): TransformerEncoder(\n    (layers): ModuleList(\n      (0-1): 2 x TransformerEncoderLayer(\n        (self_attn): MultiheadAttention(\n          (out_proj): NonDynamicallyQuantizableLinear(in_features=512, out_features=512, bias=True)\n        )\n        (linear1): Linear(in_features=512, out_features=2048, bias=True)\n        (dropout): Dropout(p=0.1, inplace=False)\n        (linear2): Linear(in_features=2048, out_features=512, bias=True)\n        (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n        (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n        (dropout1): Dropout(p=0.1, inplace=False)\n        (dropout2): Dropout(p=0.1, inplace=False)\n      )\n    )\n  )\n  (decoder): TransformerDecoder(\n    (layers): ModuleList(\n      (0-3): 4 x TransformerDecoderLayer(\n        (self_attn): MultiheadAttention(\n          (out_proj): NonDynamicallyQuantizableLinear(in_features=512, out_features=512, bias=True)\n        )\n        (multihead_attn): MultiheadAttention(\n          (out_proj): NonDynamicallyQuantizableLinear(in_features=512, out_features=512, bias=True)\n        )\n        (linear1): Linear(in_features=512, out_features=2048, bias=True)\n        (dropout): Dropout(p=0.1, inplace=False)\n        (linear2): Linear(in_features=2048, out_features=512, bias=True)\n        (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n        (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n        (norm3): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n        (dropout1): Dropout(p=0.1, inplace=False)\n        (dropout2): Dropout(p=0.1, inplace=False)\n        (dropout3): Dropout(p=0.1, inplace=False)\n      )\n    )\n  )\n  (input_projection): Linear(in_features=6, out_features=512, bias=True)\n  (output_projection): Linear(in_features=6, out_features=512, 
bias=True)\n  (linear): Linear(in_features=512, out_features=1, bias=True)\n  (ziji_add_linear): Linear(in_features=3, out_features=1, bias=True)\n)"
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "start_time": "2023-08-16T23:18:37.347543Z",
     "end_time": "2023-08-16T23:18:37.421542Z"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/500 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: tensor(2.3457e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3213e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 1/500 [00:00<06:10,  1.35it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 0 train_epoch_loss: [2345676570624.0, 2321270702080.0] val_epoch_loss: 2406094209024.0\n",
      "loss: tensor(2.3195e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3999e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 2/500 [00:01<05:48,  1.43it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 1 train_epoch_loss: [2319486025728.0, 2399927795712.0] val_epoch_loss: 2397515939840.0\n",
      "loss: tensor(2.4286e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3746e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  1%|          | 3/500 [00:01<05:19,  1.56it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 2 train_epoch_loss: [2428618932224.0, 2374607568896.0] val_epoch_loss: 2380857606144.0\n",
      "loss: tensor(2.3464e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.5159e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  1%|          | 4/500 [00:02<04:51,  1.70it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 3 train_epoch_loss: [2346358931456.0, 2515873038336.0] val_epoch_loss: 2359275945984.0\n",
      "loss: tensor(2.3623e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2842e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  1%|          | 5/500 [00:03<04:54,  1.68it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 4 train_epoch_loss: [2362254819328.0, 2284168413184.0] val_epoch_loss: 2335788367872.0\n",
      "loss: tensor(2.3746e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2761e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  1%|          | 6/500 [00:03<04:59,  1.65it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 5 train_epoch_loss: [2374649249792.0, 2276063182848.0] val_epoch_loss: 2328546902016.0\n",
      "loss: tensor(2.3817e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3354e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  1%|▏         | 7/500 [00:04<04:52,  1.68it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 6 train_epoch_loss: [2381683359744.0, 2335394758656.0] val_epoch_loss: 2394754121728.0\n",
      "loss: tensor(2.3485e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4054e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  2%|▏         | 8/500 [00:04<04:50,  1.70it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 7 train_epoch_loss: [2348536823808.0, 2405368070144.0] val_epoch_loss: 2308122083328.0\n",
      "loss: tensor(2.3648e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3642e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  2%|▏         | 9/500 [00:05<04:47,  1.71it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 8 train_epoch_loss: [2364770615296.0, 2364221423616.0] val_epoch_loss: 2398376689664.0\n",
      "loss: tensor(2.4510e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2111e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  2%|▏         | 10/500 [00:05<04:38,  1.76it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 9 train_epoch_loss: [2450969853952.0, 2211074801664.0] val_epoch_loss: 2381160513536.0\n",
      "loss: tensor(2.2888e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4258e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  2%|▏         | 11/500 [00:06<04:26,  1.83it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 10 train_epoch_loss: [2288803905536.0, 2425774669824.0] val_epoch_loss: 2403045867520.0\n",
      "loss: tensor(2.3356e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3803e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  2%|▏         | 12/500 [00:07<04:28,  1.82it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 11 train_epoch_loss: [2335576424448.0, 2380253888512.0] val_epoch_loss: 2368911704064.0\n",
      "loss: tensor(2.2943e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.5721e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  3%|▎         | 13/500 [00:07<04:16,  1.90it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 12 train_epoch_loss: [2294298443776.0, 2572117344256.0] val_epoch_loss: 2302954438656.0\n",
      "loss: tensor(2.3383e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3367e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  3%|▎         | 14/500 [00:08<04:12,  1.92it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 13 train_epoch_loss: [2338289352704.0, 2336695779328.0] val_epoch_loss: 2312417443840.0\n",
      "loss: tensor(2.2097e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3519e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  3%|▎         | 15/500 [00:08<04:09,  1.94it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 14 train_epoch_loss: [2209714012160.0, 2351944171520.0] val_epoch_loss: 2388590460928.0\n",
      "loss: tensor(2.3563e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3707e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  3%|▎         | 16/500 [00:08<04:03,  1.99it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 15 train_epoch_loss: [2356285538304.0, 2370687467520.0] val_epoch_loss: 2385452466176.0\n",
      "loss: tensor(2.3731e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4085e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  3%|▎         | 17/500 [00:09<04:06,  1.96it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 16 train_epoch_loss: [2373081890816.0, 2408538701824.0] val_epoch_loss: 2371920068608.0\n",
      "loss: tensor(2.2747e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2945e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  4%|▎         | 18/500 [00:10<04:07,  1.95it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 17 train_epoch_loss: [2274704490496.0, 2294499246080.0] val_epoch_loss: 2291436355584.0\n",
      "loss: tensor(2.3502e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4814e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  4%|▍         | 19/500 [00:10<04:07,  1.94it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 18 train_epoch_loss: [2350238400512.0, 2481351819264.0] val_epoch_loss: 2350380089344.0\n",
      "loss: tensor(2.4560e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3394e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  4%|▍         | 20/500 [00:11<03:59,  2.00it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 19 train_epoch_loss: [2456041291776.0, 2339374628864.0] val_epoch_loss: 2237580181504.0\n",
      "loss: tensor(2.4378e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3794e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  4%|▍         | 21/500 [00:11<04:02,  1.97it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 20 train_epoch_loss: [2437772738560.0, 2379367841792.0] val_epoch_loss: 2283377393664.0\n",
      "loss: tensor(2.3277e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4588e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  4%|▍         | 22/500 [00:11<03:52,  2.06it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 21 train_epoch_loss: [2327681171456.0, 2458835746816.0] val_epoch_loss: 2293120237568.0\n",
      "loss: tensor(2.4209e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3618e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  5%|▍         | 23/500 [00:12<03:54,  2.03it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 22 train_epoch_loss: [2420885946368.0, 2361849544704.0] val_epoch_loss: 2385864163328.0\n",
      "loss: tensor(2.4680e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3179e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  5%|▍         | 24/500 [00:12<03:55,  2.02it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 23 train_epoch_loss: [2467993223168.0, 2317907132416.0] val_epoch_loss: 2360729403392.0\n",
      "loss: tensor(2.4447e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2024e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  5%|▌         | 25/500 [00:13<03:55,  2.02it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 24 train_epoch_loss: [2444683640832.0, 2202377650176.0] val_epoch_loss: 2354207522816.0\n",
      "loss: tensor(2.4755e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2820e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  5%|▌         | 26/500 [00:14<03:57,  1.99it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 25 train_epoch_loss: [2475459084288.0, 2282036658176.0] val_epoch_loss: 2363452686336.0\n",
      "loss: tensor(2.3899e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4312e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  5%|▌         | 27/500 [00:14<03:54,  2.02it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 26 train_epoch_loss: [2389867233280.0, 2431159631872.0] val_epoch_loss: 2377733832704.0\n",
      "loss: tensor(2.3751e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  6%|▌         | 28/500 [00:14<03:48,  2.07it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: tensor(2.4185e+12, grad_fn=<MseLossBackward0>)\n",
      "epoch: 27 train_epoch_loss: [2375109312512.0, 2418456395776.0] val_epoch_loss: 2397484351488.0\n",
      "loss: tensor(2.5406e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3234e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  6%|▌         | 29/500 [00:15<03:50,  2.04it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 28 train_epoch_loss: [2540628344832.0, 2323378339840.0] val_epoch_loss: 2264897945600.0\n",
      "loss: tensor(2.2999e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3306e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  6%|▌         | 30/500 [00:15<03:42,  2.11it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 29 train_epoch_loss: [2299851177984.0, 2330552172544.0] val_epoch_loss: 2396309028864.0\n",
      "loss: tensor(2.2965e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3495e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  6%|▌         | 31/500 [00:16<03:50,  2.03it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 30 train_epoch_loss: [2296451694592.0, 2349454852096.0] val_epoch_loss: 2404247928832.0\n",
      "loss: tensor(2.2162e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.5595e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  6%|▋         | 32/500 [00:16<03:59,  1.96it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 31 train_epoch_loss: [2216155938816.0, 2559495110656.0] val_epoch_loss: 2446528348160.0\n",
      "loss: tensor(2.3195e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2353e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  7%|▋         | 33/500 [00:17<04:03,  1.92it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 32 train_epoch_loss: [2319536619520.0, 2235299266560.0] val_epoch_loss: 2249329541120.0\n",
      "loss: tensor(2.3454e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3249e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  7%|▋         | 34/500 [00:18<04:03,  1.91it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 33 train_epoch_loss: [2345423863808.0, 2324906639360.0] val_epoch_loss: 2356831059968.0\n",
      "loss: tensor(2.2363e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4829e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  7%|▋         | 35/500 [00:18<04:11,  1.85it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 34 train_epoch_loss: [2236344172544.0, 2482866487296.0] val_epoch_loss: 2374805749760.0\n",
      "loss: tensor(2.3625e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3454e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  7%|▋         | 36/500 [00:20<06:41,  1.16it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 35 train_epoch_loss: [2362534526976.0, 2345432252416.0] val_epoch_loss: 2336005160960.0\n",
      "loss: tensor(2.2386e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4715e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  7%|▋         | 37/500 [00:20<05:47,  1.33it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 36 train_epoch_loss: [2238593630208.0, 2471541866496.0] val_epoch_loss: 2341546754048.0\n",
      "loss: tensor(2.3303e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.5234e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  8%|▊         | 38/500 [00:21<05:48,  1.33it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 37 train_epoch_loss: [2330320699392.0, 2523431960576.0] val_epoch_loss: 2310800801792.0\n",
      "loss: tensor(2.4534e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3391e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  8%|▊         | 39/500 [00:22<05:32,  1.39it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 38 train_epoch_loss: [2453362180096.0, 2339136602112.0] val_epoch_loss: 2397335977984.0\n",
      "loss: tensor(2.4798e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3238e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  8%|▊         | 40/500 [00:22<05:12,  1.47it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 39 train_epoch_loss: [2479793635328.0, 2323783614464.0] val_epoch_loss: 2324120338432.0\n",
      "loss: tensor(2.4413e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  8%|▊         | 41/500 [00:23<04:34,  1.67it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: tensor(2.2121e+12, grad_fn=<MseLossBackward0>)\n",
      "epoch: 40 train_epoch_loss: [2441346809856.0, 2212110270464.0] val_epoch_loss: 2301698113536.0\n",
      "loss: tensor(2.5543e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.1220e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  8%|▊         | 42/500 [00:23<04:02,  1.89it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 41 train_epoch_loss: [2554308591616.0, 2121986998272.0] val_epoch_loss: 2366957682688.0\n",
      "loss: tensor(2.5185e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3192e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  9%|▊         | 43/500 [00:23<03:40,  2.07it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 42 train_epoch_loss: [2518455943168.0, 2319249833984.0] val_epoch_loss: 2327564255232.0\n",
      "loss: tensor(2.3834e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4476e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  9%|▉         | 44/500 [00:24<03:24,  2.23it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 43 train_epoch_loss: [2383374974976.0, 2447597633536.0] val_epoch_loss: 2349531660288.0\n",
      "loss: tensor(2.4017e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3986e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  9%|▉         | 45/500 [00:24<03:14,  2.34it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 44 train_epoch_loss: [2401670791168.0, 2398613667840.0] val_epoch_loss: 2340087005184.0\n",
      "loss: tensor(2.3168e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3659e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  9%|▉         | 46/500 [00:24<03:06,  2.43it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 45 train_epoch_loss: [2316753436672.0, 2365903339520.0] val_epoch_loss: 2285058785280.0\n",
      "loss: tensor(2.1808e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4673e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  9%|▉         | 47/500 [00:25<03:13,  2.34it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 46 train_epoch_loss: [2180840030208.0, 2467307716608.0] val_epoch_loss: 2372064509952.0\n",
      "loss: tensor(2.4061e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3407e+12, grad_fn=<MseLossBackward0>)\n",
      "epoch: 47 train_epoch_loss: [2406057246720.0, 2340658085888.0] val_epoch_loss: 2331250262016.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 10%|▉         | 48/500 [00:25<03:11,  2.36it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: tensor(2.5244e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2133e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 10%|▉         | 49/500 [00:26<03:06,  2.42it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 48 train_epoch_loss: [2524414738432.0, 2213341560832.0] val_epoch_loss: 2380483264512.0\n",
      "loss: tensor(2.5553e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2755e+12, grad_fn=<MseLossBackward0>)\n",
      "epoch: 49 train_epoch_loss: [2555344846848.0, 2275547807744.0] val_epoch_loss: 2293069643776.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 10%|█         | 50/500 [00:26<03:10,  2.36it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: tensor(2.4630e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3546e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 10%|█         | 51/500 [00:27<03:09,  2.37it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 50 train_epoch_loss: [2462952194048.0, 2354647138304.0] val_epoch_loss: 2433238040576.0\n",
      "loss: tensor(2.4504e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3422e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 10%|█         | 52/500 [00:27<03:11,  2.34it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 51 train_epoch_loss: [2450365874176.0, 2342202638336.0] val_epoch_loss: 2367679889408.0\n",
      "loss: tensor(2.2168e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3955e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 11%|█         | 53/500 [00:27<03:04,  2.42it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 52 train_epoch_loss: [2216805007360.0, 2395525087232.0] val_epoch_loss: 2364018393088.0\n",
      "loss: tensor(2.2703e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4927e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 11%|█         | 54/500 [00:28<03:00,  2.47it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 53 train_epoch_loss: [2270326685696.0, 2492734111744.0] val_epoch_loss: 2350640529408.0\n",
      "loss: tensor(2.3285e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4697e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 11%|█         | 55/500 [00:28<02:59,  2.48it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 54 train_epoch_loss: [2328521605120.0, 2469697683456.0] val_epoch_loss: 2352520364032.0\n",
      "loss: tensor(2.2790e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4200e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 11%|█         | 56/500 [00:29<02:55,  2.53it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 55 train_epoch_loss: [2278985826304.0, 2420034764800.0] val_epoch_loss: 2398531878912.0\n",
      "loss: tensor(2.3883e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4189e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 11%|█▏        | 57/500 [00:29<03:02,  2.43it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 56 train_epoch_loss: [2388253999104.0, 2418925371392.0] val_epoch_loss: 2288172662784.0\n",
      "loss: tensor(2.3758e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 12%|█▏        | 58/500 [00:29<03:01,  2.43it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: tensor(2.3420e+12, grad_fn=<MseLossBackward0>)\n",
      "epoch: 57 train_epoch_loss: [2375803469824.0, 2341973262336.0] val_epoch_loss: 2435645702144.0\n",
      "loss: tensor(2.2577e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4419e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 12%|█▏        | 59/500 [00:30<03:03,  2.40it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 58 train_epoch_loss: [2257661984768.0, 2441855631360.0] val_epoch_loss: 2309332074496.0\n",
      "loss: tensor(2.1884e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.5691e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 12%|█▏        | 60/500 [00:30<02:59,  2.45it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 59 train_epoch_loss: [2188430016512.0, 2569092726784.0] val_epoch_loss: 2329264783360.0\n",
      "loss: tensor(2.4599e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3282e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 12%|█▏        | 61/500 [00:31<02:55,  2.50it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 60 train_epoch_loss: [2459893235712.0, 2328161419264.0] val_epoch_loss: 2393494519808.0\n",
      "loss: tensor(2.5056e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3714e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 12%|█▏        | 62/500 [00:31<03:00,  2.42it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 61 train_epoch_loss: [2505555574784.0, 2371365896192.0] val_epoch_loss: 2322586402816.0\n",
      "loss: tensor(2.3592e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3013e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 13%|█▎        | 63/500 [00:31<02:56,  2.47it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 62 train_epoch_loss: [2359190880256.0, 2301323378688.0] val_epoch_loss: 2314699931648.0\n",
      "loss: tensor(2.3610e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3349e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 13%|█▎        | 64/500 [00:32<02:56,  2.48it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 63 train_epoch_loss: [2360958255104.0, 2334915297280.0] val_epoch_loss: 2332339077120.0\n",
      "loss: tensor(2.4190e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.1730e+12, grad_fn=<MseLossBackward0>)\n",
      "epoch: 64 train_epoch_loss: [2419049889792.0, 2172967583744.0] val_epoch_loss: 2285733740544.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 13%|█▎        | 65/500 [00:32<02:55,  2.47it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: tensor(2.3921e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3452e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 13%|█▎        | 66/500 [00:33<02:57,  2.45it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 65 train_epoch_loss: [2392064000000.0, 2345175875584.0] val_epoch_loss: 2393255968768.0\n",
      "loss: tensor(2.4933e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3195e+12, grad_fn=<MseLossBackward0>)\n",
      "epoch: 66 train_epoch_loss: [2493291167744.0, 2319530590208.0] val_epoch_loss: 2320414539776.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 13%|█▎        | 67/500 [00:33<02:56,  2.45it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: tensor(2.4351e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2466e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 14%|█▎        | 68/500 [00:34<02:53,  2.49it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 67 train_epoch_loss: [2435108306944.0, 2246564904960.0] val_epoch_loss: 2364375433216.0\n",
      "loss: tensor(2.2874e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3090e+12, grad_fn=<MseLossBackward0>)\n",
      "epoch: 68 train_epoch_loss: [2287393046528.0, 2309027790848.0] val_epoch_loss: 2360387043328.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 14%|█▍        | 69/500 [00:34<02:54,  2.47it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: tensor(2.5280e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2768e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 14%|█▍        | 70/500 [00:34<02:54,  2.47it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 69 train_epoch_loss: [2528030490624.0, 2276795875328.0] val_epoch_loss: 2256544661504.0\n",
      "loss: tensor(2.6263e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2579e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 14%|█▍        | 71/500 [00:35<02:48,  2.55it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 70 train_epoch_loss: [2626268168192.0, 2257872748544.0] val_epoch_loss: 2391444291584.0\n",
      "loss: tensor(2.3614e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2767e+12, grad_fn=<MseLossBackward0>)\n",
      "epoch: 71 train_epoch_loss: [2361446629376.0, 2276730077184.0] val_epoch_loss: 2329704398848.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 14%|█▍        | 72/500 [00:35<02:50,  2.51it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: tensor(2.4078e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4074e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 15%|█▍        | 73/500 [00:35<02:46,  2.56it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 72 train_epoch_loss: [2407756201984.0, 2407385530368.0] val_epoch_loss: 2332578283520.0\n",
      "loss: tensor(2.4012e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3524e+12, grad_fn=<MseLossBackward0>)\n",
      "epoch: 73 train_epoch_loss: [2401173766144.0, 2352425205760.0] val_epoch_loss: 2328684134400.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 15%|█▍        | 74/500 [00:36<02:47,  2.55it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: tensor(2.3415e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2721e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 15%|█▌        | 75/500 [00:36<02:44,  2.58it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 74 train_epoch_loss: [2341497995264.0, 2272084099072.0] val_epoch_loss: 2351448850432.0\n",
      "loss: tensor(2.3281e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2935e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 15%|█▌        | 76/500 [00:37<02:50,  2.48it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 75 train_epoch_loss: [2328057085952.0, 2293485535232.0] val_epoch_loss: 2433656815616.0\n",
      "loss: tensor(2.3182e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4205e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 15%|█▌        | 77/500 [00:38<03:46,  1.86it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 76 train_epoch_loss: [2318156431360.0, 2420528644096.0] val_epoch_loss: 2298067419136.0\n",
      "loss: tensor(2.3455e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4004e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 16%|█▌        | 78/500 [00:39<04:45,  1.48it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 77 train_epoch_loss: [2345534226432.0, 2400365314048.0] val_epoch_loss: 2307890216960.0\n",
      "loss: tensor(2.3737e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4078e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 16%|█▌        | 79/500 [00:40<06:44,  1.04it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 78 train_epoch_loss: [2373696094208.0, 2407836155904.0] val_epoch_loss: 2372759715840.0\n",
      "loss: tensor(2.4621e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2495e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 16%|█▌        | 80/500 [00:42<07:43,  1.10s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 79 train_epoch_loss: [2462072438784.0, 2249459236864.0] val_epoch_loss: 2304394657792.0\n",
      "loss: tensor(2.3797e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2899e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 16%|█▌        | 81/500 [00:42<06:39,  1.05it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 80 train_epoch_loss: [2379669045248.0, 2289923784704.0] val_epoch_loss: 2240504397824.0\n",
      "loss: tensor(2.3499e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3470e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 16%|█▋        | 82/500 [00:43<05:39,  1.23it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 81 train_epoch_loss: [2349914914816.0, 2346986766336.0] val_epoch_loss: 2306508718080.0\n",
      "loss: tensor(2.4805e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2947e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 17%|█▋        | 83/500 [00:44<07:18,  1.05s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 82 train_epoch_loss: [2480454500352.0, 2294697164800.0] val_epoch_loss: 2371573121024.0\n",
      "loss: tensor(2.3388e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4657e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 17%|█▋        | 84/500 [00:46<09:11,  1.33s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 83 train_epoch_loss: [2338797387776.0, 2465718337536.0] val_epoch_loss: 2320588734464.0\n",
      "loss: tensor(2.3520e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4280e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 17%|█▋        | 85/500 [00:47<08:13,  1.19s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 84 train_epoch_loss: [2352030941184.0, 2428005253120.0] val_epoch_loss: 2374122864640.0\n",
      "loss: tensor(2.3680e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.4108e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 17%|█▋        | 86/500 [00:48<07:37,  1.11s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 85 train_epoch_loss: [2368039550976.0, 2410840588288.0] val_epoch_loss: 2331640332288.0\n",
      "loss: tensor(2.3790e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.2317e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 17%|█▋        | 87/500 [00:49<08:06,  1.18s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 86 train_epoch_loss: [2378953392128.0, 2231655464960.0] val_epoch_loss: 2383470002176.0\n",
      "loss: tensor(2.4605e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3811e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 18%|█▊        | 88/500 [00:50<07:46,  1.13s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 87 train_epoch_loss: [2460470476800.0, 2381067321344.0] val_epoch_loss: 2343148716032.0\n",
      "loss: tensor(2.4393e+12, grad_fn=<MseLossBackward0>)\n",
      "loss: tensor(2.3690e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 18%|█▊        | 89/500 [00:51<07:16,  1.06s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 88 train_epoch_loss: [2439339573248.0, 2368994279424.0] val_epoch_loss: 2348754665472.0\n",
      "loss: tensor(2.3970e+12, grad_fn=<MseLossBackward0>)\n"
     ]
    }
   ],
   "source": [
    "def _test():\n",
    "    with torch.no_grad():\n",
    "        val_epoch_loss = []\n",
    "        # for i in range(0, len(x_test),batch):# batch是 1 测试用1测试就行\n",
    "        for index, (inputs, targets) in enumerate(TrainDataLoader):\n",
    "            # inputs = x_test[i:i+batch]\n",
    "            # targets = y_test[i:i+batch]\n",
    "            # if len(inputs) == batch:  # 最后一个batch可能不足长度 舍弃\n",
    "            inputs = torch.tensor(inputs).to(device)\n",
    "            targets = torch.tensor(targets).to(device)\n",
    "            inputs = inputs.float()\n",
    "            targets = targets.float()\n",
    "            tgt_in = torch.rand((Batch_Size, 3, 6))\n",
    "            outputs = model(inputs, tgt_in)\n",
    "            loss = criterion(outputs.float(), targets.float())\n",
    "            val_epoch_loss.append(loss.item())\n",
    "    return np.mean(val_epoch_loss)\n",
    "\n",
    "\n",
    "epochs = 500\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)\n",
    "criterion = torch.nn.MSELoss().to(device)\n",
    "\n",
    "val_loss = []\n",
    "train_loss = []\n",
    "best_test_loss = 10000000\n",
    "for epoch in tqdm(range(epochs)):\n",
    "    train_epoch_loss = []\n",
    "    # for i in range(0, len(x_train),batch):# batch是 1\n",
    "    for index, (inputs, targets) in enumerate(TrainDataLoader):\n",
    "        inputs = torch.tensor(inputs).to(device)\n",
    "        targets = torch.tensor(targets).to(device)\n",
    "        inputs = inputs.float()\n",
    "        targets = targets.float()\n",
    "        # print(\"inputs\",inputs.shape) # [batch,3，16]\n",
    "        # print(\"targets\",targets.shape) # targets torch.Size([batch])\n",
    "        tgt_in = torch.rand((Batch_Size, 3,6))  # 输入数据的维度是[batch,序列长度，每个单元的维度]\n",
    "\n",
    "        outputs = model(inputs, tgt_in)\n",
    "        # print(\"outputs.shape:\",outputs.shape) # outputs.shape [batch, 3, 1]\n",
    "        loss = criterion(outputs.float(), targets.float())\n",
    "        print(\"loss:\", loss)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        train_epoch_loss.append(loss.item())\n",
    "    train_loss.append(np.mean(train_epoch_loss))\n",
    "    val_epoch_loss = _test()\n",
    "    val_loss.append(val_epoch_loss)\n",
    "    print(\"epoch:\", epoch, \"train_epoch_loss:\", train_epoch_loss, \"val_epoch_loss:\", val_epoch_loss)\n",
    "    # 保存下来最好的模型：\n",
    "    if val_epoch_loss < best_test_loss:\n",
    "        best_test_loss = val_epoch_loss\n",
    "        best_model = model\n",
    "        print(\"best_test_loss -------------------------------------------------\", best_test_loss)\n",
    "        torch.save(best_model.state_dict(), 'best_Transformer_trainModel.pth')\n",
    "\n",
    "# 画一下loss图\n",
    "fig = plt.figure(facecolor='white', figsize=(10, 7))\n",
    "plt.xlabel('X')\n",
    "plt.ylabel('Y')\n",
    "plt.xlim(xmax=len(val_loss), xmin=0)\n",
    "plt.ylim(ymax=max(max(train_loss), max(val_loss)), ymin=0)\n",
    "# 画两条（0-9）的坐标轴并设置轴标签x，y\n",
    "x1 = [i for i in range(0, len(train_loss), 1)]  # 随机产生300个平均值为2，方差为1.2的浮点数，即第一簇点的x轴坐标\n",
    "y1 = val_loss  # 随机产生300个平均值为2，方差为1.2的浮点数，即第一簇点的y轴坐标\n",
    "x2 = [i for i in range(0, len(train_loss), 1)]\n",
    "y2 = train_loss\n",
    "colors1 = '#00CED4'  # 点的颜色\n",
    "colors2 = '#DC143C'\n",
    "area = np.pi * 4 ** 1  # 点面积\n",
    "# 画散点图\n",
    "plt.scatter(x1, y1, s=area, c=colors1, alpha=0.4, label='val_loss')\n",
    "plt.scatter(x2, y2, s=area, c=colors2, alpha=0.4, label='train_loss')\n",
    "plt.legend()\n",
    "plt.show()\n",
    "\n",
    "# 加载模型预测------\n",
    "model = Transformer(n_encoder_inputs=6, n_decoder_inputs=6, Sequence_length=3).to(device)\n",
    "model.load_state_dict(torch.load('best_Transformer_trainModel.pth'))\n",
    "model.to(device)\n",
    "model.eval()\n",
    "# 在对模型进行评估时，应该配合使用with torch.no_grad() 与 model.eval()：\n",
    "y_pred = []\n",
    "y_true = []\n",
    "with torch.no_grad():\n",
    "    with torch.no_grad():\n",
    "        val_epoch_loss = []\n",
    "        for index, (inputs, targets) in enumerate(TrainDataLoader):\n",
    "            inputs = torch.tensor(inputs).to(device)\n",
    "            targets = torch.tensor(targets).to(device)\n",
    "            inputs = inputs.float()\n",
    "            targets = targets.float()\n",
    "            tgt_in = torch.rand((Batch_Size, 3, 6))\n",
    "            outputs = model(inputs, tgt_in)\n",
    "            outputs = list(outputs.cpu().numpy().reshape([1, -1])[0])  # 转化为1行列数不指定\n",
    "            targets = list(targets.cpu().numpy().reshape([1, -1])[0])\n",
    "            y_pred.extend(outputs)\n",
    "            y_true.extend(targets)\n",
    "\n",
    "\n",
    "y_true = np.array(y_true)\n",
    "y_pred = np.array(y_pred)\n",
    "print(y_true.shape)\n",
    "print(y_pred.shape)\n",
    "# 画折线图显示----\n",
    "dataframe = pd.DataFrame({'pred': y_pred,\n",
    "                                            'true': y_true\n",
    "                          })\n",
    "dataframe.to_csv(\"bijiao2.csv\", index=False, sep=',')\n",
    "\n",
    "print(\"y_pred\", y_pred)\n",
    "print(\"y_true\", y_true)\n",
    "len_ = [i for i in range(len(y_pred[0:1000]))]\n",
    "plt.xlabel('标签', fontsize=8)\n",
    "plt.ylabel('值', fontsize=8)\n",
    "plt.plot(len_, y_true[0:1000], label='y_true', color=\"blue\")\n",
    "plt.plot(len_, y_pred[0:1000], label='y_pred', color=\"yellow\")\n",
    "plt.title(\"真实值预测值画图\")\n",
    "plt.show()"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "is_executing": true
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
    "codemirror_mode": {
     "name": "ipython",
     "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
