{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "5399 5399\n"
     ]
    }
   ],
   "source": [
    "# pip install openpyxl -i https://pypi.tuna.tsinghua.edu.cn/simple/\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from tqdm import tqdm\n",
    "import torch\n",
    "from torch import nn\n",
    "import torch.utils.data as data\n",
    "import torch.nn.functional as F\n",
    "from torch import tensor\n",
    "import torch.utils.data as Data\n",
    "import math\n",
    "from matplotlib import pyplot\n",
    "from datetime import datetime, timedelta\n",
    "from sklearn.model_selection import train_test_split\n",
    "import matplotlib.pyplot as plt\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import math\n",
    "import warnings\n",
    "# NOTE(review): blanket suppression hides deprecation warnings; scope it if possible.\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese axis labels correctly\n",
    "plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly\n",
    "\n",
    "# device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "device = torch.device(\"cpu\")  # CPU forced; swap for the commented line above to use CUDA\n",
    "# print(torch.cuda.is_available())\n",
    "# Load the raw sensor measurements (columns 1/3/7 were noted as candidate targets).\n",
    "# Renamed `data` -> `df`: the original rebinding shadowed the `torch.utils.data as data` import.\n",
    "df = pd.read_csv(\"Test_Data.csv\")\n",
    "# BUG FIX: the original called data.dropna(...) without assigning the result,\n",
    "# so NaN rows were silently kept.\n",
    "df = df.dropna(axis=0, how='any')\n",
    "# Five input feature columns and the single regression target column.\n",
    "data_x = df[\n",
    "    ['2#泵排出压力(bar)', '吸入真空(bar)', '管路平均浓度(%)', '横移速度(m/min)','绞刀电机电流(A)', ]].values\n",
    "data_y = df[[ '管路流速(m/s)']].values\n",
    "\n",
    "# Window the series in groups of four rows: the first three rows form the\n",
    "# input sequence and the fourth row is the prediction target.\n",
    "data_4_x = []\n",
    "data_4_y = []\n",
    "for i in range(0, len(data_y) - 4, 4):\n",
    "    data_4_x.append(data_x[i:i + 3])\n",
    "    # BUG FIX: the original used data_y[i + 4] (first row of the NEXT window);\n",
    "    # the stated intent is the fourth row of THIS window, i.e. index i + 3.\n",
    "    data_4_y.append(data_y[i + 3])\n",
    "print(len(data_4_x), len(data_4_y))\n",
    "x_train, x_test, y_train, y_test = train_test_split(np.array(data_4_x), np.array(data_4_y), test_size=0.2)\n",
    "\n",
    "class DataSet(Data.Dataset):\n",
    "    def __init__(self, data_inputs, data_targets):\n",
    "        self.inputs = torch.FloatTensor(data_inputs)\n",
    "        self.label = torch.FloatTensor(data_targets)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        return self.inputs[index], self.label[index]\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.inputs)\n",
    "\n",
    "\n",
    "Batch_Size = 8  # mini-batch size shared by both loaders\n",
    "# BUG FIX: the original rebound the name `DataSet` to an instance, shadowing the\n",
    "# class defined above; a distinct variable name keeps the class usable.\n",
    "full_dataset = DataSet(np.array(x_train), list(y_train))\n",
    "# Random 80/20 split of the training portion into train/validation subsets.\n",
    "train_size = int(len(full_dataset) * 0.8)\n",
    "test_size = len(full_dataset) - train_size\n",
    "train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size])\n",
    "TrainDataLoader = Data.DataLoader(train_dataset, batch_size=Batch_Size, shuffle=True, drop_last=True)\n",
    "TestDataLoader = Data.DataLoader(test_dataset, batch_size=Batch_Size, shuffle=True, drop_last=True)\n",
    "\n",
    "\n",
    "class PositionalEncoding(nn.Module):\n",
    "    \"\"\"Sinusoidal positional encoding (Vaswani et al., 2017).\n",
    "\n",
    "    NOTE(review): defined but never instantiated in this notebook — the\n",
    "    Transformer below uses learned nn.Embedding positions instead.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, d_model, max_len=5000):\n",
    "        super(PositionalEncoding, self).__init__()\n",
    "        # pe[pos, 2i] = sin(pos / 10000^(2i/d_model)); pe[pos, 2i+1] = cos(...)\n",
    "        pe = torch.zeros(max_len, d_model)\n",
    "        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n",
    "        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n",
    "        pe[:, 0::2] = torch.sin(position * div_term)\n",
    "        pe[:, 1::2] = torch.cos(position * div_term)\n",
    "        # Reshape to (max_len, 1, d_model) so it broadcasts over the batch dim.\n",
    "        pe = pe.unsqueeze(0).transpose(0, 1)\n",
    "        # pe.requires_grad = False\n",
    "        # Buffer: moves with .to(device) but is excluded from gradient updates.\n",
    "        self.register_buffer('pe', pe)\n",
    "\n",
    "    def forward(self, x: torch.Tensor):\n",
    "        # Split x into width-1 chunks along dim 2, add the encoding to each\n",
    "        # chunk, and concatenate back.\n",
    "        # NOTE(review): adding pe slices of shape (len, 1, d_model) to a\n",
    "        # (..., 1)-wide chunk relies on broadcasting and looks suspicious —\n",
    "        # confirm the intended shapes before reusing this class.\n",
    "        chunk = x.chunk(x.size(-1), dim=2)\n",
    "        out = torch.Tensor([]).to(x.device)\n",
    "        for i in range(len(chunk)):\n",
    "            out = torch.cat((out, chunk[i] + self.pe[:chunk[i].size(0), ...]), dim=2)\n",
    "        return out\n",
    "\n",
    "\n",
    "def transformer_generate_tgt_mask(length, device):\n",
    "    mask = torch.tril(torch.ones(length, length, device=device)) == 1\n",
    "    mask = (\n",
    "        mask.float()\n",
    "        .masked_fill(mask == 0, float(\"-inf\"))\n",
    "        .masked_fill(mask == 1, float(0.0))\n",
    "    )\n",
    "    return mask\n",
    "\n",
    "\n",
    "class Transformer(nn.Module):\n",
    "    \"\"\"Standard Transformer encoder-decoder used here for single-value regression.\"\"\"\n",
    "\n",
    "    def __init__(self, n_encoder_inputs, n_decoder_inputs, Sequence_length, d_model=512, dropout=0.1, num_layer=8):\n",
    "        \"\"\"\n",
    "        Initialize.\n",
    "        :param n_encoder_inputs:    feature dimension of the encoder input\n",
    "        :param n_decoder_inputs:    feature dimension of the decoder input (here equal to the encoder's)\n",
    "        :param Sequence_length:     length of the sequence fed to the transformer\n",
    "        :param d_model:             embedding feature dimension\n",
    "        :param dropout:             dropout probability\n",
    "        :param num_layer:           number of Transformer blocks.\n",
    "                                    NOTE(review): accepted but unused — depths are\n",
    "                                    hard-coded to 2 (encoder) and 4 (decoder) below.\n",
    "        \"\"\"\n",
    "        super(Transformer, self).__init__()\n",
    "\n",
    "        # Learned positional embeddings (up to 5000 positions) for each stream.\n",
    "        self.input_pos_embedding = torch.nn.Embedding(5000, embedding_dim=d_model)\n",
    "        self.target_pos_embedding = torch.nn.Embedding(5000, embedding_dim=d_model)\n",
    "\n",
    "        encoder_layer = torch.nn.TransformerEncoderLayer(d_model=d_model, nhead=8, dropout=dropout,\n",
    "                                                         dim_feedforward=4 * d_model)\n",
    "        decoder_layer = torch.nn.TransformerDecoderLayer(d_model=d_model, nhead=8, dropout=dropout,\n",
    "                                                         dim_feedforward=4 * d_model)\n",
    "\n",
    "        self.encoder = torch.nn.TransformerEncoder(encoder_layer, num_layers=2)\n",
    "        self.decoder = torch.nn.TransformerDecoder(decoder_layer, num_layers=4)\n",
    "\n",
    "        # Project raw feature vectors into the d_model embedding space.\n",
    "        self.input_projection = torch.nn.Linear(n_encoder_inputs, d_model)\n",
    "        self.output_projection = torch.nn.Linear(n_decoder_inputs, d_model)\n",
    "\n",
    "        # Per-step head (d_model -> 1), then a head that collapses the sequence\n",
    "        # dimension (Sequence_length -> 1) to one scalar per sample.\n",
    "        self.linear = torch.nn.Linear(d_model, 1)\n",
    "        self.ziji_add_linear = torch.nn.Linear(Sequence_length, 1)\n",
    "\n",
    "    def encode_in(self, src):\n",
    "        # Project, then permute to (seq_len, batch, d_model) — the layout\n",
    "        # nn.TransformerEncoder expects with batch_first=False (the default).\n",
    "        src_start = self.input_projection(src).permute(1, 0, 2)\n",
    "        in_sequence_len, batch_size = src_start.size(0), src_start.size(1)\n",
    "        pos_encoder = (torch.arange(0, in_sequence_len, device=src.device).unsqueeze(0).repeat(batch_size, 1))\n",
    "        pos_encoder = self.input_pos_embedding(pos_encoder).permute(1, 0, 2)\n",
    "        src = src_start + pos_encoder\n",
    "        # Residual connection around the whole encoder stack.\n",
    "        src = self.encoder(src) + src_start\n",
    "        return src\n",
    "\n",
    "    def decode_out(self, tgt, memory):\n",
    "        tgt_start = self.output_projection(tgt).permute(1, 0, 2)\n",
    "        out_sequence_len, batch_size = tgt_start.size(0), tgt_start.size(1)\n",
    "        pos_decoder = (torch.arange(0, out_sequence_len, device=tgt.device).unsqueeze(0).repeat(batch_size, 1))\n",
    "        pos_decoder = self.target_pos_embedding(pos_decoder).permute(1, 0, 2)\n",
    "        tgt = tgt_start + pos_decoder\n",
    "        # Causal mask: each decoder position attends only to earlier positions.\n",
    "        tgt_mask = transformer_generate_tgt_mask(out_sequence_len, tgt.device)\n",
    "        out = self.decoder(tgt=tgt, memory=memory, tgt_mask=tgt_mask) + tgt_start\n",
    "        out = out.permute(1, 0, 2)  # [batch_size, seq_len, d_model]\n",
    "        out = self.linear(out)  # [batch_size, seq_len, 1]\n",
    "        return out\n",
    "\n",
    "    def forward(self, src, target_in):\n",
    "        src = self.encode_in(src)\n",
    "        out = self.decode_out(tgt=target_in, memory=src)\n",
    "        # print(\"out.shape:\",out.shape)# torch.Size([batch, 3, 1]) # original decoder output\n",
    "        # The [batch, seq, 1] output above suits many tasks; here it is reduced\n",
    "        # further for single-value regression:\n",
    "        # squeeze to [batch, seq], then a linear layer maps it to [batch, 1].\n",
    "        out = out.squeeze(2)\n",
    "        out = self.ziji_add_linear(out)\n",
    "        return out\n",
    "\n",
    "\n",
    "model = Transformer(n_encoder_inputs=5, n_decoder_inputs=5, Sequence_length=3).to(device)  # 3 is the Sequence_length, i.e. the length of the transformer's input sequence\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "start_time": "2023-08-16T19:43:15.484461Z",
     "end_time": "2023-08-16T19:43:17.631058Z"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [
    {
     "data": {
      "text/plain": "Transformer(\n  (input_pos_embedding): Embedding(5000, 512)\n  (target_pos_embedding): Embedding(5000, 512)\n  (encoder): TransformerEncoder(\n    (layers): ModuleList(\n      (0-1): 2 x TransformerEncoderLayer(\n        (self_attn): MultiheadAttention(\n          (out_proj): NonDynamicallyQuantizableLinear(in_features=512, out_features=512, bias=True)\n        )\n        (linear1): Linear(in_features=512, out_features=2048, bias=True)\n        (dropout): Dropout(p=0.1, inplace=False)\n        (linear2): Linear(in_features=2048, out_features=512, bias=True)\n        (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n        (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n        (dropout1): Dropout(p=0.1, inplace=False)\n        (dropout2): Dropout(p=0.1, inplace=False)\n      )\n    )\n  )\n  (decoder): TransformerDecoder(\n    (layers): ModuleList(\n      (0-3): 4 x TransformerDecoderLayer(\n        (self_attn): MultiheadAttention(\n          (out_proj): NonDynamicallyQuantizableLinear(in_features=512, out_features=512, bias=True)\n        )\n        (multihead_attn): MultiheadAttention(\n          (out_proj): NonDynamicallyQuantizableLinear(in_features=512, out_features=512, bias=True)\n        )\n        (linear1): Linear(in_features=512, out_features=2048, bias=True)\n        (dropout): Dropout(p=0.1, inplace=False)\n        (linear2): Linear(in_features=2048, out_features=512, bias=True)\n        (norm1): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n        (norm2): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n        (norm3): LayerNorm((512,), eps=1e-05, elementwise_affine=True)\n        (dropout1): Dropout(p=0.1, inplace=False)\n        (dropout2): Dropout(p=0.1, inplace=False)\n        (dropout3): Dropout(p=0.1, inplace=False)\n      )\n    )\n  )\n  (input_projection): Linear(in_features=5, out_features=512, bias=True)\n  (output_projection): Linear(in_features=5, out_features=512, 
bias=True)\n  (linear): Linear(in_features=512, out_features=1, bias=True)\n  (ziji_add_linear): Linear(in_features=3, out_features=1, bias=True)\n)"
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Display the model architecture (rich repr of the top-level module).\n",
    "model"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "start_time": "2023-08-16T19:43:17.634465Z",
     "end_time": "2023-08-16T19:43:17.646582Z"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def _test():\n",
    "    \"\"\"Return the mean MSE loss over the held-out validation loader.\n",
    "\n",
    "    Reads the globals `model`, `criterion`, `TestDataLoader`, `Batch_Size`\n",
    "    and `device`; gradients are disabled throughout.\n",
    "    \"\"\"\n",
    "    with torch.no_grad():\n",
    "        val_epoch_loss = []\n",
    "        # BUG FIX: the original iterated TrainDataLoader, so the \"validation\"\n",
    "        # loss was measured on training batches (the commented-out original\n",
    "        # code shows x_test was intended).\n",
    "        for index, (inputs, targets) in enumerate(TestDataLoader):\n",
    "            # Batches are already tensors; cast/move instead of torch.tensor(...)\n",
    "            # which warns when handed an existing tensor.\n",
    "            inputs = inputs.float().to(device)\n",
    "            targets = targets.float().to(device)\n",
    "            # Random decoder input, mirroring the training loop.\n",
    "            tgt_in = torch.rand((Batch_Size, 3, 5)).to(device)\n",
    "            outputs = model(inputs, tgt_in)\n",
    "            loss = criterion(outputs.float(), targets.float())\n",
    "            val_epoch_loss.append(loss.item())\n",
    "    return np.mean(val_epoch_loss)\n",
    "\n",
    "\n",
    "epochs = 2\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)\n",
    "criterion = torch.nn.MSELoss().to(device)\n",
    "\n",
    "val_loss = []\n",
    "train_loss = []\n",
    "best_test_loss = float(\"inf\")  # best (lowest) validation loss seen so far\n",
    "for epoch in tqdm(range(epochs)):\n",
    "    train_epoch_loss = []\n",
    "    for index, (inputs, targets) in enumerate(TrainDataLoader):\n",
    "        # Batches are already tensors; cast/move instead of torch.tensor(...).\n",
    "        inputs = inputs.float().to(device)\n",
    "        targets = targets.float().to(device)\n",
    "        # Decoder input: [batch, sequence length, per-step feature dimension].\n",
    "        # NOTE(review): it is random noise — confirm this is the intended design.\n",
    "        tgt_in = torch.rand((Batch_Size, 3, 5)).to(device)\n",
    "        outputs = model(inputs, tgt_in)  # [batch, 1]\n",
    "        loss = criterion(outputs.float(), targets.float())\n",
    "        # BUG FIX: gradients must be cleared every step; the original never\n",
    "        # called zero_grad(), so gradients accumulated across iterations.\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        train_epoch_loss.append(loss.item())\n",
    "    train_loss.append(np.mean(train_epoch_loss))\n",
    "    val_epoch_loss = _test()\n",
    "    val_loss.append(val_epoch_loss)\n",
    "    # Print the per-epoch mean instead of flooding the output with every batch.\n",
    "    print(\"epoch:\", epoch, \"train_epoch_loss:\", np.mean(train_epoch_loss), \"val_epoch_loss:\", val_epoch_loss)\n",
    "    # Persist the weights whenever the validation loss improves.\n",
    "    if val_epoch_loss < best_test_loss:\n",
    "        best_test_loss = val_epoch_loss\n",
    "        print(\"best_test_loss -------------------------------------------------\", best_test_loss)\n",
    "        torch.save(model.state_dict(), 'best_Transformer_trainModel.pth')\n",
    "\n",
    "# Scatter plot of per-epoch training vs. validation loss.\n",
    "fig = plt.figure(facecolor='white', figsize=(10, 7))\n",
    "plt.xlabel('X')\n",
    "plt.ylabel('Y')\n",
    "plt.xlim(xmax=len(val_loss), xmin=0)\n",
    "plt.ylim(ymax=max(max(train_loss), max(val_loss)), ymin=0)\n",
    "epoch_axis = list(range(len(train_loss)))  # one point per epoch\n",
    "point_area = np.pi * 4  # marker area in points^2\n",
    "plt.scatter(epoch_axis, val_loss, s=point_area, c='#00CED4', alpha=0.4, label='val_loss')\n",
    "plt.scatter(epoch_axis, train_loss, s=point_area, c='#DC143C', alpha=0.4, label='train_loss')\n",
    "plt.legend()\n",
    "plt.show()\n",
    "\n",
    "# ---- Load the best checkpoint and evaluate it ----\n",
    "model = Transformer(n_encoder_inputs=5, n_decoder_inputs=5, Sequence_length=3).to(device)\n",
    "model.load_state_dict(torch.load('best_Transformer_trainModel.pth'))\n",
    "model.eval()  # disable dropout; pair with torch.no_grad() below\n",
    "y_pred = []\n",
    "y_true = []\n",
    "# BUG FIX: the original nested two identical `with torch.no_grad():` blocks and\n",
    "# evaluated on TrainDataLoader; a single block over TestDataLoader is correct.\n",
    "with torch.no_grad():\n",
    "    for index, (inputs, targets) in enumerate(TestDataLoader):\n",
    "        inputs = inputs.float().to(device)\n",
    "        targets = targets.float().to(device)\n",
    "        # NOTE(review): the decoder input is random even at inference time, so\n",
    "        # predictions vary between runs — confirm this is intended.\n",
    "        tgt_in = torch.rand((Batch_Size, 3, 5)).to(device)\n",
    "        outputs = model(inputs, tgt_in)\n",
    "        # Flatten the [batch, 1] outputs/targets into plain 1-D lists.\n",
    "        y_pred.extend(outputs.cpu().numpy().reshape(-1).tolist())\n",
    "        y_true.extend(targets.cpu().numpy().reshape(-1).tolist())\n",
    "\n",
    "y_true = np.array(y_true)\n",
    "y_pred = np.array(y_pred)\n",
    "print(y_true.shape)\n",
    "print(y_pred.shape)\n",
    "# Save predictions next to ground truth for offline comparison.\n",
    "dataframe = pd.DataFrame({'pred': y_pred, 'true': y_true})\n",
    "dataframe.to_csv(\"bijiao2.csv\", index=False, sep=',')\n",
    "\n",
    "print(\"y_pred\", y_pred)\n",
    "print(\"y_true\", y_true)\n",
    "# Line plot of the first 1000 points: ground truth vs. prediction.\n",
    "len_ = [i for i in range(len(y_pred[0:1000]))]\n",
    "plt.xlabel('标签', fontsize=8)\n",
    "plt.ylabel('值', fontsize=8)\n",
    "plt.plot(len_, y_true[0:1000], label='y_true', color=\"blue\")\n",
    "plt.plot(len_, y_pred[0:1000], label='y_pred', color=\"yellow\")\n",
    "plt.legend()  # FIX: labels were set but never displayed without a legend\n",
    "plt.title(\"真实值预测值画图\")\n",
    "plt.show()"
   ],
   "metadata": {
    "collapsed": false
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
