{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "本文将用paddle、pytorch、tensorflow2三种框架实现LSTM的单层、双层、双向双层三种形式，并将整个过程生成的模型转换成ONNX"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 1、Paddle 生成LSTM\n",
    "\n",
     "整个过程包括模型定义、导出、转ONNX、优化ONNX。最后生成的ONNX模型即我们最终需要的模型，可以查看其结构图。这部分实际包括了paddle生成模型及转ONNX的完整过程，更多流程细节请参考博客"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/tl/anaconda3/envs/pdle/lib/python3.7/site-packages/paddle/vision/transforms/functional_pil.py:36: DeprecationWarning: NEAREST is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.NEAREST or Dither.NONE instead.\n",
      "  'nearest': Image.NEAREST,\n",
      "/home/tl/anaconda3/envs/pdle/lib/python3.7/site-packages/paddle/vision/transforms/functional_pil.py:37: DeprecationWarning: BILINEAR is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BILINEAR instead.\n",
      "  'bilinear': Image.BILINEAR,\n",
      "/home/tl/anaconda3/envs/pdle/lib/python3.7/site-packages/paddle/vision/transforms/functional_pil.py:38: DeprecationWarning: BICUBIC is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BICUBIC instead.\n",
      "  'bicubic': Image.BICUBIC,\n",
      "/home/tl/anaconda3/envs/pdle/lib/python3.7/site-packages/paddle/vision/transforms/functional_pil.py:39: DeprecationWarning: BOX is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.BOX instead.\n",
      "  'box': Image.BOX,\n",
      "/home/tl/anaconda3/envs/pdle/lib/python3.7/site-packages/paddle/vision/transforms/functional_pil.py:40: DeprecationWarning: LANCZOS is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.LANCZOS instead.\n",
      "  'lanczos': Image.LANCZOS,\n",
      "/home/tl/anaconda3/envs/pdle/lib/python3.7/site-packages/paddle/vision/transforms/functional_pil.py:41: DeprecationWarning: HAMMING is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.HAMMING instead.\n",
      "  'hamming': Image.HAMMING\n",
      "/home/tl/anaconda3/envs/pdle/lib/python3.7/site-packages/onnx/mapping.py:27: DeprecationWarning: `np.object` is a deprecated alias for the builtin `object`. To silence this warning, use `object` by itself. Doing this will not modify any behavior and is safe. \n",
      "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
      "  int(TensorProto.STRING): np.dtype(np.object)\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import sys\n",
    "import paddle\n",
    "from paddle import nn\n",
    "import numpy as np\n",
    "from onnxsim import simplify\n",
    "import onnxoptimizer\n",
    "import onnx\n",
    "import onnxruntime"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.1 time_major=False \n",
    "与pytorch的batch_first=True是相同的功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2022-08-03 09:39:41 [INFO]\tONNX model generated is valid.\n",
      "2022-08-03 09:39:41 [INFO]\tONNX model saved in paddle/One_LSTM_batch.onnx\n",
      "2022-08-03 09:39:41 [INFO]\tONNX model generated is valid.\n",
      "2022-08-03 09:39:41 [INFO]\tONNX model saved in paddle/Two_LSTM_batch.onnx\n",
      "2022-08-03 09:39:41 [INFO]\tONNX model generated is valid.\n",
      "2022-08-03 09:39:41 [INFO]\tONNX model saved in paddle/Bi_Two_LSTM_batch.onnx\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/tl/anaconda3/envs/pdle/lib/python3.7/site-packages/onnx/numpy_helper.py:93: DeprecationWarning: `np.object` is a deprecated alias for the builtin `object`. To silence this warning, use `object` by itself. Doing this will not modify any behavior and is safe. \n",
      "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
      "  if arr.dtype == np.object:\n"
     ]
    }
   ],
   "source": [
    "class One_LSTM_batch(nn.Layer):\n",
    "    def __init__(self,in_channels=3,out_channels=4):\n",
    "        super().__init__()\n",
    "        self.rnn = nn.LSTM(in_channels,out_channels,time_major=False)\n",
    "    def forward(self,x):\n",
    "        # b,c,h,w =x.shape\n",
    "        # x1 = paddle.reshape(x,[b,c,h*w])\n",
    "        h0 = paddle.zeros((1,1,4))\n",
    "        c0 = paddle.zeros((1,1,4))\n",
    "        x2 = paddle.squeeze(x,2)\n",
    "        x3 = paddle.transpose(x2,(0,2,1))\n",
    "        out,_ = self.rnn(x3,(h0,c0))\n",
    "        return out # shape 1,6,4\n",
    "\n",
    "model_path = \"paddle/One_LSTM_batch\"\n",
    "model = One_LSTM_batch()\n",
    "model.eval()\n",
    "infer_shape = [1,3,1,6]\n",
    "input_spec=paddle.static.InputSpec(shape=infer_shape, dtype=\"float32\")\n",
    "paddle.onnx.export(model,model_path,input_spec=[input_spec],opset_version=11,enable_onnx_checker=True)\n",
    "\n",
    "model = onnx.load(model_path+'.onnx')\n",
    "model_sim ,check = simplify(model)\n",
    "assert check,\"simplified onnx model could not be validated\"\n",
    "save_path = model_path+\"_sim.onnx\"\n",
    "onnx.save(model_sim,save_path)\n",
    "\n",
    "model = onnx.load(save_path)\n",
    "if model.ir_version<4:\n",
    "    print(\"Model with ir_version below 4 requires to in clude initializer in graph input\")\n",
    "    exit()\n",
    "inputs = model.graph.input\n",
    "name_to_input = {}\n",
    "for input in inputs:\n",
    "    name_to_input[input.name]=input\n",
    "for initializer in model.graph.initializer:\n",
    "    if initializer.name in name_to_input:\n",
    "        inputs.remove(name_to_input[initializer.name])\n",
    "passes=[\"extract_constant_to_initializer\",\"eliminate_unused_initializer\"]\n",
    "optimized_model = onnxoptimizer.optimize(model,passes)\n",
    "save_path = model_path+\"_sim_opt.onnx\"\n",
    "onnx.save(optimized_model,save_path)\n",
    "\n",
    "\n",
    "\n",
    "class Two_LSTM_batch(nn.Layer):\n",
    "    def __init__(self,in_channels=3,out_channels=4):\n",
    "        super().__init__()\n",
    "        self.rnn = nn.LSTM(in_channels,out_channels,time_major=False,num_layers=2)\n",
    "    def forward(self,x):\n",
    "        # b,c,h,w =x.shape\n",
    "        # x1 = paddle.reshape(x,[b,c,h*w])\n",
    "        h0 = paddle.zeros((2,1,4))\n",
    "        c0 = paddle.zeros((2,1,4))\n",
    "        x2 = paddle.squeeze(x,2)\n",
    "        x3 = paddle.transpose(x2,(0,2,1))\n",
    "        out,_ = self.rnn(x3,(h0,c0))\n",
    "        return out # shape 1,6,4\n",
    "\n",
    "model_path = \"paddle/Two_LSTM_batch\"\n",
    "model = Two_LSTM_batch()\n",
    "model.eval()\n",
    "infer_shape = [1,3,1,6]\n",
    "input_spec=paddle.static.InputSpec(shape=infer_shape, dtype=\"float32\")\n",
    "paddle.onnx.export(model,model_path,input_spec=[input_spec],opset_version=11,enable_onnx_checker=True)\n",
    "\n",
    "model = onnx.load(model_path+'.onnx')\n",
    "model_sim ,check = simplify(model)\n",
    "assert check,\"simplified onnx model could not be validated\"\n",
    "save_path = model_path+\"_sim.onnx\"\n",
    "onnx.save(model_sim,save_path)\n",
    "\n",
    "model = onnx.load(save_path)\n",
    "if model.ir_version<4:\n",
    "    print(\"Model with ir_version below 4 requires to in clude initializer in graph input\")\n",
    "    exit()\n",
    "inputs = model.graph.input\n",
    "name_to_input = {}\n",
    "for input in inputs:\n",
    "    name_to_input[input.name]=input\n",
    "for initializer in model.graph.initializer:\n",
    "    if initializer.name in name_to_input:\n",
    "        inputs.remove(name_to_input[initializer.name])\n",
    "passes=[\"extract_constant_to_initializer\",\"eliminate_unused_initializer\"]\n",
    "optimized_model = onnxoptimizer.optimize(model,passes)\n",
    "save_path = model_path+\"_sim_opt.onnx\"\n",
    "onnx.save(optimized_model,save_path)\n",
    "\n",
    "\n",
    "\n",
    "class Bi_Two_LSTM_batch(nn.Layer):\n",
    "    def __init__(self,in_channels=3,out_channels=4):\n",
    "        super().__init__()\n",
    "        self.rnn = nn.LSTM(in_channels,out_channels,time_major=False,direction=\"bidirect\",num_layers=2)\n",
    "    def forward(self,x):\n",
    "        # b,c,h,w =x.shape\n",
    "        # x1 = paddle.reshape(x,[b,c,h*w])\n",
    "        h0 = paddle.zeros((4,1,4))\n",
    "        c0 = paddle.zeros((4,1,4))\n",
    "        x2 = paddle.squeeze(x,2)\n",
    "        x3 = paddle.transpose(x2,(0,2,1))\n",
    "        out,_ = self.rnn(x3,(h0,c0))\n",
    "        return out # shape 1,6,4\n",
    "\n",
    "model_path = \"paddle/Bi_Two_LSTM_batch\"\n",
    "model = Bi_Two_LSTM_batch()\n",
    "model.eval()\n",
    "infer_shape = [1,3,1,6]\n",
    "input_spec=paddle.static.InputSpec(shape=infer_shape, dtype=\"float32\")\n",
    "paddle.onnx.export(model,model_path,input_spec=[input_spec],opset_version=11,enable_onnx_checker=True)\n",
    "\n",
    "model = onnx.load(model_path+'.onnx')\n",
    "model_sim ,check = simplify(model)\n",
    "assert check,\"simplified onnx model could not be validated\"\n",
    "save_path = model_path+\"_sim.onnx\"\n",
    "onnx.save(model_sim,save_path)\n",
    "\n",
    "model = onnx.load(save_path)\n",
    "if model.ir_version<4:\n",
    "    print(\"Model with ir_version below 4 requires to in clude initializer in graph input\")\n",
    "    exit()\n",
    "inputs = model.graph.input\n",
    "name_to_input = {}\n",
    "for input in inputs:\n",
    "    name_to_input[input.name]=input\n",
    "for initializer in model.graph.initializer:\n",
    "    if initializer.name in name_to_input:\n",
    "        inputs.remove(name_to_input[initializer.name])\n",
    "passes=[\"extract_constant_to_initializer\",\"eliminate_unused_initializer\"]\n",
    "optimized_model = onnxoptimizer.optimize(model,passes)\n",
    "save_path = model_path+\"_sim_opt.onnx\"\n",
    "onnx.save(optimized_model,save_path)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.2 time_major=True\n",
    "与pytorch的batch_first=False相同"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2022-08-03 09:40:26 [INFO]\tONNX model generated is valid.\n",
      "2022-08-03 09:40:26 [INFO]\tONNX model saved in paddle/One_LSTM_time.onnx\n",
      "2022-08-03 09:40:26 [INFO]\tONNX model generated is valid.\n",
      "2022-08-03 09:40:26 [INFO]\tONNX model saved in paddle/Two_LSTM_time.onnx\n",
      "2022-08-03 09:40:26 [INFO]\tONNX model generated is valid.\n",
      "2022-08-03 09:40:26 [INFO]\tONNX model saved in paddle/Bi_Two_LSTM_time.onnx\n"
     ]
    }
   ],
   "source": [
    "class One_LSTM_time(nn.Layer):\n",
    "    def __init__(self,in_channels=3,out_channels=4):\n",
    "        super().__init__()\n",
    "        self.rnn = nn.LSTM(in_channels,out_channels,time_major=True)\n",
    "    def forward(self,x):\n",
    "        # b,c,h,w =x.shape\n",
    "        # x1 = paddle.reshape(x,[b,c,h*w])\n",
    "        h0 = paddle.zeros((1,1,4))\n",
    "        c0 = paddle.zeros((1,1,4))\n",
    "        x2 = paddle.squeeze(x,2)\n",
    "        x3 = paddle.transpose(x2,(2,0,1))\n",
    "        out,_ = self.rnn(x3,(h0,c0))\n",
    "        return out # shape 1,6,4\n",
    "\n",
    "model_path = \"paddle/One_LSTM_time\"\n",
    "model = One_LSTM_time()\n",
    "model.eval()\n",
    "infer_shape = [1,3,1,6]\n",
    "input_spec=paddle.static.InputSpec(shape=infer_shape, dtype=\"float32\")\n",
    "paddle.onnx.export(model,model_path,input_spec=[input_spec],opset_version=11,enable_onnx_checker=True)\n",
    "\n",
    "model = onnx.load(model_path+'.onnx')\n",
    "model_sim ,check = simplify(model)\n",
    "assert check,\"simplified onnx model could not be validated\"\n",
    "save_path = model_path+\"_sim.onnx\"\n",
    "onnx.save(model_sim,save_path)\n",
    "\n",
    "model = onnx.load(save_path)\n",
    "if model.ir_version<4:\n",
    "    print(\"Model with ir_version below 4 requires to in clude initializer in graph input\")\n",
    "    exit()\n",
    "inputs = model.graph.input\n",
    "name_to_input = {}\n",
    "for input in inputs:\n",
    "    name_to_input[input.name]=input\n",
    "for initializer in model.graph.initializer:\n",
    "    if initializer.name in name_to_input:\n",
    "        inputs.remove(name_to_input[initializer.name])\n",
    "passes=[\"extract_constant_to_initializer\",\"eliminate_unused_initializer\"]\n",
    "optimized_model = onnxoptimizer.optimize(model,passes)\n",
    "save_path = model_path+\"_sim_opt.onnx\"\n",
    "onnx.save(optimized_model,save_path)\n",
    "\n",
    "\n",
    "class Two_LSTM_time(nn.Layer):\n",
    "    def __init__(self,in_channels=3,out_channels=4):\n",
    "        super().__init__()\n",
    "        self.rnn = nn.LSTM(in_channels,out_channels,time_major=True,num_layers=2)\n",
    "    def forward(self,x):\n",
    "        # b,c,h,w =x.shape\n",
    "        # x1 = paddle.reshape(x,[b,c,h*w])\n",
    "        h0 = paddle.zeros((2,1,4))\n",
    "        c0 = paddle.zeros((2,1,4))\n",
    "        x2 = paddle.squeeze(x,2)\n",
    "        x3 = paddle.transpose(x2,(2,0,1))\n",
    "        out,_ = self.rnn(x3,(h0,c0))\n",
    "        return out # shape 1,6,4\n",
    "\n",
    "model_path = \"paddle/Two_LSTM_time\"\n",
    "model = Two_LSTM_time()\n",
    "model.eval()\n",
    "infer_shape = [1,3,1,6]\n",
    "input_spec=paddle.static.InputSpec(shape=infer_shape, dtype=\"float32\")\n",
    "paddle.onnx.export(model,model_path,input_spec=[input_spec],opset_version=11,enable_onnx_checker=True)\n",
    "\n",
    "model = onnx.load(model_path+'.onnx')\n",
    "model_sim ,check = simplify(model)\n",
    "assert check,\"simplified onnx model could not be validated\"\n",
    "save_path = model_path+\"_sim.onnx\"\n",
    "onnx.save(model_sim,save_path)\n",
    "\n",
    "model = onnx.load(save_path)\n",
    "if model.ir_version<4:\n",
    "    print(\"Model with ir_version below 4 requires to in clude initializer in graph input\")\n",
    "    exit()\n",
    "inputs = model.graph.input\n",
    "name_to_input = {}\n",
    "for input in inputs:\n",
    "    name_to_input[input.name]=input\n",
    "for initializer in model.graph.initializer:\n",
    "    if initializer.name in name_to_input:\n",
    "        inputs.remove(name_to_input[initializer.name])\n",
    "passes=[\"extract_constant_to_initializer\",\"eliminate_unused_initializer\"]\n",
    "optimized_model = onnxoptimizer.optimize(model,passes)\n",
    "save_path = model_path+\"_sim_opt.onnx\"\n",
    "onnx.save(optimized_model,save_path)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "class Bi_Two_LSTM_time(nn.Layer):\n",
    "    def __init__(self,in_channels=3,out_channels=4):\n",
    "        super().__init__()\n",
    "        self.rnn = nn.LSTM(in_channels,out_channels,time_major=True,direction=\"bidirect\",num_layers=2)\n",
    "    def forward(self,x):\n",
    "        # b,c,h,w =x.shape\n",
    "        # x1 = paddle.reshape(x,[b,c,h*w])\n",
    "        h0 = paddle.zeros((4,1,4))\n",
    "        c0 = paddle.zeros((4,1,4))\n",
    "        x2 = paddle.squeeze(x,2)\n",
    "        x3 = paddle.transpose(x2,(2,0,1))\n",
    "        out,_ = self.rnn(x3,(h0,c0))\n",
    "        return out # shape 1,6,4\n",
    "\n",
    "model_path = \"paddle/Bi_Two_LSTM_time\"\n",
    "model = Bi_Two_LSTM_time()\n",
    "model.eval()\n",
    "infer_shape = [1,3,1,6]\n",
    "input_spec=paddle.static.InputSpec(shape=infer_shape, dtype=\"float32\")\n",
    "paddle.onnx.export(model,model_path,input_spec=[input_spec],opset_version=11,enable_onnx_checker=True)\n",
    "\n",
    "model = onnx.load(model_path+'.onnx')\n",
    "model_sim ,check = simplify(model)\n",
    "assert check,\"simplified onnx model could not be validated\"\n",
    "save_path = model_path+\"_sim.onnx\"\n",
    "onnx.save(model_sim,save_path)\n",
    "\n",
    "model = onnx.load(save_path)\n",
    "if model.ir_version<4:\n",
    "    print(\"Model with ir_version below 4 requires to in clude initializer in graph input\")\n",
    "    exit()\n",
    "inputs = model.graph.input\n",
    "name_to_input = {}\n",
    "for input in inputs:\n",
    "    name_to_input[input.name]=input\n",
    "for initializer in model.graph.initializer:\n",
    "    if initializer.name in name_to_input:\n",
    "        inputs.remove(name_to_input[initializer.name])\n",
    "passes=[\"extract_constant_to_initializer\",\"eliminate_unused_initializer\"]\n",
    "optimized_model = onnxoptimizer.optimize(model,passes)\n",
    "save_path = model_path+\"_sim_opt.onnx\"\n",
    "onnx.save(optimized_model,save_path)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.3 sequence_lens\n",
     "对于paddle lstm还有一个参数 sequence_lens，这一点与pytorch不同。sequence_length用于指定每个样本的有效时间步长度：当time steps超过sequence_length时，超出的部分会被截断，当作填充元素处理。下面仅以单层LSTM、time_major=True做个小试验"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "W0803 15:08:57.771442 24847 device_context.cc:447] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 11.4, Runtime API Version: 11.2\n",
      "W0803 15:08:57.774729 24847 device_context.cc:465] device: 0, cuDNN Version: 8.1.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2022-08-03 15:09:00 [INFO]\tONNX model generated is valid.\n",
      "2022-08-03 15:09:00 [INFO]\tONNX model saved in paddle/Seq_One_LSTM_time.onnx\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/tl/anaconda3/envs/pdle/lib/python3.7/site-packages/paddle2onnx/constant/dtypes.py:47: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here.\n",
      "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
      "  np.bool: core.VarDesc.VarType.BOOL,\n",
      "/home/tl/anaconda3/envs/pdle/lib/python3.7/site-packages/paddle2onnx/constant/dtypes.py:48: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n",
      "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
      "  core.VarDesc.VarType.FP32: np.float,\n",
      "/home/tl/anaconda3/envs/pdle/lib/python3.7/site-packages/paddle2onnx/constant/dtypes.py:53: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here.\n",
      "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
      "  core.VarDesc.VarType.BOOL: np.bool\n",
      "/home/tl/anaconda3/envs/pdle/lib/python3.7/site-packages/paddle/fluid/layers/utils.py:77: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working\n",
      "  return (isinstance(seq, collections.Sequence) and\n",
      "/home/tl/anaconda3/envs/pdle/lib/python3.7/site-packages/onnx/helper.py:343: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working\n",
      "  is_iterable = isinstance(value, collections.Iterable)\n",
      "/home/tl/anaconda3/envs/pdle/lib/python3.7/site-packages/onnx/numpy_helper.py:93: DeprecationWarning: `np.object` is a deprecated alias for the builtin `object`. To silence this warning, use `object` by itself. Doing this will not modify any behavior and is safe. \n",
      "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
      "  if arr.dtype == np.object:\n"
     ]
    }
   ],
   "source": [
    "class Seq_One_LSTM_time(nn.Layer):\n",
    "    def __init__(self,in_channels=3,out_channels=4):\n",
    "        super().__init__()\n",
    "        self.rnn = nn.LSTM(in_channels,out_channels,time_major=True)\n",
    "    def forward(self,x):\n",
    "        # b,c,h,w =x.shape\n",
    "        # x1 = paddle.reshape(x,[b,c,h*w])\n",
    "        h0 = paddle.zeros((1,1,4))\n",
    "        c0 = paddle.zeros((1,1,4))\n",
    "        sequence_lens = paddle.to_tensor([6]) # same shape to b\n",
    "        x2 = paddle.squeeze(x,2)\n",
    "        x3 = paddle.transpose(x2,(2,0,1))\n",
    "        out,_ = self.rnn(inputs=x3,initial_states=(h0,c0),sequence_length=sequence_lens)\n",
    "        return out # shape 1,6,4\n",
    "\n",
    "model_path = \"paddle/Seq_One_LSTM_time\"\n",
    "model = Seq_One_LSTM_time()\n",
    "model.eval()\n",
    "infer_shape = [1,3,1,6]\n",
    "input_spec=paddle.static.InputSpec(shape=infer_shape, dtype=\"float32\")\n",
    "paddle.onnx.export(model,model_path,input_spec=[input_spec],opset_version=11,enable_onnx_checker=True)\n",
    "\n",
    "model = onnx.load(model_path+'.onnx')\n",
    "model_sim ,check = simplify(model)\n",
    "assert check,\"simplified onnx model could not be validated\"\n",
    "save_path = model_path+\"_sim.onnx\"\n",
    "onnx.save(model_sim,save_path)\n",
    "\n",
    "model = onnx.load(save_path)\n",
    "if model.ir_version<4:\n",
    "    print(\"Model with ir_version below 4 requires to in clude initializer in graph input\")\n",
    "    exit()\n",
    "inputs = model.graph.input\n",
    "name_to_input = {}\n",
    "for input in inputs:\n",
    "    name_to_input[input.name]=input\n",
    "for initializer in model.graph.initializer:\n",
    "    if initializer.name in name_to_input:\n",
    "        inputs.remove(name_to_input[initializer.name])\n",
    "passes=[\"extract_constant_to_initializer\",\"eliminate_unused_initializer\"]\n",
    "optimized_model = onnxoptimizer.optimize(model,passes)\n",
    "save_path = model_path+\"_sim_opt.onnx\"\n",
    "onnx.save(optimized_model,save_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.4 无初始状态\n",
    "这一点是指我们在调用lstm的时候不会手动传入初始状态h0和c0,但内部会自动赋值初始状态为全0，pytorch也是这个原理，但是Onnx的结构图是不一样的，pytorch在不传入初始状态时的结构和paddle手动传入的结果是一样的，这个后边再说，综合对比所有的结构就可以看出差异"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2022-08-03 15:06:50 [INFO]\tONNX model generated is valid.\n",
      "2022-08-03 15:06:50 [INFO]\tONNX model saved in paddle/Ini_One_LSTM_time.onnx\n"
     ]
    }
   ],
   "source": [
    "class Ini_One_LSTM_time(nn.Layer):\n",
    "    def __init__(self,in_channels=3,out_channels=4):\n",
    "        super().__init__()\n",
    "        self.rnn = nn.LSTM(in_channels,out_channels,time_major=True)\n",
    "    def forward(self,x):\n",
    "        # b,c,h,w =x.shape\n",
    "        # x1 = paddle.reshape(x,[b,c,h*w])\n",
    "        x2 = paddle.squeeze(x,2)\n",
    "        x3 = paddle.transpose(x2,(2,0,1))\n",
    "        out,_ = self.rnn(x3)\n",
    "        return out # shape 1,6,4\n",
    "\n",
    "model_path = \"paddle/Ini_One_LSTM_time\"\n",
    "model = Ini_One_LSTM_time()\n",
    "model.eval()\n",
    "infer_shape = [1,3,1,6]\n",
    "input_spec=paddle.static.InputSpec(shape=infer_shape, dtype=\"float32\")\n",
    "paddle.onnx.export(model,model_path,input_spec=[input_spec],opset_version=11,enable_onnx_checker=True)\n",
    "\n",
    "model = onnx.load(model_path+'.onnx')\n",
    "model_sim ,check = simplify(model)\n",
    "assert check,\"simplified onnx model could not be validated\"\n",
    "save_path = model_path+\"_sim.onnx\"\n",
    "onnx.save(model_sim,save_path)\n",
    "\n",
    "model = onnx.load(save_path)\n",
    "if model.ir_version<4:\n",
    "    print(\"Model with ir_version below 4 requires to in clude initializer in graph input\")\n",
    "    exit()\n",
    "inputs = model.graph.input\n",
    "name_to_input = {}\n",
    "for input in inputs:\n",
    "    name_to_input[input.name]=input\n",
    "for initializer in model.graph.initializer:\n",
    "    if initializer.name in name_to_input:\n",
    "        inputs.remove(name_to_input[initializer.name])\n",
    "passes=[\"extract_constant_to_initializer\",\"eliminate_unused_initializer\"]\n",
    "optimized_model = onnxoptimizer.optimize(model,passes)\n",
    "save_path = model_path+\"_sim_opt.onnx\"\n",
    "onnx.save(optimized_model,save_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.5 查看生成的onnx模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Bi_Two_LSTM_batch.onnx', 'Bi_Two_LSTM_batch_sim.onnx', 'Bi_Two_LSTM_batch_sim_opt.onnx', 'Bi_Two_LSTM_time.onnx', 'Bi_Two_LSTM_time_sim.onnx', 'Bi_Two_LSTM_time_sim_opt.onnx', 'Ini_One_LSTM_time.onnx', 'Ini_One_LSTM_time_sim.onnx', 'Ini_One_LSTM_time_sim_opt.onnx', 'One_LSTM_batch.onnx', 'One_LSTM_batch_sim.onnx', 'One_LSTM_batch_sim_opt.onnx', 'One_LSTM_time.onnx', 'One_LSTM_time_sim.onnx', 'One_LSTM_time_sim_opt.onnx', 'Seq_One_LSTM_time.onnx', 'Seq_One_LSTM_time_sim.onnx', 'Seq_One_LSTM_time_sim_opt.onnx', 'Two_LSTM_batch.onnx', 'Two_LSTM_batch_sim.onnx', 'Two_LSTM_batch_sim_opt.onnx', 'Two_LSTM_time.onnx', 'Two_LSTM_time_sim.onnx', 'Two_LSTM_time_sim_opt.onnx']\n"
     ]
    }
   ],
   "source": [
    "paddle_onnx = sorted(os.listdir('paddle'))\n",
    "paddle_onnx_paths = sorted([os.path.join('paddle',path) for path in paddle_onnx])\n",
    "print(paddle_onnx)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "16K\tpaddle/Bi_Two_LSTM_batch.onnx\n",
      "8.0K\tpaddle/Bi_Two_LSTM_batch_sim.onnx\n",
      "8.0K\tpaddle/Bi_Two_LSTM_batch_sim_opt.onnx\n",
      "16K\tpaddle/Bi_Two_LSTM_time.onnx\n",
      "8.0K\tpaddle/Bi_Two_LSTM_time_sim.onnx\n",
      "8.0K\tpaddle/Bi_Two_LSTM_time_sim_opt.onnx\n",
      "8.0K\tpaddle/Ini_One_LSTM_time.onnx\n",
      "4.0K\tpaddle/Ini_One_LSTM_time_sim.onnx\n",
      "4.0K\tpaddle/Ini_One_LSTM_time_sim_opt.onnx\n",
      "8.0K\tpaddle/One_LSTM_batch.onnx\n",
      "4.0K\tpaddle/One_LSTM_batch_sim.onnx\n",
      "4.0K\tpaddle/One_LSTM_batch_sim_opt.onnx\n",
      "8.0K\tpaddle/One_LSTM_time.onnx\n",
      "4.0K\tpaddle/One_LSTM_time_sim.onnx\n",
      "4.0K\tpaddle/One_LSTM_time_sim_opt.onnx\n",
      "8.0K\tpaddle/Seq_One_LSTM_time.onnx\n",
      "4.0K\tpaddle/Seq_One_LSTM_time_sim.onnx\n",
      "4.0K\tpaddle/Seq_One_LSTM_time_sim_opt.onnx\n",
      "12K\tpaddle/Two_LSTM_batch.onnx\n",
      "4.0K\tpaddle/Two_LSTM_batch_sim.onnx\n",
      "4.0K\tpaddle/Two_LSTM_batch_sim_opt.onnx\n",
      "12K\tpaddle/Two_LSTM_time.onnx\n",
      "4.0K\tpaddle/Two_LSTM_time_sim.onnx\n",
      "4.0K\tpaddle/Two_LSTM_time_sim_opt.onnx\n"
     ]
    }
   ],
   "source": [
     "# Inspect the on-disk size of each exported model\n",
     "! du -sh paddle/*"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "加载onnx模型并推理，对比推理结果，两两一对"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "def onnx_infer(model_path,data):\n",
     "    \"\"\"Run a single-input, single-output ONNX model on ``data``.\n",
     "\n",
     "    Args:\n",
     "        model_path (str): path to the .onnx file to load.\n",
     "        data (numpy.ndarray): input tensor bound to the model's first input;\n",
     "            dtype/shape must match what the model expects (float32 for the\n",
     "            models exported above -- confirm for other models).\n",
     "\n",
     "    Returns:\n",
     "        numpy.ndarray: the model's first output.\n",
     "    \"\"\"\n",
     "    onnx_session=onnxruntime.InferenceSession(model_path)\n",
     "    input_name = onnx_session.get_inputs()[0].name\n",
     "    output_name = onnx_session.get_outputs()[0].name\n",
     "    result = onnx_session.run([output_name],{input_name:data})\n",
     "    return result[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2022-08-03 17:03:35.322915835 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_4 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.322938940 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_5 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.322945528 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_8 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.322951708 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_12 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.322957288 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_13 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.322963180 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_44 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.322968668 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_13 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.322974083 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_14 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.322979304 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_17 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.322984535 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_26 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.322990949 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_27 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.322996580 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_87 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.392730468 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_4 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.392757571 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_5 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.392764542 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_8 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.392770328 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_12 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.392775836 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_13 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.392781517 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_44 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.392786892 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_13 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.392792184 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_14 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.392797446 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_17 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.392802640 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_26 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.392808940 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_27 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.392814520 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_87 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.482187472 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_0 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.482211960 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_0 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.482218895 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_4 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.482224351 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_1 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.482229628 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_6 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.482235089 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_7 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.482240251 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_10 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.482245348 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_44 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.482250361 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_45 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.482255449 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_46 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.482267806 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_47 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.482273280 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_48 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.482278188 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_49 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.482283068 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_50 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.544810887 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_4 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.544836135 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_5 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.544842815 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_8 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.544848451 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_12 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.544853781 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_13 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.544859396 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_44 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Bi_Two_LSTM_batch.onnx', 'Bi_Two_LSTM_batch_sim.onnx', 'Bi_Two_LSTM_batch_sim_opt.onnx'] have same results\n",
      "['Bi_Two_LSTM_time.onnx', 'Bi_Two_LSTM_time_sim.onnx', 'Bi_Two_LSTM_time_sim_opt.onnx'] have same results\n",
      "['Ini_One_LSTM_time.onnx', 'Ini_One_LSTM_time_sim.onnx', 'Ini_One_LSTM_time_sim_opt.onnx'] have same results\n",
      "['One_LSTM_batch.onnx', 'One_LSTM_batch_sim.onnx', 'One_LSTM_batch_sim_opt.onnx'] have same results\n",
      "['One_LSTM_time.onnx', 'One_LSTM_time_sim.onnx', 'One_LSTM_time_sim_opt.onnx'] have same results\n",
      "['Seq_One_LSTM_time.onnx', 'Seq_One_LSTM_time_sim.onnx', 'Seq_One_LSTM_time_sim_opt.onnx'] have same results\n",
      "['Two_LSTM_batch.onnx', 'Two_LSTM_batch_sim.onnx', 'Two_LSTM_batch_sim_opt.onnx'] have same results\n",
      "['Two_LSTM_time.onnx', 'Two_LSTM_time_sim.onnx', 'Two_LSTM_time_sim_opt.onnx'] have same results\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2022-08-03 17:03:35.627142152 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_4 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.627166154 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_5 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.627172672 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_8 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.627178399 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_12 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.627184004 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_13 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.627189596 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_44 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.680342507 [W:onnxruntime:, graph.cc:3559 CleanUnusedInitializersAndNodeArgs] Removing initializer 'assign_0.tmp_0'. It is not used by any node and should be removed from the model.\n",
      "2022-08-03 17:03:35.684640360 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_4 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.684662133 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_5 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.684672193 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_8 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.684680922 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_12 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.684688933 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_13 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.684697379 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_44 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.710238396 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_4 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.710278276 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_5 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.710292209 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_8 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.710303902 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_12 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.710315317 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_13 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.710326920 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_44 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.710337914 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_13 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.710348837 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_14 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.710359584 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_17 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.710370872 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_26 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.710383823 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_27 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.710395467 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_87 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.810340323 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_4 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.810366323 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_5 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.810374143 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_8 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.810380074 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_12 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.810385473 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_13 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.810391336 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_44 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.810396558 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_13 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.810401858 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_14 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.810407006 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Concat_17 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.810412186 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_26 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.810417317 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Slice_27 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
      "2022-08-03 17:03:35.810424180 [W:onnxruntime:, graph.cc:1271 Graph] Initializer Constant_87 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n"
     ]
    }
   ],
   "source": [
    "# Compare the paddle-exported onnx variants. paddle_onnx_paths is sorted, so the\n",
    "# files come in groups of three related exports of the same model that should all\n",
    "# produce the same result for the same input.\n",
    "test_data = np.random.random((1,3,1,6)).astype(np.float32) # batch,channel,height,width\n",
    "results={}\n",
    "\n",
    "for i,onnx_path in enumerate(paddle_onnx_paths):\n",
    "\n",
    "    result = onnx_infer(onnx_path,test_data)\n",
    "    results[os.path.basename(onnx_path)]=result\n",
    "\n",
    "    if i%3 ==2: # a complete group of three results has been collected\n",
    "        try:\n",
    "            values = list(results.values())\n",
    "            np.testing.assert_allclose(values[0],values[1],rtol=1e-5)\n",
    "            np.testing.assert_allclose(values[2],values[1],rtol=1e-5)\n",
    "            print(f\"{list(results.keys())} have same results\")\n",
    "        except AssertionError: # only swallow comparison failures, not real errors\n",
    "            print(f\"{list(results.keys())} have different results\")\n",
    "        finally:\n",
    "            results={}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "以上onnx模型的推理可以看到在1e-5（十万分之一，6位有效数字）的容差下，结果是完全一样的。关于那么多warning，是sim后缀的模型产生的，原始模型和opt结尾的模型没有这个问题"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "以下部分是生成的以opt结尾的onnx模型的结构图："
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 2 pytorch 生成LSTM"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "由于pytorch在导出onnx时，参数keep_initializers_as_inputs=False,所以只需要执行sim操作即可，否则要和paddle一样，多执行一个操作"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2.1 batch_first=True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import sys\n",
    "sys.path.append('/home/tl/anaconda3/envs/pdle/lib/python3.7/site-packages')\n",
    "import torch\n",
    "from torch import nn\n",
    "import numpy as np\n",
    "from onnxsim import simplify\n",
    "import onnxoptimizer\n",
    "import onnx\n",
    "import onnxruntime"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "output shape: torch.Size([1, 6, 4])\n",
      "export onnx to: torch/One_lstm_batch.onnx\n",
      "output shape: torch.Size([1, 6, 4])\n",
      "export onnx to: torch/Two_lstm_batch.onnx\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/tl/anaconda3/envs/ptch/lib/python3.7/site-packages/torch/onnx/symbolic_opset9.py:2192: UserWarning: Exporting a model to ONNX with a batch_size other than 1, with a variable length with LSTM can cause an error when running the ONNX model with a different batch size. Make sure to save the model with a batch size of 1, or define the initial states (h0/c0) as inputs of the model. \n",
      "  \"or define the initial states (h0/c0) as inputs of the model. \")\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "output shape: torch.Size([1, 6, 8])\n",
      "export onnx to: torch/Bi_Two_lstm_batch.onnx\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n"
     ]
    }
   ],
   "source": [
    "def export_and_simplify(model, onnx_export_path, input_shape=(1,3,1,6)):\n",
    "    \"\"\"Run a dummy forward pass, export `model` to onnx, then save a simplified *_sim copy.\"\"\"\n",
    "    model.to('cpu')\n",
    "    model.eval()\n",
    "    dummy_input = torch.randn(input_shape) # batch,channel,height,width\n",
    "    output = model(dummy_input)\n",
    "    print(\"output shape:\",output.shape)\n",
    "    # keep_initializers_as_inputs=False keeps the weights out of the graph inputs,\n",
    "    # so only the sim step is needed afterwards (no extra optimize pass as with paddle).\n",
    "    torch.onnx.export(model, (dummy_input,), onnx_export_path,export_params=True,verbose=False, opset_version=11,do_constant_folding=True,keep_initializers_as_inputs=False,input_names=[\"input\"], output_names=[\"output\"])\n",
    "    print(\"export onnx to:\",onnx_export_path)\n",
    "    onnx_model = onnx.load(onnx_export_path)\n",
    "    model_sim ,check = simplify(onnx_model)\n",
    "    assert check,\"simplified onnx model could not be validated\"\n",
    "    save_path = os.path.splitext(onnx_export_path)[0]+\"_sim.onnx\"\n",
    "    onnx.save(model_sim,save_path)\n",
    "\n",
    "class One_LSTM_batch(nn.Module):\n",
    "    \"\"\"Single-layer LSTM over batch_first (B,T,C) sequences.\"\"\"\n",
    "    def __init__(self,in_channels=3,out_channels=4):\n",
    "        super().__init__()\n",
    "        self.rnn = nn.LSTM(in_channels,out_channels,batch_first=True)\n",
    "    def forward(self,x):\n",
    "        x2 = torch.squeeze(x,2)        # (B,C,1,W) -> (B,C,W)\n",
    "        x3 = torch.permute(x2,(0,2,1)) # (B,C,W) -> (B,W,C)\n",
    "        out,_ = self.rnn(x3)\n",
    "        return out # shape 1,6,4\n",
    "\n",
    "class Two_LSTM_batch(nn.Module):\n",
    "    \"\"\"Two stacked LSTM layers over batch_first sequences.\"\"\"\n",
    "    def __init__(self,in_channels=3,out_channels=4):\n",
    "        super().__init__()\n",
    "        self.rnn = nn.LSTM(in_channels,out_channels,batch_first=True,num_layers=2)\n",
    "    def forward(self,x):\n",
    "        x2 = torch.squeeze(x,2)\n",
    "        x3 = torch.permute(x2,(0,2,1))\n",
    "        out,_ = self.rnn(x3)\n",
    "        return out # shape 1,6,4\n",
    "\n",
    "class Bi_Two_LSTM_batch(nn.Module):\n",
    "    \"\"\"Bidirectional two-layer LSTM over batch_first sequences.\"\"\"\n",
    "    def __init__(self,in_channels=3,out_channels=4):\n",
    "        super().__init__()\n",
    "        self.rnn = nn.LSTM(in_channels,out_channels,batch_first=True,num_layers=2,bidirectional=True)\n",
    "    def forward(self,x):\n",
    "        x2 = torch.squeeze(x,2)\n",
    "        x3 = torch.permute(x2,(0,2,1))\n",
    "        out,_ = self.rnn(x3)\n",
    "        return out # shape 1,6,8 (forward and backward outputs concatenated)\n",
    "\n",
    "# The three variants only differ in their LSTM configuration, so export in a loop\n",
    "# instead of repeating the same export code three times.\n",
    "for model_cls,path in [(One_LSTM_batch,\"torch/One_lstm_batch.onnx\"),\n",
    "                       (Two_LSTM_batch,\"torch/Two_lstm_batch.onnx\"),\n",
    "                       (Bi_Two_LSTM_batch,\"torch/Bi_Two_lstm_batch.onnx\")]:\n",
    "    export_and_simplify(model_cls(),path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "导出时有一些warning，提示最好把初始状态(h0/c0)也作为模型输入手动传入"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2.2 batch_first=False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "output shape: torch.Size([6, 1, 4])\n",
      "export onnx to: torch/One_lstm_time.onnx\n",
      "output shape: torch.Size([6, 1, 4])\n",
      "export onnx to: torch/Two_lstm_time.onnx\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/tl/anaconda3/envs/ptch/lib/python3.7/site-packages/torch/onnx/symbolic_opset9.py:2192: UserWarning: Exporting a model to ONNX with a batch_size other than 1, with a variable length with LSTM can cause an error when running the ONNX model with a different batch size. Make sure to save the model with a batch size of 1, or define the initial states (h0/c0) as inputs of the model. \n",
      "  \"or define the initial states (h0/c0) as inputs of the model. \")\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "output shape: torch.Size([6, 1, 8])\n",
      "export onnx to: torch/Bi_Two_lstm_time.onnx\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n",
      "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.\n"
     ]
    }
   ],
   "source": [
    "def export_and_simplify(model, onnx_export_path, input_shape=(1,3,1,6)):\n",
    "    \"\"\"Run a dummy forward pass, export `model` to onnx, then save a simplified *_sim copy.\"\"\"\n",
    "    model.to('cpu')\n",
    "    model.eval()\n",
    "    dummy_input = torch.randn(input_shape) # batch,channel,height,width\n",
    "    output = model(dummy_input)\n",
    "    print(\"output shape:\",output.shape)\n",
    "    # keep_initializers_as_inputs=False added for consistency with section 2.1:\n",
    "    # it keeps the weights out of the graph inputs of the exported model.\n",
    "    torch.onnx.export(model, (dummy_input,), onnx_export_path,export_params=True,verbose=False, opset_version=11,do_constant_folding=True,keep_initializers_as_inputs=False,input_names=[\"input\"], output_names=[\"output\"])\n",
    "    print(\"export onnx to:\",onnx_export_path)\n",
    "    onnx_model = onnx.load(onnx_export_path)\n",
    "    model_sim ,check = simplify(onnx_model)\n",
    "    assert check,\"simplified onnx model could not be validated\"\n",
    "    save_path = os.path.splitext(onnx_export_path)[0]+\"_sim.onnx\"\n",
    "    onnx.save(model_sim,save_path)\n",
    "\n",
    "class One_LSTM_time(nn.Module):\n",
    "    \"\"\"Single-layer LSTM over time-major (T,B,C) sequences (batch_first=False).\"\"\"\n",
    "    def __init__(self,in_channels=3,out_channels=4):\n",
    "        super().__init__()\n",
    "        self.rnn = nn.LSTM(in_channels,out_channels,batch_first=False)\n",
    "    def forward(self,x):\n",
    "        x2 = torch.squeeze(x,2)        # (B,C,1,W) -> (B,C,W)\n",
    "        x3 = torch.permute(x2,(2,0,1)) # (B,C,W) -> (W,B,C)\n",
    "        out,_ = self.rnn(x3)\n",
    "        return out # shape 6,1,4\n",
    "\n",
    "class Two_LSTM_time(nn.Module):\n",
    "    \"\"\"Two stacked LSTM layers over time-major sequences.\"\"\"\n",
    "    def __init__(self,in_channels=3,out_channels=4):\n",
    "        super().__init__()\n",
    "        self.rnn = nn.LSTM(in_channels,out_channels,batch_first=False,num_layers=2)\n",
    "    def forward(self,x):\n",
    "        x2 = torch.squeeze(x,2)\n",
    "        x3 = torch.permute(x2,(2,0,1))\n",
    "        out,_ = self.rnn(x3)\n",
    "        return out # shape 6,1,4\n",
    "\n",
    "class Bi_Two_LSTM_time(nn.Module):\n",
    "    \"\"\"Bidirectional two-layer LSTM over time-major sequences.\"\"\"\n",
    "    def __init__(self,in_channels=3,out_channels=4):\n",
    "        super().__init__()\n",
    "        self.rnn = nn.LSTM(in_channels,out_channels,batch_first=False,num_layers=2,bidirectional=True)\n",
    "    def forward(self,x):\n",
    "        x2 = torch.squeeze(x,2)\n",
    "        x3 = torch.permute(x2,(2,0,1))\n",
    "        out,_ = self.rnn(x3)\n",
    "        return out # shape 6,1,8 (forward and backward outputs concatenated)\n",
    "\n",
    "# The three variants only differ in their LSTM configuration, so export in a loop\n",
    "# instead of repeating the same export code three times.\n",
    "for model_cls,path in [(One_LSTM_time,\"torch/One_lstm_time.onnx\"),\n",
    "                       (Two_LSTM_time,\"torch/Two_lstm_time.onnx\"),\n",
    "                       (Bi_Two_LSTM_time,\"torch/Bi_Two_lstm_time.onnx\")]:\n",
    "    export_and_simplify(model_cls(),path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2.3 查看生成的onnx模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Bi_Two_lstm_batch.onnx', 'Bi_Two_lstm_batch_sim.onnx', 'Bi_Two_lstm_time.onnx', 'Bi_Two_lstm_time_sim.onnx', 'One_lstm_batch.onnx', 'One_lstm_batch_sim.onnx', 'One_lstm_time.onnx', 'One_lstm_time_sim.onnx', 'Two_lstm_batch.onnx', 'Two_lstm_batch_sim.onnx', 'Two_lstm_time.onnx', 'Two_lstm_time_sim.onnx']\n"
     ]
    }
   ],
   "source": [
    "# Collect the exported onnx files; alphabetical order puts every model right\n",
    "# next to its *_sim twin, which the comparison cell below relies on.\n",
    "pytorch_onnx = sorted(os.listdir('torch'))\n",
    "pytorch_onnx_paths = [os.path.join('torch',name) for name in pytorch_onnx]\n",
    "print(pytorch_onnx)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "8.0K\ttorch/Bi_Two_lstm_batch.onnx\n",
      "8.0K\ttorch/Bi_Two_lstm_batch_sim.onnx\n",
      "8.0K\ttorch/Bi_Two_lstm_time.onnx\n",
      "8.0K\ttorch/Bi_Two_lstm_time_sim.onnx\n",
      "4.0K\ttorch/One_lstm_batch.onnx\n",
      "4.0K\ttorch/One_lstm_batch_sim.onnx\n",
      "4.0K\ttorch/One_lstm_time.onnx\n",
      "4.0K\ttorch/One_lstm_time_sim.onnx\n",
      "8.0K\ttorch/Two_lstm_batch.onnx\n",
      "4.0K\ttorch/Two_lstm_batch_sim.onnx\n",
      "8.0K\ttorch/Two_lstm_time.onnx\n",
      "4.0K\ttorch/Two_lstm_time_sim.onnx\n"
     ]
    }
   ],
   "source": [
    "! du -sh torch/*"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def onnx_infer(model_path,data):\n",
    "    \"\"\"Run a single-input, single-output onnx model once with onnxruntime.\n",
    "\n",
    "    Args:\n",
    "        model_path: path of the .onnx file to load.\n",
    "        data: numpy array fed to the model's (only) input.\n",
    "\n",
    "    Returns:\n",
    "        The model's first output as a numpy array.\n",
    "    \"\"\"\n",
    "    onnx_session=onnxruntime.InferenceSession(model_path)\n",
    "    input_name = onnx_session.get_inputs()[0].name\n",
    "    output_name = onnx_session.get_outputs()[0].name\n",
    "    result = onnx_session.run([output_name],{input_name:data})\n",
    "    return result[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Bi_Two_lstm_batch.onnx', 'Bi_Two_lstm_batch_sim.onnx'] have same results\n",
      "['Bi_Two_lstm_time.onnx', 'Bi_Two_lstm_time_sim.onnx'] have same results\n",
      "['One_lstm_batch.onnx', 'One_lstm_batch_sim.onnx'] have same results\n",
      "['One_lstm_time.onnx', 'One_lstm_time_sim.onnx'] have same results\n",
      "['Two_lstm_batch.onnx', 'Two_lstm_batch_sim.onnx'] have same results\n",
      "['Two_lstm_time.onnx', 'Two_lstm_time_sim.onnx'] have same results\n"
     ]
    }
   ],
   "source": [
    "# Compare each exported model with its *_sim counterpart; the sorted path list\n",
    "# pairs them up, so every two consecutive files form one comparison group.\n",
    "test_data = np.random.random((1,3,1,6)).astype(np.float32) # batch,channel,height,width\n",
    "results={}\n",
    "\n",
    "for i,onnx_path in enumerate(pytorch_onnx_paths):\n",
    "\n",
    "    result = onnx_infer(onnx_path,test_data)\n",
    "    results[os.path.basename(onnx_path)]=result\n",
    "\n",
    "    if i%2 ==1: # a complete pair has been collected\n",
    "        try:\n",
    "            values = list(results.values())\n",
    "            np.testing.assert_allclose(values[0],values[1],rtol=1e-7)\n",
    "            print(f\"{list(results.keys())} have same results\")\n",
    "        except AssertionError: # only swallow comparison failures, not real errors\n",
    "            print(f\"{list(results.keys())} have different results\")\n",
    "        finally:\n",
    "            results={}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "看起来pytorch转换成onnx在1e-7的精度下结果完全相同，相比paddle精度还是高一点"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 3 Tensorflow2 生成LSTM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2022-08-09 16:10:42.419611: I tensorflow/core/util/util.cc:169] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import tensorflow as tf\n",
    "import onnx\n",
    "import tf2onnx\n",
    "from onnxsim import simplify\n",
    "import onnxruntime\n",
    "import numpy as np\n",
    "from tensorflow.keras import layers as nn\n",
    "#only use cpu\n",
    "devices = tf.config.list_physical_devices(\"CPU\")\n",
    "tf.config.set_visible_devices(devices)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "因为paddle和pytorch默认是返回每一步的output的，而tensorflow则可以指定返回最后一步还是全部，由return_sequences来决定，为了保持一致，这里设置为True。\n",
    "tensorflow的初始输入格式是B,H,W,C，以此为基础进行构建"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.1 time_major=False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 103,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n",
      "WARNING:absl:Found untraced functions such as lstm_cell_191_layer_call_fn, lstm_cell_191_layer_call_and_return_conditional_losses while saving (showing 2 of 2). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/One_LSTM_batch/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/One_LSTM_batch/assets\n",
      "WARNING:absl:<keras.layers.recurrent.LSTMCell object at 0x7f3ac810f8e0> has the same name 'LSTMCell' as a built-in Keras object. Consider renaming <class 'keras.layers.recurrent.LSTMCell'> to avoid naming conflicts when loading with `tf.keras.models.load_model`. If renaming is not possible, pass the object in the `custom_objects` parameter of the load function.\n",
      "2022-08-16 16:19:10.687955: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-16 16:19:10.688047: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-16 16:19:10.706293: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:10.707378: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:10.708381: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:10.709447: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:10.719888: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:1164] Optimization results for grappler item: graph_to_optimize\n",
      "  function_optimizer: Graph size after: 87 nodes (60), 98 edges (68), time = 2.078ms.\n",
      "  function_optimizer: Graph size after: 87 nodes (0), 98 edges (0), time = 1.092ms.\n",
      "Optimization results for grappler item: while_cond_1209930\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.004ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0ms.\n",
      "Optimization results for grappler item: while_body_1209931\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0ms.\n",
      "\n",
      "2022-08-16 16:19:10.780564: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-16 16:19:10.780636: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-16 16:19:10.798557: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:10.799642: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:10.800648: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:10.801728: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:10.812277: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:1164] Optimization results for grappler item: graph_to_optimize\n",
      "  constant_folding: Graph size after: 30 nodes (-23), 30 edges (-27), time = 1.377ms.\n",
      "  function_optimizer: Graph size after: 30 nodes (0), 30 edges (0), time = 0.6ms.\n",
      "  constant_folding: Graph size after: 30 nodes (0), 30 edges (0), time = 0.554ms.\n",
      "  function_optimizer: Graph size after: 30 nodes (0), 30 edges (0), time = 0.592ms.\n",
      "Optimization results for grappler item: while_cond_1209930\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.272ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.182ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_1209931\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.766ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.646ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n",
      "WARNING:absl:Found untraced functions such as lstm_cell_192_layer_call_fn, lstm_cell_192_layer_call_and_return_conditional_losses, lstm_cell_193_layer_call_fn, lstm_cell_193_layer_call_and_return_conditional_losses while saving (showing 4 of 4). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/Two_LSTM_batch/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/Two_LSTM_batch/assets\n",
      "WARNING:absl:<keras.layers.recurrent.LSTMCell object at 0x7f39dc6fd2e0> has the same name 'LSTMCell' as a built-in Keras object. Consider renaming <class 'keras.layers.recurrent.LSTMCell'> to avoid naming conflicts when loading with `tf.keras.models.load_model`. If renaming is not possible, pass the object in the `custom_objects` parameter of the load function.\n",
      "WARNING:absl:<keras.layers.recurrent.LSTMCell object at 0x7f39bc2a58e0> has the same name 'LSTMCell' as a built-in Keras object. Consider renaming <class 'keras.layers.recurrent.LSTMCell'> to avoid naming conflicts when loading with `tf.keras.models.load_model`. If renaming is not possible, pass the object in the `custom_objects` parameter of the load function.\n",
      "2022-08-16 16:19:16.650941: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-16 16:19:16.651052: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-16 16:19:16.669042: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:16.670119: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:16.671115: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:16.672189: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:16.689047: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:1164] Optimization results for grappler item: graph_to_optimize\n",
      "  function_optimizer: Graph size after: 170 nodes (120), 193 edges (136), time = 3.802ms.\n",
      "  function_optimizer: Graph size after: 170 nodes (0), 193 edges (0), time = 2.068ms.\n",
      "Optimization results for grappler item: while_cond_1221920\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.005ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_1221921\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.003ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0ms.\n",
      "Optimization results for grappler item: while_body_1221499\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_cond_1221498\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0ms.\n",
      "\n",
      "2022-08-16 16:19:16.788220: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-16 16:19:16.788291: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-16 16:19:16.812080: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:16.813181: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:16.814187: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:16.815259: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:16.832669: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:1164] Optimization results for grappler item: graph_to_optimize\n",
      "  constant_folding: Graph size after: 56 nodes (-46), 57 edges (-54), time = 2.381ms.\n",
      "  function_optimizer: Graph size after: 56 nodes (0), 57 edges (0), time = 1.085ms.\n",
      "  constant_folding: Graph size after: 56 nodes (0), 57 edges (0), time = 0.99ms.\n",
      "  function_optimizer: Graph size after: 56 nodes (0), 57 edges (0), time = 1.093ms.\n",
      "Optimization results for grappler item: while_cond_1221920\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.284ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.182ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_1221921\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.779ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.65ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_1221499\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.776ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.645ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "Optimization results for grappler item: while_cond_1221498\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.271ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.182ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n",
      "WARNING:absl:Found untraced functions such as lstm_cell_195_layer_call_fn, lstm_cell_195_layer_call_and_return_conditional_losses, lstm_cell_196_layer_call_fn, lstm_cell_196_layer_call_and_return_conditional_losses, lstm_cell_198_layer_call_fn while saving (showing 5 of 8). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/Bi_Two_LSTM_batch/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/Bi_Two_LSTM_batch/assets\n",
      "WARNING:absl:<keras.layers.recurrent.LSTMCell object at 0x7f39dc655250> has the same name 'LSTMCell' as a built-in Keras object. Consider renaming <class 'keras.layers.recurrent.LSTMCell'> to avoid naming conflicts when loading with `tf.keras.models.load_model`. If renaming is not possible, pass the object in the `custom_objects` parameter of the load function.\n",
      "WARNING:absl:<keras.layers.recurrent.LSTMCell object at 0x7f39f4480d30> has the same name 'LSTMCell' as a built-in Keras object. Consider renaming <class 'keras.layers.recurrent.LSTMCell'> to avoid naming conflicts when loading with `tf.keras.models.load_model`. If renaming is not possible, pass the object in the `custom_objects` parameter of the load function.\n",
      "WARNING:absl:<keras.layers.recurrent.LSTMCell object at 0x7f39f44a2cd0> has the same name 'LSTMCell' as a built-in Keras object. Consider renaming <class 'keras.layers.recurrent.LSTMCell'> to avoid naming conflicts when loading with `tf.keras.models.load_model`. If renaming is not possible, pass the object in the `custom_objects` parameter of the load function.\n",
      "WARNING:absl:<keras.layers.recurrent.LSTMCell object at 0x7f39c4533760> has the same name 'LSTMCell' as a built-in Keras object. Consider renaming <class 'keras.layers.recurrent.LSTMCell'> to avoid naming conflicts when loading with `tf.keras.models.load_model`. If renaming is not possible, pass the object in the `custom_objects` parameter of the load function.\n",
      "2022-08-16 16:19:31.962845: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-16 16:19:31.962974: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-16 16:19:31.980983: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:31.982057: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:31.983048: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:31.984113: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:32.016037: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:1164] Optimization results for grappler item: graph_to_optimize\n",
      "  function_optimizer: Graph size after: 348 nodes (244), 397 edges (276), time = 8.373ms.\n",
      "  function_optimizer: Graph size after: 348 nodes (0), 397 edges (0), time = 4.505ms.\n",
      "Optimization results for grappler item: while_cond_1256176\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.004ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_1255323\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.003ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_cond_1255322\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_1256601\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.003ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_cond_1255746\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_1256177\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_1255747\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_cond_1256600\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "\n",
      "2022-08-16 16:19:32.194241: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-16 16:19:32.194314: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-16 16:19:32.212231: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:32.213308: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:32.214300: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:32.215369: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n",
      "2022-08-16 16:19:32.248503: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:1164] Optimization results for grappler item: graph_to_optimize\n",
      "  constant_folding: Graph size after: 120 nodes (-92), 125 edges (-108), time = 4.738ms.\n",
      "  function_optimizer: Graph size after: 120 nodes (0), 125 edges (0), time = 2.234ms.\n",
      "  constant_folding: Graph size after: 120 nodes (0), 125 edges (0), time = 2.266ms.\n",
      "  function_optimizer: Graph size after: 120 nodes (0), 125 edges (0), time = 2.25ms.\n",
      "Optimization results for grappler item: while_cond_1256176\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.274ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.181ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "Optimization results for grappler item: while_body_1255323\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.79ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.647ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_cond_1255322\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.266ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.18ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_1256601\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.777ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.654ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_cond_1255746\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.276ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.181ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_1256177\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.78ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.651ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_1255747\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.772ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.649ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_cond_1256600\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.277ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.181ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0ms.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "def One_LSTM_batch():\n",
    "    input = nn.Input(shape=[1,6,3],batch_size=1,name=\"input\")\n",
    "    middle = tf.squeeze(input,axis=1)\n",
    "    output = nn.LSTM(4,time_major=False,return_sequences=True,name='one')(middle)\n",
    "    model = tf.keras.models.Model(input,output,name=\"One_LSTM_batch\")\n",
    "    return model\n",
    "model = One_LSTM_batch()\n",
    "#tf.keras.utils.plot_model(model,to_file=f'tensorflow/{model.name}.png',show_shapes=True,show_layer_names=True,show_dtype=True)\n",
    "model.save(\"tensorflow/One_LSTM_batch\")\n",
    "spec = (tf.TensorSpec((1,1,6,3),tf.float32,name=\"input\"),)\n",
    "output_path=\"tensorflow/\"+model.name+'.onnx'\n",
    "model_proto,_=tf2onnx.convert.from_keras(model,input_signature=spec,opset=11,output_path=output_path)\n",
    "output_names=[n.name for n in model_proto.graph.output]\n",
    "model = onnx.load(output_path)\n",
    "model_sim ,check = simplify(model)\n",
    "assert check,\"simplified onnx model could not be validated\"\n",
    "save_path = output_path.split('.')[0]+\"_sim.onnx\"\n",
    "onnx.save(model_sim,save_path)\n",
    "\n",
    "def Two_LSTM_batch():\n",
    "    input = nn.Input(shape=[1,6,3],batch_size=1,name=\"input\")\n",
    "    middle = tf.squeeze(input,axis=1)\n",
    "    output1 = nn.LSTM(4,time_major=False,return_sequences=True,name='one')(middle)\n",
    "    output = nn.LSTM(4,time_major=False,return_sequences=True,name='two')(output1)\n",
    "    model = tf.keras.models.Model(input,output,name=\"Two_LSTM_batch\")\n",
    "    return model\n",
    "model = Two_LSTM_batch()\n",
    "#tf.keras.utils.plot_model(model,to_file=f'tensorflow/{model.name}.png',show_shapes=True,show_layer_names=True,show_dtype=True)\n",
    "model.save(\"tensorflow/Two_LSTM_batch\")\n",
    "spec = (tf.TensorSpec((1,1,6,3),tf.float32,name=\"input\"),)\n",
    "output_path=\"tensorflow/\"+model.name+'.onnx'\n",
    "model_proto,_=tf2onnx.convert.from_keras(model,input_signature=spec,opset=11,output_path=output_path)\n",
    "output_names=[n.name for n in model_proto.graph.output]\n",
    "model = onnx.load(output_path)\n",
    "model_sim ,check = simplify(model)\n",
    "assert check,\"simplified onnx model could not be validated\"\n",
    "save_path = output_path.split('.')[0]+\"_sim.onnx\"\n",
    "onnx.save(model_sim,save_path)\n",
    "\n",
    "def Bi_Two_LSTM_batch():\n",
    "    input = nn.Input(shape=[1,6,3],batch_size=1,name=\"input\")\n",
    "    middle = tf.squeeze(input,axis=1)\n",
    "    output1 = nn.Bidirectional(nn.LSTM(4,time_major=False,return_sequences=True,name='one'),merge_mode=\"concat\")(middle)\n",
    "    output = nn.Bidirectional(nn.LSTM(4,time_major=False,return_sequences=True,name='two'),merge_mode=\"concat\")(output1)\n",
    "    model = tf.keras.models.Model(input,output,name=\"Bi_Two_LSTM_batch\")\n",
    "    return model\n",
    "model = Bi_Two_LSTM_batch()\n",
    "#tf.keras.utils.plot_model(model,to_file=f'tensorflow/{model.name}.png',show_shapes=True,show_layer_names=True,show_dtype=True)\n",
    "model.save(\"tensorflow/Bi_Two_LSTM_batch\")\n",
    "spec = (tf.TensorSpec((1,1,6,3),tf.float32,name=\"input\"),)\n",
    "output_path=\"tensorflow/\"+model.name+'.onnx'\n",
    "model_proto,_=tf2onnx.convert.from_keras(model,input_signature=spec,opset=11,output_path=output_path)\n",
    "output_names=[n.name for n in model_proto.graph.output]\n",
    "model = onnx.load(output_path)\n",
    "model_sim ,check = simplify(model)\n",
    "assert check,\"simplified onnx model could not be validated\"\n",
    "save_path = output_path.split('.')[0]+\"_sim.onnx\"\n",
    "onnx.save(model_sim,save_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 3.2 time_major=False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n",
      "WARNING:absl:Found untraced functions such as lstm_cell_8_layer_call_fn, lstm_cell_8_layer_call_and_return_conditional_losses while saving (showing 2 of 2). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/One_LSTM_time/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/One_LSTM_time/assets\n",
      "WARNING:absl:<keras.layers.recurrent.LSTMCell object at 0x7f3ae4727f70> has the same name 'LSTMCell' as a built-in Keras object. Consider renaming <class 'keras.layers.recurrent.LSTMCell'> to avoid naming conflicts when loading with `tf.keras.models.load_model`. If renaming is not possible, pass the object in the `custom_objects` parameter of the load function.\n",
      "2022-08-16 15:43:52.434772: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-16 15:43:52.434862: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-16 15:43:52.452892: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:52.453967: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:52.454957: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:52.456033: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:52.465981: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:1164] Optimization results for grappler item: graph_to_optimize\n",
      "  function_optimizer: Graph size after: 85 nodes (56), 96 edges (64), time = 1.955ms.\n",
      "  function_optimizer: Graph size after: 85 nodes (0), 96 edges (0), time = 1.076ms.\n",
      "Optimization results for grappler item: while_cond_48297\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.004ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_48298\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0ms.\n",
      "\n",
      "2022-08-16 15:43:52.520244: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-16 15:43:52.520309: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-16 15:43:52.538162: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:52.539247: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:52.540262: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:52.541338: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:52.551627: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:1164] Optimization results for grappler item: graph_to_optimize\n",
      "  constant_folding: Graph size after: 28 nodes (-23), 28 edges (-27), time = 1.298ms.\n",
      "  function_optimizer: Graph size after: 28 nodes (0), 28 edges (0), time = 0.575ms.\n",
      "  constant_folding: Graph size after: 28 nodes (0), 28 edges (0), time = 0.512ms.\n",
      "  function_optimizer: Graph size after: 28 nodes (0), 28 edges (0), time = 0.588ms.\n",
      "Optimization results for grappler item: while_cond_48297\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.269ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.179ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_48298\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.769ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.642ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n",
      "WARNING:absl:Found untraced functions such as lstm_cell_9_layer_call_fn, lstm_cell_9_layer_call_and_return_conditional_losses, lstm_cell_10_layer_call_fn, lstm_cell_10_layer_call_and_return_conditional_losses while saving (showing 4 of 4). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/Two_LSTM_time/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/Two_LSTM_time/assets\n",
      "WARNING:absl:<keras.layers.recurrent.LSTMCell object at 0x7f3ac83b8d60> has the same name 'LSTMCell' as a built-in Keras object. Consider renaming <class 'keras.layers.recurrent.LSTMCell'> to avoid naming conflicts when loading with `tf.keras.models.load_model`. If renaming is not possible, pass the object in the `custom_objects` parameter of the load function.\n",
      "WARNING:absl:<keras.layers.recurrent.LSTMCell object at 0x7f3a7c209b80> has the same name 'LSTMCell' as a built-in Keras object. Consider renaming <class 'keras.layers.recurrent.LSTMCell'> to avoid naming conflicts when loading with `tf.keras.models.load_model`. If renaming is not possible, pass the object in the `custom_objects` parameter of the load function.\n",
      "2022-08-16 15:43:57.663352: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-16 15:43:57.663442: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-16 15:43:57.681413: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:57.682504: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:57.683492: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:57.684558: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:57.701027: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:1164] Optimization results for grappler item: graph_to_optimize\n",
      "  function_optimizer: Graph size after: 164 nodes (112), 187 edges (128), time = 3.95ms.\n",
      "  function_optimizer: Graph size after: 164 nodes (0), 187 edges (0), time = 2.055ms.\n",
      "Optimization results for grappler item: while_cond_59528\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.004ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0ms.\n",
      "Optimization results for grappler item: while_body_59937\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0ms.\n",
      "Optimization results for grappler item: while_cond_59936\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_59529\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0ms.\n",
      "\n",
      "2022-08-16 15:43:57.789964: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-16 15:43:57.790031: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-16 15:43:57.807917: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:57.809002: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:57.809991: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:57.811055: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n",
      "2022-08-16 15:43:57.832990: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:1164] Optimization results for grappler item: graph_to_optimize\n",
      "  constant_folding: Graph size after: 50 nodes (-46), 51 edges (-54), time = 2.266ms.\n",
      "  function_optimizer: Graph size after: 50 nodes (0), 51 edges (0), time = 1.037ms.\n",
      "  constant_folding: Graph size after: 50 nodes (0), 51 edges (0), time = 0.898ms.\n",
      "  function_optimizer: Graph size after: 50 nodes (0), 51 edges (0), time = 1.077ms.\n",
      "Optimization results for grappler item: while_cond_59528\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.274ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.182ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_59937\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.767ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.655ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "Optimization results for grappler item: while_cond_59936\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.261ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.183ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_59529\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 2.66ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.004ms.\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 2.102ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n",
      "WARNING:absl:Found untraced functions such as lstm_cell_12_layer_call_fn, lstm_cell_12_layer_call_and_return_conditional_losses, lstm_cell_13_layer_call_fn, lstm_cell_13_layer_call_and_return_conditional_losses, lstm_cell_15_layer_call_fn while saving (showing 5 of 8). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/Bi_Two_LSTM_time/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/Bi_Two_LSTM_time/assets\n",
      "WARNING:absl:<keras.layers.recurrent.LSTMCell object at 0x7f3a844d8160> has the same name 'LSTMCell' as a built-in Keras object. Consider renaming <class 'keras.layers.recurrent.LSTMCell'> to avoid naming conflicts when loading with `tf.keras.models.load_model`. If renaming is not possible, pass the object in the `custom_objects` parameter of the load function.\n",
      "WARNING:absl:<keras.layers.recurrent.LSTMCell object at 0x7f3a7c14f400> has the same name 'LSTMCell' as a built-in Keras object. Consider renaming <class 'keras.layers.recurrent.LSTMCell'> to avoid naming conflicts when loading with `tf.keras.models.load_model`. If renaming is not possible, pass the object in the `custom_objects` parameter of the load function.\n",
      "WARNING:absl:<keras.layers.recurrent.LSTMCell object at 0x7f3a7c79bf70> has the same name 'LSTMCell' as a built-in Keras object. Consider renaming <class 'keras.layers.recurrent.LSTMCell'> to avoid naming conflicts when loading with `tf.keras.models.load_model`. If renaming is not possible, pass the object in the `custom_objects` parameter of the load function.\n",
      "WARNING:absl:<keras.layers.recurrent.LSTMCell object at 0x7f3a8465a160> has the same name 'LSTMCell' as a built-in Keras object. Consider renaming <class 'keras.layers.recurrent.LSTMCell'> to avoid naming conflicts when loading with `tf.keras.models.load_model`. If renaming is not possible, pass the object in the `custom_objects` parameter of the load function.\n",
      "2022-08-16 15:44:12.614816: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-16 15:44:12.614936: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-16 15:44:12.633044: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-16 15:44:12.634138: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-16 15:44:12.635134: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-16 15:44:12.636200: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n",
      "2022-08-16 15:44:12.667021: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:1164] Optimization results for grappler item: graph_to_optimize\n",
      "  function_optimizer: Graph size after: 334 nodes (228), 383 edges (260), time = 8.111ms.\n",
      "  function_optimizer: Graph size after: 334 nodes (0), 383 edges (0), time = 4.232ms.\n",
      "Optimization results for grappler item: while_body_93140\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.005ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0ms.\n",
      "Optimization results for grappler item: while_body_93550\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0ms.\n",
      "Optimization results for grappler item: while_cond_92313\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_cond_93139\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_cond_93549\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_92314\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_cond_92723\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_92724\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "\n",
      "2022-08-16 15:44:12.837667: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-16 15:44:12.837739: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-16 15:44:12.855667: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-16 15:44:12.856749: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-16 15:44:12.857738: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-16 15:44:12.858802: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n",
      "2022-08-16 15:44:12.896488: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:1164] Optimization results for grappler item: graph_to_optimize\n",
      "  constant_folding: Graph size after: 106 nodes (-92), 111 edges (-108), time = 4.422ms.\n",
      "  function_optimizer: Graph size after: 106 nodes (0), 111 edges (0), time = 2.035ms.\n",
      "  constant_folding: Graph size after: 106 nodes (0), 111 edges (0), time = 1.936ms.\n",
      "  function_optimizer: Graph size after: 106 nodes (0), 111 edges (0), time = 2.095ms.\n",
      "Optimization results for grappler item: while_body_93140\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.783ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.648ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_93550\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.778ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.65ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_cond_92313\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.265ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.181ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_cond_93139\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.258ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.179ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_cond_93549\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.893ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.004ms.\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.633ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.004ms.\n",
      "Optimization results for grappler item: while_body_92314\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 1.257ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.959ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "Optimization results for grappler item: while_cond_92723\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.377ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  constant_folding: Graph size after: 14 nodes (0), 4 edges (0), time = 0.274ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.001ms.\n",
      "Optimization results for grappler item: while_body_92724\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 1.113ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "  constant_folding: Graph size after: 50 nodes (0), 50 edges (0), time = 0.952ms.\n",
      "  function_optimizer: function_optimizer did nothing. time = 0.002ms.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "def One_LSTM_time():\n",
    "    input = nn.Input(shape=[1,6,3],batch_size=1,name=\"input\")\n",
    "    middle1 = tf.squeeze(input,axis=1)\n",
    "    middle = tf.transpose(middle1,[1,0,2])\n",
    "    output = nn.LSTM(4,time_major=True,return_sequences=True,name='one')(middle)\n",
    "    model = tf.keras.models.Model(input,output,name=\"One_LSTM_time\")\n",
    "    return model\n",
    "model = One_LSTM_time()\n",
    "#tf.keras.utils.plot_model(model,to_file=f'tensorflow/{model.name}.png',show_shapes=True,show_layer_names=True,show_dtype=True)\n",
    "model.save(\"tensorflow/One_LSTM_time\")\n",
    "spec = (tf.TensorSpec((1,1,6,3),tf.float32,name=\"input\"),)\n",
    "output_path=\"tensorflow/\"+model.name+'.onnx'\n",
    "model_proto,_=tf2onnx.convert.from_keras(model,input_signature=spec,opset=11,output_path=output_path)\n",
    "output_names=[n.name for n in model_proto.graph.output]\n",
    "model = onnx.load(output_path)\n",
    "model_sim ,check = simplify(model)\n",
    "assert check,\"simplified onnx model could not be validated\"\n",
    "save_path = output_path.split('.')[0]+\"_sim.onnx\"\n",
    "onnx.save(model_sim,save_path)\n",
    "\n",
    "def Two_LSTM_time():\n",
    "    input = nn.Input(shape=[1,6,3],batch_size=1,name=\"input\")\n",
    "    middle1 = tf.squeeze(input,axis=1)\n",
    "    middle = tf.transpose(middle1,[1,0,2])\n",
    "    output1 = nn.LSTM(4,time_major=True,return_sequences=True,name='one')(middle)\n",
    "    output = nn.LSTM(4,time_major=True,return_sequences=True,name='two')(output1)\n",
    "    model = tf.keras.models.Model(input,output,name=\"Two_LSTM_time\")\n",
    "    return model\n",
    "model = Two_LSTM_time()\n",
    "#tf.keras.utils.plot_model(model,to_file=f'tensorflow/{model.name}.png',show_shapes=True,show_layer_names=True,show_dtype=True)\n",
    "model.save(\"tensorflow/Two_LSTM_time\")\n",
    "spec = (tf.TensorSpec((1,1,6,3),tf.float32,name=\"input\"),)\n",
    "output_path=\"tensorflow/\"+model.name+'.onnx'\n",
    "\n",
    "model_proto,_=tf2onnx.convert.from_keras(model,input_signature=spec,opset=11,output_path=output_path)\n",
    "output_names=[n.name for n in model_proto.graph.output]\n",
    "model = onnx.load(output_path)\n",
    "model_sim ,check = simplify(model)\n",
    "assert check,\"simplified onnx model could not be validated\"\n",
    "save_path = output_path.split('.')[0]+\"_sim.onnx\"\n",
    "onnx.save(model_sim,save_path)\n",
    "\n",
    "def Bi_Two_LSTM_time():\n",
    "    input = nn.Input(shape=[1,6,3],batch_size=1,name=\"input\")\n",
    "    middle1 = tf.squeeze(input,axis=1)\n",
    "    middle = tf.transpose(middle1,[1,0,2])\n",
    "    output1 = nn.Bidirectional(nn.LSTM(4,time_major=True,return_sequences=True,name='one'),merge_mode=\"concat\")(middle)\n",
    "    output = nn.Bidirectional(nn.LSTM(4,time_major=True,return_sequences=True,name='two'),merge_mode=\"concat\")(output1)\n",
    "    model = tf.keras.models.Model(input,output,name=\"Bi_Two_LSTM_time\")\n",
    "    return model\n",
    "model = Bi_Two_LSTM_time()\n",
    "#tf.keras.utils.plot_model(model,to_file=f'tensorflow/{model.name}.png',show_shapes=True,show_layer_names=True,show_dtype=True)\n",
    "model.save(\"tensorflow/Bi_Two_LSTM_time\")\n",
    "spec = (tf.TensorSpec((1,1,6,3),tf.float32,name=\"input\"),)\n",
    "output_path=\"tensorflow/\"+model.name+'.onnx'\n",
    "model_proto,_=tf2onnx.convert.from_keras(model,input_signature=spec,opset=11,output_path=output_path)\n",
    "output_names=[n.name for n in model_proto.graph.output]\n",
    "model = onnx.load(output_path)\n",
    "model_sim ,check = simplify(model)\n",
    "assert check,\"simplified onnx model could not be validated\"\n",
    "save_path = output_path.split('.')[0]+\"_sim.onnx\"\n",
    "onnx.save(model_sim,save_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 3.3 return_state=True\n",
    "支持上一层的state作为下一层的初始状态"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n",
      "WARNING:absl:Found untraced functions such as lstm_cell_151_layer_call_fn, lstm_cell_151_layer_call_and_return_conditional_losses while saving (showing 2 of 2). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/One_LSTM_time_state/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/One_LSTM_time_state/assets\n",
      "2022-08-10 10:27:29.569744: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-10 10:27:29.569887: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-10 10:27:29.587666: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:29.588730: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:29.589789: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:29.590839: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:29.657141: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-10 10:27:29.657227: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-10 10:27:29.674878: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:29.675934: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:29.676968: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:29.678019: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n",
      "WARNING:absl:Found untraced functions such as lstm_cell_152_layer_call_fn, lstm_cell_152_layer_call_and_return_conditional_losses, lstm_cell_153_layer_call_fn, lstm_cell_153_layer_call_and_return_conditional_losses while saving (showing 4 of 4). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/Two_LSTM_time_state/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/Two_LSTM_time_state/assets\n",
      "2022-08-10 10:27:34.989735: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-10 10:27:34.989854: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-10 10:27:35.007588: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:35.008641: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:35.009675: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:35.010708: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:35.118860: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-10 10:27:35.118956: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-10 10:27:35.136643: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:35.137703: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:35.138736: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:35.139769: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n",
      "WARNING:absl:Found untraced functions such as lstm_cell_155_layer_call_fn, lstm_cell_155_layer_call_and_return_conditional_losses, lstm_cell_156_layer_call_fn, lstm_cell_156_layer_call_and_return_conditional_losses, lstm_cell_158_layer_call_fn while saving (showing 5 of 8). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/Bi_Two_LSTM_time_state/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: tensorflow/Bi_Two_LSTM_time_state/assets\n",
      "2022-08-10 10:27:50.572328: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-10 10:27:50.572459: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-10 10:27:50.590225: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:50.591290: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:50.592334: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:50.593388: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:50.800566: I tensorflow/core/grappler/devices.cc:66] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 4\n",
      "2022-08-10 10:27:50.800672: I tensorflow/core/grappler/clusters/single_machine.cc:358] Starting new session\n",
      "2022-08-10 10:27:50.818458: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 14627 MB memory:  -> device: 0, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:da:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:50.819552: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 14627 MB memory:  -> device: 1, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:db:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:50.820605: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 14627 MB memory:  -> device: 2, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dc:00.0, compute capability: 7.0\n",
      "2022-08-10 10:27:50.821643: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 14627 MB memory:  -> device: 3, name: Tesla V100-PCIE-16GB-LS, pci bus id: 0000:dd:00.0, compute capability: 7.0\n"
     ]
    }
   ],
   "source": [
    "def One_LSTM_time_state():\n",
    "    \"\"\"Single time-major LSTM returning the full sequence plus final h/c states.\"\"\"\n",
    "    input = nn.Input(shape=[1,6,3],batch_size=1,name=\"input\")\n",
    "    middle1 = tf.squeeze(input,axis=1)       # (1,6,3): drop the singleton channel axis\n",
    "    middle = tf.transpose(middle1,[1,0,2])   # to time-major layout (seq,batch,feature)\n",
    "    output,h_state,c_state = nn.LSTM(4,time_major=True,return_sequences=True,return_state=True,name='one')(middle)\n",
    "    return tf.keras.models.Model(inputs=input,outputs=[output,h_state,c_state],name=\"One_LSTM_time_state\")\n",
    "\n",
    "def Two_LSTM_time_state():\n",
    "    \"\"\"Two stacked time-major LSTMs; layer two is seeded with layer one's final states.\"\"\"\n",
    "    input = nn.Input(shape=[1,6,3],batch_size=1,name=\"input\")\n",
    "    middle1 = tf.squeeze(input,axis=1)\n",
    "    middle = tf.transpose(middle1,[1,0,2])\n",
    "    output1,h_state,c_state = nn.LSTM(4,time_major=True,return_sequences=True,return_state=True,name='one')(middle)\n",
    "    output,h_state1,c_state1 = nn.LSTM(4,time_major=True,return_sequences=True,return_state=True,name='two')(output1,initial_state=(h_state,c_state))\n",
    "    return tf.keras.models.Model(inputs=input,outputs=[output,h_state1,c_state1],name=\"Two_LSTM_time_state\")\n",
    "\n",
    "def Bi_Two_LSTM_time_state():\n",
    "    \"\"\"Two stacked bidirectional time-major LSTMs (concat merge); layer two is seeded with layer one's forward/backward states.\"\"\"\n",
    "    input = nn.Input(shape=[1,6,3],batch_size=1,name=\"input\")\n",
    "    middle1 = tf.squeeze(input,axis=1)\n",
    "    middle = tf.transpose(middle1,[1,0,2])\n",
    "    output1,h_state,c_state,h_state1,c_state1= nn.Bidirectional(nn.LSTM(4,time_major=True,return_sequences=True,return_state=True,name='one'),merge_mode=\"concat\")(middle)\n",
    "    output= nn.Bidirectional(nn.LSTM(4,time_major=True,return_sequences=True,return_state=True,name='two'),merge_mode=\"concat\")(output1,initial_state=(h_state,c_state,h_state1,c_state1))\n",
    "    return tf.keras.models.Model(inputs=input,outputs=output,name=\"Bi_Two_LSTM_time_state\")\n",
    "\n",
    "def export_and_simplify(keras_model):\n",
    "    \"\"\"Save `keras_model` as a SavedModel, convert it to ONNX (opset 11),\n",
    "    then write a simplified copy next to it as \"<name>_sim.onnx\".\n",
    "    Raises AssertionError if the simplified model fails validation.\"\"\"\n",
    "    keras_model.save(\"tensorflow/\"+keras_model.name)\n",
    "    spec = (tf.TensorSpec((1,1,6,3),tf.float32,name=\"input\"),)\n",
    "    output_path = \"tensorflow/\"+keras_model.name+'.onnx'\n",
    "    tf2onnx.convert.from_keras(keras_model,input_signature=spec,opset=11,output_path=output_path)\n",
    "    # reload the exported graph under a distinct name -- do not shadow the Keras model\n",
    "    onnx_model = onnx.load(output_path)\n",
    "    model_sim,check = simplify(onnx_model)\n",
    "    assert check,\"simplified onnx model could not be validated\"\n",
    "    # splitext is safer than split('.')[0], which would truncate at the first dot in the path\n",
    "    save_path = os.path.splitext(output_path)[0]+\"_sim.onnx\"\n",
    "    onnx.save(model_sim,save_path)\n",
    "\n",
    "# The export pipeline is identical for all three variants, so run it in one loop\n",
    "for build in (One_LSTM_time_state,Two_LSTM_time_state,Bi_Two_LSTM_time_state):\n",
    "    model = build()\n",
    "    #tf.keras.utils.plot_model(model,to_file=f'tensorflow/{model.name}.png',show_shapes=True,show_layer_names=True,show_dtype=True)\n",
    "    export_and_simplify(model)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.4 查看生成的模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Bi_Two_LSTM_batch', 'Bi_Two_LSTM_batch.onnx', 'Bi_Two_LSTM_batch_sim.onnx', 'Bi_Two_LSTM_time', 'Bi_Two_LSTM_time.onnx', 'Bi_Two_LSTM_time_sim.onnx', 'Bi_Two_LSTM_time_state', 'Bi_Two_LSTM_time_state.onnx', 'Bi_Two_LSTM_time_state_sim.onnx', 'One_LSTM_batch', 'One_LSTM_batch.onnx', 'One_LSTM_batch_sim.onnx', 'One_LSTM_time', 'One_LSTM_time.onnx', 'One_LSTM_time_sim.onnx', 'One_LSTM_time_state', 'One_LSTM_time_state.onnx', 'One_LSTM_time_state_sim.onnx', 'Two_LSTM_batch', 'Two_LSTM_batch.onnx', 'Two_LSTM_batch_sim.onnx', 'Two_LSTM_time', 'Two_LSTM_time.onnx', 'Two_LSTM_time_sim.onnx', 'Two_LSTM_time_state', 'Two_LSTM_time_state.onnx', 'Two_LSTM_time_state_sim.onnx']\n"
     ]
    }
   ],
   "source": [
    "# Collect everything exported under tensorflow/ and keep the paths of the .onnx files\n",
    "tf_models = sorted(os.listdir('tensorflow'))\n",
    "tf_models_path = []\n",
    "for entry in tf_models:\n",
    "    if entry.endswith('onnx'):\n",
    "        tf_models_path.append(os.path.join('tensorflow', entry))\n",
    "print(tf_models)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['tensorflow/Bi_Two_LSTM_batch.onnx',\n",
       " 'tensorflow/Bi_Two_LSTM_batch_sim.onnx',\n",
       " 'tensorflow/Bi_Two_LSTM_time.onnx',\n",
       " 'tensorflow/Bi_Two_LSTM_time_sim.onnx',\n",
       " 'tensorflow/Bi_Two_LSTM_time_state.onnx',\n",
       " 'tensorflow/Bi_Two_LSTM_time_state_sim.onnx',\n",
       " 'tensorflow/One_LSTM_batch.onnx',\n",
       " 'tensorflow/One_LSTM_batch_sim.onnx',\n",
       " 'tensorflow/One_LSTM_time.onnx',\n",
       " 'tensorflow/One_LSTM_time_sim.onnx',\n",
       " 'tensorflow/One_LSTM_time_state.onnx',\n",
       " 'tensorflow/One_LSTM_time_state_sim.onnx',\n",
       " 'tensorflow/Two_LSTM_batch.onnx',\n",
       " 'tensorflow/Two_LSTM_batch_sim.onnx',\n",
       " 'tensorflow/Two_LSTM_time.onnx',\n",
       " 'tensorflow/Two_LSTM_time_sim.onnx',\n",
       " 'tensorflow/Two_LSTM_time_state.onnx',\n",
       " 'tensorflow/Two_LSTM_time_state_sim.onnx']"
      ]
     },
     "execution_count": 85,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tf_models_path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "4.2M\ttensorflow/Bi_Two_LSTM_batch\n",
      "8.0K\ttensorflow/Bi_Two_LSTM_batch.onnx\n",
      "8.0K\ttensorflow/Bi_Two_LSTM_batch_sim.onnx\n",
      "4.0M\ttensorflow/Bi_Two_LSTM_time\n",
      "8.0K\ttensorflow/Bi_Two_LSTM_time.onnx\n",
      "8.0K\ttensorflow/Bi_Two_LSTM_time_sim.onnx\n",
      "4.0M\ttensorflow/Bi_Two_LSTM_time_state\n",
      "8.0K\ttensorflow/Bi_Two_LSTM_time_state.onnx\n",
      "8.0K\ttensorflow/Bi_Two_LSTM_time_state_sim.onnx\n",
      "708K\ttensorflow/One_LSTM_batch\n",
      "4.0K\ttensorflow/One_LSTM_batch.onnx\n",
      "4.0K\ttensorflow/One_LSTM_batch_sim.onnx\n",
      "684K\ttensorflow/One_LSTM_time\n",
      "4.0K\ttensorflow/One_LSTM_time.onnx\n",
      "4.0K\ttensorflow/One_LSTM_time_sim.onnx\n",
      "696K\ttensorflow/One_LSTM_time_state\n",
      "4.0K\ttensorflow/One_LSTM_time_state.onnx\n",
      "4.0K\ttensorflow/One_LSTM_time_state_sim.onnx\n",
      "1.4M\ttensorflow/Two_LSTM_batch\n",
      "4.0K\ttensorflow/Two_LSTM_batch.onnx\n",
      "4.0K\ttensorflow/Two_LSTM_batch_sim.onnx\n",
      "1.3M\ttensorflow/Two_LSTM_time\n",
      "4.0K\ttensorflow/Two_LSTM_time.onnx\n",
      "4.0K\ttensorflow/Two_LSTM_time_sim.onnx\n",
      "1.3M\ttensorflow/Two_LSTM_time_state\n",
      "4.0K\ttensorflow/Two_LSTM_time_state.onnx\n",
      "4.0K\ttensorflow/Two_LSTM_time_state_sim.onnx\n"
     ]
    }
   ],
   "source": [
    "! du -sh tensorflow/*"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [],
   "source": [
    "def onnx_infer(model_path,data):\n",
    "    \"\"\"Run a single-input ONNX model and return its first output.\n",
    "\n",
    "    Args:\n",
    "        model_path (str): path to the .onnx file.\n",
    "        data (np.ndarray): input array; must match the model's input shape/dtype\n",
    "            (here float32 of shape (1,1,6,3)).\n",
    "\n",
    "    Returns:\n",
    "        np.ndarray: value of the model's FIRST output only; extra outputs\n",
    "        (e.g. h/c states) are ignored, matching the Keras-side comparison.\n",
    "    \"\"\"\n",
    "    onnx_session=onnxruntime.InferenceSession(model_path)\n",
    "    input_name = onnx_session.get_inputs()[0].name\n",
    "    output_name = onnx_session.get_outputs()[0].name\n",
    "    result = onnx_session.run([output_name],{input_name:data})\n",
    "    return result[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): leftover debug override -- this replaces the full list built by the\n",
    "# listdir cell above with a single model, so a fresh Restart-and-Run-All would only\n",
    "# compare One_LSTM_time.onnx. The outputs shown under the comparison cell below were\n",
    "# produced with the full list, i.e. this cell was skipped in that session.\n",
    "tf_models_path=[\"tensorflow/One_LSTM_time.onnx\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 120,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Bi_Two_LSTM_batch', 'Bi_Two_LSTM_batch.onnx', 'Bi_Two_LSTM_batch_sim.onnx'] have same results\n",
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Bi_Two_LSTM_time', 'Bi_Two_LSTM_time.onnx', 'Bi_Two_LSTM_time_sim.onnx'] have same results\n",
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Bi_Two_LSTM_time_state', 'Bi_Two_LSTM_time_state.onnx', 'Bi_Two_LSTM_time_state_sim.onnx'] have same results\n",
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['One_LSTM_batch', 'One_LSTM_batch.onnx', 'One_LSTM_batch_sim.onnx'] have same results\n",
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['One_LSTM_time', 'One_LSTM_time.onnx', 'One_LSTM_time_sim.onnx'] have same results\n",
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['One_LSTM_time_state', 'One_LSTM_time_state.onnx', 'One_LSTM_time_state_sim.onnx'] have same results\n",
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Two_LSTM_batch', 'Two_LSTM_batch.onnx', 'Two_LSTM_batch_sim.onnx'] have same results\n",
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Two_LSTM_time', 'Two_LSTM_time.onnx', 'Two_LSTM_time_sim.onnx'] have same results\n",
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Two_LSTM_time_state', 'Two_LSTM_time_state.onnx', 'Two_LSTM_time_state_sim.onnx'] have same results\n"
     ]
    }
   ],
   "source": [
    "# Cross-check each SavedModel against its ONNX export and the simplified ONNX.\n",
    "# NOTE(review): test_data is unseeded, so exact values differ between runs;\n",
    "# the allclose comparison is still valid since all three models see the same array.\n",
    "test_data = np.random.random(size=(1,1,6,3)).astype(np.float32) # batch,channel,height,width\n",
    "for onnx_path in tf_models_path:\n",
    "    base_path = os.path.splitext(onnx_path)[0]\n",
    "    if base_path.endswith('sim'):\n",
    "        continue  # *_sim.onnx files are checked alongside their unsimplified source\n",
    "    results = {}\n",
    "    onnx_sim = base_path+'_sim.onnx'\n",
    "    tf_result = tf.keras.models.load_model(base_path)(tf.convert_to_tensor(test_data))\n",
    "    # multi-output models return a list; keep only the first output to match onnx_infer\n",
    "    if isinstance(tf_result,list):\n",
    "        tf_result = tf_result[0].numpy()\n",
    "    results[os.path.basename(base_path)] = tf_result\n",
    "    results[os.path.basename(onnx_path)] = onnx_infer(onnx_path,test_data)\n",
    "    results[os.path.basename(onnx_sim)] = onnx_infer(onnx_sim,test_data)\n",
    "    try:\n",
    "        values = list(results.values())\n",
    "        np.testing.assert_allclose(values[0],values[1],rtol=1e-5)\n",
    "        np.testing.assert_allclose(values[1],values[2],rtol=1e-5)\n",
    "        print(f\"{list(results.keys())} have same results\")\n",
    "    except AssertionError:\n",
    "        # only catch comparison failures -- loading/inference errors should surface\n",
    "        print(f\"{list(results.keys())} have different results\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.9.13 ('dl')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.13"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "0fcf723b185f3a92e8f998f46efc0889d792720e399a33feb07fad72f52c1606"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
