{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import os\n",
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "MyCell(\n",
      "  original_name=MyCell\n",
      "  (linear): Linear(original_name=Linear)\n",
      ")\n",
      "(tensor([[-0.3429,  0.3553,  0.8815,  0.7574],\n",
      "        [ 0.0276,  0.8834,  0.2964,  0.7879],\n",
      "        [ 0.4458,  0.6639,  0.9072,  0.6161]], grad_fn=<TanhBackward>), tensor([[-0.3429,  0.3553,  0.8815,  0.7574],\n",
      "        [ 0.0276,  0.8834,  0.2964,  0.7879],\n",
      "        [ 0.4458,  0.6639,  0.9072,  0.6161]], grad_fn=<TanhBackward>))\n",
      "图的形式 graph(%self.1 : __torch__.___torch_mangle_55.MyCell,\n",
      "      %input : Float(3:4, 4:1, requires_grad=0, device=cpu),\n",
      "      %h : Float(3:4, 4:1, requires_grad=0, device=cpu)):\n",
      "  %19 : __torch__.torch.nn.modules.linear.___torch_mangle_54.Linear = prim::GetAttr[name=\"linear\"](%self.1)\n",
      "  %21 : Tensor = prim::CallMethod[name=\"forward\"](%19, %input)\n",
      "  %12 : int = prim::Constant[value=1]() # <ipython-input-15-bf9f177d6203>:11:0\n",
      "  %13 : Float(3:4, 4:1, requires_grad=1, device=cpu) = aten::add(%21, %h, %12) # <ipython-input-15-bf9f177d6203>:11:0\n",
      "  %14 : Float(3:4, 4:1, requires_grad=1, device=cpu) = aten::tanh(%13) # <ipython-input-15-bf9f177d6203>:11:0\n",
      "  %15 : (Float(3:4, 4:1, requires_grad=1, device=cpu), Float(3:4, 4:1, requires_grad=1, device=cpu)) = prim::TupleConstruct(%14, %14)\n",
      "  return (%15)\n",
      "\n",
      "python的形式 def forward(self,\n",
      "    input: Tensor,\n",
      "    h: Tensor) -> Tuple[Tensor, Tensor]:\n",
      "  _0 = torch.add((self.linear).forward(input, ), h, alpha=1)\n",
      "  _1 = torch.tanh(_0)\n",
      "  return (_1, _1)\n",
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": "tensor([[True, True, True, True],\n        [True, True, True, True],\n        [True, True, True, True]])"
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch\n",
    "import torch.jit as jit\n",
    "\n",
    "# Tracing Models\n",
    "\n",
    "class MyCell(torch.nn.Module):\n",
    "    def __init__(self):\n",
    "        super(MyCell,self).__init__()\n",
    "        self.linear = torch.nn.Linear(4,4)\n",
    "    def forward(self,x,h):\n",
    "        new_h = torch.tanh(self.linear(x)+h)\n",
    "        return new_h,new_h\n",
    "\n",
    "my_cell = MyCell()\n",
    "x,h = torch.rand(3,4),torch.rand(3,4)\n",
    "# 使用这个快速函数来创建一个torch.jit.ScriptModule实例\n",
    "traced_cell = torch.jit.trace(func=my_cell,example_inputs=(x,h))\n",
    "isinstance(traced_cell,torch.jit.ScriptModule)  # 看一下是不是一样的\n",
    "print(traced_cell)\n",
    "print(traced_cell(x,h))\n",
    "# 可以使用图的形式来表示这个脚本的计算过程\n",
    "print('图的形式',traced_cell.graph)\n",
    "# 当然可以使用python的形式来表示这个脚本的计算过程\n",
    "print('python的形式',traced_cell.code)\n",
    "# 看一下traced_cell是不是torch.jit.ScriptModule的一个实例\n",
    "isinstance(traced_cell,torch.jit.ScriptModule)\n",
    "# 可以看到使用torch.jit封装过的脚本和原来的模型执行是一样的结果。\n",
    "traced_cell(x,h)[0] == my_cell(x,h)[0]"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "def forward(self,\n",
      "    argument_1: Tensor) -> Tensor:\n",
      "  return torch.neg(argument_1)\n",
      "\n",
      "def forward(self,\n",
      "    input: Tensor,\n",
      "    h: Tensor) -> Tuple[Tensor, Tensor]:\n",
      "  _0 = (self.dg).forward((self.linear).forward(input, ), )\n",
      "  _1 = torch.tanh(torch.add(_0, h, alpha=1))\n",
      "  return (_1, _1)\n",
      "\n",
      "scripted_gate: def forward(self,\n",
      "    x: Tensor) -> Tensor:\n",
      "  _0 = bool(torch.gt(torch.sum(x, dtype=None), 0))\n",
      "  if _0:\n",
      "    _1 = x\n",
      "  else:\n",
      "    _1 = torch.neg(x)\n",
      "  return _1\n",
      "\n",
      "scripted_cell def forward(self,\n",
      "    x: Tensor,\n",
      "    h: Tensor) -> Tuple[Tensor, Tensor]:\n",
      "  _0 = (self.dg).forward((self.linear).forward(x, ), )\n",
      "  new_h = torch.tanh(torch.add(_0, h, alpha=1))\n",
      "  return (new_h, new_h)\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/hx/anaconda3/envs/DL/lib/python3.7/site-packages/ipykernel_launcher.py:4: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  after removing the cwd from sys.path.\n"
     ]
    }
   ],
   "source": [
    "# 使用Scripting来转换模型\n",
    "class MyDecisionGate(torch.nn.Module):\n",
    "    def forward(self,x):\n",
    "        if x.sum()>0:\n",
    "            return x\n",
    "        else:\n",
    "            return -x\n",
    "\n",
    "class MyCell(torch.nn.Module):\n",
    "    def __init__(self,dg):\n",
    "        super(MyCell,self).__init__()\n",
    "        self.dg = dg\n",
    "        self.linear = torch.nn.Linear(4,4)\n",
    "\n",
    "    def forward(self,x,h):\n",
    "        new_h = torch.tanh(self.dg(self.linear(x))+h)\n",
    "        return new_h,new_h\n",
    "\n",
    "my_cell = MyCell(MyDecisionGate())\n",
    "traced_cell = torch.jit.trace(func=my_cell,example_inputs=(x,h))\n",
    "print(traced_cell.dg.code)\n",
    "print(traced_cell.code)\n",
    "\n",
     "# 由上面的输出结果我们可以看得出来这个if-else分支结构并没有被保存，实际上trace应该只是模拟了一次执行的过程，但不幸的是无法保存分支结构\n",
    "# 可以用下面的代码来进行分析源代码\n",
     "scripted_gate = torch.jit.script(obj=MyDecisionGate())   # 可以看得出来script不需要输入一个模拟输入的过程，而是自己去分析\n",
    "my_cell = MyCell(scripted_gate)\n",
    "scripted_cell = torch.jit.script(obj=my_cell)\n",
    "print(\"scripted_gate:\",scripted_gate.code)\n",
    "print(\"scripted_cell\",scripted_cell.code)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "例子1： def forward(self,\n",
      "    xs: Tensor) -> Tuple[Tensor, Tensor]:\n",
      "  h = torch.zeros([3, 4], dtype=None, layout=None, device=None, pin_memory=None)\n",
      "  y = torch.zeros([3, 4], dtype=None, layout=None, device=None, pin_memory=None)\n",
      "  y0 = y\n",
      "  h0 = h\n",
      "  for i in range(torch.size(xs, 0)):\n",
      "    _0 = (self.cell).forward(torch.select(xs, 0, i), h0, )\n",
      "    y1, h1, = _0\n",
      "    y0, h0 = y1, h1\n",
      "  return (y0, h0)\n",
      "\n",
      "例子2 def forward(self,\n",
      "    argument_1: Tensor) -> Tensor:\n",
      "  _0, y, = (self.loop).forward(argument_1, )\n",
      "  return torch.relu(y)\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 可以混合Scripting和Tracing\n",
    "\"\"\"\n",
     "在一些情形下需要使用trace而不是script，例如一些模块最后的结果是基于常量算出来的，我们就想让它不要出现在脚本中\n",
     "一般来说没有控制流的模型使用trace就够了，但是有控制流的模型必须要使用script来进行转换\n",
    "\"\"\"\n",
    "# 例子1\n",
    "class MyRNNLoop(torch.nn.Module):\n",
    "    def __init__(self):\n",
    "        super(MyRNNLoop,self).__init__()\n",
    "        self.cell = torch.jit.trace(MyCell(scripted_gate),(x,h))\n",
    "    def forward(self,xs):\n",
    "        h,y = torch.zeros(3,4),torch.zeros(3,4)\n",
    "        for i in range(xs.size(0)):\n",
    "            y,h = self.cell(xs[i],h)\n",
    "        return y,h\n",
    "\n",
    "rnn_loop = torch.jit.script(MyRNNLoop())\n",
    "print(\"例子1：\",rnn_loop.code)\n",
    "# 下一个例子\n",
    "class WrapRNN(torch.nn.Module):\n",
    "    def __init__(self):\n",
    "        super(WrapRNN,self).__init__()\n",
    "        self.loop = torch.jit.script(MyRNNLoop())\n",
    "    def forward(self,xs):\n",
    "        y,h = self.loop(xs)\n",
    "        return torch.relu(y)\n",
    "traced = torch.jit.trace(WrapRNN(),torch.rand(10,3,4))\n",
    "print(\"例子2\",traced.code)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'traced' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[0;31mNameError\u001B[0m                                 Traceback (most recent call last)",
      "\u001B[0;32m<ipython-input-1-7b521ae09e59>\u001B[0m in \u001B[0;36m<module>\u001B[0;34m\u001B[0m\n\u001B[1;32m      1\u001B[0m \u001B[0;31m# 保存和加载模型\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m----> 2\u001B[0;31m \u001B[0mtraced\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0msave\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m'wrapped_rnn.pt'\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m      3\u001B[0m \u001B[0mloaded\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mtorch\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mjit\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mload\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m'wrapped_rnn.pt'\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m      4\u001B[0m \u001B[0mprint\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mloaded\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m      5\u001B[0m \u001B[0mprint\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mloaded\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mcode\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
      "\u001B[0;31mNameError\u001B[0m: name 'traced' is not defined"
     ]
    }
   ],
   "source": [
    "# 保存和加载模型\n",
    "traced.save('wrapped_rnn.pt')\n",
    "loaded = torch.jit.load('wrapped_rnn.pt')\n",
    "print(loaded)\n",
    "print(loaded.code)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "outputs": [
    {
     "data": {
      "text/plain": "True"
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 注意： 可以使用装饰器来装饰函数，\n",
    "import torch\n",
    "@torch.jit.script\n",
    "def temp(a:torch.Tensor):\n",
    "    return a**2\n",
     "isinstance(temp,torch.jit.ScriptFunction)   # 使用了这个装饰器的函数就变成了一个ScriptFunction对象"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
     "可以通过设置一个全局变量来禁止使用torch.jit的功能。\n",
     "比方说使用python执行一个文件 temp.py 时，可以使用如下的命令：\n",
     "```bash\n",
     "PYTORCH_JIT=0 python temp.py\n",
     "```"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "name": "conda-env-DL-py",
   "language": "python",
   "display_name": "Python [conda env:DL] *"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}