<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#222">
<meta name="generator" content="Hexo 5.4.0">
  <link rel="apple-touch-icon" sizes="180x180" href="/images/apple-touch-icon-next.png">
  <link rel="icon" type="image/png" sizes="32x32" href="/images/favicon-32x32-next.png">
  <link rel="icon" type="image/png" sizes="16x16" href="/images/favicon-16x16-next.png">
  <link rel="mask-icon" href="/images/logo.svg" color="#222">
  <meta name="google-site-verification" content="d44tDfSSWxm1_XP1dAq65hkgyD6zw70Ua9JdCaJqWGg">

<link rel="stylesheet" href="/css/main.css">


<link rel="stylesheet" href="/lib/font-awesome/css/all.min.css">

<script id="hexo-configurations">
    // Theme-wide configuration injected by Hexo/NexT at build time.
    var NexT = window.NexT || {};
    // CONFIG is read by NexT's client-side scripts (sidebar, motion, comments, search, ...).
    var CONFIG = {"hostname":"zasdfgbnm.github.io","root":"/","scheme":"Muse","version":"7.8.0","exturl":false,"sidebar":{"position":"left","display":"post","padding":18,"offset":12,"onmobile":false},"copycode":{"enable":false,"show_result":false,"style":null},"back2top":{"enable":true,"sidebar":false,"scrollpercent":false},"bookmark":{"enable":false,"color":"#222","save":"auto"},"fancybox":false,"mediumzoom":false,"lazyload":false,"pangu":false,"comments":{"style":"tabs","active":null,"storage":true,"lazyload":false,"nav":null},"algolia":{"hits":{"per_page":10},"labels":{"input_placeholder":"Search for Posts","hits_empty":"We didn't find any results for the search: ${query}","hits_stats":"${hits} results found in ${time} ms"}},"localsearch":{"enable":false,"trigger":"auto","top_n_per_article":1,"unescape":false,"preload":false},"motion":{"enable":true,"async":false,"transition":{"post_block":"fadeIn","post_header":"slideDownIn","post_body":"slideDownIn","coll_header":"slideLeftIn","sidebar":"slideUpIn"}}};
  </script>

  <meta name="description" content="This is my note for reading PyTorch’s JIT source. We begin by looking at torch.jit.script to find the frontend that compiles the Python code into PyTorch’s tree views, and the backend that compiles tr">
<meta property="og:type" content="article">
<meta property="og:title" content="PyTorch JIT Source Code Read Note (Updated at Feb 2020)">
<meta property="og:url" content="https://zasdfgbnm.github.io/2020/02/07/PyTorch-JIT-Source-Code-Read-Note-Updated-202002/index.html">
<meta property="og:site_name" content="zasdfgbnm">
<meta property="og:description" content="This is my note for reading PyTorch’s JIT source. We begin by looking at torch.jit.script to find the frontend that compiles the Python code into PyTorch’s tree views, and the backend that compiles tr">
<meta property="og:locale" content="en_US">
<meta property="article:published_time" content="2020-02-07T19:00:00.000Z">
<meta property="article:modified_time" content="2021-04-04T05:17:59.764Z">
<meta property="article:author" content="zasdfgbnm">
<meta property="article:tag" content="机器学习">
<meta property="article:tag" content="PyTorch">
<meta property="article:tag" content="深度学习">
<meta name="twitter:card" content="summary">

<link rel="canonical" href="https://zasdfgbnm.github.io/2020/02/07/PyTorch-JIT-Source-Code-Read-Note-Updated-202002/">


<script id="page-configurations">
  // Per-page flags consumed by NexT's client-side scripts.
  // See https://hexo.io/docs/variables.html for the source Hexo variables.
  CONFIG.page = {
    sidebar: "",
    isHome: false,
    isPost: true,
    lang: 'en'
  };
</script>

  <title>PyTorch JIT Source Code Read Note (Updated at Feb 2020) | zasdfgbnm</title>
  
    <script async src="https://www.googletagmanager.com/gtag/js?id=UA-7583294-5"></script>
    <script>
      // Google Analytics (gtag.js). The hostname guard keeps analytics from
      // firing on local previews / forks — only the production site reports.
      if (CONFIG.hostname === location.hostname) {
        window.dataLayer = window.dataLayer || [];
        function gtag(){dataLayer.push(arguments);}
        gtag('js', new Date());
        gtag('config', 'UA-7583294-5');
      }
    </script>


  <script>
    // Baidu Tongji analytics: asynchronously injects the hm.js tracker by
    // inserting a <script> element before the first script on the page.
    var _hmt = _hmt || [];
    (function() {
      var hm = document.createElement("script");
      hm.src = "https://hm.baidu.com/hm.js?a56abdeb557a286a6b7a104348fdfbcd";
      var s = document.getElementsByTagName("script")[0];
      s.parentNode.insertBefore(hm, s);
    })();
  </script>




  <noscript>
  <style>
  /* No-JS fallback: NexT's motion scripts normally animate these elements in
     from opacity 0 / offset positions. When JavaScript is disabled, reset the
     animated properties so the whole page is visible immediately. */
  .use-motion .brand,
  .use-motion .menu-item,
  .sidebar-inner,
  .use-motion .post-block,
  .use-motion .pagination,
  .use-motion .comments,
  .use-motion .post-header,
  .use-motion .post-body,
  .use-motion .collection-header { opacity: initial; }

  .use-motion .site-title,
  .use-motion .site-subtitle {
    opacity: initial;
    top: initial;
  }

  .use-motion .logo-line-before i { left: initial; }
  .use-motion .logo-line-after i { right: initial; }
  </style>
</noscript>

<link rel="alternate" href="/atom.xml" title="zasdfgbnm" type="application/atom+xml">
</head>

<body itemscope itemtype="http://schema.org/WebPage">
  <div class="container use-motion">
    <div class="headband"></div>

    <header class="header" itemscope itemtype="http://schema.org/WPHeader">
      <div class="header-inner"><div class="site-brand-container">
  <div class="site-nav-toggle">
    <div class="toggle" aria-label="Toggle navigation bar">
      <span class="toggle-line toggle-line-first"></span>
      <span class="toggle-line toggle-line-middle"></span>
      <span class="toggle-line toggle-line-last"></span>
    </div>
  </div>

  <div class="site-meta">

    <a href="/" class="brand" rel="start">
      <span class="logo-line-before"><i></i></span>
      <h1 class="site-title">zasdfgbnm</h1>
      <span class="logo-line-after"><i></i></span>
    </a>
  </div>

  <div class="site-nav-right">
    <div class="toggle popup-trigger">
    </div>
  </div>
</div>




<nav class="site-nav">
  <ul id="menu" class="main-menu menu">
        <li class="menu-item menu-item-home">

    <a href="/" rel="section"><i class="fa fa-home fa-fw"></i>Home</a>

  </li>
        <li class="menu-item menu-item-archives">

    <a href="/archives/" rel="section"><i class="fa fa-archive fa-fw"></i>Archives</a>

  </li>
        <li class="menu-item menu-item-about">

    <a href="/about/" rel="section"><i class="fa fa-user fa-fw"></i>About</a>

  </li>
        <li class="menu-item menu-item-tags">

    <a href="/tags/" rel="section"><i class="fa fa-tags fa-fw"></i>Tags</a>

  </li>
        <li class="menu-item menu-item-categories">

    <a href="/categories/" rel="section"><i class="fa fa-th fa-fw"></i>Categories</a>

  </li>
        <li class="menu-item menu-item-sitemap">

    <a href="/sitemap.xml" rel="section"><i class="fa fa-sitemap fa-fw"></i>Sitemap</a>

  </li>
  </ul>
</nav>




</div>
    </header>

    
  <div class="back-to-top">
    <i class="fa fa-arrow-up"></i>
    <span>0%</span>
  </div>


    <main class="main">
      <div class="main-inner">
        <div class="content-wrap">
          

          <div class="content post posts-expand">
            

    
  
  
  <article itemscope itemtype="http://schema.org/Article" class="post-block" lang="en">
    <link itemprop="mainEntityOfPage" href="https://zasdfgbnm.github.io/2020/02/07/PyTorch-JIT-Source-Code-Read-Note-Updated-202002/">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="image" content="/images/avatar.gif">
      <meta itemprop="name" content="zasdfgbnm">
      <meta itemprop="description" content="">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="zasdfgbnm">
    </span>
      <header class="post-header">
        <h1 class="post-title" itemprop="name headline">
          PyTorch JIT Source Code Read Note (Updated at Feb 2020)
        </h1>

        <div class="post-meta">
            <span class="post-meta-item">
              <span class="post-meta-item-icon">
                <i class="far fa-calendar"></i>
              </span>
              <span class="post-meta-item-text">Posted on</span>

              <time title="Created: 2020-02-07 14:00:00" itemprop="dateCreated datePublished" datetime="2020-02-07T14:00:00-05:00">2020-02-07</time>
            </span>
              <span class="post-meta-item">
                <span class="post-meta-item-icon">
                  <i class="far fa-calendar-check"></i>
                </span>
                <span class="post-meta-item-text">Edited on</span>
                <time title="Modified: 2021-04-04 01:17:59" itemprop="dateModified" datetime="2021-04-04T01:17:59-04:00">2021-04-04</time>
              </span>
            <span class="post-meta-item">
              <span class="post-meta-item-icon">
                <i class="far fa-folder"></i>
              </span>
              <span class="post-meta-item-text">In</span>
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/categories/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0/" itemprop="url" rel="index"><span itemprop="name">机器学习</span></a>
                </span>
            </span>

          
  
  <span class="post-meta-item">
    
      <span class="post-meta-item-icon">
        <i class="far fa-comment"></i>
      </span>
      <span class="post-meta-item-text">Disqus: </span>
    
    <a title="disqus" href="/2020/02/07/PyTorch-JIT-Source-Code-Read-Note-Updated-202002/#disqus_thread" itemprop="discussionUrl">
      <span class="post-comments-count disqus-comment-count" data-disqus-identifier="2020/02/07/PyTorch-JIT-Source-Code-Read-Note-Updated-202002/" itemprop="commentCount"></span>
    </a>
  </span>
  
  

        </div>
      </header>

    
    
    
    <div class="post-body" itemprop="articleBody">

      
        <p>This is my note for reading PyTorch’s JIT source. We begin by looking at <code>torch.jit.script</code> to find the frontend that compiles the Python code into PyTorch’s tree views, and the backend that compiles tree views to graph. We also read the structure of the internal representation of PyTorch’s graph. Finally we go to graph executor to look at how the computation graph is further compiled into instructions and how the action of these instructions are defined and executed.</p>
<span id="more"></span>

<p>PyTorch is under very active development. So the PyTorch’s source code at the time the reader reading this article won’t be the same as when I wrote this article. To get the same source code as in this article, the readers could run the following command:</p>
<figure class="highlight bash"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">git checkout c6fa6d82aebc1dcd7561ea29ec5d41c5a211bae1</span><br></pre></td></tr></table></figure>


<h1 id="Starting-point-torch-jit-script"><a href="#Starting-point-torch-jit-script" class="headerlink" title="Starting point: torch.jit.script"></a>Starting point: torch.jit.script</h1><p>In PyTorch, a Python function can be just-in-time compiled by doing something like:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="meta">@torch.jit.script</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">f</span>(<span class="params">x</span>):</span></span><br><span class="line">    <span class="keyword">return</span> x + x</span><br></pre></td></tr></table></figure>

<p>the <code>torch.jit.script</code> is a decorator of your function <code>f</code>. If you are unfamiliar with Python’s decorator, please refer to <a target="_blank" rel="noopener" href="https://realpython.com/primer-on-python-decorators/">this article</a>.</p>
<p>We will start by looking at <code>torch.jit.script</code>. To read <code>torch.jit.script</code>, we begin by looking at <code>torch/jit/__init__.py</code>. To quickly locate <code>script</code>, search <code>def script</code> in your editor, and you will immediately find it:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">script</span>(<span class="params">obj, optimize=<span class="literal">True</span>, _frames_up=<span class="number">0</span></span>):</span></span><br><span class="line">    ....</span><br><span class="line">    <span class="keyword">if</span> inspect.isclass(obj):</span><br><span class="line">      ...</span><br><span class="line">    <span class="keyword">else</span>:</span><br><span class="line">      _check_directly_compile_overloaded(obj)</span><br><span class="line">      maybe_already_compiled_fn = _try_get_jit_cached_function(obj)</span><br><span class="line">      <span class="keyword">if</span> maybe_already_compiled_fn:</span><br><span class="line">          <span class="keyword">return</span> maybe_already_compiled_fn</span><br><span class="line">      ast = get_jit_def(obj)</span><br><span class="line">      <span class="keyword">if</span> _rcb <span class="keyword">is</span> <span class="literal">None</span>:</span><br><span class="line">          _rcb = _jit_internal.createResolutionCallbackFromClosure(obj)</span><br><span class="line">      fn = torch._C._jit_script_compile(qualified_name, ast, _rcb, get_default_args(obj))</span><br><span class="line">      <span class="comment"># Forward docstrings</span></span><br><span 
class="line">      fn.__doc__ = obj.__doc__</span><br><span class="line">      _set_jit_function_cache(obj, fn)</span><br><span class="line">      <span class="keyword">return</span> fn</span><br></pre></td></tr></table></figure>

<p>Here we want to quickly get a big picture of <code>torch.jit</code>, so we will only look at how a function is compiled. We will ignore the compilation of a module or class, etc.</p>
<p>The core of the above code is these four lines</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">ast = get_jit_def(obj)</span><br><span class="line"><span class="keyword">if</span> _rcb <span class="keyword">is</span> <span class="literal">None</span>:</span><br><span class="line">    _rcb = _jit_internal.createResolutionCallbackFromClosure(obj)</span><br><span class="line">fn = torch._C._jit_script_compile(qualified_name, ast, _rcb, get_default_args(obj))</span><br></pre></td></tr></table></figure>

<p>Just by reading the English, we can tell that what it does is roughly get the abstract syntax tree(AST) and then call <code>torch._C._jit_script_compile</code> to compile it to PyTorch’s internal representation.</p>
<p>From the beginning of <code>__init__.py</code>, we know that <code>get_jit_def</code> is imported from <code>torch.jit.frontend</code>. From the name of this function and its owning module, we can tell that this is the frontend of PyTorch’s JIT compiler that compiles the source code of the scripted function into AST.</p>
<p>Then <code>createResolutionCallbackFromClosure</code> is called. This function is from <code>_jit_internal.py</code>, by looking at its definition and the codes around, we can tell that it roughly gets the symbols available to the function so that they could be accessed through C++ when executing the graph. We will not go into details about how this works.</p>
<p>The next line uses <code>torch._C._jit_script_compile</code> to compile the AST obtained in the previous step into a computation graph. The <code>torch._C</code> tells us that <code>_jit_script_compile</code> is implemented in C++.</p>
<h1 id="The-Python-frontend"><a href="#The-Python-frontend" class="headerlink" title="The Python frontend"></a>The Python frontend</h1><p>A good starting point of the frontend is the <code>get_jit_def</code> we just saw. This function is defined at <code>torch/jit/frontend.py</code>. The code is:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">get_jit_def</span>(<span class="params">fn, self_name=<span class="literal">None</span></span>):</span></span><br><span class="line">    sourcelines, file_lineno, filename = get_source_lines_and_file(fn)</span><br><span class="line">    source = <span class="string">&#x27;&#x27;</span>.join(sourcelines)</span><br><span class="line">    dedent_src = dedent(source)</span><br><span class="line">    py_ast = ast.parse(dedent_src)</span><br><span class="line">    <span class="keyword">if</span> <span class="built_in">len</span>(py_ast.body) != <span class="number">1</span> <span class="keyword">or</span> <span class="keyword">not</span> <span class="built_in">isinstance</span>(py_ast.body[<span class="number">0</span>], ast.FunctionDef):</span><br><span class="line">        <span class="keyword">raise</span> RuntimeError(<span class="string">&quot;Expected a single top-level function&quot;</span>)</span><br><span class="line">    leading_whitespace_len = <span class="built_in">len</span>(source.split(<span class="string">&#x27;\n&#x27;</span>, <span class="number">1</span>)[<span class="number">0</span>]) - <span class="built_in">len</span>(dedent_src.split(<span class="string">&#x27;\n&#x27;</span>, <span class="number">1</span>)[<span class="number">0</span>])</span><br><span class="line">    type_line = torch.jit.annotations.get_type_line(source)</span><br><span class="line">    ctx = SourceContext(source, filename, file_lineno, 
leading_whitespace_len, _uses_true_division(fn))</span><br><span class="line">    <span class="keyword">return</span> build_def(ctx, py_ast.body[<span class="number">0</span>], type_line, self_name)</span><br></pre></td></tr></table></figure>

<p>The first 7 lines of the function body just use the standard tools provided by Python, <code>dedent</code>, <code>inspect</code>, and <code>ast</code>, to construct the Python AST, do some checks to make sure the thing being compiled is “a single top-level function”, as well as getting the position, line numbers, etc. so that useful information can be printed to the user for debugging when there is an error.</p>
<p>The following line <code>type_line = torch.jit.annotations.get_type_line(source)</code> is interesting. After looking at <code>torch/jit/annotations.py</code>, we can see that PyTorch’s JIT allows the user to specify the type of arguments and return value by writing something like <code># type: (Tensor, torch.Tensor) -&gt; Tuple[Tensor, Tensor]</code>.</p>
<p>In the next line <code>ctx = SourceContext(.....)</code>, the <code>_uses_true_division</code> is defined in the same file to handle the different behavior of <code>/</code> in Python2 with or without <code>from __future__ import division</code> (see <a target="_blank" rel="noopener" href="https://www.python.org/dev/peps/pep-0238/">PEP 238</a> for the difference). The <code>SourceContext</code> is also defined in the same file. It is a subclass of <code>SourceRangeFactory</code> with additional field to store if the division is true division. The <code>SourceRangeFactory</code> is imported by <code>from torch._C._jit_tree_views import *</code>. After reading its definition at <code>torch/csrc/jit/script/python_tree_views.cpp</code>, we can see that this is basically a class designed to store the range of source code, e.g. where in the source code a token is located.</p>
<p>The core is the <code>build_def</code> in the last line, so we move on:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">build_def</span>(<span class="params">ctx, py_def, type_line, self_name=<span class="literal">None</span></span>):</span></span><br><span class="line">    body = py_def.body</span><br><span class="line">    r = ctx.make_range(py_def.lineno + <span class="built_in">len</span>(py_def.decorator_list),</span><br><span class="line">                       py_def.col_offset,</span><br><span class="line">                       py_def.col_offset + <span class="built_in">len</span>(<span class="string">&quot;def&quot;</span>))</span><br><span class="line">    param_list = build_param_list(ctx, py_def.args, self_name)</span><br><span class="line">    return_type = <span class="literal">None</span></span><br><span class="line">    <span class="keyword">if</span> <span class="built_in">getattr</span>(py_def, <span class="string">&#x27;returns&#x27;</span>, <span class="literal">None</span>) <span class="keyword">is</span> <span class="keyword">not</span> <span class="literal">None</span>:</span><br><span class="line">        return_type = build_expr(ctx, py_def.returns)</span><br><span class="line">    decl = Decl(r, param_list, return_type)</span><br><span class="line">    is_method = self_name <span class="keyword">is</span> <span class="keyword">not</span> 
<span class="literal">None</span></span><br><span class="line">    <span class="keyword">if</span> type_line <span class="keyword">is</span> <span class="keyword">not</span> <span class="literal">None</span>:</span><br><span class="line">        type_comment_decl = torch._C.parse_type_comment(type_line)</span><br><span class="line">        decl = torch._C.merge_type_from_type_comment(decl, type_comment_decl, is_method)</span><br><span class="line">    <span class="keyword">return</span> Def(Ident(r, py_def.name),</span><br><span class="line">               decl,</span><br><span class="line">               build_stmts(ctx, body))</span><br></pre></td></tr></table></figure>

<p>Reading through this, we can see that what this basically does is convert Python’s AST into the internal representation. Names like <code>Decl</code>, <code>Def</code>, <code>Ident</code> are all imported by <code>from torch._C._jit_tree_views import *</code>. In the last line, we can see that the function body is constructed by <code>build_stmts</code>, so let’s go further to read <code>build_stmts</code>:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">build_stmts</span>(<span class="params">ctx, stmts</span>):</span></span><br><span class="line">    stmts = [build_stmt(ctx, s) <span class="keyword">for</span> s <span class="keyword">in</span> stmts]</span><br><span class="line">    <span class="keyword">return</span> <span class="built_in">list</span>(<span class="built_in">filter</span>(<span class="literal">None</span>, stmts))</span><br></pre></td></tr></table></figure>

<p>This is a very simple function: call <code>build_stmt</code> for each item and filter out those not needed. But what is <code>build_stmt</code>? It is defined as: <code>build_stmt = StmtBuilder()</code>. The definition of <code>StmtBuilder</code> looks like:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br></pre></td><td class="code"><pre><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">StmtBuilder</span>(<span class="params">Builder</span>):</span></span><br><span class="line">    <span class="comment"># ...</span></span><br><span class="line"><span class="meta">    @staticmethod</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">build_Expr</span>(<span class="params">ctx, stmt</span>):</span></span><br><span class="line">        value = stmt.value</span><br><span class="line">        <span class="keyword">if</span> value.__class__.__name__ == <span class="string">&#x27;Str&#x27;</span>:</span><br><span class="line">            <span class="comment"># If a statement is a string literal expression,</span></span><br><span class="line">            <span class="comment"># then it is a docstring. 
Just ignore it.</span></span><br><span class="line">            <span class="keyword">return</span> <span class="literal">None</span></span><br><span class="line">        <span class="keyword">else</span>:</span><br><span class="line">            <span class="keyword">return</span> ExprStmt(build_expr(ctx, value))</span><br><span class="line">    <span class="comment"># ...</span></span><br><span class="line"><span class="meta">    @staticmethod</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">build_Assign</span>(<span class="params">ctx, expr</span>):</span></span><br><span class="line">        <span class="comment"># ...</span></span><br><span class="line">    <span class="comment"># ...</span></span><br><span class="line"><span class="meta">    @staticmethod</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">build_AnnAssign</span>(<span class="params">ctx, stmt</span>):</span></span><br><span class="line">        <span class="comment">#...</span></span><br><span class="line">    <span class="comment"># ......</span></span><br></pre></td></tr></table></figure>

<p>We can see that this is a class with many static methods that define what to do for different types of Python AST nodes. I will not go deep into how each type is handled, since at this point the readers should be able to work out the details of how each type of node in the Python AST is dealt with by themselves. So we will stop our frontend reading right here.</p>
<h1 id="From-Python-AST-to-PyTorch-IR-part-1"><a href="#From-Python-AST-to-PyTorch-IR-part-1" class="headerlink" title="From Python AST to PyTorch IR: part 1"></a><a name="ast2ir"></a>From Python AST to PyTorch IR: part 1</h1><p>Now let’s move on to read <code>_jit_script_compile</code>. To find where it is located, simply run the command <code>grep _jit_script_compile -r .</code>. We will find something like:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">./torch/csrc/jit/script/init.cpp:      <span class="string">&quot;_jit_script_compile&quot;</span>,</span><br></pre></td></tr></table></figure>
<p>So, <code>torch/csrc/jit/script/init.cpp</code> would be a good start point. The complete definition of <code>_jit_script_compile</code> is:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre></td><td class="code"><pre><span class="line">m.<span class="built_in">def</span>(</span><br><span class="line">    <span class="string">&quot;_jit_script_compile&quot;</span>,</span><br><span class="line">    [](<span class="keyword">const</span> std::string&amp; qualname,</span><br><span class="line">       <span class="keyword">const</span> Def&amp; def,</span><br><span class="line">       ResolutionCallback rcb,</span><br><span class="line">       <span class="keyword">const</span> FunctionDefaults&amp; defaults) &#123;</span><br><span class="line">      <span class="built_in">C10_LOG_API_USAGE_ONCE</span>(<span class="string">&quot;torch.script.compile&quot;</span>);</span><br><span class="line">      <span class="keyword">const</span> <span class="keyword">auto</span> name = c10::<span class="built_in">QualifiedName</span>(qualname);</span><br><span class="line">      <span class="built_in">TORCH_INTERNAL_ASSERT</span>(name.<span class="built_in">name</span>() == def.<span class="built_in">name</span>().<span class="built_in">name</span>());</span><br><span class="line">      <span class="keyword">return</span> <span class="built_in">script_compile_function</span>(name, def, defaults, std::<span class="built_in">move</span>(rcb));</span><br><span class="line">    &#125;);</span><br></pre></td></tr></table></figure>

<p>So, let’s move on to <code>script_compile_function</code>. This is a function defined in the same file</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">static</span> StrongFunctionPtr <span class="title">script_compile_function</span><span class="params">(</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> c10::QualifiedName&amp; name,</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> Def&amp; def,</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> FunctionDefaults&amp; defaults,</span></span></span><br><span class="line"><span class="function"><span class="params">    ResolutionCallback rcb)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">auto</span> cu = <span class="built_in">get_python_cu</span>();</span><br><span class="line">  <span class="keyword">auto</span> defined_functions = cu-&gt;<span class="built_in">define</span>(</span><br><span class="line">      <span class="built_in">QualifiedName</span>(name.<span class="built_in">prefix</span>()),</span><br><span class="line">      &#123;def&#125;,</span><br><span class="line">      &#123;<span 
class="built_in">pythonResolver</span>(std::<span class="built_in">move</span>(rcb))&#125;,</span><br><span class="line">      <span class="literal">nullptr</span>,</span><br><span class="line">      <span class="literal">true</span>);</span><br><span class="line">  <span class="built_in">TORCH_INTERNAL_ASSERT</span>(defined_functions.<span class="built_in">size</span>() == <span class="number">1</span>);</span><br><span class="line">  <span class="keyword">auto</span>&amp; defined = defined_functions[<span class="number">0</span>];</span><br><span class="line">  defined-&gt;<span class="built_in">setSchema</span>(<span class="built_in">getSchemaWithNameAndDefaults</span>(</span><br><span class="line">      def.<span class="built_in">range</span>(), defined-&gt;<span class="built_in">getSchema</span>(), def.<span class="built_in">name</span>().<span class="built_in">name</span>(), defaults));</span><br><span class="line">  <span class="function">StrongFunctionPtr <span class="title">ret</span><span class="params">(std::move(cu), defined)</span></span>;</span><br><span class="line">  <span class="built_in">didFinishEmitFunction</span>(ret);</span><br><span class="line">  <span class="keyword">return</span> ret;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>Which part of this function does the actual job of compiling tree views to PyTorch IR? It could be <code>cu-&gt;define</code>, the constructor of <code>StrongFunctionPtr</code>,<br>or <code>didFinishEmitFunction</code>. It is not clear right now — it seems most likely to be in <code>cu-&gt;define</code> — but let’s skim through these functions:</p>
<p>By grepping <code>get_python_cu</code>, we can find this line</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">inline std::shared_ptr&lt;script::CompilationUnit&gt; get_python_cu() &#123;</span><br></pre></td></tr></table></figure>

<p>which tells us that the <code>cu</code> in the code is a <code>CompilationUnit</code>. By grepping <code>CompilationUnit::define</code>, we find its definition in <code>torch/csrc/jit/script/compiler.cpp</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br></pre></td><td class="code"><pre><span class="line"><span class="function">std::unique_ptr&lt;Function&gt; <span class="title">CompilationUnit::define</span><span class="params">(</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> c10::optional&lt;QualifiedName&gt;&amp; prefix,</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> Def&amp; def,</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> ResolverPtr&amp; resolver,</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> Self* self,</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> std::unordered_map&lt;std::string, Function*&gt;&amp; function_table,</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">bool</span> shouldMangle)</span> <span class="keyword">const</span> </span>&#123;</span><br><span class="line">  <span class="comment">// .......</span></span><br><span 
class="line">  <span class="keyword">auto</span> creator = [def, _resolver, self](Function&amp; method) &#123;</span><br><span class="line">    <span class="comment">// .......</span></span><br><span class="line">    <span class="built_in">to_ir</span>(def, _resolver, self, method);</span><br><span class="line">  &#125;;</span><br><span class="line">  <span class="comment">// .......</span></span><br><span class="line">  <span class="keyword">auto</span> fn = torch::make_unique&lt;Function&gt;(</span><br><span class="line">      std::<span class="built_in">move</span>(name), std::make_shared&lt;Graph&gt;(), creator);</span><br><span class="line">  <span class="keyword">if</span> (self) &#123;</span><br><span class="line">    <span class="comment">// Register this as a method on `self`&#x27;s type</span></span><br><span class="line">    self-&gt;<span class="built_in">getClassType</span>()-&gt;<span class="built_in">addMethod</span>(fn.<span class="built_in">get</span>());</span><br><span class="line">  &#125;</span><br><span class="line">  <span class="keyword">return</span> fn;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>Less important parts of the code are omitted. From the above, we can see that the core of compiling an AST into a compute graph is done in <code>to_ir</code>. It is defined in the same file. Skimming through <code>to_ir</code>, we find that it is a struct of ~3000 lines of code, with member functions that handle different cases of the Python AST. Without knowing PyTorch’s IR, it’s not easy to understand what <code>to_ir</code> does. So let’s pause a little bit to take a look at the PyTorch IR and come back later.</p>
<h1 id="The-PyTorch-IR"><a href="#The-PyTorch-IR" class="headerlink" title="The PyTorch IR"></a>The PyTorch IR</h1><p>PyTorch now has a very detailed design doc at <a target="_blank" rel="noopener" href="https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/docs/OVERVIEW.md"><code>torch/csrc/jit/docs/OVERVIEW.md</code></a>. We should read through this document before coming back.</p>
<h1 id="From-Python-AST-to-PyTorch-IR-part-2"><a href="#From-Python-AST-to-PyTorch-IR-part-2" class="headerlink" title="From Python AST to PyTorch IR: part 2"></a>From Python AST to PyTorch IR: part 2</h1><p>Let’s continue the reading by looking at the definition of <code>to_ir</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br></pre></td><td class="code"><pre><span class="line"><span class="built_in">to_ir</span>(</span><br><span class="line">    <span class="keyword">const</span> Def&amp; def,</span><br><span class="line">    ResolverPtr resolver_,</span><br><span class="line">    <span class="keyword">const</span> Self* self,</span><br><span class="line">    Function&amp; method) <span class="comment">// method being constructed</span></span><br><span class="line">    : <span class="built_in">method</span>(method),</span><br><span class="line">      <span class="built_in">graph</span>(method.<span class="built_in">graph</span>()),</span><br><span class="line">      <span class="built_in">resolver</span>(std::<span class="built_in">move</span>(resolver_)),</span><br><span class="line">      <span 
class="built_in">typeParser_</span>(resolver),</span><br><span class="line">      <span class="built_in">environment_stack</span>(<span class="literal">nullptr</span>) &#123;</span><br><span class="line">  <span class="built_in">AT_ASSERT</span>(resolver);</span><br><span class="line">  <span class="built_in">pushFrame</span>(graph-&gt;<span class="built_in">block</span>(), <span class="comment">/*starts_def=*/</span><span class="literal">true</span>);</span><br><span class="line"></span><br><span class="line">  <span class="comment">// Type annotations exclude explicitly typing the &quot;self&quot; parameter, so in</span></span><br><span class="line">  <span class="comment">// the case that this is a method with self we expect one fewer parameter</span></span><br><span class="line">  <span class="comment">// annotation than the number of parameters this Def takes.</span></span><br><span class="line">  <span class="keyword">if</span> (self &amp;&amp; def.<span class="built_in">decl</span>().<span class="built_in">params</span>().<span class="built_in">size</span>() == <span class="number">0</span>) &#123;</span><br><span class="line">    <span class="keyword">throw</span> <span class="built_in">ErrorReport</span>(def.<span class="built_in">decl</span>().<span class="built_in">params</span>().<span class="built_in">range</span>())</span><br><span class="line">        &lt;&lt; <span class="string">&quot;methods must have a self argument&quot;</span>;</span><br><span class="line">  &#125;</span><br><span class="line">  method.<span class="built_in">setSchema</span>(<span class="built_in">emitDef</span>(def, self, graph-&gt;<span class="built_in">block</span>()));</span><br><span class="line"></span><br><span class="line">  <span class="comment">// NB ORDERING: SSA conversion has to occur before</span></span><br><span class="line">  <span class="comment">// lifting of closures and forks, this way closures are converted</span></span><br><span class="line">  <span 
class="comment">// to SSA while part of their original graph, and closures are ready to</span></span><br><span class="line">  <span class="comment">// be inlined into forked closures</span></span><br><span class="line">  <span class="built_in">ConvertToSSA</span>(graph);</span><br><span class="line">  <span class="comment">// convert loops with an iter and body condition specified to</span></span><br><span class="line">  <span class="comment">// python-recognize while loops. we do this so they can be exported,</span></span><br><span class="line">  <span class="comment">// and run the pass early to avoid jitter. Like conversion to SSA,</span></span><br><span class="line">  <span class="comment">// it only needs to run once.</span></span><br><span class="line">  <span class="built_in">CanonicalizeModifiedLoops</span>(graph);</span><br><span class="line"></span><br><span class="line">  <span class="built_in">runCleanupPasses</span>(graph);</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>we can clearly see that we first convert from tree view to PyTorch IR in <code>emitDef</code>, then convert the PyTorch IR to SSA in <code>ConvertToSSA</code>, and then do some special handling of loops followed by a final cleanup. Looking around, it is also not hard to find that <code>ConvertToSSA</code> is defined in <code>torch/csrc/jit/script/convert_to_ssa.cpp</code>. Roughly, the flow chart is:</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">Python AST --[torch.jit.frontend]--&gt; Tree View --[compiler.cpp]--&gt; PyTorch IR --[convert_to_ssa.cpp]--&gt; SSA ----&gt; canonicalized and cleaned-up SSA</span><br></pre></td></tr></table></figure>

<p>Now let’s continue looking at <code>emitDef</code> for the <code>Tree View --&gt; PyTorch IR</code> conversion:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br></pre></td><td class="code"><pre><span class="line"><span class="function">FunctionSchema <span class="title">emitDef</span><span class="params">(<span class="keyword">const</span> Def&amp; def, <span class="keyword">const</span> Self* self, Block* block)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">auto</span> schema = typeParser_.<span class="built_in">parseSchemaFromDef</span>(def, <span class="built_in"><span class="keyword">bool</span></span>(self));</span><br><span class="line">  <span class="comment">// TODO need guards on init returning none</span></span><br><span class="line">  <span class="keyword">if</span> (schema.<span class="built_in">returns</span>().<span class="built_in">size</span>() == <span class="number">1</span>) &#123;</span><br><span class="line">    def_stack_.<span class="built_in">back</span>().declared_return_type_ = schema.<span class="built_in">returns</span>().<span class="built_in">at</span>(<span class="number">0</span>).<span class="built_in">type</span>();</span><br><span class="line">  &#125;</span><br><span class="line">  std::vector&lt;Argument&gt; arguments =</span><br><span class="line">      <span class="built_in">emitFormalArguments</span>(def, self, schema, block);</span><br><span class="line"></span><br><span class="line">  <span class="comment">// body</span></span><br><span class="line">  <span class="keyword">auto</span> stmts_list = 
def.<span class="built_in">statements</span>();</span><br><span class="line">  <span class="built_in">emitStatements</span>(stmts_list.<span class="built_in">begin</span>(), stmts_list.<span class="built_in">end</span>());</span><br><span class="line">  <span class="built_in">handleMaybeNoReturn</span>(def, block);</span><br><span class="line">  std::vector&lt;Argument&gt; returns = &#123;<span class="built_in">emitOutput</span>(def.<span class="built_in">range</span>(), schema, block)&#125;;</span><br><span class="line">  <span class="keyword">return</span> &#123;def.<span class="built_in">name</span>().<span class="built_in">name</span>(), <span class="string">&quot;&quot;</span>, std::<span class="built_in">move</span>(arguments), std::<span class="built_in">move</span>(returns)&#125;;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>Basically what it does is:</p>
<ol>
<li>parse schema and arguments</li>
<li>compile function body by <code>emitStatements</code></li>
<li>handle return</li>
</ol>
<p>We will not go into the details about schema and how the return is handled. We only continue looking at <code>emitStatements</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">emitStatements</span><span class="params">(</span></span></span><br><span class="line"><span class="function"><span class="params">    List&lt;Stmt&gt;::const_iterator begin,</span></span></span><br><span class="line"><span class="function"><span class="params">    List&lt;Stmt&gt;::const_iterator end)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">for</span> (; begin != end; ++begin) &#123;</span><br><span class="line">    <span class="keyword">auto</span> stmt = *begin;</span><br><span class="line">    ErrorReport::CallStack::<span class="built_in">update_pending_range</span>(stmt.<span class="built_in">range</span>());</span><br><span class="line">    <span class="built_in"><span class="keyword">switch</span></span> 
(stmt.<span class="built_in">kind</span>()) &#123;</span><br><span class="line">      <span class="keyword">case</span> TK_IF:</span><br><span class="line">        <span class="built_in">emitIf</span>(<span class="built_in">If</span>(stmt));</span><br><span class="line">        <span class="keyword">break</span>;</span><br><span class="line">      <span class="keyword">case</span> TK_WHILE:</span><br><span class="line">        <span class="built_in">emitWhile</span>(<span class="built_in">While</span>(stmt));</span><br><span class="line">        <span class="keyword">break</span>;</span><br><span class="line">      <span class="keyword">case</span> TK_FOR:</span><br><span class="line">        <span class="built_in">emitFor</span>(<span class="built_in">For</span>(stmt));</span><br><span class="line">        <span class="keyword">break</span>;</span><br><span class="line">      <span class="keyword">case</span> TK_ASSIGN:</span><br><span class="line">        <span class="built_in">emitAssignment</span>(<span class="built_in">Assign</span>(stmt));</span><br><span class="line">        <span class="keyword">break</span>;</span><br><span class="line">      <span class="keyword">case</span> TK_AUG_ASSIGN:</span><br><span class="line">        <span class="built_in">emitAugAssignment</span>(<span class="built_in">AugAssign</span>(stmt));</span><br><span class="line">        <span class="keyword">break</span>;</span><br><span class="line">      <span class="keyword">case</span> TK_EXPR_STMT: &#123;</span><br><span class="line">        <span class="keyword">auto</span> expr = <span class="built_in">ExprStmt</span>(stmt).<span class="built_in">expr</span>();</span><br><span class="line">        <span class="built_in">emitSugaredExpr</span>(expr, <span class="number">0</span>);</span><br><span class="line">      &#125; <span class="keyword">break</span>;</span><br><span class="line">      <span class="comment">// .......</span></span><br><span class="line">      <span 
class="keyword">default</span>:</span><br><span class="line">        <span class="keyword">throw</span> <span class="built_in">ErrorReport</span>(stmt)</span><br><span class="line">            &lt;&lt; <span class="string">&quot;Unrecognized statement kind &quot;</span> &lt;&lt; <span class="built_in">kindToString</span>(stmt.<span class="built_in">kind</span>());</span><br><span class="line">    &#125;</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>we can see that the handling of each kind of statement is dispatched by <code>stmt.kind()</code>. For each specialized emit, we would expect it to take the tree view as input and recursively traverse it to emit the code. Let’s first take a look at a simple specialization of this kind of emit:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// Currently we do not support assigning exceptions to variables,</span></span><br><span class="line"><span class="comment">// a = Exception(&quot;hi&quot;)</span></span><br><span class="line"><span class="comment">// raise a</span></span><br><span class="line"><span class="comment">//</span></span><br><span class="line"><span class="comment">// We ignore the expression following raise</span></span><br><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">emitRaise</span><span class="params">(<span class="keyword">const</span> SourceRange&amp; loc)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">const</span> std::string exception = <span class="string">&quot;Exception&quot;</span>;</span><br><span class="line">  <span class="keyword">auto</span> string_input = <span class="built_in">insertConstant</span>(*graph, exception, loc);</span><br><span class="line">  graph-&gt;<span class="built_in">insert</span>(prim::RaiseException, &#123;string_input&#125;, &#123;&#125;, loc);</span><br><span class="line">  exit_blocks.<span class="built_in">insert</span>(environment_stack-&gt;<span class="built_in">block</span>());</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>we see that it just inserts a <code>prim::RaiseException</code> into the graph.</p>
<p>When designing a compiler, expressions are always an important sublanguage to pay attention to. Let’s take a look at how PyTorch JIT handles expressions here. The place to look at is <code>emitSugaredExpr</code>. Before we start, it is worth mentioning that Python is a dynamically typed language, while PyTorch IR is statically typed. So we would expect <code>emitSugaredExpr</code> to have a type system to infer types. The <code>emitSugaredExpr</code> is:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// any expression that can produce a SugaredValue is handled here</span></span><br><span class="line"><span class="comment">// expressions that only return a single Value* are handled in emitSimpleExpr</span></span><br><span class="line"><span class="comment">// type_hint is set if there is a type that this value is expected to be</span></span><br><span class="line"><span class="comment">// e.g. 
a : List[int] = []</span></span><br><span class="line"><span class="comment">// or a = torch.jit.annotate(List[int], [])</span></span><br><span class="line"><span class="comment">// the caller is responsible for checking that the result matches type_hint</span></span><br><span class="line"><span class="comment">// emitSugaredExpr is free to ignore it.</span></span><br><span class="line"><span class="function">std::shared_ptr&lt;SugaredValue&gt; <span class="title">emitSugaredExpr</span><span class="params">(</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> Expr&amp; tree,</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">size_t</span> n_binders,</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> TypePtr&amp; type_hint = <span class="literal">nullptr</span>)</span> </span>&#123;</span><br><span class="line">  <span class="built_in"><span class="keyword">switch</span></span> (tree.<span class="built_in">kind</span>()) &#123;</span><br><span class="line">    <span class="keyword">case</span> TK_VAR:</span><br><span class="line">      <span class="keyword">return</span> environment_stack-&gt;<span class="built_in">getSugaredVar</span>(<span class="built_in">Var</span>(tree).<span class="built_in">name</span>());</span><br><span class="line">    <span class="keyword">case</span> <span class="string">&#x27;.&#x27;</span>: &#123;</span><br><span class="line">      <span class="keyword">auto</span> select = <span class="built_in">Select</span>(tree);</span><br><span class="line">      <span class="keyword">auto</span> sv = <span class="built_in">emitSugaredExpr</span>(select.<span class="built_in">value</span>(), <span class="number">1</span>);</span><br><span class="line">      <span class="keyword">return</span> sv-&gt;<span 
class="built_in">attr</span>(select.<span class="built_in">range</span>(), method, select.<span class="built_in">selector</span>().<span class="built_in">name</span>());</span><br><span class="line">    &#125;</span><br><span class="line">    <span class="keyword">case</span> TK_APPLY: &#123;</span><br><span class="line">      <span class="keyword">auto</span> apply = <span class="built_in">Apply</span>(tree);</span><br><span class="line">      <span class="keyword">return</span> <span class="built_in">emitApplyExpr</span>(apply, n_binders);</span><br><span class="line">    &#125; <span class="keyword">break</span>;</span><br><span class="line">    <span class="keyword">default</span>:</span><br><span class="line">      <span class="keyword">return</span> std::make_shared&lt;SimpleValue&gt;(<span class="built_in">emitSimpleExpr</span>(tree, type_hint));</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>Here we will only go deep into simple expressions. An example of simple expression is <code>a + b</code>, and we will look at how this specific example is handled. Now let’s look at <code>emitSimpleExpr</code>, it is:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br></pre></td><td class="code"><pre><span class="line"><span class="function">Value* <span class="title">emitSimpleExpr</span><span class="params">(</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> TreeRef&amp; tree,</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> TypePtr&amp; type_hint = <span class="literal">nullptr</span>)</span> </span>&#123;</span><br><span class="line">  <span class="built_in"><span class="keyword">switch</span></span> 
(tree-&gt;<span class="built_in">kind</span>()) &#123;</span><br><span class="line">    <span class="comment">// .....</span></span><br><span class="line">    <span class="keyword">case</span> TK_IN:</span><br><span class="line">    <span class="keyword">case</span> TK_POW:</span><br><span class="line">    <span class="keyword">case</span> TK_NE:</span><br><span class="line">    <span class="keyword">case</span> TK_EQ:</span><br><span class="line">    <span class="keyword">case</span> <span class="string">&#x27;&lt;&#x27;</span>:</span><br><span class="line">    <span class="keyword">case</span> <span class="string">&#x27;&gt;&#x27;</span>:</span><br><span class="line">    <span class="keyword">case</span> TK_LE:</span><br><span class="line">    <span class="keyword">case</span> TK_GE:</span><br><span class="line">    <span class="keyword">case</span> <span class="string">&#x27;*&#x27;</span>:</span><br><span class="line">    <span class="keyword">case</span> <span class="string">&#x27;/&#x27;</span>:</span><br><span class="line">    <span class="keyword">case</span> <span class="string">&#x27;+&#x27;</span>:</span><br><span class="line">    <span class="keyword">case</span> <span class="string">&#x27;-&#x27;</span>:</span><br><span class="line">    <span class="keyword">case</span> <span class="string">&#x27;%&#x27;</span>:</span><br><span class="line">    <span class="keyword">case</span> <span class="string">&#x27;&amp;&#x27;</span>:</span><br><span class="line">    <span class="keyword">case</span> <span class="string">&#x27;|&#x27;</span>:</span><br><span class="line">    <span class="keyword">case</span> <span class="string">&#x27;^&#x27;</span>: &#123;</span><br><span class="line">      <span class="keyword">const</span> <span class="keyword">auto</span>&amp; inputs = tree-&gt;<span class="built_in">trees</span>();</span><br><span class="line">      <span class="keyword">auto</span> kind = <span class="built_in">getNodeKind</span>(tree-&gt;<span 
class="built_in">kind</span>(), inputs.<span class="built_in">size</span>());</span><br><span class="line">      <span class="keyword">auto</span> overload = <span class="built_in">getOperatorOverload</span>(tree-&gt;<span class="built_in">kind</span>(), inputs.<span class="built_in">size</span>());</span><br><span class="line">      <span class="keyword">auto</span> named_values = <span class="built_in">getNamedValues</span>(inputs, <span class="comment">/*maybe_unpack=*/</span><span class="literal">false</span>);</span><br><span class="line"></span><br><span class="line">      <span class="keyword">if</span> (tree-&gt;<span class="built_in">kind</span>() == TK_IN) &#123;</span><br><span class="line">        <span class="comment">// For `in` the arguments are in reverse order (the object being</span></span><br><span class="line">        <span class="comment">// checked is second)</span></span><br><span class="line">        std::<span class="built_in">iter_swap</span>(named_values.<span class="built_in">begin</span>() + <span class="number">0</span>, named_values.<span class="built_in">begin</span>() + <span class="number">1</span>);</span><br><span class="line">      &#125;</span><br><span class="line"></span><br><span class="line">      <span class="keyword">return</span> <span class="built_in">asSimple</span>(</span><br><span class="line">          <span class="built_in">makeMagic</span>(</span><br><span class="line">              overload, std::make_shared&lt;BuiltinFunction&gt;(kind, at::nullopt))</span><br><span class="line">              -&gt;<span class="built_in">call</span>(tree-&gt;<span class="built_in">range</span>(), method, named_values, &#123;&#125;, <span class="number">0</span>));</span><br><span class="line">    &#125;</span><br><span class="line">    <span class="comment">// ....</span></span><br><span class="line">    <span class="keyword">default</span>:</span><br><span class="line">      <span class="keyword">throw</span> <span 
class="built_in">ErrorReport</span>(tree) &lt;&lt; <span class="string">&quot;Cannot emit expr for: &quot;</span> &lt;&lt; tree;</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We can see that, the actual work is done inside <code>BuiltinFunction::call</code>. This function is defined in <code>sugared_value.cpp</code> as:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="function">std::shared_ptr&lt;SugaredValue&gt; <span class="title">BuiltinFunction::call</span><span class="params">(</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> SourceRange&amp; loc,</span></span></span><br><span class="line"><span class="function"><span class="params">    Function&amp; m,</span></span></span><br><span class="line"><span class="function"><span class="params">    at::ArrayRef&lt;NamedValue&gt; inputs,</span></span></span><br><span class="line"><span class="function"><span class="params">    at::ArrayRef&lt;NamedValue&gt; attributes,</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">size_t</span> n_binders)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">return</span> std::make_shared&lt;SimpleValue&gt;(</span><br><span class="line">      <span class="built_in">emitBuiltinCall</span>(loc, *m.<span class="built_in">graph</span>(), symbol, inputs, attributes, self));</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>where <code>emitBuiltinCall</code> is defined in <code>schema_matching.cpp</code> as:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// Search for operators matching the provided symbol name and input types.</span></span><br><span class="line"><span class="comment">// If one is found, emit a node to the 
graph for that operator.</span></span><br><span class="line"><span class="function">Value* <span class="title">emitBuiltinCall</span><span class="params">(</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> SourceRange&amp; loc,</span></span></span><br><span class="line"><span class="function"><span class="params">    Graph&amp; graph,</span></span></span><br><span class="line"><span class="function"><span class="params">    Symbol name,</span></span></span><br><span class="line"><span class="function"><span class="params">    at::ArrayRef&lt;NamedValue&gt; inputs,</span></span></span><br><span class="line"><span class="function"><span class="params">    at::ArrayRef&lt;NamedValue&gt; attributes,</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> c10::optional&lt;NamedValue&gt;&amp; self)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">const</span> <span class="keyword">auto</span>&amp; variants = <span class="built_in">getAllOperatorsFor</span>(name);</span><br><span class="line">  <span class="keyword">const</span> <span class="keyword">auto</span>&amp; builtin_functions = <span class="built_in">getAllBuiltinFunctionsFor</span>(name);</span><br><span class="line"></span><br><span class="line">  std::stringstream failure_messages;</span><br><span class="line">  std::vector&lt;<span class="keyword">const</span> FunctionSchema*&gt; schemas;</span><br><span class="line">  <span class="keyword">for</span> (<span class="keyword">const</span> std::shared_ptr&lt;Operator&gt;&amp; op : variants) &#123;</span><br><span class="line">    schemas.<span class="built_in">push_back</span>(&amp;op-&gt;<span class="built_in">schema</span>());</span><br><span class="line">  &#125;</span><br><span class="line">  <span class="keyword">for</span> (<span class="keyword">const</span> <span 
class="keyword">auto</span> method : builtin_functions) &#123;</span><br><span class="line">    method-&gt;<span class="built_in">ensure_defined</span>();</span><br><span class="line">    schemas.<span class="built_in">push_back</span>(&amp;method-&gt;<span class="built_in">getSchema</span>());</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  <span class="comment">// no operators found with the same name, print out similarly named operators</span></span><br><span class="line">  <span class="keyword">if</span> (schemas.<span class="built_in">size</span>() == <span class="number">0</span>) &#123;</span><br><span class="line">    <span class="keyword">const</span> <span class="keyword">auto</span> close_symbols = <span class="built_in">findSimilarOperators</span>(name);</span><br><span class="line">    <span class="keyword">auto</span> error = <span class="built_in">ErrorReport</span>(loc);</span><br><span class="line">    <span class="keyword">const</span> <span class="keyword">auto</span>&amp; user_function_name = name.<span class="built_in">toQualString</span>();</span><br><span class="line">    error &lt;&lt; <span class="string">&quot;Unknown builtin op: &quot;</span> &lt;&lt; user_function_name &lt;&lt; <span class="string">&quot;.\n&quot;</span>;</span><br><span class="line">    <span class="keyword">if</span> (close_symbols.<span class="built_in">size</span>() == <span class="number">0</span>) &#123;</span><br><span class="line">      error</span><br><span class="line">          &lt;&lt; <span class="string">&quot;Could not find any similar ops to &quot;</span> &lt;&lt; user_function_name</span><br><span class="line">          &lt;&lt; <span class="string">&quot;. 
This op may not exist or may not be currently supported in TorchScript.\n&quot;</span>;</span><br><span class="line">    &#125; <span class="keyword">else</span> &#123;</span><br><span class="line">      error &lt;&lt; <span class="string">&quot;Here are some suggestions: \n&quot;</span>;</span><br><span class="line">      <span class="keyword">for</span> (<span class="keyword">const</span> <span class="keyword">auto</span>&amp; sym : close_symbols) &#123;</span><br><span class="line">        error &lt;&lt; <span class="string">&quot;\t&quot;</span> &lt;&lt; sym.<span class="built_in">toQualString</span>() &lt;&lt; <span class="string">&quot;\n&quot;</span>;</span><br><span class="line">      &#125;</span><br><span class="line">      error &lt;&lt; <span class="string">&quot;\nThe original call is&quot;</span>;</span><br><span class="line">    &#125;</span><br><span class="line">    <span class="keyword">throw</span> error;</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  <span class="keyword">auto</span> matched = <span class="built_in">matchSchemas</span>(schemas, loc, graph, inputs, attributes, self);</span><br><span class="line"></span><br><span class="line">  <span class="keyword">if</span> (matched.first &lt; variants.<span class="built_in">size</span>()) &#123;</span><br><span class="line">    <span class="keyword">return</span> <span class="built_in">emitBuiltinNode</span>(matched.second, loc, graph, name);</span><br><span class="line">  &#125; <span class="keyword">else</span> &#123;</span><br><span class="line">    Function* fn = builtin_functions[matched.first - variants.<span class="built_in">size</span>()];</span><br><span class="line">    <span class="comment">// we inline builtin calls because they are normally very small</span></span><br><span class="line">    <span class="comment">// wrappers and are not useful for keeping around to debug</span></span><br><span class="line">    <span 
class="keyword">return</span> <span class="built_in">insertGraph</span>(graph, *fn-&gt;<span class="built_in">graph</span>(), matched.second.inputs).<span class="built_in">at</span>(<span class="number">0</span>);</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We can see that it looks up the database of schemas, finds the matching one, and then uses <code>emitBuiltinNode</code> to emit PyTorch IR. <code>emitBuiltinNode</code> is implemented as:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// Given a successful match between operator schema and symbol, emit a node</span></span><br><span class="line"><span class="comment">// with the appropriate inputs and outputs.</span></span><br><span class="line"><span class="function"><span class="keyword">static</span> Value* <span class="title">emitBuiltinNode</span><span class="params">(</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> MatchedSchema&amp; matched_schema,</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> SourceRange&amp; loc,</span></span></span><br><span class="line"><span class="function"><span class="params">    Graph&amp; graph,</span></span></span><br><span class="line"><span class="function"><span class="params">    Symbol name)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">auto</span> n = graph.<span class="built_in">insertNode</span>(graph.<span class="built_in">create</span>(name, matched_schema.inputs, <span class="number">0</span>))</span><br><span class="line">               -&gt;<span class="built_in">setSourceRange</span>(loc);</span><br><span 
class="line"></span><br><span class="line">  <span class="keyword">for</span> (<span class="keyword">auto</span>&amp; ret : matched_schema.return_types) &#123;</span><br><span class="line">    n-&gt;<span class="built_in">addOutput</span>()-&gt;<span class="built_in">setType</span>(ret);</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  <span class="comment">// assert that we did indeed create an op that has implementation</span></span><br><span class="line">  <span class="comment">// otherwise schema and dispatch are not in sync</span></span><br><span class="line">  <span class="built_in">getOperation</span>(n);</span><br><span class="line"></span><br><span class="line">  <span class="keyword">return</span> <span class="built_in">packOutputs</span>(graph, n-&gt;<span class="built_in">outputs</span>(), matched_schema.return_field_names);</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We can see that the type is inferred from the schema in the following lines:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">for</span> (<span class="keyword">auto</span>&amp; ret : matched_schema.return_types) &#123;</span><br><span class="line">  n-&gt;<span class="built_in">addOutput</span>()-&gt;<span class="built_in">setType</span>(ret);</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>At this point, we have already got the big picture of the <code>Tree View --&gt; PyTorch IR</code> conversion: dispatch by the node kind of the tree view, and use schemas to do type inference.</p>
<p>After <code>to_ir</code> is called, we should have a graph of PyTorch IR which is statically typed. As described before, this graph will be converted to SSA and loops will be canonicalized. The conversion to SSA is a pretty standard task of compilers in general. Readers could refer to <a target="_blank" rel="noopener" href="https://en.wikipedia.org/wiki/Static_single_assignment_form#Converting_to_SSA">Wikipedia</a> for how this can be done and read <code>convert_to_ssa.cpp</code> in PyTorch for details. We will not go into details about loop canonicalization either.</p>
<h1 id="The-Graph-Executor"><a href="#The-Graph-Executor" class="headerlink" title="The Graph Executor"></a>The Graph Executor</h1><p>Now that we have seen how the compilation is done and what PyTorch JIT’s IR looks like, the thing left is how the IR is executed. As we have already seen in <a href="#ast2ir">From Python AST to PyTorch IR: part 1</a>, <code>script_compile_function</code> returns a pointer to a class <code>Function</code>. By looking at the implementation of <code>Function</code> at <code>torch/csrc/jit/function.&#123;h, cpp&#125;</code> we can easily see how a graph is executed:</p>
<p>In <code>functions.h</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line"><span class="meta">#<span class="meta-keyword">pragma</span> once</span></span><br><span class="line"><span class="meta">#<span class="meta-keyword">include</span> <span class="meta-string">&lt;torch/csrc/jit/graph_executor.h&gt;</span></span></span><br></pre></td></tr></table></figure>

<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="function">std::shared_ptr&lt;Graph&gt; <span class="title">optimized_graph</span><span class="params">()</span> <span class="keyword">const</span> </span>&#123;</span><br><span class="line">  <span class="function">std::lock_guard&lt;std::recursive_mutex&gt; <span class="title">lock</span><span class="params">(compile_mutex)</span></span>;</span><br><span class="line">  <span class="keyword">if</span> (optimized_graph_) &#123;</span><br><span class="line">    <span class="keyword">return</span> *optimized_graph_;</span><br><span class="line">  &#125;</span><br><span class="line">  optimized_graph_ = graph_-&gt;<span class="built_in">copy</span>();</span><br><span class="line">  <span class="built_in">preoptimizeGraph</span>(*optimized_graph_);</span><br><span class="line">  <span class="keyword">return</span> *optimized_graph_;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line"><span class="function">GraphExecutor&amp; <span class="title">get_executor</span><span class="params">()</span> </span>&#123;</span><br><span class="line">  <span class="built_in">ensure_defined</span>();</span><br><span class="line">  <span class="function">std::lock_guard&lt;std::recursive_mutex&gt; <span class="title">lock</span><span class="params">(compile_mutex)</span></span>;</span><br><span class="line">  <span class="keyword">if</span> (executor_) &#123;</span><br><span class="line">    <span class="keyword">return</span> executor_;</span><br><span class="line">  &#125;</span><br><span class="line">  <span class="built_in">check_single_output</span>();</span><br><span class="line">  executor_ = <span class="built_in">GraphExecutor</span>(<span class="built_in">optimized_graph</span>());</span><br><span class="line">  <span class="keyword">return</span> executor_;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>and in <code>functions.cpp</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// functions.cpp</span></span><br><span class="line"><span class="meta">#<span class="meta-keyword">include</span> <span class="meta-string">&lt;torch/csrc/jit/passes/inliner.h&gt;</span></span></span><br></pre></td></tr></table></figure>

<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">Function::run</span><span class="params">(Stack&amp; stack)</span> </span>&#123;</span><br><span class="line">  <span class="built_in">get_executor</span>().<span class="built_in">run</span>(stack);</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">Function::run</span><span class="params">(Stack&amp;&amp; stack)</span> </span>&#123;</span><br><span class="line">  <span class="built_in">run</span>(stack);</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line"><span class="function">IValue <span class="title">Function::operator</span><span class="params">()</span><span class="params">(</span></span></span><br><span class="line"><span class="function"><span class="params">    std::vector&lt;IValue&gt; stack,</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> Kwargs&amp; kwargs)</span> </span>&#123;</span><br><span class="line">  <span class="built_in">getSchema</span>().<span class="built_in">checkAndNormalizeInputs</span>(stack, kwargs);</span><br><span class="line">  <span class="built_in">run</span>(stack);</span><br><span class="line">  <span class="keyword">return</span> 
stack.<span class="built_in">front</span>();</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">preoptimizeGraph</span><span class="params">(std::shared_ptr&lt;Graph&gt;&amp; graph)</span> </span>&#123;</span><br><span class="line">  <span class="comment">// <span class="doctag">TODO:</span> Invoke cleanup passes before and after inlining to reduce amount of</span></span><br><span class="line">  <span class="comment">// code we&#x27;re copying.</span></span><br><span class="line">  <span class="built_in">Inline</span>(*graph);</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We can see that, when we want to execute a graph, PyTorch first inlines the graph using the inliner defined in <code>torch/csrc/jit/passes/inliner.h</code>, then creates a <code>GraphExecutor</code> for the inlined graph. We will not go into details on how the graph is inlined. We will move to <code>GraphExecutor</code> instead.</p>
<p>The <code>GraphExecutor</code> is defined in <code>torch/csrc/jit/graph_executor.&#123;h, cpp&#125;</code>.</p>
<p>The constructor and <code>run</code> tells us that <code>GraphExecutor</code> is just a wrapper of <code>GraphExecutorImplBase</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line">GraphExecutor::<span class="built_in">GraphExecutor</span>(std::shared_ptr&lt;Graph&gt; graph)</span><br><span class="line">    : <span class="built_in">pImpl</span>(</span><br><span class="line">          <span class="built_in">getExecutorMode</span>() ? <span class="keyword">dynamic_cast</span>&lt;GraphExecutorImplBase*&gt;(</span><br><span class="line">                                  <span class="keyword">new</span> <span class="built_in">ProfilingGraphExecutorImpl</span>(graph))</span><br><span class="line">                            : <span class="keyword">dynamic_cast</span>&lt;GraphExecutorImplBase*&gt;(</span><br><span class="line">                                  <span class="keyword">new</span> <span class="built_in">GraphExecutorImpl</span>(graph))) &#123;&#125;</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">GraphExecutor::run</span><span class="params">(Stack&amp; inputs)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">return</span> pImpl-&gt;<span class="built_in">run</span>(inputs);</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We can also tell that there are actually two implementations of graph executor: <code>GraphExecutorImpl</code> and <code>ProfilingGraphExecutorImpl</code>. Both are subclasses of <code>GraphExecutorImplBase</code>. <code>GraphExecutorImpl</code> is implemented in <code>graph_executor.cpp</code>, while <code>ProfilingGraphExecutorImpl</code> is implemented in <code>profiling_graph_executor_impl.cpp</code>. We will look at both implementations.</p>
<p>Before looking at any implementation, let’s first look at the base class, which is also implemented in <code>graph_executor.cpp</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">GraphExecutorImplBase::run</span><span class="params">(Stack&amp; stack)</span> </span>&#123;</span><br><span class="line">  <span class="built_in">TORCH_CHECK</span>(</span><br><span class="line">      stack.<span class="built_in">size</span>() &gt;= num_inputs,</span><br><span class="line">      <span class="string">&quot;expected &quot;</span>,</span><br><span class="line">      num_inputs,</span><br><span class="line">      <span class="string">&quot; inputs, but got only &quot;</span>,</span><br><span class="line">      stack.<span class="built_in">size</span>());</span><br><span class="line"></span><br><span class="line">  <span class="built_in">C10_LOG_API_USAGE_ONCE</span>(<span class="string">&quot;torch.graph_executor.run&quot;</span>);</span><br><span class="line">  logging::<span class="built_in">getLogger</span>()-&gt;<span class="built_in">addStatValue</span>(</span><br><span class="line">      logging::runtime_counters::GRAPH_EXECUTOR_INVOCATIONS, <span class="number">1.0</span>);</span><br><span class="line"></span><br><span class="line">  ExecutionPlan plan =</span><br><span class="line">      <span class="built_in">getPlanFor</span>(stack, GraphExecutor::<span class="built_in">getDefaultNumBailOuts</span>());</span><br><span 
class="line">  <span class="built_in">InterpreterState</span>(plan.code).<span class="built_in">run</span>(stack);</span><br><span class="line">  last_executed_optimized_graph = plan.graph;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We can see that it first gets an <code>ExecutionPlan</code>, creates a state machine <code>InterpreterState</code>, and runs the state machine on the stack.</p>
<h2 id="GraphExecutorImpl"><a href="#GraphExecutorImpl" class="headerlink" title="GraphExecutorImpl"></a>GraphExecutorImpl</h2><p>Now let’s move on to <code>GraphExecutorImpl</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line"><span class="function">ExecutionPlan <span class="title">getPlanFor</span><span class="params">(Stack&amp; stack, <span class="keyword">size_t</span> remaining_bailout_depth)</span></span></span><br><span class="line"><span class="function">    <span class="keyword">override</span> </span>&#123;</span><br><span class="line">  <span class="keyword">return</span> <span class="built_in">getGraphExecutorOptimize</span>() ? <span class="built_in">getOrCompile</span>(stack)</span><br><span class="line">                                    : <span class="built_in">getOrCompileFallback</span>();</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We can see that the second argument <code>remaining_bailout_depth</code> is completely ignored, which indicates that <code>GraphExecutorImpl</code> does not have a bailout mechanism.</p>
<p>We also see that the graph is compiled the first time it runs to get an execution plan. Compilation of the graph to an execution plan is done by <code>getOrCompile</code> or <code>getOrCompileFallback</code>, depending on whether optimization is enabled. These two methods are copied below:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">const</span> ExecutionPlan&amp; <span class="title">getOrCompileFallback</span><span class="params">()</span> </span>&#123;</span><br><span class="line">  <span class="function">std::lock_guard&lt;std::mutex&gt; <span class="title">lock</span><span class="params">(compile_mutex)</span></span>;</span><br><span class="line">  <span class="keyword">if</span> (!fallback) &#123;</span><br><span class="line">    <span class="keyword">auto</span> graph_ = graph-&gt;<span class="built_in">copy</span>();</span><br><span class="line">    <span class="built_in">runRequiredPasses</span>(graph_);</span><br><span class="line">    fallback = <span class="built_in">ExecutionPlan</span>(graph_);</span><br><span class="line">  &#125;</span><br><span class="line">  <span class="keyword">return</span> fallback;</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span 
class="line"><span class="function"><span class="keyword">const</span> ExecutionPlan&amp; <span class="title">getOrCompile</span><span class="params">(<span class="keyword">const</span> Stack&amp; stack)</span> </span>&#123;</span><br><span class="line">  <span class="comment">// outside lock guard, to minimize the time holding the lock on the fast</span></span><br><span class="line">  <span class="comment">// path ArgumentSpec even computes its hashCode here.</span></span><br><span class="line">  ArgumentSpec spec =</span><br><span class="line">      arg_spec_creator_.<span class="built_in">create</span>(autograd::GradMode::<span class="built_in">is_enabled</span>(), stack);</span><br><span class="line">  &#123;</span><br><span class="line">    <span class="function">std::lock_guard&lt;std::mutex&gt; <span class="title">lock</span><span class="params">(compile_mutex)</span></span>;</span><br><span class="line">    <span class="keyword">auto</span> it = plan_cache.<span class="built_in">find</span>(spec);</span><br><span class="line">    <span class="keyword">if</span> (it != plan_cache.<span class="built_in">end</span>()) &#123;</span><br><span class="line">      logging::<span class="built_in">getLogger</span>()-&gt;<span class="built_in">addStatValue</span>(</span><br><span class="line">          logging::runtime_counters::EXECUTION_PLAN_CACHE_HIT, <span class="number">1.0</span>);</span><br><span class="line">      <span class="keyword">return</span> it-&gt;second;</span><br><span class="line">    &#125;</span><br><span class="line">    <span class="keyword">auto</span> plan = <span class="built_in">compileSpec</span>(spec);</span><br><span class="line">    <span class="keyword">auto</span> r = plan_cache.<span class="built_in">emplace</span>(std::<span class="built_in">move</span>(spec), std::<span class="built_in">move</span>(plan));</span><br><span class="line">    logging::<span class="built_in">getLogger</span>()-&gt;<span 
class="built_in">addStatValue</span>(</span><br><span class="line">        logging::runtime_counters::EXECUTION_PLAN_CACHE_MISS, <span class="number">1.0</span>);</span><br><span class="line">    <span class="keyword">return</span> r.first-&gt;second;</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>This code explains itself well: if optimization is turned off, then we only run required passes and cache the result. Otherwise, depending on the characteristics of the inputs (<code>ArgumentSpec</code>), we run full optimization and cache the generated plan for each different <code>ArgumentSpec</code>. The plan is created by the constructor of <code>ExecutionPlan</code>.</p>
<p>It is worth taking a look at what passes are called:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span 
class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br></pre></td><td class="code"><pre><span class="line"><span class="function">ExecutionPlan <span class="title">compileSpec</span><span class="params">(<span class="keyword">const</span> ArgumentSpec&amp; spec)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">auto</span> opt_graph = graph-&gt;<span class="built_in">copy</span>();</span><br><span class="line">  <span class="built_in">SOURCE_DUMP</span>(<span class="string">&quot;Optimizing the following function:&quot;</span>, opt_graph);</span><br><span class="line">  arg_spec_creator_.<span class="built_in">specializeTypes</span>(*opt_graph, spec);</span><br><span class="line"></span><br><span class="line">  <span class="comment">// Phase 0. Inline functions, then clean up any artifacts that the inliner</span></span><br><span class="line">  <span class="comment">//          left in that may inhibit optimization</span></span><br><span class="line">  <span class="built_in">Inline</span>(*opt_graph);</span><br><span class="line">  <span class="built_in">LowerGradOf</span>(*opt_graph);</span><br><span class="line">  <span class="built_in">specializeAutogradZero</span>(*opt_graph);</span><br><span class="line">  <span class="built_in">LowerSimpleTuples</span>(opt_graph);</span><br><span class="line">  <span class="built_in">ConstantPooling</span>(opt_graph);</span><br><span class="line"></span><br><span class="line">  <span class="comment">// Phase 1. 
Specialize to input definedness (this is very important for</span></span><br><span class="line">  <span class="comment">//          gradient graphs), and run required passes to bring the graph</span></span><br><span class="line">  <span class="comment">//          to an executable form.</span></span><br><span class="line">  <span class="built_in">runRequiredPasses</span>(opt_graph);</span><br><span class="line"></span><br><span class="line">  <span class="comment">// Phase 2. Propagate detailed information about the spec through the</span></span><br><span class="line">  <span class="comment">//          graph (enabled more specializations in later passes).</span></span><br><span class="line">  <span class="comment">//          Shape propagation sometimes depends on certain arguments being</span></span><br><span class="line">  <span class="comment">//          constants, and constant propagation doesn&#x27;t need shape</span></span><br><span class="line">  <span class="comment">//          information anyway, so it&#x27;s better to run it first.</span></span><br><span class="line">  <span class="built_in">ConstantPropagation</span>(opt_graph);</span><br><span class="line">  <span class="built_in">PropagateInputShapes</span>(opt_graph);</span><br><span class="line">  <span class="built_in">PropagateRequiresGrad</span>(opt_graph);</span><br><span class="line"></span><br><span class="line">  <span class="comment">// Phase 3. Run differentiable optimizations (i.e. simple graph rewrites</span></span><br><span class="line">  <span class="comment">//          that we can still execute using autograd).</span></span><br><span class="line">  <span class="built_in">runOptimization</span>(opt_graph);</span><br><span class="line"></span><br><span class="line">  <span class="comment">// Phase 4. 
If this graph will be differentiated, we need to slice out the</span></span><br><span class="line">  <span class="comment">//          symbolically differentiable subgraphs for further optimizations.</span></span><br><span class="line">  <span class="comment">// Phase 5. Apply non-differentiable optimizations to the graphs we&#x27;ve found</span></span><br><span class="line">  <span class="comment">//          (or the whole graph if we know we won&#x27;t need its derivative).</span></span><br><span class="line">  <span class="keyword">if</span> (<span class="built_in">needsGradient</span>(opt_graph)) &#123;</span><br><span class="line">    <span class="keyword">auto</span> diff_nodes = <span class="built_in">CreateAutodiffSubgraphs</span>(</span><br><span class="line">        opt_graph,</span><br><span class="line">        autodiff_subgraph_inlining ? autodiffSubgraphNodeThreshold : <span class="number">1</span>);</span><br><span class="line">    <span class="keyword">for</span> (Node* dnode : diff_nodes) &#123;</span><br><span class="line">      <span class="keyword">auto</span> diff_graph = std::<span class="built_in">move</span>(dnode-&gt;<span class="built_in">g</span>(attr::Subgraph));</span><br><span class="line">      Gradient gradient = <span class="built_in">differentiate</span>(diff_graph);</span><br><span class="line">      <span class="comment">// Run post differentiation optimizations, Autodiff will replace some</span></span><br><span class="line">      <span class="comment">// parts of graph with new graph, these new graphs usually consists of</span></span><br><span class="line">      <span class="comment">// control flows and miss shape information on nodes, so we run shape</span></span><br><span class="line">      <span class="comment">// prop and differentiable optimizations to ensure the graph is</span></span><br><span class="line">      <span class="comment">// optimized</span></span><br><span class="line">      <span 
class="built_in">PropagateInputShapes</span>(gradient.f);</span><br><span class="line">      <span class="built_in">runOptimization</span>(gradient.f);</span><br><span class="line">      <span class="comment">// run non diff optimization on the forward graph</span></span><br><span class="line">      <span class="built_in">runNondiffOptimization</span>(gradient.f);</span><br><span class="line">      <span class="built_in">packGradient</span>(gradient, dnode);</span><br><span class="line">    &#125;</span><br><span class="line">    <span class="built_in">InlineAutodiffSubgraphs</span>(</span><br><span class="line">        opt_graph,</span><br><span class="line">        autodiff_subgraph_inlining ? autodiffSubgraphInlineThreshold : <span class="number">1</span>);</span><br><span class="line">  &#125; <span class="keyword">else</span> &#123;</span><br><span class="line">    <span class="built_in">runNondiffOptimization</span>(opt_graph);</span><br><span class="line">  &#125;</span><br><span class="line">  <span class="comment">// Make sure there are no leftovers from any passes.</span></span><br><span class="line">  <span class="built_in">EliminateDeadCode</span>(opt_graph);</span><br><span class="line">  <span class="keyword">return</span> <span class="built_in">ExecutionPlan</span>(opt_graph);</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">runNondiffOptimization</span><span class="params">(std::shared_ptr&lt;Graph&gt;&amp; graph)</span> </span>&#123;</span><br><span class="line">  <span class="comment">// decomposition pass, decompose certain ops that will be used in the</span></span><br><span class="line">  <span class="comment">// following passes (like batchmm and jit fusion)</span></span><br><span class="line">  <span class="keyword">if</span> (!<span class="built_in">getProfilingMode</span>()) 
&#123;</span><br><span class="line">    <span class="built_in">DecomposeOps</span>(graph);</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  <span class="comment">// TupleConstruct / TupleUnpack pairs can still be present at this point</span></span><br><span class="line">  <span class="comment">// and must be removed for fusion.</span></span><br><span class="line">  <span class="built_in">LowerSimpleTuples</span>(graph);</span><br><span class="line"></span><br><span class="line">  <span class="comment">// Rewrite subgraphs with many MMs into expressions that batch them.</span></span><br><span class="line">  <span class="built_in">BatchMM</span>(graph);</span><br><span class="line"></span><br><span class="line">  <span class="comment">// Fuse the dequant - op - quant patterns into quantized ops</span></span><br><span class="line">  <span class="built_in">QuantFusion</span>(graph);</span><br><span class="line"></span><br><span class="line">  <span class="built_in">FuseGraph</span>(graph);</span><br><span class="line"></span><br><span class="line">  <span class="comment">// Run custom passes that different backends can register.</span></span><br><span class="line">  <span class="comment">// This is done last to give internal optimization passes priority.</span></span><br><span class="line">  <span class="keyword">for</span> (<span class="keyword">const</span> <span class="keyword">auto</span>&amp; pass : <span class="built_in">getCustomPasses</span>()) &#123;</span><br><span class="line">    <span class="built_in">pass</span>(graph);</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">runOptimization</span><span class="params">(std::shared_ptr&lt;Graph&gt;&amp; graph)</span> </span>&#123;</span><br><span class="line">  <span class="comment">// Basic graph 
preprocessing to eliminate noise.</span></span><br><span class="line">  <span class="built_in">EliminateDeadCode</span>(graph);</span><br><span class="line">  <span class="built_in">EliminateCommonSubexpression</span>(graph);</span><br><span class="line"></span><br><span class="line">  <span class="built_in">PeepholeOptimize</span>(graph);</span><br><span class="line">  <span class="built_in">ConstantPropagation</span>(graph);</span><br><span class="line">  <span class="built_in">ConstantPooling</span>(graph);</span><br><span class="line"></span><br><span class="line">  <span class="comment">// Unroll small loops, and eliminate expressions that are the same at every</span></span><br><span class="line">  <span class="comment">// iteration.</span></span><br><span class="line">  <span class="built_in">UnrollLoops</span>(graph);</span><br><span class="line">  <span class="built_in">EliminateCommonSubexpression</span>(graph);</span><br><span class="line"></span><br><span class="line">  <span class="built_in">CheckInplace</span>(graph);</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>I will not go deep into most of these passes in this note; interested readers can read them in <code>torch/csrc/jit/passes/</code>.</p>
<h2 id="ProfilingGraphExecutorImpl"><a href="#ProfilingGraphExecutorImpl" class="headerlink" title="ProfilingGraphExecutorImpl"></a>ProfilingGraphExecutorImpl</h2><p>Now let’s take a look at the profiling graph executor. We also start from <code>getPlanFor</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br></pre></td><td class="code"><pre><span class="line"><span class="function">ExecutionPlan <span class="title">ProfilingGraphExecutorImpl::getPlanFor</span><span class="params">(</span></span></span><br><span class="line"><span class="function"><span class="params">    Stack&amp; stack,</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">size_t</span> remaining_bailout_depth)</span> </span>&#123;</span><br><span class="line">  <span class="function">std::lock_guard&lt;std::mutex&gt; <span class="title">lock</span><span 
class="params">(compile_mutex)</span></span>;</span><br><span class="line">  <span class="built_in">GRAPH_DEBUG</span>(<span class="string">&quot;Running ProfilingGraphExecutorImpl &quot;</span>, <span class="keyword">this</span>);</span><br><span class="line"></span><br><span class="line">  <span class="keyword">if</span> (optimized_plan_) &#123;</span><br><span class="line">    <span class="keyword">return</span> *optimized_plan_;</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  <span class="comment">// simple executor</span></span><br><span class="line">  <span class="keyword">if</span> (remaining_bailout_depth == <span class="number">0</span>) &#123;</span><br><span class="line">    <span class="keyword">auto</span> copy = graph-&gt;<span class="built_in">copy</span>();</span><br><span class="line">    <span class="built_in">runProfilingInsensitiveOptimizations</span>(copy);</span><br><span class="line">    <span class="built_in">GRAPH_DUMP</span>(<span class="string">&quot;Optimized SimpleExecutor Graph : &quot;</span>, copy);</span><br><span class="line">    optimized_plan_ = <span class="built_in">ExecutionPlan</span>(copy);</span><br><span class="line">    <span class="keyword">return</span> *optimized_plan_;</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  <span class="comment">// if a profiling graph hasn&#x27;t been created yet</span></span><br><span class="line">  <span class="keyword">if</span> (!pr_) &#123;</span><br><span class="line">    <span class="keyword">auto</span> copy = graph-&gt;<span class="built_in">copy</span>();</span><br><span class="line">    <span class="built_in">runProfilingInsensitiveOptimizations</span>(copy);</span><br><span class="line">    pr_ = ProfilingRecord::<span class="built_in">instrumentGraph</span>(copy);</span><br><span class="line">    <span class="keyword">auto</span> pr_copy = pr_-&gt;<span 
class="built_in">graph</span>()-&gt;<span class="built_in">copy</span>();</span><br><span class="line">    <span class="built_in">GRAPH_DUMP</span>(<span class="string">&quot;Profiled Graph: &quot;</span>, pr_copy);</span><br><span class="line">    profiling_plan_ = <span class="built_in">ExecutionPlan</span>(pr_copy);</span><br><span class="line">    <span class="comment">// fall-through</span></span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  <span class="comment">// profile until a graph is ready</span></span><br><span class="line">  <span class="keyword">if</span> (!pr_-&gt;<span class="built_in">ready</span>()) &#123;</span><br><span class="line">    <span class="keyword">return</span> *profiling_plan_;</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  <span class="keyword">auto</span> copy = pr_-&gt;<span class="built_in">graph</span>()-&gt;<span class="built_in">copy</span>();</span><br><span class="line">  <span class="built_in">runProfilingOptimizations</span>(copy);</span><br><span class="line">  <span class="comment">// cache</span></span><br><span class="line">  optimized_plan_ = <span class="built_in">ExecutionPlan</span>(copy, remaining_bailout_depth);</span><br><span class="line">  <span class="keyword">return</span> *optimized_plan_;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We can see that there is a “bailout” mechanism with limited depth: if the depth is reached, then the executor just does “profiling insensitive optimizations”. But what is “bailout”, what is “profiling”, and what are “profiling insensitive optimizations”? It is still not clear at this point. We can also see that <code>remaining_bailout_depth</code> is passed to the constructor of <code>ExecutionPlan</code>, so the bailout mechanism must be a collaboration between the interpreter and the graph executor. The optimizations are:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span 
class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">ProfilingGraphExecutorImpl::runProfilingOptimizations</span><span class="params">(</span></span></span><br><span class="line"><span class="function"><span class="params">    std::shared_ptr&lt;Graph&gt;&amp; copy)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">if</span> (!<span class="built_in">getGraphExecutorOptimize</span>()) &#123;</span><br><span class="line">    <span class="built_in">LowerGradOf</span>(*copy);</span><br><span class="line">    <span class="built_in">runRequiredPasses</span>(copy);</span><br><span class="line">    <span class="keyword">return</span>;</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  <span class="built_in">InsertGuards</span>(copy);</span><br><span class="line">  <span class="built_in">LowerGradOf</span>(*copy);</span><br><span class="line">  <span class="built_in">EliminateRedundantGuards</span>(copy);</span><br><span class="line">  <span class="built_in">InsertBailOuts</span>(copy);</span><br><span class="line">  <span class="built_in">GRAPH_DUMP</span>(<span class="string">&quot;After InsertBailOuts: &quot;</span>, copy);</span><br><span class="line">  <span class="built_in">specializeAutogradZero</span>(*copy);</span><br><span class="line"></span><br><span class="line">  <span class="built_in">runRequiredPasses</span>(copy);</span><br><span class="line">  <span class="built_in">ConstantPropagation</span>(copy);</span><br><span class="line">  <span class="built_in">runOptimization</span>(copy);</span><br><span class="line"></span><br><span class="line">  <span class="keyword">if</span> (<span class="built_in">needsGradientInProfilingMode</span>(copy-&gt;<span 
class="built_in">block</span>())) &#123;</span><br><span class="line">    <span class="keyword">auto</span> diff_nodes = <span class="built_in">CreateAutodiffSubgraphs</span>(</span><br><span class="line">        copy,</span><br><span class="line">        <span class="built_in">getAutodiffSubgraphInlining</span>() ? autodiffSubgraphNodeThreshold : <span class="number">1</span>);</span><br><span class="line">    <span class="keyword">for</span> (Node* dnode : diff_nodes) &#123;</span><br><span class="line">      <span class="keyword">auto</span> diff_graph = std::<span class="built_in">move</span>(dnode-&gt;<span class="built_in">g</span>(attr::Subgraph));</span><br><span class="line">      Gradient gradient = <span class="built_in">differentiate</span>(diff_graph);</span><br><span class="line">      <span class="built_in">runOptimization</span>(gradient.f);</span><br><span class="line">      <span class="comment">// run non diff optimization on the forward graph</span></span><br><span class="line">      <span class="built_in">runNondiffOptimization</span>(gradient.f);</span><br><span class="line">      <span class="built_in">packGradient</span>(gradient, dnode);</span><br><span class="line">    &#125;</span><br><span class="line">    <span class="built_in">InlineAutodiffSubgraphs</span>(</span><br><span class="line">        copy,</span><br><span class="line">        <span class="built_in">getAutodiffSubgraphInlining</span>() ? 
autodiffSubgraphInlineThreshold : <span class="number">1</span>);</span><br><span class="line"></span><br><span class="line">  &#125; <span class="keyword">else</span> &#123;</span><br><span class="line">    <span class="built_in">runNondiffOptimization</span>(copy);</span><br><span class="line">  &#125;</span><br><span class="line">  <span class="built_in">EliminateDeadCode</span>(copy);</span><br><span class="line">  <span class="built_in">GRAPH_DUMP</span>(<span class="string">&quot;Optimized Graph : &quot;</span>, copy);</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">ProfilingGraphExecutorImpl::runProfilingInsensitiveOptimizations</span><span class="params">(</span></span></span><br><span class="line"><span class="function"><span class="params">    std::shared_ptr&lt;Graph&gt;&amp; copy)</span> </span>&#123;</span><br><span class="line">  <span class="built_in">LowerGradOf</span>(*copy);</span><br><span class="line">  <span class="built_in">GRAPH_DUMP</span>(<span class="string">&quot;runProfilingInsensitiveOptimizations&quot;</span>, copy);</span><br><span class="line">  <span class="comment">// clear any residual undefinedness</span></span><br><span class="line">  <span class="comment">// as double backward graph inputs&#x27;</span></span><br><span class="line">  <span class="comment">// may carry over undefinedness</span></span><br><span class="line">  <span class="comment">// from profiled backward graphs</span></span><br><span class="line">  <span class="built_in">ClearUndefinedness</span>(copy);</span><br><span class="line">  <span class="built_in">runRequiredPasses</span>(copy);</span><br><span class="line">  <span class="keyword">if</span> (!<span class="built_in">getGraphExecutorOptimize</span>()) &#123;</span><br><span class="line">    <span class="keyword">return</span>;</span><br><span class="line">  &#125;</span><br><span 
class="line"></span><br><span class="line">  <span class="built_in">DecomposeOps</span>(copy);</span><br><span class="line">  <span class="built_in">ConstantPropagation</span>(copy);</span><br><span class="line">  <span class="built_in">EliminateDeadCode</span>(copy);</span><br><span class="line">  <span class="built_in">EliminateCommonSubexpression</span>(copy);</span><br><span class="line">  <span class="built_in">ConstantPooling</span>(copy);</span><br><span class="line">  <span class="built_in">PeepholeOptimize</span>(copy);</span><br><span class="line">  <span class="built_in">EliminateDeadCode</span>(copy);</span><br><span class="line">  <span class="built_in">CheckInplace</span>(copy);</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We will postpone the reading of profiling and bailout until after we have read how the interpreter works.</p>
<h1 id="PyTorch-IR-–-gt-Interpreter-Instructions"><a href="#PyTorch-IR-–-gt-Interpreter-Instructions" class="headerlink" title="PyTorch IR –&gt; Interpreter Instructions"></a>PyTorch IR –&gt; Interpreter Instructions</h1><p>Now it’s time to look at <code>ExecutionPlan</code> defined at <code>graph_executor.h</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br></pre></td><td class="code"><pre><span class="line"><span class="class"><span class="keyword">struct</span> <span class="title">ExecutionPlan</span> &#123;</span></span><br><span class="line">  <span class="built_in">ExecutionPlan</span>() = <span class="keyword">default</span>;</span><br><span class="line">  <span class="built_in">ExecutionPlan</span>(</span><br><span class="line">      std::shared_ptr&lt;Graph&gt; graph,</span><br><span class="line">      <span class="keyword">size_t</span> remaining_bailout_depth = <span class="number">0</span>)</span><br><span class="line">      : <span class="built_in">code</span>(graph, remaining_bailout_depth), <span class="built_in">graph</span>(std::<span class="built_in">move</span>(graph)) &#123;&#125;</span><br><span class="line"></span><br><span class="line">  <span class="function"><span class="keyword">operator</span> <span class="title">bool</span><span class="params">()</span> <span class="keyword">const</span> </span>&#123;</span><br><span class="line">    <span class="keyword">return</span> <span class="keyword">static_cast</span>&lt;<span class="keyword">bool</span>&gt;(graph);</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  Code code;</span><br><span class="line">  std::shared_ptr&lt;Graph&gt; graph;</span><br><span class="line">&#125;;</span><br></pre></td></tr></table></figure>

<p>It just converts the graph into a <code>Code</code> object, and the running is done by <code>InterpreterState</code>.</p>
<p><code>Code</code> and <code>InterpreterState</code> are defined in <code>torch/csrc/jit/interpreter.&#123;h,cpp&#125;</code>. These two classes are just thin wrappers around their implementations:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">Code::<span class="built_in">Code</span>(<span class="keyword">const</span> std::shared_ptr&lt;Graph&gt;&amp; graph, <span class="keyword">size_t</span> remaining_bailout_depth)</span><br><span class="line">    : <span class="built_in">pImpl</span>(<span class="keyword">new</span> <span class="built_in">CodeImpl</span>(graph, remaining_bailout_depth)) &#123;&#125;</span><br></pre></td></tr></table></figure>

<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">InterpreterState::<span class="built_in">InterpreterState</span>(<span class="keyword">const</span> Code&amp; code)</span><br><span class="line">    : <span class="built_in">pImpl</span>(c10::make_intrusive&lt;InterpreterStateImpl&gt;(code)) &#123;&#125;</span><br></pre></td></tr></table></figure>

<p><code>CodeImpl</code> is a long struct, but quite logical. A selection of its fields is shown below:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">PreprocessGraph preprocess;</span><br><span class="line">std::vector&lt;Instruction&gt; instructions;</span><br></pre></td></tr></table></figure>

<p>Its constructor is:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br></pre></td><td class="code"><pre><span class="line"><span class="built_in">CodeImpl</span>(<span class="keyword">const</span> std::shared_ptr&lt;Graph&gt;&amp; graph, <span class="keyword">size_t</span> remaining_bailout_depth)</span><br><span class="line">    : <span class="built_in">preprocess_</span>(*graph),</span><br><span class="line">      <span class="built_in">current_node_</span>(preprocess_.graph-&gt;<span class="built_in">return_node</span>()),</span><br><span class="line">      <span class="built_in">remaining_bailout_depth_</span>(remaining_bailout_depth) &#123;</span><br><span class="line">  graph_ = preprocess_.graph;</span><br><span class="line">  n_outputs = graph_-&gt;<span class="built_in">outputs</span>().<span class="built_in">size</span>();</span><br><span class="line">  <span class="keyword">if</span> (n_outputs == <span class="number">1</span>) &#123;</span><br><span class="line">    return_type_ = graph-&gt;<span class="built_in">outputs</span>().<span class="built_in">at</span>(<span class="number">0</span>)-&gt;<span class="built_in">type</span>();</span><br><span class="line">  &#125; <span class="keyword">else</span> &#123;</span><br><span class="line">    return_type_ = TupleType::<span class="built_in">create</span>(</span><br><span class="line">      
  <span class="built_in">fmap</span>(graph-&gt;<span class="built_in">outputs</span>(), [](<span class="keyword">const</span> Value* v) &#123; <span class="keyword">return</span> v-&gt;<span class="built_in">type</span>(); &#125;));</span><br><span class="line">  &#125;</span><br><span class="line">  n_inputs = graph_-&gt;<span class="built_in">inputs</span>().<span class="built_in">size</span>();</span><br><span class="line">  <span class="comment">// std::cout &lt;&lt; *graph_ &lt;&lt; &quot;\n&quot;;</span></span><br><span class="line">  <span class="built_in">emitCodeForBlock</span>(graph_-&gt;<span class="built_in">block</span>());</span><br><span class="line">  <span class="built_in">insertInstruction</span>(RET);</span><br><span class="line">  <span class="comment">// we deferred the emission of bailout blocks so they appear at the end</span></span><br><span class="line">  <span class="comment">// emit them now and patch up the jumps</span></span><br><span class="line">  <span class="built_in">insertBailoutBlocks</span>();</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>Clearly we can see what it does is:</p>
<ol>
<li>preprocess the graph, and then</li>
<li>emit instructions for interpreter.</li>
<li>insert bailout blocks</li>
</ol>
<p>The preprocessing of the graph is very well explained at the beginning of the file:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// Before we translate to intepreter instructions, we do</span></span><br><span class="line"><span class="comment">// some preprocessing of the graph to turn it into a form that is closer</span></span><br><span class="line"><span class="comment">// to what the instructions will look like.</span></span><br><span class="line"><span class="comment">// In particular we:</span></span><br><span class="line"><span class="comment">// *  Computes whether a input to a node is the last use, so we can issue MOVE</span></span><br><span class="line"><span class="comment">//    rather than LOAD instructions.</span></span><br><span class="line"><span class="comment">// *  Drop nodes are inserted for any node that is unused to create a dummy use</span></span><br><span class="line"><span class="comment">//    that will cause the interpreter to free the node.</span></span><br><span class="line"><span class="comment">//    A drop node just pops its input off the stack to  ensure the interpreter</span></span><br><span class="line"><span class="comment">//    releases references to nodes that are never used. 
Drop nodes are also</span></span><br><span class="line"><span class="comment">//    inserted when the last use of a node is in some conditionally run control</span></span><br><span class="line"><span class="comment">//    flow (e.g. one side of an If) and the interpreter must free the node only</span></span><br><span class="line"><span class="comment">//    after the control flow has reconverged</span></span><br><span class="line"><span class="comment">// Outputs are:</span></span><br><span class="line"><span class="comment">// * graph - the post processed copy of g</span></span><br><span class="line"><span class="comment">// * move_flags[n] - a list of booleans, one for each input,</span></span><br><span class="line"><span class="comment">//   indicating whether this is the last use of the value. The interpreter</span></span><br><span class="line"><span class="comment">//   should generate a move rather than a copy in this case.</span></span><br></pre></td></tr></table></figure>

<p>The <code>emitCodeForBlock</code> emits instructions:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">emitCodeForBlock</span><span class="params">(Block* block)</span> </span>&#123;</span><br><span class="line">  <span class="built_in">emitNodeAtBlockLevel</span>(block-&gt;<span class="built_in">param_node</span>());</span><br><span class="line">  <span class="keyword">for</span> (<span class="keyword">auto</span> node : block-&gt;<span class="built_in">nodes</span>()) &#123;</span><br><span class="line">    <span class="built_in">emitNodeAtBlockLevel</span>(node);</span><br><span class="line">  &#125;</span><br><span class="line">  <span class="built_in">emitNodeAtBlockLevel</span>(block-&gt;<span class="built_in">return_node</span>());</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>Since the nodes are topologically sorted, we just need to iterate the linked list and generate code for each node.</p>
<p>The <code>emitNodeAtBlockLevel</code> is:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">emitNodeAtBlockLevel</span><span class="params">(Node* node)</span> </span>&#123;</span><br><span class="line">  <span class="function">WithCurrentNode <span class="title">guard</span><span class="params">(&amp;current_node_, node)</span></span>;</span><br><span class="line">  <span class="built_in"><span class="keyword">switch</span></span> (node-&gt;<span class="built_in">kind</span>()) &#123;</span><br><span class="line">    <span class="keyword">case</span> prim::Constant:</span><br><span class="line">      <span class="built_in">emitConstant</span>(node);</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">    <span class="keyword">case</span> prim::Return:</span><br><span class="line">      <span class="built_in">emitLoadInputs</span>(node-&gt;<span class="built_in">inputs</span>());</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">    <span class="keyword">default</span>:</span><br><span class="line">      <span class="keyword">if</span> (!preprocess_.can_emit_inline[node]) &#123;</span><br><span class="line">        <span class="built_in">emitNode</span>(node);</span><br><span class="line">        <span 
class="built_in">emitStoreOutputs</span>(node);</span><br><span class="line">      &#125;</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>where <code>emitNode</code> dispatches according to node kind:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">emitNode</span><span class="params">(Node* node)</span> </span>&#123;</span><br><span class="line">  <span class="function">WithCurrentNode <span class="title">guard</span><span class="params">(&amp;current_node_, node)</span></span>;</span><br><span class="line">  <span class="built_in"><span class="keyword">switch</span></span> 
(node-&gt;<span class="built_in">kind</span>()) &#123;</span><br><span class="line">    <span class="keyword">default</span>:</span><br><span class="line">      <span class="built_in">emitOperator</span>(node);</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">    <span class="keyword">case</span> prim::Drop:</span><br><span class="line">      <span class="built_in">emitDrop</span>(node-&gt;<span class="built_in">inputs</span>());</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">    <span class="keyword">case</span> prim::Constant:</span><br><span class="line">      <span class="built_in">emitConstant</span>(node);</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">    <span class="keyword">case</span> prim::If:</span><br><span class="line">      <span class="built_in">emitIf</span>(node);</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">    <span class="keyword">case</span> prim::Loop:</span><br><span class="line">      <span class="built_in">emitLoop</span>(node);</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">    <span class="keyword">case</span> aten::wait:</span><br><span class="line">      <span class="built_in">emitWait</span>(node);</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">    <span class="keyword">case</span> prim::Param:</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">    <span class="keyword">case</span> prim::CallFunction:</span><br><span class="line">      <span class="built_in">emitCall</span>(</span><br><span class="line">          node-&gt;<span class="built_in">inputs</span>().<span class="built_in">at</span>(<span class="number">0</span>)-&gt;<span 
class="built_in">type</span>()-&gt;expect&lt;FunctionType&gt;()-&gt;<span class="built_in">function</span>(),</span><br><span class="line">          node-&gt;<span class="built_in">inputs</span>().<span class="built_in">slice</span>(<span class="number">1</span>));</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">    <span class="keyword">case</span> prim::CallMethod:</span><br><span class="line">      <span class="keyword">if</span> (<span class="keyword">auto</span> class_type = node-&gt;<span class="built_in">inputs</span>().<span class="built_in">at</span>(<span class="number">0</span>)-&gt;<span class="built_in">type</span>()-&gt;cast&lt;ClassType&gt;()) &#123;</span><br><span class="line">        <span class="built_in">emitCall</span>(class_type-&gt;<span class="built_in">getMethod</span>(node-&gt;<span class="built_in">s</span>(attr::name)), node-&gt;<span class="built_in">inputs</span>());</span><br><span class="line">      &#125; <span class="keyword">else</span> &#123;</span><br><span class="line">        <span class="built_in">emitInterfaceCall</span>(node-&gt;<span class="built_in">s</span>(attr::name), node-&gt;<span class="built_in">inputs</span>());</span><br><span class="line">      &#125;</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">    <span class="keyword">case</span> prim::BailOut:</span><br><span class="line">      <span class="built_in">emitBailOut</span>(node);</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">    <span class="keyword">case</span> prim::GetAttr:</span><br><span class="line">      <span class="built_in">emitGetAttr</span>(node);</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">    <span class="keyword">case</span> prim::SetAttr:</span><br><span class="line">      <span 
class="built_in">emitSetAttr</span>(node);</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>Let’s further take a look at <code>emitOperator</code>, <code>emitIf</code> and <code>emitBailOut</code> as example.</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">emitOperator</span><span class="params">(Node* node)</span> </span>&#123;</span><br><span class="line">  <span class="built_in">emitLoadInputs</span>(node-&gt;<span class="built_in">inputs</span>());</span><br><span class="line">  <span class="built_in">insertInstruction</span>(OP, operator_table_.<span class="built_in">size</span>());</span><br><span class="line">  operator_table_.<span class="built_in">emplace_back</span>(<span class="built_in">getOperation</span>(node));</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">emitIf</span><span class="params">(Node* node)</span> </span>&#123;</span><br><span class="line">  <span class="built_in">emitLoadInputs</span>(node-&gt;<span class="built_in">inputs</span>());</span><br><span class="line">  <span class="keyword">size_t</span> start_if = instructions_.<span class="built_in">size</span>();</span><br><span class="line">  <span class="built_in">insertInstruction</span>(JF, <span class="number">0</span>); <span class="comment">// dummy offset to be filled in</span></span><br><span class="line">  <span class="built_in">emitCodeForBlock</span>(node-&gt;<span class="built_in">blocks</span>().<span class="built_in">at</span>(<span class="number">0</span>));</span><br><span class="line">  <span class="built_in">insertInstruction</span>(JMP, <span class="number">0</span>); <span class="comment">// dummy offset</span></span><br><span class="line">  <span class="keyword">size_t</span> start_else = instructions_.<span class="built_in">size</span>();</span><br><span class="line">  instructions_[start_if].X = start_else - start_if;</span><br><span class="line">  <span class="built_in">emitCodeForBlock</span>(node-&gt;<span class="built_in">blocks</span>().<span class="built_in">at</span>(<span class="number">1</span>));</span><br><span class="line">  instructions_[start_else - <span class="number">1</span>].X = instructions_.<span class="built_in">size</span>() - (start_else - <span 
class="number">1</span>);</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>These two are pretty standard compiler implementations.</p>
<p>Now let’s take a look at how bailout works:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">emitBailOut</span><span class="params">(Node* node)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">auto</span> jf_index = <span class="built_in">emitGuard</span>(node);</span><br><span class="line">  <span class="keyword">auto</span> unoptimized_graph = node-&gt;<span class="built_in">inputs</span>().<span class="built_in">at</span>(<span class="number">0</span>)-&gt;<span class="built_in">node</span>()-&gt;<span class="built_in">g</span>(attr::Subgraph);</span><br><span class="line">  <span class="comment">// note, guaded input is already loaded onto the stack</span></span><br><span class="line">  <span class="comment">// for GUARD instruction</span></span><br><span class="line">  <span class="built_in">emitLoadInputs</span>(node-&gt;<span class="built_in">inputs</span>().<span class="built_in">slice</span>(<span class="number">2</span>));</span><br><span class="line">  <span class="built_in">insertInstruction</span>(TAIL_CALL, function_table_.<span class="built_in">size</span>());</span><br><span 
class="line">  <span class="built_in">TORCH_INTERNAL_ASSERT</span>(node-&gt;<span class="built_in">kind</span>() == prim::BailOut);</span><br><span class="line">  <span class="keyword">auto</span> bailout_index = node-&gt;<span class="built_in">i</span>(attr::index);</span><br><span class="line">  <span class="built_in">TORCH_INTERNAL_ASSERT</span>(bailout_index &gt;= <span class="number">0</span>);</span><br><span class="line"></span><br><span class="line">  <span class="keyword">auto</span> build_bailout_graph = [bailout_index,</span><br><span class="line">                              unoptimized_graph](Function &amp;func) &#123;</span><br><span class="line"></span><br><span class="line">    <span class="built_in">BuildBailOutGraphFrom</span>(bailout_index, unoptimized_graph, func.<span class="built_in">graph</span>());</span><br><span class="line">  &#125;;</span><br><span class="line"></span><br><span class="line">  <span class="keyword">auto</span> empty_graph = std::make_shared&lt;Graph&gt;();</span><br><span class="line">  <span class="keyword">auto</span> func = torch::make_unique&lt;Function&gt;(</span><br><span class="line">      <span class="string">&quot;bailout&quot;</span>, empty_graph, build_bailout_graph);</span><br><span class="line">  function_table_.<span class="built_in">emplace_back</span>(func.<span class="built_in">get</span>());</span><br><span class="line">  bailout_functions_.<span class="built_in">emplace_back</span>(std::<span class="built_in">move</span>(func));</span><br><span class="line">  <span class="built_in">createBailoutBlock</span>(jf_index);</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>The <code>emitBailOut</code> seems to save an unoptimized graph and make a function out of it. We will not go into detail about this right now, because doing so requires a big picture of how profiling is done in the executor, and for now we just want a brief overview of how the code is organized. We will do a thorough read of this in <a href="#profiling">a separate section</a>; for now, let&rsquo;s move on to the virtual machine.</p>
<h1 id="The-Virtual-Machine"><a href="#The-Virtual-Machine" class="headerlink" title="The Virtual Machine"></a>The Virtual Machine</h1><p><code>InterpreterStateImpl</code> is the virtual machine that executes instructions. The related functions are located here:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">run</span><span class="params">(Stack&amp; stack)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">if</span> (<span class="built_in">runImpl</span>(stack)) &#123;</span><br><span class="line">    future_-&gt;<span class="built_in">wait</span>();</span><br><span class="line"></span><br><span class="line">    <span class="keyword">auto</span> num_outputs = frames.<span class="built_in">front</span>().function-&gt;n_outputs;</span><br><span class="line">    <span class="keyword">if</span> (num_outputs == <span class="number">1</span>) &#123;</span><br><span class="line">      <span class="built_in">push</span>(stack, future_-&gt;<span class="built_in">value</span>());</span><br><span class="line">    &#125; <span class="keyword">else</span> &#123;</span><br><span class="line">      <span class="keyword">auto</span> tuple = future_-&gt;<span class="built_in">value</span>().<span class="built_in">toTuple</span>();</span><br><span class="line">      <span class="keyword">for</span> (<span class="keyword">const</span> IValue&amp; value : tuple-&gt;<span class="built_in">elements</span>()) &#123;</span><br><span class="line">        <span class="built_in">push</span>(stack, value);</span><br><span class="line">      &#125;</span><br><span class="line">    &#125;</span><br><span class="line">  
&#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>which invokes <code>runImpl</code> to run asynchronously and waits for the async run to finish (therefore synchronous in effect). The <code>runImpl</code> looks like:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span 
class="line">61</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">bool</span> <span class="title">runImpl</span><span class="params">(Stack&amp; stack)</span> </span>&#123;</span><br><span class="line">  <span class="comment">// if we have never run before, then we might have to return the</span></span><br><span class="line">  <span class="comment">// stack when we suspend, record where it starts so we return the right</span></span><br><span class="line">  <span class="comment">// stack</span></span><br><span class="line">  <span class="keyword">if</span> (stack_start_ == <span class="number">-1</span>) &#123;</span><br><span class="line">    <span class="built_in">TORCH_INTERNAL_ASSERT</span>(stack.<span class="built_in">size</span>() &gt;= frames.<span class="built_in">back</span>().function-&gt;n_inputs);</span><br><span class="line">    stack_start_ = stack.<span class="built_in">size</span>() - frames.<span class="built_in">back</span>().function-&gt;n_inputs;</span><br><span class="line">  &#125; <span class="keyword">else</span> &#123;</span><br><span class="line">    <span class="comment">// during restarts, all of the stack is always our own, so we leave</span></span><br><span class="line">    <span class="comment">// nothing</span></span><br><span class="line">    stack_start_ = <span class="number">0</span>;</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  <span class="function">ActiveFrame <span class="title">af</span><span class="params">(frames.back())</span></span>;</span><br><span class="line">  <span class="keyword">try</span> &#123;</span><br><span class="line">    <span class="keyword">while</span> (<span class="literal">true</span>) &#123;</span><br><span class="line"><span class="comment">//         std::cout &lt;&lt; &quot;RUNNING &quot;;</span></span><br><span class="line"><span class="comment">//         
frames.back().function-&gt;dump(std::cout, af.pc);</span></span><br><span class="line">      Instruction inst = af.instructions[af.pc];</span><br><span class="line">      <span class="built_in"><span class="keyword">switch</span></span> (inst.op) &#123;</span><br><span class="line">        <span class="keyword">case</span> OP:</span><br><span class="line">          af.operators[inst.X](stack);</span><br><span class="line">          ++af.pc;</span><br><span class="line">          <span class="keyword">break</span>;</span><br><span class="line">        <span class="keyword">case</span> OPN:</span><br><span class="line">          <span class="built_in">AT_ERROR</span>(<span class="string">&quot;OPN is currently supported in mobile mode only.&quot;</span>);</span><br><span class="line">          <span class="keyword">break</span>;</span><br><span class="line">        <span class="keyword">case</span> LOAD:</span><br><span class="line">          stack.<span class="built_in">emplace_back</span>(<span class="built_in">reg</span>(inst.X));</span><br><span class="line">          ++af.pc;</span><br><span class="line">          <span class="keyword">break</span>;</span><br><span class="line">        <span class="keyword">case</span> MOVE:</span><br><span class="line">          stack.<span class="built_in">emplace_back</span>(std::<span class="built_in">move</span>(<span class="built_in">reg</span>(inst.X)));</span><br><span class="line">          ++af.pc;</span><br><span class="line">          <span class="keyword">break</span>;</span><br><span class="line">        <span class="keyword">case</span> STORE: ...</span><br><span class="line">        <span class="keyword">case</span> STOREN: ...</span><br><span class="line">        <span class="keyword">case</span> DROP: ...</span><br><span class="line">        <span class="keyword">case</span> DROPR: ...</span><br><span class="line">        <span class="keyword">case</span> LOADC: ...</span><br><span class="line">        <span 
class="keyword">case</span> GET_ATTR: ...</span><br><span class="line">        <span class="keyword">case</span> SET_ATTR: ...</span><br><span class="line">        <span class="keyword">case</span> JF: ...</span><br><span class="line">        <span class="keyword">case</span> JMP: ...</span><br><span class="line">        <span class="keyword">case</span> LOOP: ...</span><br><span class="line">        <span class="keyword">case</span> CALL:</span><br><span class="line">        <span class="keyword">case</span> INTERFACE_CALL: ...</span><br><span class="line">        <span class="keyword">case</span> RET: ...</span><br><span class="line">        <span class="keyword">case</span> WAIT: ...</span><br><span class="line">        <span class="keyword">case</span> FAIL_GUARD: ...</span><br><span class="line">        <span class="keyword">case</span> GUARD: ...</span><br><span class="line">        <span class="keyword">case</span> TAIL_CALL: ...</span><br><span class="line">      &#125;</span><br><span class="line">    &#125;</span><br><span class="line">  &#125; <span class="built_in"><span class="keyword">catch</span></span> (std::exception&amp; e) &#123;</span><br><span class="line">    frames.<span class="built_in">back</span>().pc = af.pc;</span><br><span class="line">    <span class="keyword">bool</span> is_jit_exception = <span class="keyword">dynamic_cast</span>&lt;JITException*&gt;(&amp;e);</span><br><span class="line">    <span class="built_in">handleError</span>(<span class="built_in">ExceptionMessage</span>(e), is_jit_exception);</span><br><span class="line">    <span class="keyword">return</span> <span class="literal">false</span>;</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>There is nothing special, just mimicking the behavior of processors.</p>
<h1 id="Profiling-and-bailout"><a href="#Profiling-and-bailout" class="headerlink" title="Profiling and bailout"></a><a name="profiling"></a>Profiling and bailout</h1><p>A good starting point for profiling and bailout is the official design doc <a target="_blank" rel="noopener" href="https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/docs/OVERVIEW.md"><code>torch/csrc/jit/docs/OVERVIEW.md</code></a>:</p>
<figure class="highlight markdown"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br></pre></td><td class="code"><pre><span class="line"><span class="section"># Profiling Programs</span></span><br><span class="line"></span><br><span class="line"><span class="code">`prim::profile`</span> nodes are inserted on every <span class="strong">**use**</span> of a value by <span class="code">`ProfilingRecord::instrumentBlock`</span>. Every <span class="code">`prim::profile`</span> node runs a lambda that uses a captured, initial type value and the type of an incoming tensor and merges the two into a refined <span class="code">`TensorType`</span></span><br><span class="line"></span><br><span class="line"><span class="code">`prim::profile`</span> nodes are replaced with <span class="code">`prim::Guard`</span> nodes by <span class="code">`InsertGuards`</span>. <span class="code">`prim::Guard`</span> nodes are inserted to guarantee that beyond the guard a guarded tensor will always be of the profiled shape. 
This guarantee will enable optimizations and codegens to generate more efficient code.</span><br><span class="line"></span><br><span class="line">JIT attempts to reduce the number of <span class="code">`prim::Guard`</span> nodes as these nodes may interefere with optimizations.</span><br><span class="line"><span class="bullet">*</span> First, <span class="code">`GuardElimination::moveGuardsToDefs`</span> tries to move <span class="code">`prim::Guards`</span> to their definitions, so the guards guarding the same tensor follow the definition directly or another guard on the same tensor. This step is done in</span><br><span class="line"><span class="bullet">*</span> This ordering allows us to <span class="strong">**coalesce**</span> (done in <span class="code">`GuardElimination::coalesceGuards`</span>) multiple guards into a single one.</span><br><span class="line"><span class="bullet">*</span> After guards are  <span class="strong">**coaslesced**</span> , <span class="code">`GuardElimination::eliminateGuards`</span> attempts to eliminate more guards as follows: it inspects each operation and its inputs. It checks if inputs to the operation are guarded and also if the operation produces the consistent shapes given the guarded inputs. For example, if two inputs to <span class="code">`add`</span> are guaranteed to be of shape <span class="code">`(2, 3) `</span>, the output shape will also always be <span class="code">`(2, 3)`</span> If this property holds, JIT is allowed to remove the guard guarding operation&#x27;s output.</span><br><span class="line"></span><br><span class="line">Lastly, JIT needs to be handle cases when the assumptions about tensor shapes fail at runtime. To handle guard failures, JIT needs to be able to run the original code i.e. the code  that doesn&#x27;t rely on assumptions about shapes. 
As guards can be inserted and moved (by Optimizer) at/to arbitrary points in a computional graph, JIT needs to be able to resume execution starting from those arbitrary points onward.</span><br><span class="line"></span><br><span class="line"><span class="code">`InsertBailoutNodes`</span> builds deoptimized versions of the original computational graph, that contain the rest of computations starting from their corresponding guard failure poins and, also, captures live values needed to execute those deoptimized graphs. In other words, the pass replaces <span class="code">`prim::Guard`</span> nodes with <span class="code">`prim::BailOut`</span> nodes which have the<span class="code">`attr::Subgraph`</span> attributes set to the deoptimized versions of the  remaining computations at their corresponding <span class="code">`prim::Guard`</span>s.</span><br></pre></td></tr></table></figure>

<p>After getting the rough idea, let’s look at the code. In <code>ProfilingGraphExecutorImpl::getPlanFor</code>, which runs every time we execute a graph, we have:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// if a profiling graph hasn&#x27;t been created yet</span></span><br><span class="line"><span class="keyword">if</span> (!pr_) &#123;</span><br><span class="line">  <span class="keyword">auto</span> copy = graph-&gt;<span class="built_in">copy</span>();</span><br><span class="line">  <span class="built_in">runProfilingInsensitiveOptimizations</span>(copy);</span><br><span class="line">  pr_ = ProfilingRecord::<span class="built_in">instrumentGraph</span>(copy);</span><br><span class="line">  <span class="keyword">auto</span> pr_copy = pr_-&gt;<span class="built_in">graph</span>()-&gt;<span class="built_in">copy</span>();</span><br><span class="line">  <span class="built_in">GRAPH_DUMP</span>(<span class="string">&quot;Profiled Graph: &quot;</span>, pr_copy);</span><br><span class="line">  profiling_plan_ = <span class="built_in">ExecutionPlan</span>(pr_copy);</span><br><span class="line">  <span class="comment">// fall-through</span></span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line"><span class="comment">// profile until a graph is ready</span></span><br><span class="line"><span class="keyword">if</span> (!pr_-&gt;<span class="built_in">ready</span>()) &#123;</span><br><span class="line">  <span class="keyword">return</span> *profiling_plan_;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>From this, we can see that for the first few runs the graph executor instruments the graph with profiling nodes and executes it until no more profiling is needed.<br>Let’s go deeper to see how the graph is instrumented. <code>ProfilingRecord::instrumentGraph</code> is defined in <code>torch/csrc/jit/profiling_record.cpp</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br></pre></td><td class="code"><pre><span class="line"><span class="function">std::unique_ptr&lt;ProfilingRecord&gt; <span class="title">ProfilingRecord::instrumentGraph</span><span class="params">(</span></span></span><br><span class="line"><span class="function"><span class="params">    <span class="keyword">const</span> std::shared_ptr&lt;Graph&gt;&amp; graph)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">auto</span> new_g = graph-&gt;<span class="built_in">copy</span>();</span><br><span class="line">  <span class="keyword">auto</span> pr = std::unique_ptr&lt;ProfilingRecord&gt;(<span class="keyword">new</span> <span class="built_in">ProfilingRecord</span>(new_g));</span><br><span class="line">  <span class="keyword">auto</span> raw_pr = pr.<span class="built_in">get</span>();</span><br><span class="line">  <span class="built_in">unprofileGraphInputs</span>(new_g);</span><br><span class="line">  <span class="built_in">unprofileBlock</span>(new_g-&gt;<span class="built_in">block</span>());</span><br><span class="line">  pr-&gt;<span 
class="built_in">instrumentBlock</span>(new_g-&gt;<span class="built_in">block</span>());</span><br><span class="line"></span><br><span class="line">  <span class="keyword">for</span> (<span class="keyword">auto</span> i : new_g-&gt;<span class="built_in">return_node</span>()-&gt;<span class="built_in">inputs</span>()) &#123;</span><br><span class="line">    <span class="keyword">if</span> (i-&gt;<span class="built_in">type</span>()-&gt;<span class="built_in">isSubtypeOf</span>(TensorType::<span class="built_in">get</span>())) &#123;</span><br><span class="line">      pr-&gt;<span class="built_in">insertShapeProfile</span>(new_g-&gt;<span class="built_in">return_node</span>(), i);</span><br><span class="line">    &#125;</span><br><span class="line">  &#125;</span><br><span class="line">  std::function&lt;<span class="built_in"><span class="keyword">void</span></span>(Stack&amp;)&gt; counter = [raw_pr](Stack&amp;) &#123;</span><br><span class="line">    std::lock_guard&lt;std::mutex&gt; <span class="built_in">lock</span>(raw_pr-&gt;mutex_);</span><br><span class="line">    <span class="keyword">if</span> (raw_pr-&gt;profiling_count_ &gt; <span class="number">0</span>)</span><br><span class="line">    &#123;</span><br><span class="line">        raw_pr-&gt;profiling_count_--;</span><br><span class="line">    &#125;</span><br><span class="line">  &#125;;</span><br><span class="line"></span><br><span class="line">  <span class="keyword">auto</span> pop = pr-&gt;<span class="built_in">createProfileNode</span>(counter, &#123;&#125;);</span><br><span class="line">  new_g-&gt;<span class="built_in">appendNode</span>(pop);</span><br><span class="line">  <span class="keyword">return</span> pr;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>Combining the knowledge from the docs, we can see that this function calls <code>instrumentBlock</code> to instrument the main block of the graph. Let’s now move on to <code>instrumentBlock</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">ProfilingRecord::instrumentBlock</span><span class="params">(Block *block)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">for</span> (<span class="keyword">auto</span> it = block-&gt;<span class="built_in">nodes</span>().<span class="built_in">begin</span>(); it != block-&gt;<span class="built_in">nodes</span>().<span class="built_in">end</span>(); ++it) &#123;</span><br><span class="line">    <span class="keyword">auto</span> n = *it;</span><br><span class="line">    <span class="keyword">for</span> (<span class="keyword">auto</span> i : n-&gt;<span class="built_in">inputs</span>()) &#123;</span><br><span class="line">      <span class="keyword">if</span> (!i-&gt;<span class="built_in">type</span>()-&gt;<span class="built_in">isSubtypeOf</span>(TensorType::<span class="built_in">get</span>()) ||</span><br><span class="line">          i-&gt;<span class="built_in">node</span>()-&gt;<span class="built_in">kind</span>() == prim::profile) &#123;</span><br><span class="line">        <span class="keyword">continue</span>;</span><br><span class="line">      &#125;</span><br><span class="line"></span><br><span class="line">      <span class="built_in">insertShapeProfile</span>(n, i);</span><br><span class="line">    
&#125;</span><br><span class="line"></span><br><span class="line">    <span class="keyword">for</span> (<span class="keyword">auto</span> b : n-&gt;<span class="built_in">blocks</span>()) &#123;</span><br><span class="line">      <span class="built_in">instrumentBlock</span>(b);</span><br><span class="line">    &#125;</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We can see that it iterates over the whole graph, and for every node that has a tensor input, we call <code>insertShapeProfile</code>. We also recursively instrument sub-blocks. The <code>insertShapeProfile</code> looks like:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">ProfilingRecord::insertShapeProfile</span><span class="params">(Node *n, Value *i)</span> </span>&#123;</span><br><span class="line"></span><br><span class="line">  <span class="keyword">auto</span> pn = <span class="built_in">createProfileNode</span>(<span class="literal">nullptr</span>, &#123;i&#125;);</span><br><span class="line">  <span class="keyword">auto</span> pno = pn-&gt;<span class="built_in">addOutput</span>();</span><br><span class="line">  <span class="keyword">bool</span> first = <span class="literal">true</span>;</span><br><span class="line">  pno-&gt;<span class="built_in">setType</span>(TensorType::<span 
class="built_in">get</span>());</span><br><span class="line">  std::function&lt;<span class="built_in"><span class="keyword">void</span></span>(Stack &amp;)&gt; shape_profiler = [<span class="keyword">this</span>, pno,</span><br><span class="line">                                                 first](Stack &amp;stack) <span class="keyword">mutable</span> &#123;</span><br><span class="line">    IValue t;</span><br><span class="line">    <span class="built_in">pop</span>(stack, t);</span><br><span class="line">    <span class="keyword">if</span> (t.<span class="built_in">isTensor</span>()) &#123;</span><br><span class="line"></span><br><span class="line">      <span class="keyword">if</span> (t.<span class="built_in">toTensor</span>().<span class="built_in">defined</span>()) &#123;</span><br><span class="line">        <span class="keyword">auto</span> pttp = <span class="built_in">tensorTypeInCurrentExecutionContext</span>(t.<span class="built_in">toTensor</span>());</span><br><span class="line">        <span class="function">std::lock_guard&lt;std::mutex&gt; <span class="title">lock</span><span class="params">(<span class="keyword">this</span>-&gt;mutex_)</span></span>;</span><br><span class="line">        <span class="keyword">if</span> (<span class="keyword">auto</span> type = pno-&gt;<span class="built_in">type</span>()-&gt;cast&lt;TensorType&gt;()) &#123;</span><br><span class="line">          <span class="keyword">if</span> (!first) &#123;</span><br><span class="line">            pttp = pttp-&gt;<span class="built_in">merge</span>(type);</span><br><span class="line">          &#125;</span><br><span class="line">          pno-&gt;<span class="built_in">setType</span>(pttp);</span><br><span class="line">          first = <span class="literal">false</span>;</span><br><span class="line">        &#125;</span><br><span class="line">      &#125; <span class="keyword">else</span> &#123;</span><br><span class="line">        pno-&gt;<span 
class="built_in">setType</span>(TensorType::<span class="built_in">get</span>()-&gt;<span class="built_in">withUndefined</span>());</span><br><span class="line">      &#125;</span><br><span class="line">    &#125;</span><br><span class="line"></span><br><span class="line">    <span class="comment">// passing t through</span></span><br><span class="line">    <span class="built_in">push</span>(stack, t);</span><br><span class="line"></span><br><span class="line">  &#125;;</span><br><span class="line"></span><br><span class="line">  pn-&gt;<span class="built_in">setCallback</span>(shape_profiler);</span><br><span class="line">  pn-&gt;<span class="built_in">insertBefore</span>(n);</span><br><span class="line">  n-&gt;<span class="built_in">replaceInputWith</span>(i, pn-&gt;<span class="built_in">output</span>());</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We can see that it inserts a profile node before the target node, and the callback of the profile node is set to the lambda <code>shape_profiler</code>. We will stop going deeper into <code>ProfilingRecord</code> here since we already have a rough idea of what it does.</p>
<p>Now let’s move to <code>InsertGuards</code> which is called in <code>ProfilingGraphExecutorImpl::runProfilingOptimizations</code>, which is called in <code>ProfilingGraphExecutorImpl::getPlanFor</code>. The <code>InsertGuards</code> is defined at <code>torch/csrc/jit/passes/insert_guards.cpp</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br></pre></td><td class="code"><pre><span class="line"><span class="class"><span class="keyword">struct</span> <span class="title">GuardInserter</span> &#123;</span></span><br><span class="line">  <span 
class="built_in">GuardInserter</span>(std::shared_ptr&lt;Graph&gt; graph) : <span class="built_in">graph_</span>(std::<span class="built_in">move</span>(graph)) &#123;&#125;</span><br><span class="line"></span><br><span class="line">  <span class="function"><span class="keyword">void</span> <span class="title">run</span><span class="params">()</span> </span>&#123;</span><br><span class="line">    <span class="built_in">insertGuards</span>(graph_-&gt;<span class="built_in">block</span>());</span><br><span class="line">    <span class="built_in">removeProfilingNodes</span>(graph_-&gt;<span class="built_in">block</span>());</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line"> <span class="keyword">private</span>:</span><br><span class="line">  <span class="function"><span class="keyword">void</span> <span class="title">removeProfilingNodes</span><span class="params">(Block* b)</span> </span>&#123;</span><br><span class="line">    <span class="keyword">for</span> (<span class="keyword">auto</span> it = b-&gt;<span class="built_in">nodes</span>().<span class="built_in">begin</span>(); it != b-&gt;<span class="built_in">nodes</span>().<span class="built_in">end</span>(); it++) &#123;</span><br><span class="line">      <span class="keyword">if</span> (it-&gt;<span class="built_in">kind</span>() == prim::profile) &#123;</span><br><span class="line">        it.<span class="built_in">destroyCurrent</span>();</span><br><span class="line">      &#125; <span class="keyword">else</span> &#123;</span><br><span class="line">        <span class="keyword">for</span> (Block* ib : it-&gt;<span class="built_in">blocks</span>()) &#123;</span><br><span class="line">          <span class="built_in">removeProfilingNodes</span>(ib);</span><br><span class="line">        &#125;</span><br><span class="line">      &#125;</span><br><span class="line">    &#125;</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span 
class="line">  <span class="function"><span class="keyword">void</span> <span class="title">insertGuards</span><span class="params">(Block* b)</span> </span>&#123;</span><br><span class="line">    <span class="keyword">for</span> (<span class="keyword">auto</span> it = b-&gt;<span class="built_in">nodes</span>().<span class="built_in">begin</span>(); it != b-&gt;<span class="built_in">nodes</span>().<span class="built_in">end</span>(); it++) &#123;</span><br><span class="line">      <span class="keyword">auto</span> n = *it;</span><br><span class="line">      <span class="keyword">if</span> (n-&gt;<span class="built_in">kind</span>() == prim::profile &amp;&amp; n-&gt;<span class="built_in">outputs</span>().<span class="built_in">size</span>() == <span class="number">1</span>) &#123;</span><br><span class="line">        <span class="keyword">auto</span> pttp = n-&gt;<span class="built_in">output</span>()-&gt;<span class="built_in">type</span>()-&gt;cast&lt;TensorType&gt;();</span><br><span class="line">        <span class="keyword">if</span> (pttp) &#123;</span><br><span class="line">          <span class="keyword">auto</span> guard = graph_-&gt;<span class="built_in">create</span>(prim::Guard, &#123;n-&gt;<span class="built_in">input</span>()&#125;, <span class="number">1</span>);</span><br><span class="line">          <span class="keyword">auto</span> go = guard-&gt;<span class="built_in">output</span>();</span><br><span class="line">          go-&gt;<span class="built_in">setType</span>(pttp);</span><br><span class="line">          guard-&gt;<span class="built_in">insertBefore</span>(n);</span><br><span class="line">          n-&gt;<span class="built_in">output</span>()-&gt;<span class="built_in">replaceAllUsesWith</span>(go);</span><br><span class="line">        &#125; <span class="keyword">else</span> &#123;</span><br><span class="line">          <span class="comment">// we didn&#x27;t go down this path i.e</span></span><br><span class="line">          <span 
class="comment">// no profiling information is available</span></span><br><span class="line">          n-&gt;<span class="built_in">output</span>()-&gt;<span class="built_in">replaceAllUsesWith</span>(n-&gt;<span class="built_in">input</span>());</span><br><span class="line">        &#125;</span><br><span class="line">        it.<span class="built_in">destroyCurrent</span>();</span><br><span class="line">      &#125; <span class="keyword">else</span> &#123;</span><br><span class="line">        <span class="keyword">for</span> (Block* ib : n-&gt;<span class="built_in">blocks</span>()) &#123;</span><br><span class="line">          <span class="built_in">insertGuards</span>(ib);</span><br><span class="line">        &#125;</span><br><span class="line">      &#125;</span><br><span class="line">    &#125;</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  std::shared_ptr&lt;Graph&gt; graph_;</span><br><span class="line">&#125;;</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">InsertGuards</span><span class="params">(std::shared_ptr&lt;Graph&gt; graph)</span> </span>&#123;</span><br><span class="line">  <span class="function">GuardInserter <span class="title">gi</span><span class="params">(std::move(graph))</span></span>;</span><br><span class="line">  gi.<span class="built_in">run</span>();</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>It is just a standard recursive graph traversal that inserts a guard before each profiling node and then removes all profiling nodes.</p>
<p>We will skip the part on how the guards are moved for optimization. Let’s move on to how the guards are converted to bailouts. From the document, we know that it is done by <code>InsertBailoutNodes</code>, but after some searching, we cannot find <code>InsertBailoutNodes</code>. The docs must already be outdated. But after looking around, we see that in <code>ProfilingGraphExecutorImpl::runProfilingOptimizations</code>, we have</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line"><span class="built_in">InsertGuards</span>(copy);</span><br><span class="line"><span class="built_in">LowerGradOf</span>(*copy);</span><br><span class="line"><span class="built_in">EliminateRedundantGuards</span>(copy);</span><br><span class="line"><span class="built_in">InsertBailOuts</span>(copy);</span><br><span class="line"><span class="built_in">GRAPH_DUMP</span>(<span class="string">&quot;After InsertBailOuts: &quot;</span>, copy);</span><br></pre></td></tr></table></figure>

<p>So it must be <code>InsertBailOuts</code> that is actually doing the job. This function is defined in <code>torch/csrc/jit/passes/bailout_graph.cpp</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">InsertBailOuts</span><span class="params">(std::shared_ptr&lt;Graph&gt; graph)</span> </span>&#123;</span><br><span class="line">  <span class="function">BailOutInserter <span class="title">ibo</span><span class="params">(std::move(graph))</span></span>;</span><br><span class="line">  ibo.<span class="built_in">run</span>();</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// `BailOutInserter` replaces prim::Guard nodes with</span></span><br><span class="line"><span class="comment">// prim::BailOut nodes that allow interpreter to</span></span><br><span class="line"><span class="comment">// resume execution of the unoptimized(deoptimized)</span></span><br><span class="line"><span class="comment">// version of an original graph from a particular point</span></span><br><span class="line"><span class="class"><span class="keyword">struct</span> <span class="title">BailOutInserter</span> &#123;</span></span><br><span class="line">  <span class="function"><span class="keyword">explicit</span> <span class="title">BailOutInserter</span><span class="params">(std::shared_ptr&lt;Graph&gt; graph)</span></span></span><br><span class="line"><span class="function">      : graph_(std::move(graph)), bailout_index_(<span class="number">0</span>) &#123;</span>&#125;</span><br><span class="line"></span><br><span class="line">  <span class="function"><span class="keyword">void</span> <span class="title">run</span><span class="params">()</span> </span>&#123;</span><br><span class="line">    liveness_sets_ = <span class="built_in">BuildLivenessSets</span>(graph_);</span><br><span class="line">    <span class="built_in">insertBailOuts</span>(graph_-&gt;<span class="built_in">block</span>());</span><br><span class="line">    
<span class="built_in">replaceGuardsWithBailouts</span>();</span><br><span class="line">    <span class="comment">// embed a full original graph</span></span><br><span class="line">    <span class="built_in">addUnoptimizedFuncToBailouts</span>();</span><br><span class="line">  &#125;</span><br><span class="line">......</span><br></pre></td></tr></table></figure>

<p>We see the term <code>liveness</code>. This is an analysis used in compilers, and it is explained in <a target="_blank" rel="noopener" href="https://en.wikipedia.org/wiki/Live_variable_analysis">Wikipedia: Live variable analysis</a>. We won’t go deep into it; all we need to know is its definition:</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">A variable is live at some point if it holds a value that may be needed in the future, or equivalently if its value may be read before the next time the variable is written to.</span><br></pre></td></tr></table></figure>

<p>Now, let’s look at <code>insertBailOuts</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// Inserts prim::BailOut nodes for every prim::Guard</span></span><br><span class="line"><span class="comment">// Each BailOut point takes the set of inputs live</span></span><br><span class="line"><span class="comment">// at that particular execution point.</span></span><br><span class="line"><span class="comment">// An input is live if it&#x27;s used beyond the guard/BailOut</span></span><br><span class="line"><span class="comment">// point to compute graph&#x27;s outputs</span></span><br><span class="line"><span class="function"><span class="keyword">void</span> <span 
class="title">insertBailOuts</span><span class="params">(Block* b)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">for</span> (<span class="keyword">auto</span> it = b-&gt;<span class="built_in">nodes</span>().<span class="built_in">begin</span>(); it != b-&gt;<span class="built_in">nodes</span>().<span class="built_in">end</span>(); ++it) &#123;</span><br><span class="line">    <span class="keyword">if</span> (it-&gt;<span class="built_in">kind</span>() == prim::Guard) &#123;</span><br><span class="line">      <span class="keyword">auto</span> bailout_node = b-&gt;<span class="built_in">owningGraph</span>()-&gt;<span class="built_in">create</span>(prim::BailOut);</span><br><span class="line">      bailouts_.<span class="built_in">push_back</span>(bailout_node);</span><br><span class="line"></span><br><span class="line">      <span class="keyword">const</span> <span class="keyword">auto</span>&amp; live_inputs = liveness_sets_[*it];</span><br><span class="line"></span><br><span class="line">      <span class="comment">// guarded inputs come first</span></span><br><span class="line">      <span class="comment">// currently, there&#x27;s always one guarded input</span></span><br><span class="line">      bailout_node-&gt;<span class="built_in">addInput</span>(it-&gt;<span class="built_in">input</span>());</span><br><span class="line">      <span class="keyword">for</span> (<span class="keyword">auto</span> li : live_inputs) &#123;</span><br><span class="line">        <span class="comment">// Guarded inputs have already been added</span></span><br><span class="line">        <span class="comment">// Also, skip some inputs that BailOutGraphBuilder can</span></span><br><span class="line">        <span class="comment">// materialize into bailout graphs directly</span></span><br><span class="line">        <span class="keyword">if</span> (!<span class="built_in">shouldBeCapturedInByBailOut</span>(li-&gt;<span class="built_in">node</span>()) || li == 
it-&gt;<span class="built_in">input</span>()) &#123;</span><br><span class="line">          <span class="keyword">continue</span>;</span><br><span class="line">        &#125;</span><br><span class="line">        bailout_node-&gt;<span class="built_in">addInput</span>(li);</span><br><span class="line">      &#125;</span><br><span class="line"></span><br><span class="line">      bailout_node-&gt;<span class="built_in">output</span>()-&gt;<span class="built_in">setType</span>(it-&gt;<span class="built_in">output</span>()-&gt;<span class="built_in">type</span>());</span><br><span class="line">      bailout_node-&gt;<span class="built_in">i_</span>(attr::index, bailout_index_++);</span><br><span class="line">      <span class="comment">// we can&#x27;t immediately replace nodes since this action will corrupt</span></span><br><span class="line">      <span class="comment">// the liveness sets of following BailOut nodes if any of their</span></span><br><span class="line">      <span class="comment">// arguments are BailOut nodes themselves</span></span><br><span class="line">      replacements_.<span class="built_in">insert</span>(&#123;it-&gt;<span class="built_in">output</span>(), bailout_node-&gt;<span class="built_in">output</span>()&#125;);</span><br><span class="line"></span><br><span class="line">    &#125; <span class="keyword">else</span> &#123;</span><br><span class="line">      <span class="keyword">for</span> (<span class="keyword">auto</span> ib : it-&gt;<span class="built_in">blocks</span>()) &#123;</span><br><span class="line">        <span class="built_in">insertBailOuts</span>(ib);</span><br><span class="line">      &#125;</span><br><span class="line">    &#125;</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We can see that it recursively traverses the graph: at each <code>prim::Guard</code> node, it creates a new node of kind <code>prim::BailOut</code> and sets its inputs to the inputs that are live at the current point. The newly created node is not inserted into the graph, but is stored in <code>replacements_</code> instead.</p>
<p>Then in <code>replaceGuardsWithBailouts</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// replace each prim::Guard</span></span><br><span class="line"><span class="comment">// with its corresponding prim::BailOut</span></span><br><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">replaceGuardsWithBailouts</span><span class="params">()</span> </span>&#123;</span><br><span class="line">  <span class="keyword">for</span> (<span class="keyword">auto</span> e : replacements_) &#123;</span><br><span class="line">    e.first-&gt;<span class="built_in">replaceAllUsesWith</span>(e.second);</span><br><span class="line">    e.second-&gt;<span class="built_in">node</span>()-&gt;<span class="built_in">insertAfter</span>(e.first-&gt;<span class="built_in">node</span>());</span><br><span class="line">    e.first-&gt;<span class="built_in">node</span>()-&gt;<span class="built_in">destroy</span>();</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>Now that we know how the guards are converted to bailout nodes, the last step is to see how the bailout node is executed. From previous reading, we know that instructions for different kinds of nodes are emitted in the constructor of <code>CodeImpl</code>, which calls <code>emitCodeForBlock</code>, which calls <code>emitNodeAtBlockLevel</code>, which calls <code>emitNode</code>, which dispatches on node kind and calls <code>emitBailOut</code> for bailout nodes.</p>
<p>Now let’s go back to see <code>emitBailOut</code> again:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">emitBailOut</span><span class="params">(Node* node)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">auto</span> jf_index = <span class="built_in">emitGuard</span>(node);</span><br><span class="line">  <span class="keyword">auto</span> unoptimized_graph = node-&gt;<span class="built_in">inputs</span>().<span class="built_in">at</span>(<span class="number">0</span>)-&gt;<span class="built_in">node</span>()-&gt;<span class="built_in">g</span>(attr::Subgraph);</span><br><span class="line">  <span class="comment">// note, guaded input is already loaded onto the stack</span></span><br><span class="line">  <span class="comment">// for GUARD instruction</span></span><br><span class="line">  <span class="built_in">emitLoadInputs</span>(node-&gt;<span class="built_in">inputs</span>().<span class="built_in">slice</span>(<span class="number">2</span>));</span><br><span class="line">  <span class="built_in">insertInstruction</span>(TAIL_CALL, function_table_.<span class="built_in">size</span>());</span><br><span 
class="line">  <span class="built_in">TORCH_INTERNAL_ASSERT</span>(node-&gt;<span class="built_in">kind</span>() == prim::BailOut);</span><br><span class="line">  <span class="keyword">auto</span> bailout_index = node-&gt;<span class="built_in">i</span>(attr::index);</span><br><span class="line">  <span class="built_in">TORCH_INTERNAL_ASSERT</span>(bailout_index &gt;= <span class="number">0</span>);</span><br><span class="line"></span><br><span class="line">  <span class="keyword">auto</span> build_bailout_graph = [bailout_index,</span><br><span class="line">                              unoptimized_graph](Function &amp;func) &#123;</span><br><span class="line"></span><br><span class="line">    <span class="built_in">BuildBailOutGraphFrom</span>(bailout_index, unoptimized_graph, func.<span class="built_in">graph</span>());</span><br><span class="line">  &#125;;</span><br><span class="line"></span><br><span class="line">  <span class="keyword">auto</span> empty_graph = std::make_shared&lt;Graph&gt;();</span><br><span class="line">  <span class="keyword">auto</span> func = torch::make_unique&lt;Function&gt;(</span><br><span class="line">      <span class="string">&quot;bailout&quot;</span>, empty_graph, build_bailout_graph);</span><br><span class="line">  function_table_.<span class="built_in">emplace_back</span>(func.<span class="built_in">get</span>());</span><br><span class="line">  bailout_functions_.<span class="built_in">emplace_back</span>(std::<span class="built_in">move</span>(func));</span><br><span class="line">  <span class="built_in">createBailoutBlock</span>(jf_index);</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We can see that inside <code>emitBailOut</code>, it first calls <code>emitGuard</code> and then emits a <code>TAIL_CALL</code> instruction. The <code>emitGuard</code> is:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">size_t</span> <span class="title">emitGuard</span><span class="params">(Node* node)</span> </span>&#123;</span><br><span class="line">  <span class="comment">// unoptimized graph is at index 0</span></span><br><span class="line">  <span class="comment">// guarded input is at index 1</span></span><br><span class="line">  <span class="comment">// the rest of args follow</span></span><br><span class="line">  <span class="built_in">emitLoadInputs</span>(node-&gt;<span class="built_in">inputs</span>().<span class="built_in">slice</span>(<span class="number">1</span>, <span class="number">1</span>));</span><br><span class="line">  <span class="built_in">insertInstruction</span>(GUARD, type_table_.<span class="built_in">size</span>());</span><br><span class="line"></span><br><span class="line">  type_table_.<span class="built_in">emplace_back</span>(node-&gt;<span class="built_in">outputs</span>().<span class="built_in">at</span>(<span class="number">0</span>)-&gt;<span class="built_in">type</span>());</span><br><span class="line">  <span class="built_in">insertInstruction</span>(JF, <span class="number">0</span> <span class="comment">/* to be patched */</span>);</span><br><span class="line"></span><br><span class="line">  <span class="keyword">return</span> instructions_.<span class="built_in">size</span>() - <span class="number">1</span>;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We can see that it emits a <code>GUARD</code> instruction and a <code>JF</code> instruction. The definition of how each instruction is executed is defined in <code>InterpreterStateImpl::runImpl</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br></pre></td><td class="code"><pre><span class="line"></span><br><span class="line"><span class="keyword">case</span> JF:</span><br><span class="line">  af.pc += (<span class="built_in">pop</span>(stack).<span class="built_in">toBool</span>()) ? 
<span class="number">1</span> : inst.X;</span><br><span class="line">  <span class="keyword">break</span>;</span><br><span class="line">.......</span><br><span class="line"><span class="keyword">case</span> GUARD: &#123;</span><br><span class="line">  <span class="keyword">auto</span> t = stack.<span class="built_in">back</span>().<span class="built_in">toTensor</span>();</span><br><span class="line">  <span class="keyword">const</span> TypePtr&amp; expected = af.types[inst.X];</span><br><span class="line">  <span class="keyword">bool</span> comp = expected-&gt;cast&lt;TensorType&gt;()</span><br><span class="line">                  -&gt;<span class="built_in">isCompatibleWithInCurrentExecutionContext</span>(t);</span><br><span class="line">  <span class="built_in">push</span>(stack, comp);</span><br><span class="line">  ++af.pc;</span><br><span class="line">&#125; <span class="keyword">break</span>;</span><br><span class="line"><span class="keyword">case</span> TAIL_CALL: &#123;</span><br><span class="line">  <span class="built_in">GRAPH_DEBUG</span>(<span class="string">&quot;running TAIL_CALL for &quot;</span>, inst.X);</span><br><span class="line">  af.functions[inst.X]-&gt;<span class="built_in">ensure_defined</span>();</span><br><span class="line">  <span class="keyword">size_t</span> remaining_bailout_depth =</span><br><span class="line">      frames.<span class="built_in">back</span>().function-&gt;remaining_bailout_depth_ &gt; <span class="number">0</span></span><br><span class="line">      ? 
frames.<span class="built_in">back</span>().function-&gt;remaining_bailout_depth_ - <span class="number">1</span></span><br><span class="line">      : <span class="number">0</span>;</span><br><span class="line">  <span class="keyword">const</span> Code&amp; code = af.functions[inst.X]</span><br><span class="line">                          -&gt;<span class="built_in">get_executor</span>()</span><br><span class="line">                          .<span class="built_in">getPlanFor</span>(stack, remaining_bailout_depth)</span><br><span class="line">                          .code;</span><br><span class="line">  <span class="keyword">size_t</span> num_inputs = code.<span class="built_in">num_inputs</span>();</span><br><span class="line">  <span class="keyword">size_t</span> base_pointer = frames.<span class="built_in">back</span>().base_pointer;</span><br><span class="line">  <span class="built_in">TORCH_INTERNAL_ASSERT</span>(stack.<span class="built_in">size</span>() &gt;= num_inputs);</span><br><span class="line">  <span class="keyword">size_t</span> inputs_start = stack.<span class="built_in">size</span>() - num_inputs;</span><br><span class="line">  <span class="keyword">for</span> (<span class="keyword">size_t</span> i = <span class="number">0</span>; i &lt; num_inputs; ++i) &#123;</span><br><span class="line">    stack.<span class="built_in">at</span>(base_pointer + i) =</span><br><span class="line">        std::<span class="built_in">move</span>(stack.<span class="built_in">at</span>(inputs_start + i));</span><br><span class="line">  &#125;</span><br><span class="line">  stack.<span class="built_in">resize</span>(base_pointer + num_inputs);</span><br><span class="line">  <span class="built_in">leaveFrame</span>();</span><br><span class="line">  <span class="built_in">enterFrame</span>(code, base_pointer);</span><br><span class="line">  af = <span class="built_in">ActiveFrame</span>(frames.<span class="built_in">back</span>());</span><br><span class="line">&#125; 
<span class="keyword">break</span>;</span><br></pre></td></tr></table></figure>

<p>We will not worry about how the jump address is computed, but we can see that the big picture is: check whether the tensor on the stack is compatible, and if not, call the backup unoptimized graph.</p>

    </div>

    
    
    
        

  <div class="followme">
    <p>Welcome to my other publishing channels</p>

    <div class="social-list">

        <div class="social-item">
          <a target="_blank" class="social-link" href="https://twitter.com/gaoxiang_ai">
            <span class="icon">
              <i class="fab fa-twitter"></i>
            </span>

            <span class="label">Twitter</span>
          </a>
        </div>

        <div class="social-item">
          <a target="_blank" class="social-link" href="/atom.xml">
            <span class="icon">
              <i class="fa fa-rss"></i>
            </span>

            <span class="label">RSS</span>
          </a>
        </div>
    </div>
  </div>


      <footer class="post-footer">
          <div class="post-tags">
              <a href="/tags/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0/" rel="tag"># 机器学习</a>
              <a href="/tags/PyTorch/" rel="tag"># PyTorch</a>
              <a href="/tags/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0/" rel="tag"># 深度学习</a>
          </div>

        


        
    <div class="post-nav">
      <div class="post-nav-item">
    <a href="/2018/11/09/GRE%E3%80%81Wireguard%E3%80%81%E7%BD%91%E6%A1%A5%E4%B8%8EIPv6/" rel="prev" title="GRE、Wireguard、网桥与IPv6">
      <i class="fa fa-chevron-left"></i> GRE、Wireguard、网桥与IPv6
    </a></div>
      <div class="post-nav-item">
    <a href="/2021/04/04/Moderna-%E8%BE%89%E7%91%9E-%E4%BB%A5%E5%8F%8A%E5%BC%BA%E7%94%9F%E4%B8%89%E7%A7%8D%E7%96%AB%E8%8B%97%E4%BF%A1%E6%81%AF%E6%80%BB%E7%BB%93/" rel="next" title="Moderna, Pfizer, 以及强生三种疫苗信息总结">
      Moderna, Pfizer, 以及强生三种疫苗信息总结 <i class="fa fa-chevron-right"></i>
    </a></div>
    </div>
      </footer>
    
  </article>
  
  
  



          </div>
          
    
  <div class="comments">
    <div id="disqus_thread">
      <noscript>Please enable JavaScript to view the comments powered by Disqus.</noscript>
    </div>
  </div>
  

<script>
  // Restore the previously active comment tab once the tabs are registered.
  // When storage is enabled, the user's last choice (saved in localStorage)
  // wins over the theme's configured default.
  window.addEventListener('tabs:register', () => {
    let preferred = CONFIG.comments.activeClass;
    if (CONFIG.comments.storage) {
      preferred = localStorage.getItem('comments_active') || preferred;
    }
    if (!preferred) return;
    const tab = document.querySelector(`a[href="#comment-${preferred}"]`);
    if (tab) tab.click();
  });
  // Persist the user's tab choice whenever a comment tab pane is clicked.
  if (CONFIG.comments.storage) {
    window.addEventListener('tabs:click', event => {
      if (!event.target.matches('.tabs-comment .tab-content .tab-pane')) return;
      localStorage.setItem('comments_active', event.target.classList[1]);
    });
  }
</script>

        </div>
          
  
  <div class="toggle sidebar-toggle">
    <span class="toggle-line toggle-line-first"></span>
    <span class="toggle-line toggle-line-middle"></span>
    <span class="toggle-line toggle-line-last"></span>
  </div>

  <aside class="sidebar">
    <div class="sidebar-inner">

      <ul class="sidebar-nav motion-element">
        <li class="sidebar-nav-toc">
          Table of Contents
        </li>
        <li class="sidebar-nav-overview">
          Overview
        </li>
      </ul>

      <!--noindex-->
      <div class="post-toc-wrap sidebar-panel">
          <div class="post-toc motion-element"><ol class="nav"><li class="nav-item nav-level-1"><a class="nav-link" href="#Starting-point-torch-jit-script"><span class="nav-number">1.</span> <span class="nav-text">Starting point: torch.jit.script</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#The-Python-frontend"><span class="nav-number">2.</span> <span class="nav-text">The Python frontend</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#From-Python-AST-to-PyTorch-IR-part-1"><span class="nav-number">3.</span> <span class="nav-text">From Python AST to PyTorch IR: part 1</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#The-PyTorch-IR"><span class="nav-number">4.</span> <span class="nav-text">The PyTorch IR</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#From-Python-AST-to-PyTorch-IR-part-2"><span class="nav-number">5.</span> <span class="nav-text">From Python AST to PyTorch IR: part 2</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#The-Graph-Executor"><span class="nav-number">6.</span> <span class="nav-text">The Graph Executor</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#GraphExecutorImpl"><span class="nav-number">6.1.</span> <span class="nav-text">GraphExecutorImpl</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#ProfilingGraphExecutorImpl"><span class="nav-number">6.2.</span> <span class="nav-text">ProfilingGraphExecutorImpl</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#PyTorch-IR-%E2%80%93-gt-Interpreter-Instructions"><span class="nav-number">7.</span> <span class="nav-text">PyTorch IR –&gt; Interpreter Instructions</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#The-Virtual-Machine"><span class="nav-number">8.</span> <span class="nav-text">The Virtual Machine</span></a></li><li class="nav-item 
nav-level-1"><a class="nav-link" href="#Profiling-and-bailout"><span class="nav-number">9.</span> <span class="nav-text">Profiling and bailout</span></a></li></ol></div>
      </div>
      <!--/noindex-->

      <div class="site-overview-wrap sidebar-panel">
        <div class="site-author motion-element" itemprop="author" itemscope itemtype="http://schema.org/Person">
  <p class="site-author-name" itemprop="name">zasdfgbnm</p>
  <div class="site-description" itemprop="description"></div>
</div>
<div class="site-state-wrap motion-element">
  <nav class="site-state">
      <div class="site-state-item site-state-posts">
          <a href="/archives/">
        
          <span class="site-state-item-count">11</span>
          <span class="site-state-item-name">posts</span>
        </a>
      </div>
      <div class="site-state-item site-state-categories">
            <a href="/categories/">
          
        <span class="site-state-item-count">5</span>
        <span class="site-state-item-name">categories</span></a>
      </div>
      <div class="site-state-item site-state-tags">
            <a href="/tags/">
          
        <span class="site-state-item-count">56</span>
        <span class="site-state-item-name">tags</span></a>
      </div>
  </nav>
</div>
  <div class="links-of-author motion-element">
      <span class="links-of-author-item">
        <a href="https://github.com/zasdfgbnm" title="GitHub → https:&#x2F;&#x2F;github.com&#x2F;zasdfgbnm" rel="noopener" target="_blank"><i class="fab fa-github fa-fw"></i>GitHub</a>
      </span>
      <span class="links-of-author-item">
        <a href="https://twitter.com/gaoxiang_ai" title="Twitter → https:&#x2F;&#x2F;twitter.com&#x2F;gaoxiang_ai" rel="noopener" target="_blank"><i class="fab fa-twitter fa-fw"></i>Twitter</a>
      </span>
  </div>



      </div>

    </div>
  </aside>
  <div id="sidebar-dimmer"></div>


      </div>
    </main>

    <footer class="footer">
      <div class="footer-inner">
        

        

<div class="copyright">
  
  &copy; 
  <span itemprop="copyrightYear">2021</span>
  <span class="with-love">
    <i class="fa fa-heart"></i>
  </span>
  <span class="author" itemprop="copyrightHolder">zasdfgbnm</span>
</div>
  <div class="powered-by">Powered by <a href="https://hexo.io/" class="theme-link" rel="noopener" target="_blank">Hexo</a> & <a href="https://muse.theme-next.org/" class="theme-link" rel="noopener" target="_blank">NexT.Muse</a>
  </div>

        








      </div>
    </footer>
  </div>

  
  <script src="/lib/anime.min.js"></script>
  <script src="/lib/velocity/velocity.min.js"></script>
  <script src="/lib/velocity/velocity.ui.min.js"></script>

<script src="/js/utils.js"></script>

<script src="/js/motion.js"></script>


<script src="/js/schemes/muse.js"></script>


<script src="/js/next-boot.js"></script>




  
  <script>
    // Baidu push snippet (vendored): finds this page's canonical URL and pings
    // Baidu's s.gif endpoint with it so Baidu's crawler can discover the page.
    (function(){
      var canonicalURL, curProtocol;
      //Get the <link> tag
      var x=document.getElementsByTagName("link");
		//Find the last canonical URL
		// NOTE(review): the loop index `i` is an implicit global (no `var`) —
		// harmless in this page, but worth confirming against the upstream snippet.
		if(x.length > 0){
			for (i=0;i<x.length;i++){
				if(x[i].rel.toLowerCase() == 'canonical' && x[i].href){
					canonicalURL=x[i].href;
				}
			}
		}
    //Get protocol
	    if (!canonicalURL){
	    	curProtocol = window.location.protocol.split(':')[0];
	    }
	    else{
	    	curProtocol = canonicalURL.split(':')[0];
	    }
      //Get current URL if the canonical URL does not exist
	    if (!canonicalURL) canonicalURL = window.location.href;
	    //Assign script content. Replace current URL with the canonical URL
      // NOTE(review): in the regex below, `[http|https]` is a character class
      // (matches single characters), not alternation — presumably it still
      // matches baidu.com referrers as intended; verify against the upstream
      // Baidu share snippet before changing it.
      !function(){var e=/([http|https]:\/\/[a-zA-Z0-9\_\.]+\.baidu\.com)/gi,r=canonicalURL,t=document.referrer;if(!e.test(r)){var n=(String(curProtocol).toLowerCase() === 'https')?"https://sp0.baidu.com/9_Q4simg2RQJ8t7jm9iCKT-xh_/s.gif":"//api.share.baidu.com/s.gif";t?(n+="?r="+encodeURIComponent(document.referrer),r&&(n+="&l="+r)):r&&(n+="?l="+r);var i=new Image;i.src=n}}(window);})();
  </script>















  

  

  

<script>
  // Inject the Disqus comment-count script after the whole page has loaded,
  // so fetching it never delays rendering of the page itself.
  function loadCount() {
    var tag = document.createElement('script');
    tag.src = 'https://zasdfgbnm-github-io.disqus.com/count.js';
    tag.id = 'dsq-count-scr';
    (document.head || document.body).appendChild(tag);
  }
  window.addEventListener('load', loadCount, false);
</script>
<script>
  // Disqus page configuration: tells Disqus which thread belongs to this page.
  // The global name `disqus_config` is read by Disqus's embed script — keep it.
  var disqus_config = function() {
    this.page.url = "https://zasdfgbnm.github.io/2020/02/07/PyTorch-JIT-Source-Code-Read-Note-Updated-202002/";
    this.page.identifier = "2020/02/07/PyTorch-JIT-Source-Code-Read-Note-Updated-202002/";
    this.page.title = "PyTorch JIT Source Code Read Note (Updated at Feb 2020)";
  };
  // Lazily load (or re-bind) Disqus for the comments container.
  NexT.utils.loadComments(document.querySelector('#disqus_thread'), () => {
    if (!window.DISQUS) {
      // First load: inject the Disqus embed script.
      var embed = document.createElement('script');
      embed.src = 'https://zasdfgbnm-github-io.disqus.com/embed.js';
      embed.setAttribute('data-timestamp', '' + +new Date());
      (document.head || document.body).appendChild(embed);
      return;
    }
    // Already loaded (e.g. after an in-page navigation): reset to this thread.
    DISQUS.reset({
      reload: true,
      config: disqus_config
    });
  });
</script>

</body>
</html>
