<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
<!-- Do not cap zoom with maximum-scale: it blocks pinch-zoom (WCAG 1.4.4) -->
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#222">
<meta name="generator" content="Hexo 5.4.0">
  <link rel="apple-touch-icon" sizes="180x180" href="/images/apple-touch-icon-next.png">
  <link rel="icon" type="image/png" sizes="32x32" href="/images/favicon-32x32-next.png">
  <link rel="icon" type="image/png" sizes="16x16" href="/images/favicon-16x16-next.png">
  <link rel="mask-icon" href="/images/logo.svg" color="#222">
  <meta name="google-site-verification" content="d44tDfSSWxm1_XP1dAq65hkgyD6zw70Ua9JdCaJqWGg">

<link rel="stylesheet" href="/css/main.css">


<link rel="stylesheet" href="/lib/font-awesome/css/all.min.css">
<script id="hexo-configurations">
    // Site-wide NexT theme configuration serialized by Hexo at build time;
    // consumed by the theme's client-side scripts via the global CONFIG object.
    var NexT = window.NexT || {};
    var CONFIG = {"hostname":"zasdfgbnm.github.io","root":"/","scheme":"Muse","version":"7.8.0","exturl":false,"sidebar":{"position":"left","display":"post","padding":18,"offset":12,"onmobile":false},"copycode":{"enable":false,"show_result":false,"style":null},"back2top":{"enable":true,"sidebar":false,"scrollpercent":false},"bookmark":{"enable":false,"color":"#222","save":"auto"},"fancybox":false,"mediumzoom":false,"lazyload":false,"pangu":false,"comments":{"style":"tabs","active":null,"storage":true,"lazyload":false,"nav":null},"algolia":{"hits":{"per_page":10},"labels":{"input_placeholder":"Search for Posts","hits_empty":"We didn't find any results for the search: ${query}","hits_stats":"${hits} results found in ${time} ms"}},"localsearch":{"enable":false,"trigger":"auto","top_n_per_article":1,"unescape":false,"preload":false},"motion":{"enable":true,"async":false,"transition":{"post_block":"fadeIn","post_header":"slideDownIn","post_body":"slideDownIn","coll_header":"slideLeftIn","sidebar":"slideUpIn"}}};
  </script>

  <!-- SEO description plus Open Graph / Twitter Card metadata for link previews -->
  <meta name="description" content="This is my note for reading PyTorch’s JIT source. We begin by looking at torch.jit.script and torch.jit.script_method to find the frontend that compiles the Python code into PyTorch’s tree views, and">
<meta property="og:type" content="article">
<meta property="og:title" content="PyTorch JIT Source Code Read Note">
<meta property="og:url" content="https://zasdfgbnm.github.io/2018/09/20/PyTorch-JIT-Source-Code-Read-Note/index.html">
<meta property="og:site_name" content="zasdfgbnm">
<meta property="og:description" content="This is my note for reading PyTorch’s JIT source. We begin by looking at torch.jit.script and torch.jit.script_method to find the frontend that compiles the Python code into PyTorch’s tree views, and">
<meta property="og:locale" content="en_US">
<meta property="article:published_time" content="2018-09-21T00:12:21.000Z">
<meta property="article:modified_time" content="2021-04-04T05:17:59.765Z">
<meta property="article:author" content="zasdfgbnm">
<meta property="article:tag" content="机器学习">
<meta property="article:tag" content="PyTorch">
<meta property="article:tag" content="深度学习">
<meta name="twitter:card" content="summary">

<!-- Canonical URL (without index.html) for search engines -->
<link rel="canonical" href="https://zasdfgbnm.github.io/2018/09/20/PyTorch-JIT-Source-Code-Read-Note/">


<script id="page-configurations">
  // https://hexo.io/docs/variables.html
  // Per-page flags consumed by NexT's client-side scripts
  // (extends the global CONFIG defined in the hexo-configurations script).
  CONFIG.page = {
    sidebar: "",
    isHome : false,
    isPost : true,
    lang   : 'en'
  };
</script>

  <title>PyTorch JIT Source Code Read Note | zasdfgbnm</title>

    <!-- Google Analytics (gtag.js) -->
    <script async src="https://www.googletagmanager.com/gtag/js?id=UA-7583294-5"></script>
    <script>
      // Only record hits when the page is served from the canonical hostname,
      // so mirrors and local previews do not pollute the analytics data.
      if (CONFIG.hostname === location.hostname) {
        window.dataLayer = window.dataLayer || [];
        function gtag(){dataLayer.push(arguments);}
        gtag('js', new Date());
        gtag('config', 'UA-7583294-5');
      }
    </script>


  <!-- Baidu Tongji analytics loader -->
  <script>
    var _hmt = _hmt || [];
    (function () {
      // Create the tracker script and insert it before the first
      // <script> element already on the page.
      var tracker = document.createElement("script");
      tracker.src = "https://hm.baidu.com/hm.js?a56abdeb557a286a6b7a104348fdfbcd";
      var firstScript = document.getElementsByTagName("script")[0];
      firstScript.parentNode.insertBefore(tracker, firstScript);
    })();
  </script>




  <noscript>
  <style>
  /* JS-off fallback: with scripts disabled the theme's "motion" animations
     never run, so reset the animated properties (opacity/offset) that the
     .use-motion styles rely on, making the content visible immediately. */
  .use-motion .brand,
  .use-motion .menu-item,
  .sidebar-inner,
  .use-motion .post-block,
  .use-motion .pagination,
  .use-motion .comments,
  .use-motion .post-header,
  .use-motion .post-body,
  .use-motion .collection-header { opacity: initial; }

  .use-motion .site-title,
  .use-motion .site-subtitle {
    opacity: initial;
    top: initial;
  }

  .use-motion .logo-line-before i { left: initial; }
  .use-motion .logo-line-after i { right: initial; }
  </style>
</noscript>

<link rel="alternate" href="/atom.xml" title="zasdfgbnm" type="application/atom+xml">
</head>

<body itemscope itemtype="http://schema.org/WebPage">
  <div class="container use-motion">
    <div class="headband"></div>

    <header class="header" itemscope itemtype="http://schema.org/WPHeader">
      <div class="header-inner"><div class="site-brand-container">
  <!-- NOTE(review): the .toggle controls below are <div>s — presumably the
       theme's JS binds click handlers — so they are not keyboard-focusable.
       A <button type="button"> would be more accessible; verify against the
       theme's CSS/JS selectors before changing. -->
  <div class="site-nav-toggle">
    <div class="toggle" aria-label="Toggle navigation bar">
      <span class="toggle-line toggle-line-first"></span>
      <span class="toggle-line toggle-line-middle"></span>
      <span class="toggle-line toggle-line-last"></span>
    </div>
  </div>

  <div class="site-meta">

    <a href="/" class="brand" rel="start">
      <span class="logo-line-before"><i></i></span>
      <h1 class="site-title">zasdfgbnm</h1>
      <span class="logo-line-after"><i></i></span>
    </a>
  </div>

  <div class="site-nav-right">
    <div class="toggle popup-trigger">
    </div>
  </div>
</div>




<!-- Primary site navigation. Each icon is followed by visible link text,
     so the icons are decorative and hidden from assistive technology. -->
<nav class="site-nav">
  <ul id="menu" class="main-menu menu">
        <li class="menu-item menu-item-home">

    <a href="/" rel="section"><i class="fa fa-home fa-fw" aria-hidden="true"></i>Home</a>

  </li>
        <li class="menu-item menu-item-archives">

    <a href="/archives/" rel="section"><i class="fa fa-archive fa-fw" aria-hidden="true"></i>Archives</a>

  </li>
        <li class="menu-item menu-item-about">

    <a href="/about/" rel="section"><i class="fa fa-user fa-fw" aria-hidden="true"></i>About</a>

  </li>
        <li class="menu-item menu-item-tags">

    <a href="/tags/" rel="section"><i class="fa fa-tags fa-fw" aria-hidden="true"></i>Tags</a>

  </li>
        <li class="menu-item menu-item-categories">

    <a href="/categories/" rel="section"><i class="fa fa-th fa-fw" aria-hidden="true"></i>Categories</a>

  </li>
        <li class="menu-item menu-item-sitemap">

    <a href="/sitemap.xml" rel="section"><i class="fa fa-sitemap fa-fw" aria-hidden="true"></i>Sitemap</a>

  </li>
  </ul>
</nav>




</div>
    </header>

    
  <!-- Scroll-to-top control; the <span> shows scroll progress ("0%" is the
       initial value, updated by theme JS). The arrow icon is decorative.
       NOTE(review): a <button type="button"> would make this keyboard-
       accessible — verify theme CSS/JS selectors before changing the tag. -->
  <div class="back-to-top">
    <i class="fa fa-arrow-up" aria-hidden="true"></i>
    <span>0%</span>
  </div>


    <main class="main">
      <div class="main-inner">
        <div class="content-wrap">


          <div class="content post posts-expand">



  <article itemscope itemtype="http://schema.org/Article" class="post-block" lang="en">
    <link itemprop="mainEntityOfPage" href="https://zasdfgbnm.github.io/2018/09/20/PyTorch-JIT-Source-Code-Read-Note/">

    <!-- Hidden schema.org microdata describing the article's author and
         publisher; not rendered, read by search engines. -->
    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="image" content="/images/avatar.gif">
      <meta itemprop="name" content="zasdfgbnm">
      <meta itemprop="description" content="">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="zasdfgbnm">
    </span>
      <!-- Post title and metadata (dates, category, comment count).
           The Font Awesome icons duplicate the adjacent text labels, so they
           are decorative and hidden from assistive technology. -->
      <header class="post-header">
        <h1 class="post-title" itemprop="name headline">
          PyTorch JIT Source Code Read Note
        </h1>

        <div class="post-meta">
            <span class="post-meta-item">
              <span class="post-meta-item-icon">
                <i class="far fa-calendar" aria-hidden="true"></i>
              </span>
              <span class="post-meta-item-text">Posted on</span>

              <time title="Created: 2018-09-20 20:12:21" itemprop="dateCreated datePublished" datetime="2018-09-20T20:12:21-04:00">2018-09-20</time>
            </span>
              <span class="post-meta-item">
                <span class="post-meta-item-icon">
                  <i class="far fa-calendar-check" aria-hidden="true"></i>
                </span>
                <span class="post-meta-item-text">Edited on</span>
                <time title="Modified: 2021-04-04 01:17:59" itemprop="dateModified" datetime="2021-04-04T01:17:59-04:00">2021-04-04</time>
              </span>
            <span class="post-meta-item">
              <span class="post-meta-item-icon">
                <i class="far fa-folder" aria-hidden="true"></i>
              </span>
              <span class="post-meta-item-text">In</span>
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/categories/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0/" itemprop="url" rel="index"><span itemprop="name">机器学习</span></a>
                </span>
            </span>


  <span class="post-meta-item">

      <span class="post-meta-item-icon">
        <i class="far fa-comment" aria-hidden="true"></i>
      </span>
      <span class="post-meta-item-text">Disqus: </span>

    <!-- Disqus fills the comment count into the span below client-side -->
    <a title="disqus" href="/2018/09/20/PyTorch-JIT-Source-Code-Read-Note/#disqus_thread" itemprop="discussionUrl">
      <span class="post-comments-count disqus-comment-count" data-disqus-identifier="2018/09/20/PyTorch-JIT-Source-Code-Read-Note/" itemprop="commentCount"></span>
    </a>
  </span>


        </div>
      </header>

    
    
    
    <div class="post-body" itemprop="articleBody">

      
        <p>This is my note for reading PyTorch’s JIT source. We begin by looking at <code>torch.jit.script</code> and <code>torch.jit.script_method</code> to find the frontend that compiles the Python code into PyTorch’s tree views, and the backend that compiles tree views to graph. We also read the structure of the internal representation of PyTorch’s graph. Finally we go to graph executor to look at how the computation graph is further compiled into instructions and how the action of these instructions are defined and executed.</p>
<span id="more"></span>

<p>PyTorch is under very active development, so the PyTorch source code at the time the reader is reading this article won’t be the same as when I wrote this article. To get the same source code as in this article, the reader can run the following command:</p>
<figure class="highlight bash"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">git checkout 76ab26cc3eff1d7ba822d8db93723f5c9598eead</span><br></pre></td></tr></table></figure>


<h1 id="Starting-point-script-and-script-method"><a href="#Starting-point-script-and-script-method" class="headerlink" title="Starting point: script and script_method"></a>Starting point: script and script_method</h1><p>In PyTorch, a Python function can be just-in-time compiled by doing something like:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="meta">@torch.jit.script</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">f</span>(<span class="params">x</span>):</span></span><br><span class="line">    <span class="keyword">return</span> x + x</span><br></pre></td></tr></table></figure>

<p>the <code>torch.jit.script</code> is a decorator of your function <code>f</code>. If you are unfamiliar with Python’s decorator, please refer to <a target="_blank" rel="noopener" href="https://realpython.com/primer-on-python-decorators/">this article</a>.</p>
<p>It is also possible to create a module with its method JIT compiled by doing something like:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">MyModule</span>(<span class="params">torch.jit.ScriptModule</span>):</span></span><br><span class="line"></span><br><span class="line"><span class="meta">    @torch.jit.script_method</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">f</span>(<span class="params">self.x</span>):</span></span><br><span class="line">        <span class="keyword">return</span> x * x</span><br><span class="line"></span><br><span class="line"><span class="meta">    @torch.jit.script_method</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">forward</span>(<span class="params">self, x</span>):</span></span><br><span class="line">        <span class="keyword">return</span> x + self.f(x)</span><br></pre></td></tr></table></figure>

<h2 id="Scripting-a-function"><a href="#Scripting-a-function" class="headerlink" title="Scripting a function"></a>Scripting a function</h2><p>We will start by looking at <code>torch.jit.script</code>. To read <code>torch.jit.script</code>, we begin by looking at <code>torch/jit/__init__.py</code>. To quickly locate <code>script</code>, search <code>def script</code> in your editor, and you will immediately find it:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">script</span>(<span class="params">fn, optimize=<span class="literal">True</span>, _frames_up=<span class="number">0</span></span>):</span></span><br><span class="line">    <span class="keyword">if</span> <span class="keyword">not</span> _enabled:</span><br><span class="line">        <span class="keyword">return</span> fn</span><br><span class="line">    rcb = createResolutionCallback(_frames_up + <span class="number">1</span>)</span><br><span class="line">    ast = get_jit_ast(fn, is_method=<span class="literal">False</span>)</span><br><span class="line">    graph = _jit_script_compile(ast, rcb)</span><br><span class="line">    mod = ScriptModule()</span><br><span class="line">    mod._create_method_from_graph(<span class="string">&#x27;forward&#x27;</span>, graph)</span><br><span class="line">    <span class="comment"># <span class="doctag">TODO:</span> refactor everything so we&#x27;re not 1) creating a ScriptModule</span></span><br><span class="line">    <span class="comment"># 2) Throwing everything away except for the graph 3) Creating a new</span></span><br><span class="line">    <span class="comment"># ScriptModule and dumping that graph in 4) Re-populating the schema</span></span><br><span class="line">    <span class="comment"># because it was lost doing the 
previous</span></span><br><span class="line">    mod.__getattr__(<span class="string">&#x27;forward&#x27;</span>).forward_schema(ast, <span class="literal">False</span>)</span><br><span class="line">    <span class="comment"># Forward docstrings</span></span><br><span class="line">    mod.__doc__ = fn.__doc__</span><br><span class="line">    <span class="keyword">return</span> mod</span><br></pre></td></tr></table></figure>

<p>In the beginning, <code>createResolutionCallback</code> is called. This function is defined in the same file. The source code tells us that it just returns a function that maps names to its values in the scope of the caller of <code>script</code>, this would be used later in C++ to read values from Python.</p>
<p>The <code>get_jit_ast</code> in the next line is imported from <code>torch.jit.frontend</code>. From the name of this function and its owning module, we can tell that this is the frontend of PyTorch’s JIT compiler, which compiles the source code of the scripted function into an abstract syntax tree (AST).</p>
<p>The next line uses <code>_jit_script_compile</code> to compile the AST obtained in the previous step into a computation graph. By searching for <code>_jit_script_compile</code>, we find something that reads: <code>torch._C._jit_script_compile</code>, which tells us that <code>_jit_script_compile</code> is implemented in C++.</p>
<p>The next couple lines basically create a <code>ScriptModule</code> whose <code>forward</code> method is the compiled graph.</p>
<h2 id="Scripting-a-module"><a href="#Scripting-a-module" class="headerlink" title="Scripting a module"></a>Scripting a module</h2><p>We start by looking at <code>torch.jit.script_method</code>:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line">ScriptMethodStub = namedtuple(<span class="string">&#x27;ScriptMethodStub&#x27;</span>, (<span class="string">&#x27;resolution_callback&#x27;</span>, <span class="string">&#x27;def_&#x27;</span>, <span class="string">&#x27;original_method&#x27;</span>))</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">script_method</span>(<span class="params">fn</span>):</span></span><br><span class="line">    <span class="keyword">if</span> <span class="keyword">not</span> _enabled:</span><br><span class="line">        <span class="keyword">return</span> fn</span><br><span class="line">    <span class="comment"># ...</span></span><br><span class="line">    rcb = createResolutionCallback(frames_up=<span class="number">2</span>)</span><br><span class="line">    ast = get_jit_ast(fn, is_method=<span class="literal">True</span>)</span><br><span class="line">    <span class="keyword">return</span> ScriptMethodStub(rcb, ast, fn)</span><br></pre></td></tr></table></figure>

<p>This is similar to <code>script</code>, but instead of creating and returning a module and putting the compiled function into its <code>forward</code> method, it simply uses a named tuple to store the resolution callback, the AST, and the original function.</p>
<p>This cannot be the end of the story, because a named tuple can never be called to do the computation. So there must be some magic somewhere that replaces the named tuples with something that actually does the job. For readers familiar with Python’s class meta-programming, it’s not hard to imagine how the magic happens. For those not familiar with class meta-programming, I would refer to the book <a target="_blank" rel="noopener" href="http://shop.oreilly.com/product/0636920032519.do">Fluent Python</a>. I will explain a bit of the detail here:</p>
<p>In Python, everything is an object, and a class itself is no exception. Classes in Python are objects of a special kind of class called a meta-class. During import time, when Python sees the following code:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">MyModule</span>(<span class="params">torch.jit.ScriptModule</span>):</span></span><br><span class="line"></span><br><span class="line"><span class="meta">    @torch.jit.script_method</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">f</span>(<span class="params">self.x</span>):</span></span><br><span class="line">        <span class="keyword">return</span> x * x</span><br><span class="line"></span><br><span class="line"><span class="meta">    @torch.jit.script_method</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">forward</span>(<span class="params">self, x</span>):</span></span><br><span class="line">        <span class="keyword">return</span> x + self.f(x)</span><br></pre></td></tr></table></figure>

<p>It will execute the body of the class definition, that is: compile the <code>return x * x</code>, create a function object with that compiled code, pass this function object to <code>torch.jit.script_method</code>, and set the returned named tuple as <code>f</code>. Then it does the same thing for <code>forward</code>. After that, Python will have a map of the attribute names and values of the class to be constructed. This map will then be passed to the meta-class of <code>MyModule</code> to actually construct <code>MyModule</code> as an instance of that meta-class.</p>
<p>To know in detail how this is achieved in PyTorch, we should take a look at <code>ScriptMeta</code> and <code>ScriptModule</code>. These two classes are lengthy, so I will not copy their full code here, but to use pseudocode to show what is done:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br></pre></td><td class="code"><pre><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">ScriptMeta</span>(<span class="params"><span class="built_in">type</span>(<span class="params">torch._C.ScriptModule</span>)</span>):</span></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">__init__</span>(<span class="params">cls, name, bases, attrs</span>):</span></span><br><span class="line">        <span class="comment"># delete all ScriptMethodStub</span></span><br><span class="line"></span><br><span class="line"><span class="meta">        @functools.wraps(<span class="params">original_init</span>)</span></span><br><span class="line">        <span class="function"><span class="keyword">def</span> <span class="title">init_then_register</span>(<span class="params">self, *args, **kwargs</span>):</span></span><br><span class="line">            <span class="comment"># invoke the original __init__</span></span><br><span class="line">            self._create_methods(defs, rcbs)</span><br><span class="line"></span><br><span class="line">        cls.__init__ = 
init_then_register</span><br><span class="line">        <span class="keyword">return</span> <span class="built_in">super</span>(ScriptMeta, cls).__init__(name, bases, attrs)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">ScriptModule</span>(<span class="params">with_metaclass(<span class="params">ScriptMeta, torch._C.ScriptModule, Module</span>)</span>):</span></span><br><span class="line">    <span class="comment"># ......</span></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">__getattr__</span>(<span class="params">self, attr</span>):</span></span><br><span class="line">        <span class="keyword">if</span> self._has_method(attr):</span><br><span class="line">            <span class="comment"># ......</span></span><br><span class="line">            <span class="keyword">return</span> self._get_method(attr)</span><br><span class="line">        <span class="comment"># .....</span></span><br><span class="line">        <span class="keyword">return</span> Module.__getattr__(self, attr)</span><br></pre></td></tr></table></figure>

<p>In the above pseudocode, <code>_create_methods</code>, <code>_has_method</code>, and <code>_get_method</code> are inherited from <code>torch._C.ScriptModule</code>. So a natural question to ask is then: what does <code>torch._C.ScriptModule</code> do? Before answering this question, let’s first take a look at the frontend.</p>
<h1 id="The-frontend"><a href="#The-frontend" class="headerlink" title="The frontend"></a>The frontend</h1><p>A good starting point of the frontend is the <code>get_jit_ast</code> we just saw. This function is defined at <code>torch/jit/frontend.py</code>. The code is:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">get_jit_ast</span>(<span class="params">fn, is_method</span>):</span></span><br><span class="line">    source = dedent(inspect.getsource(fn))</span><br><span class="line">    py_ast = ast.parse(source)</span><br><span class="line">    <span class="keyword">if</span> <span class="built_in">len</span>(py_ast.body) != <span class="number">1</span> <span class="keyword">or</span> <span class="keyword">not</span> <span class="built_in">isinstance</span>(py_ast.body[<span class="number">0</span>], ast.FunctionDef):</span><br><span class="line">        <span class="keyword">raise</span> RuntimeError(<span class="string">&quot;expected a single top-level function&quot;</span>)</span><br><span class="line">    type_line = torch.jit.annotations.get_type_line(source)</span><br><span class="line">    ctx = SourceContext(source, _uses_true_division(fn))</span><br><span class="line">    <span class="keyword">return</span> build_def(ctx, py_ast.body[<span class="number">0</span>], type_line, is_method)</span><br></pre></td></tr></table></figure>

<p>The first 4 lines of the function body just use the standard tools provided by Python, <code>dedent</code>, <code>inspect</code>, and <code>ast</code>, to construct the Python AST, and do some checks to make sure the thing being compiled is “a single top-level function”.</p>
<p>The following line <code>type_line = torch.jit.annotations.get_type_line(source)</code> is interesting. After looking at <code>torch/jit/annotations.py</code>, we can see that PyTorch’s JIT allows the user to specify the type of arguments and return value by writing something like <code># type: (Tensor, torch.Tensor) -&gt; Tuple[Tensor, Tensor]</code>.</p>
<p>In the next line <code>ctx = SourceContext(source, _uses_true_division(fn))</code>, the <code>_uses_true_division</code> is defined in the same file to handle the different behavior of <code>/</code> in Python2 with or without <code>from __future__ import division</code> (see <a target="_blank" rel="noopener" href="https://www.python.org/dev/peps/pep-0238/">PEP 238</a> for the difference). The <code>SourceContext</code> is also defined in the same file. It is a subclass of <code>SourceRangeFactory</code> with additional field to store if the division is true division. The <code>SourceRangeFactory</code> is imported by <code>from torch._C._jit_tree_views import *</code>. After reading its definition at <code>torch/csrc/jit/script/python_tree_views.cpp</code>, we can see that this is basically a class designed to store the range of source code, e.g. where in the source code a token is located.</p>
<p>The core is the <code>build_def</code> in the last line, so we move on:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">build_def</span>(<span class="params">ctx, py_def, type_line, is_method</span>):</span></span><br><span class="line">    returns = []</span><br><span class="line">    ret_body = []</span><br><span class="line">    body = py_def.body</span><br><span class="line">    r = ctx.make_range(py_def.lineno, py_def.col_offset,</span><br><span class="line">                       py_def.col_offset + <span class="built_in">len</span>(<span class="string">&quot;def&quot;</span>))</span><br><span class="line">    param_list = build_param_list(ctx, py_def.args)</span><br><span class="line">    return_type = <span class="literal">None</span></span><br><span class="line">    <span class="keyword">if</span> <span class="built_in">getattr</span>(py_def, <span class="string">&#x27;returns&#x27;</span>, <span class="literal">None</span>) <span class="keyword">is</span> <span class="keyword">not</span> <span class="literal">None</span>:</span><br><span class="line">        return_type = build_expr(ctx, py_def.returns)</span><br><span class="line">    decl = Decl(r, param_list, return_type)</span><br><span class="line">    <span class="keyword">if</span> type_line <span class="keyword">is</span> <span class="keyword">not</span> <span 
class="literal">None</span>:</span><br><span class="line">        type_comment_decl = torch._C.parse_type_comment(type_line)</span><br><span class="line">        decl = torch._C.merge_type_from_type_comment(decl, type_comment_decl, is_method)</span><br><span class="line">    <span class="keyword">return</span> Def(Ident(r, py_def.name),</span><br><span class="line">               decl,</span><br><span class="line">               build_stmts(ctx, body))</span><br></pre></td></tr></table></figure>

<p>Reading through this, we can see that what basically this does is to convert the Python’s AST into the internal representation. Names like <code>Decl</code>, <code>Def</code>, <code>Ident</code> are all imported by <code>from torch._C._jit_tree_views import *</code>. In the last line, we can see that the function body is constructed by <code>build_stmts</code>, so let’s go further to read <code>build_stmts</code>:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">build_stmts</span>(<span class="params">ctx, stmts</span>):</span></span><br><span class="line">    stmts = [build_stmt(ctx, s) <span class="keyword">for</span> s <span class="keyword">in</span> stmts]</span><br><span class="line">    <span class="keyword">return</span> <span class="built_in">list</span>(<span class="built_in">filter</span>(<span class="literal">None</span>, stmts))</span><br></pre></td></tr></table></figure>

<p>This is a very simple function: call <code>build_stmt</code> for each item and filter out those not needed. But what is <code>build_stmt</code>? It is defined as: <code>build_stmt = StmtBuilder()</code>. The definition of <code>StmtBuilder</code> looks like:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br></pre></td><td class="code"><pre><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">StmtBuilder</span>(<span class="params">Builder</span>):</span></span><br><span class="line">    <span class="comment"># ...</span></span><br><span class="line"><span class="meta">    @staticmethod</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">build_Expr</span>(<span class="params">ctx, stmt</span>):</span></span><br><span class="line">        value = stmt.value</span><br><span class="line">        <span class="keyword">if</span> value.__class__.__name__ == <span class="string">&#x27;Str&#x27;</span>:</span><br><span class="line">            <span class="comment"># If a statement is a string literal expression,</span></span><br><span class="line">            <span class="comment"># then it is a docstring. 
Just ignore it.</span></span><br><span class="line">            <span class="keyword">return</span> <span class="literal">None</span></span><br><span class="line">        <span class="keyword">else</span>:</span><br><span class="line">            <span class="keyword">return</span> ExprStmt([build_expr(ctx, value)])</span><br><span class="line">    <span class="comment"># ...</span></span><br><span class="line"><span class="meta">    @staticmethod</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">get_assign_lhs_expr</span>(<span class="params">ctx, expr</span>):</span></span><br><span class="line">        <span class="comment"># ...</span></span><br><span class="line">    <span class="comment"># ...</span></span><br><span class="line"><span class="meta">    @staticmethod</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">build_Assign</span>(<span class="params">ctx, stmt</span>):</span></span><br><span class="line">        <span class="comment">#...</span></span><br><span class="line">    <span class="comment"># ......</span></span><br></pre></td></tr></table></figure>

<p>We can see that this is a class with many static methods that define what to do for different types of Python AST nodes. I will not go deep into how each type is handled, since at this point the readers should be able to catch all the details of how each type of node in the Python AST is dealt with by themselves. So we will stop our frontend reading right here.</p>
<h1 id="ScriptModule-and-ScriptMethod"><a href="#ScriptModule-and-ScriptMethod" class="headerlink" title="ScriptModule and ScriptMethod"></a>ScriptModule and ScriptMethod</h1><p>To find where <code>ScriptModule</code> in C++ is defined, run <code>grep &#39;ScriptModule&#39; -r torch/csrc/</code> and you will locate it at <code>torch/csrc/jit/script/init.cpp</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// torch.jit.ScriptModule is a subclass of this C++ object.</span></span><br><span class="line"><span class="comment">// Methods here are prefixed with _ since they should not be</span></span><br><span class="line"><span class="comment">// public.</span></span><br><span class="line">py::class_&lt;Module, std::shared_ptr&lt;Module&gt;&gt;(m, <span class="string">&quot;ScriptModule&quot;</span>)</span><br><span class="line">    .<span class="built_in">def</span>(py::init&lt;&gt;())</span><br><span class="line">    .<span class="built_in">def</span>(<span class="string">&quot;save&quot;</span>, &amp;Module::save)</span><br><span class="line">    
.<span class="built_in">def</span>(<span class="string">&quot;_set_optimized&quot;</span>, &amp;Module::set_optimized)</span><br><span class="line">    .<span class="built_in">def</span>(</span><br><span class="line">        <span class="string">&quot;_define&quot;</span>,</span><br><span class="line">        [](std::shared_ptr&lt;Module&gt; m,</span><br><span class="line">            <span class="keyword">const</span> std::string&amp; script,</span><br><span class="line">            ResolutionCallback rcb, <span class="keyword">bool</span> has_self) &#123;</span><br><span class="line">          <span class="keyword">auto</span> self = has_self ? std::make_shared&lt;ModuleValue&gt;(m) : <span class="literal">nullptr</span>;</span><br><span class="line">          <span class="keyword">return</span> <span class="built_in">defineMethodsInModule</span>(*m, script, <span class="built_in">pythonResolver</span>(rcb), self);</span><br><span class="line">        &#125;)</span><br><span class="line">    .<span class="built_in">def</span>(<span class="string">&quot;_create_methods&quot;</span>, [](std::shared_ptr&lt;Module&gt; m, <span class="keyword">const</span> std::vector&lt;Def&gt;&amp; defs, <span class="keyword">const</span> std::vector&lt;ResolutionCallback&gt;&amp; rcbs) &#123;</span><br><span class="line">      std::vector&lt;Resolver&gt; resolvers;</span><br><span class="line">      <span class="keyword">for</span>(<span class="keyword">auto</span> &amp; callback : rcbs) &#123;</span><br><span class="line">        resolvers.<span class="built_in">push_back</span>(<span class="built_in">pythonResolver</span>(callback));</span><br><span class="line">      &#125;</span><br><span class="line">      <span class="built_in">defineMethodsInModule</span>(</span><br><span class="line">        *m,</span><br><span class="line">        defs,</span><br><span class="line">        resolvers,</span><br><span class="line">        
std::make_shared&lt;ModuleValue&gt;(m));</span><br><span class="line">    &#125;)</span><br><span class="line">    .<span class="built_in">def</span>(<span class="string">&quot;_get_method&quot;</span>,</span><br><span class="line">    [](Module&amp; self, <span class="keyword">const</span> std::string&amp; name) -&gt; <span class="keyword">const</span> Method&amp; &#123;</span><br><span class="line">      <span class="keyword">return</span> self.<span class="built_in">get_method</span>(name);</span><br><span class="line">    &#125;, py::return_value_policy::reference_internal)</span><br><span class="line">    <span class="comment">//.def more ...</span></span><br><span class="line"></span><br><span class="line">py::class_&lt;Method&gt;(m, <span class="string">&quot;ScriptMethod&quot;</span>, py::<span class="built_in">dynamic_attr</span>())</span><br><span class="line">    .<span class="built_in">def</span>(<span class="string">&quot;graph&quot;</span>, [&amp;](Method&amp; self) &#123;</span><br><span class="line">      <span class="keyword">return</span> self.<span class="built_in">graph</span>();</span><br><span class="line">    &#125;)</span><br><span class="line">    .<span class="built_in">def</span>(<span class="string">&quot;__call__&quot;</span>, invokeScriptMethodFromPython)</span><br><span class="line">    <span class="comment">//.def more ...</span></span><br></pre></td></tr></table></figure>

<p>We can see that <code>ScriptModule</code> is basically a binding for the C++ class <code>Module</code>. By skimming through the list of methods defined here, we can see that it has methods for adding, getting, and checking the existence of methods, parameters, submodules, buffers, etc. The class for methods is <code>Method</code>, which binds to Python as <code>ScriptMethod</code>. Methods in modules are created by <code>defineMethodsInModule</code> and invoked by <code>invokeScriptMethodFromPython</code>. <code>defineMethodsInModule</code> is a bit complicated, and we will postpone its reading to the backend compiler part of this article. But <code>invokeScriptMethodFromPython</code> is very simple. Searching with <code>grep</code>, we can easily find its definition in <code>torch/csrc/jit/pybind_utils.h</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">inline</span> py::object <span class="title">invokeScriptMethodFromPython</span><span class="params">(</span></span></span><br><span class="line"><span class="function"><span class="params">    script::Method&amp; method,</span></span></span><br><span class="line"><span class="function"><span class="params">    py::args args, py::kwargs kwargs)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">auto</span> stack = <span class="built_in">createStackForSchema</span>(method.<span class="built_in">getSchema</span>(), std::<span class="built_in">move</span>(args), std::<span class="built_in">move</span>(kwargs));</span><br><span class="line">  &#123;</span><br><span class="line">    AutoNoGIL no_gil_guard;</span><br><span class="line">    method.<span class="built_in">run</span>(stack);</span><br><span class="line">  &#125;</span><br><span class="line">  <span class="keyword">return</span> <span class="built_in">createPyObjectForStack</span>(std::<span class="built_in">move</span>(stack));</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We can easily tell that it just creates a stack from the input parameters, invokes <code>Method::run</code> to consume elements on the stack as input and leave the output of the graph on the stack, and finally converts the elements on the stack into Python objects.</p>
<p>Now let’s move on to <code>Module</code> and <code>Method</code>. It’s easy to guess from the name that these classes are defined at <code>torch/csrc/jit/script/module.&#123;h,cpp&#125;</code>. Reading through these two files, we can see that <code>Module</code> is just a container of things: it just uses ordered dictionaries to store methods, parameters and submodules, and provides methods to access or run them.</p>
<p>What <code>Method</code> does is more interesting. One important thing that the designer of <code>Method</code> must worry about is that, since methods have access to not only their arguments, but also other class members of the same object, there must be a mechanism for this kind of access. We will see how this is handled very soon. From its constructor, we can see that a method can be created either from the graph and initial class members directly, or from a method creator. The method creator is invoked lazily, i.e. it is not invoked inside the constructor, but waits until someone calls <code>ensure_defined</code>. The following member functions of <code>Method</code> define how an object of <code>Method</code> is run:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">run</span><span class="params">(Stack &amp; stack)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">for</span>(at::Tensor* tp : member_inputs) &#123;</span><br><span class="line">    stack.<span class="built_in">push_back</span>(*tp);</span><br><span class="line">  &#125;</span><br><span class="line">  <span class="built_in">get_executor</span>().<span class="built_in">run</span>(stack);</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line"><span class="function">IValue <span class="title">operator</span><span class="params">()</span><span class="params">(std::vector&lt;IValue&gt; stack)</span> </span>&#123;</span><br><span class="line">  <span class="built_in">checkInputsAgainstSchema</span>(stack);</span><br><span class="line">  <span class="built_in">run</span>(stack);</span><br><span class="line">  <span class="keyword">if</span> (stack.<span class="built_in">size</span>() != <span class="number">1</span>) &#123;</span><br><span class="line">    <span class="keyword">return</span> Tuple::<span class="built_in">create</span>(std::<span class="built_in">move</span>(stack));</span><br><span class="line">  &#125;</span><br><span class="line">  <span class="keyword">return</span> stack.<span class="built_in">front</span>();</span><br><span 
class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>By looking at the types of names appearing in the above code, we can see that: graph is an object of <code>Graph</code>, and the virtual machine that executes the graph is an object of <code>GraphExecutor</code>. <code>GraphExecutor</code> operates on the data type <code>IValue</code>, and its stack is a <code>vector</code> of that data type. To run a method, one needs to first push the arguments onto the stack, and invoke <code>Method::run</code>, which will further push other member inputs onto the stack, and invoke <code>GraphExecutor::run</code> to run the graph. The graph executor will leave its output on the stack.</p>
<p>At this point, we still don’t know how things like <code>Graph</code> and <code>GraphExecutor</code> work, but before looking deep into that, let’s pause a little bit to take a look at the backend compiler.</p>
<h1 id="From-Python-AST-to-PyTorch-IR-part-1"><a href="#From-Python-AST-to-PyTorch-IR-part-1" class="headerlink" title="From Python AST to PyTorch IR: part 1"></a>From Python AST to PyTorch IR: part 1</h1><p>Now let’s move on to read <code>_jit_script_compile</code>. To find where it is located, simply run the command <code>grep _jit_script_compile -r .</code>. We will find something like:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">./torch/csrc/jit/script/init.cpp:  m.<span class="built_in">def</span>(<span class="string">&quot;_jit_script_compile&quot;</span>, [](<span class="keyword">const</span> Def &amp;def, ResolutionCallback rcb) &#123;</span><br></pre></td></tr></table></figure>
<p>So, <code>torch/csrc/jit/script/init.cpp</code> would be a good starting point. The complete definition of <code>_jit_script_compile</code> is:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">m.<span class="built_in">def</span>(<span class="string">&quot;_jit_script_compile&quot;</span>, [](<span class="keyword">const</span> Def &amp;def, ResolutionCallback rcb) &#123;</span><br><span class="line">  <span class="keyword">return</span> <span class="built_in">compileFunction</span>(def, <span class="built_in">PythonResolver</span>(rcb));</span><br><span class="line">&#125;);</span><br></pre></td></tr></table></figure>

<p>So, let’s move on to <code>compileFunction</code>. Using grep to search, we would find its definition in <code>torch/csrc/jit/script/compiler.cpp</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line"><span class="function">std::shared_ptr&lt;Graph&gt; <span class="title">compileFunction</span><span class="params">(Def def, <span class="keyword">const</span> Resolver&amp; resolver)</span> </span>&#123;</span><br><span class="line">  Module m;</span><br><span class="line">  <span class="built_in">defineMethodsInModule</span>(m, &#123;def&#125;, &#123;resolver&#125;, <span class="literal">nullptr</span>);</span><br><span class="line">  <span class="keyword">return</span> m.<span class="built_in">get_method</span>(def.<span class="built_in">name</span>().<span class="built_in">name</span>()).<span class="built_in">graph</span>();</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We see the <code>defineMethodsInModule</code> that we saw before on the definition of Python bindings for <code>Module</code>. Move on to <code>defineMethodsInModule</code>, on the same file:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">defineMethodsInModule</span><span class="params">(Module &amp; m, <span class="keyword">const</span> std::vector&lt;Def&gt;&amp; definitions, <span class="keyword">const</span> std::vector&lt;Resolver&gt;&amp; resolvers, SugaredValuePtr self)</span> </span>&#123;</span><br><span class="line">  <span class="comment">// ......</span></span><br><span class="line">  <span class="keyword">for</span>(Def def : definitions) &#123;</span><br><span class="line">    <span class="comment">// ......</span></span><br><span class="line">    <span class="keyword">auto</span> creator = [def, &amp;table, resolver, self](Method&amp; method) &#123;</span><br><span class="line">      <span class="built_in">to_ir</span>(def, table, resolver, self,  method);</span><br><span class="line">    &#125;;</span><br><span class="line">    Method&amp; method = m.<span class="built_in">create_method</span>(name, creator);</span><br><span class="line">    <span class="comment">// ......</span></span><br><span class="line">  &#125;</span><br><span class="line">  <span class="comment">// ......</span></span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>Less important parts of the code are omitted. From the above, we can find that the core of compiling an AST into a compute graph is done in <code>to_ir</code>. Skimming through <code>to_ir</code>, we find that it is a struct of ~1000 lines of code, with member functions that handle different cases of Python AST. Without knowing PyTorch’s IR, it’s not easy to understand what <code>to_ir</code> does. So let’s pause a little bit to take a look at the PyTorch IR and come back later.</p>
<h1 id="The-PyTorch-IR"><a href="#The-PyTorch-IR" class="headerlink" title="The PyTorch IR"></a>The PyTorch IR</h1><p>A good starting point is the class <code>Graph</code>, located at <code>torch/csrc/jit/ir.h</code>. Skimming through this file, as well as <code>to_ir</code>, we keep seeing things like <code>aten::mul</code>, <code>prim::Constant</code>. What are they? They seem to be very relevant; actually, they seem to be <strong>the nodes</strong> in the graph. By doing some <code>grep</code> search, we find good documentation of them at <code>torch/csrc/jit/interned_strings.h</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// &#x27;prim&#x27; symbols are synthetic operators that occur only in the IR</span></span><br><span class="line"><span class="comment">// and don&#x27;t have corresponding implementations in ATen.</span></span><br><span class="line"></span><br><span class="line"><span class="comment">// &#x27;onnx&#x27; symbols correspond to ONNX operators.  
Their semantics</span></span><br><span class="line"><span class="comment">// are defined in https://github.com/onnx/onnx/blob/master/docs/Operators.md</span></span><br><span class="line"><span class="comment">// The particular version we are targeting is specified by &#x27;_onnx_opset_version&#x27;</span></span><br><span class="line"><span class="comment">// in torch.onnx.symbolic</span></span><br><span class="line"><span class="comment">//</span></span><br><span class="line"><span class="comment">// In general, most ONNX operators won&#x27;t get an entry here, because they</span></span><br><span class="line"><span class="comment">// are handled from the Python end.  However, you may occasionally need</span></span><br><span class="line"><span class="comment">// to intern an ONNX symbol here so that you can conveniently write an</span></span><br><span class="line"><span class="comment">// optimization on ONNX operations.</span></span><br><span class="line"></span><br><span class="line"><span class="comment">// &#x27;attr&#x27; symbols are attribute keys.  They are shared between both ONNX and ATen</span></span><br><span class="line"><span class="comment">// operators (you disambiguate their meaning by looking at the operator itself).</span></span><br><span class="line"><span class="comment">// In general, you only need to define attribute keys that are used by</span></span><br><span class="line"><span class="comment">// onnx or prim; ATen attributes are automatically generated in FORALL_ATTR_BASE_SYMBOLS.</span></span><br><span class="line"></span><br><span class="line"><span class="comment">// Note [Symbol allocation]</span></span><br><span class="line"><span class="comment">// ~~~~~~~~~~~~~~~~~~~~~~~~</span></span><br><span class="line"><span class="comment">//</span></span><br><span class="line"><span class="comment">//  1. 
Symbol namespace is split up into namespaces.</span></span><br><span class="line"><span class="comment">//</span></span><br><span class="line"><span class="comment">//  2. The intended access pattern for built-in symbols is onnx::MatMul</span></span><br><span class="line"><span class="comment">//  in the torch::jit namespace (this is a Symbol).</span></span><br><span class="line"><span class="comment">//</span></span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="comment">// Built-in constant definition strategy:</span></span><br><span class="line"><span class="comment">// - Enum is the most convenient way to generate a contiguous sequence</span></span><br><span class="line"><span class="comment">//   of numbers for an identifier.</span></span><br><span class="line"><span class="comment">// - However, an enum gives you a fresh type.  We want onnx::MatMul to</span></span><br><span class="line"><span class="comment">//   be type Symbol, not some random enum type!</span></span><br><span class="line"><span class="comment">// - Therefore, after using enums to generate the sequence of integers,</span></span><br><span class="line"><span class="comment">//   we then declare constexpr Symbols to get everything the actual Symbol</span></span><br><span class="line"><span class="comment">//   type we want.  
Symbols must be constexpr to be valid to be &quot;case&quot;ed on.</span></span><br><span class="line"></span><br><span class="line"><span class="keyword">using</span> <span class="keyword">unique_t</span> = <span class="keyword">uint32_t</span>;</span><br><span class="line"></span><br><span class="line"><span class="keyword">static</span> <span class="keyword">const</span> std::string domain_prefix = <span class="string">&quot;org.PyTorch.&quot;</span>;</span><br><span class="line"></span><br><span class="line"><span class="comment">// A Symbol is like an interned string, but with a little extra</span></span><br><span class="line"><span class="comment">// structure; it is namespaced via SymbolNamespace and the resulting</span></span><br><span class="line"><span class="comment">// intern pointers support efficient namespace testing.</span></span><br><span class="line"><span class="class"><span class="keyword">struct</span> <span class="title">TORCH_API</span> <span class="title">Symbol</span> &#123;</span></span><br><span class="line"><span class="comment">// more code omitted ......</span></span><br></pre></td></tr></table></figure>

<p>This very well explains what those things are: they are instances of <code>Symbol</code> to represent operators. Knowing this level of detail about these things is enough for us, so let’s go back to IR.</p>
<p>The beginning of file <code>torch/csrc/jit/ir.h</code> very well explains what things are:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// Graph represents one &quot;function&quot; of computation.</span></span><br><span class="line"><span class="comment">// It uses a simple ownership model where the graph owns all the nodes inside it.</span></span><br><span class="line"><span class="comment">// All references inside the graph are raw pointers.</span></span><br><span class="line"><span class="comment">// Destroying the Graph will invalidate any pointers to nodes in the graph.</span></span><br><span class="line"><span class="class"><span class="keyword">struct</span> <span class="title">Graph</span>;</span></span><br><span 
class="line"></span><br><span class="line"><span class="comment">// Node is the base class of the IR graph. It represents one computation</span></span><br><span class="line"><span class="comment">// and dependencies on a list of Values. The &quot;prim-ops&quot;, so to speak.</span></span><br><span class="line"><span class="class"><span class="keyword">struct</span> <span class="title">Node</span>;</span></span><br><span class="line"></span><br><span class="line"><span class="comment">// A Value represents an input or output to node that is either a</span></span><br><span class="line"><span class="comment">// Tensor or an opaque Handle object, as determined by type().</span></span><br><span class="line"><span class="class"><span class="keyword">struct</span> <span class="title">Value</span>;</span></span><br><span class="line"></span><br><span class="line"><span class="comment">// ......</span></span><br><span class="line"></span><br><span class="line"><span class="comment">// A list of nodes, with inputs and outputs</span></span><br><span class="line"><span class="class"><span class="keyword">struct</span> <span class="title">Block</span>;</span></span><br><span class="line"></span><br><span class="line"><span class="comment">// Each use is represented by this type, see Node::uses()</span></span><br><span class="line"><span class="comment">// &#x27;user&#x27; is the consumer of the value, offset is the index into</span></span><br><span class="line"><span class="comment">// &#x27;user&#x27;s input this where the produces will be found.</span></span><br><span class="line"><span class="class"><span class="keyword">struct</span> <span class="title">Use</span> &#123;</span></span><br><span class="line">  <span class="built_in">Use</span>(Node * user, <span class="keyword">size_t</span> offset)</span><br><span class="line">  : <span class="built_in">user</span>(user), <span class="built_in">offset</span>(offset) &#123;&#125;</span><br><span class="line">  Node * 
user;</span><br><span class="line">  <span class="keyword">size_t</span> offset;</span><br><span class="line">&#125;;</span><br><span class="line"></span><br><span class="line"><span class="comment">// ......</span></span><br><span class="line"></span><br><span class="line"><span class="comment">// Scope is a node of a trie that represents the tree of nested scopes.</span></span><br><span class="line"><span class="comment">// Individual scopes are pushed and popped from Graph, which holds a</span></span><br><span class="line"><span class="comment">// pointer to the current scope. Each Node in Graph holds a pointer</span></span><br><span class="line"><span class="comment">// to the scope that was current when the node was created.</span></span><br><span class="line"><span class="comment">// The trie never needs to shrink, it only grows until it is disposed</span></span><br><span class="line"><span class="comment">// of when Graph is deallocated. Hence, pointers to scopes held by nodes</span></span><br><span class="line"><span class="comment">// will always be valid as long as Graph is alive.</span></span><br><span class="line"><span class="class"><span class="keyword">struct</span> <span class="title">Scope</span> &#123;</span></span><br></pre></td></tr></table></figure>

<p>Reading through the whole file, we can summarize how it works:</p>
<p>A <code>Graph</code> object owns all <code>Node</code>s, <code>Value</code>s, and <code>Block</code>s. The internal structure is not maintained by the <code>Graph</code> object, but inside <code>Node</code>s, <code>Value</code>s, and <code>Block</code>s.</p>
<p>Each <code>Node</code> keeps pointers to its input and output <code>Value</code>s. It also maintains pointers to siblings in a doubly-linked list of <code>Node</code>s. This doubly-linked list is a topological sort of the <code>Node</code>s in the <code>Graph</code>. Each <code>Node</code> has a <code>NodeKind</code> as an object of <code>Symbol</code>. <code>Node</code>s also maintain a pointer to the <code>Block</code> owning this <code>Node</code>, as well as pointers to subblocks.</p>
<p>Each <code>Value</code> must be an output of some <code>Node</code>, and it has a <code>Node</code> pointer pointing to the <code>Node</code> that outputs this <code>Value</code>. It also has a <code>Use</code> list storing where this <code>Value</code> is used as input.</p>
<p>Each <code>Block</code> maintains pointers to its input and output <code>Node</code>s, as well as the <code>Node</code> owning this <code>Block</code>.</p>
<h1 id="From-Python-AST-to-PyTorch-IR-part-2"><a href="#From-Python-AST-to-PyTorch-IR-part-2" class="headerlink" title="From Python AST to PyTorch IR: part 2"></a>From Python AST to PyTorch IR: part 2</h1><p>With the knowledge of IR, let’s go back to read the backend compiler.</p>
<p>In the code in <code>torch/csrc/jit/script/compiler.cpp</code>, we have been seeing <code>SugaredValue</code> many times. What <code>SugaredValue</code> does is explained in <code>torch/csrc/jit/script/compiler.h</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// The AST can contain nodes like `self`, `self.b` or `Python_fn` that</span></span><br><span class="line"><span class="comment">// are not first-class values in the graph representation, but instead</span></span><br><span class="line"><span class="comment">// will be desugared based on how they are used in the AST.</span></span><br><span class="line"></span><br><span class="line"><span class="comment">// SugaredValue is used to temporarily represent these values in a way</span></span><br><span class="line"><span class="comment">// that separates their behavior from the AST -&gt; IR converter itself.</span></span><br><span class="line"><span class="comment">// This allows us to keep dependencies on Python minimal.</span></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">struct</span> <span class="title">SugaredValue</span> :</span> <span class="keyword">public</span> std::enable_shared_from_this&lt;SugaredValue&gt; &#123;</span><br></pre></td></tr></table></figure>

<p>From the comments above, together with what we see when skimming through the code, we can see that <code>SugaredValue</code> is a superclass of different types of values. These values might be first-class values like tensors or integers, or a <code>ScriptModule</code> such as <code>self</code>, or Python modules like <code>torch</code>, or some builtin functions like <code>print</code>. Different types of values are handled by different subclasses: <code>SimpleValue</code> for first-class values, <code>BuiltinFunction</code> for operators like <code>aten::relu</code>, <code>BuiltinModule</code> for something like <code>torch</code>, <code>NoneValue</code> for <code>None</code>, <code>PrintValue</code> for <code>print</code>, <code>CastValue</code> for types like <code>int</code>, <code>float</code>, etc. The subclasses listed above are all defined in <code>torch/csrc/jit/script/compiler.&#123;cpp, h&#125;</code>.</p>
<p>Now let’s move on to read the constructor of the struct <code>to_ir</code>. It basically:</p>
<ol>
<li>Read the information of parameters from the Python AST, and set them up in graph.</li>
<li>Call <code>emitStatements</code> to emit IR for function body.</li>
<li>Set up output values for the graph based on the return statement at the end of the function body (compiling functions that have a return statement somewhere other than at the end is not supported).</li>
</ol>
<p>In step 1, there is a little bit of trouble: for functions that are methods of some module, the first parameter is always the reference to the object owning this method (aka the so-called “self”). So it requires a little bit of special casing when checking against the schema. Also, we need to add the identifier for the first parameter to the symbol table (here the symbol table is <code>Environment::value_table</code>, an object of <code>ValueTable</code>). The inputs to the graph are not only those that appear explicitly in the argument list, but also those members accessed inside the function body. Recall that when we read the code of <code>Method::run</code>, there is a step that pushes members onto the stack. This issue is not handled here, and we will see how it is handled later.</p>
<p>In step 2, things start to get complicated. In <code>emitStatements</code>, code emission is dispatched to different specialized private methods of the struct according to the statement type:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">emitStatements</span><span class="params">(List&lt;Stmt&gt;::const_iterator begin, List&lt;Stmt&gt;::const_iterator end)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">for</span> (; begin != end; ++begin) &#123;</span><br><span class="line">    <span class="keyword">auto</span> stmt = *begin;</span><br><span class="line">    <span class="built_in"><span class="keyword">switch</span></span> (stmt.<span class="built_in">kind</span>()) &#123;</span><br><span class="line">      <span class="keyword">case</span> TK_IF:</span><br><span class="line">        <span class="built_in">emitIf</span>(<span 
class="built_in">If</span>(stmt));</span><br><span class="line">        <span class="keyword">break</span>;</span><br><span class="line">      <span class="keyword">case</span> TK_WHILE:</span><br><span class="line">        <span class="built_in">emitWhile</span>(<span class="built_in">While</span>(stmt));</span><br><span class="line">        <span class="keyword">break</span>;</span><br><span class="line">      <span class="keyword">case</span> TK_FOR:</span><br><span class="line">        <span class="built_in">emitFor</span>(<span class="built_in">For</span>(stmt));</span><br><span class="line">        <span class="keyword">break</span>;</span><br><span class="line">      <span class="keyword">case</span> TK_ASSIGN:</span><br><span class="line">        <span class="built_in">emitAssignment</span>(<span class="built_in">Assign</span>(stmt));</span><br><span class="line">        <span class="keyword">break</span>;</span><br><span class="line">      <span class="keyword">case</span> TK_GLOBAL:</span><br><span class="line">        <span class="keyword">for</span> (<span class="keyword">auto</span> ident : <span class="built_in">Global</span>(stmt).<span class="built_in">names</span>()) &#123;</span><br><span class="line">          <span class="keyword">const</span> <span class="keyword">auto</span>&amp; name = <span class="built_in">Ident</span>(ident).<span class="built_in">name</span>();</span><br><span class="line">          environment_stack-&gt;<span class="built_in">setVar</span>(ident.<span class="built_in">range</span>(), name, graph-&gt;<span class="built_in">addInput</span>(name));</span><br><span class="line">        &#125;</span><br><span class="line">        <span class="keyword">break</span>;</span><br><span class="line">      <span class="keyword">case</span> TK_EXPR_STMT: &#123;</span><br><span class="line">        <span class="keyword">auto</span> exprs = <span class="built_in">ExprStmt</span>(stmt).<span 
class="built_in">exprs</span>();</span><br><span class="line">        <span class="keyword">for</span> (<span class="keyword">const</span> <span class="keyword">auto</span>&amp; expr : exprs) &#123;</span><br><span class="line">          <span class="built_in">emitSugaredExpr</span>(expr, <span class="number">0</span>);</span><br><span class="line">        &#125;</span><br><span class="line">      &#125;</span><br><span class="line">      <span class="keyword">break</span>;</span><br><span class="line">      <span class="keyword">case</span> TK_RETURN:</span><br><span class="line">        <span class="keyword">throw</span> <span class="built_in">ErrorReport</span>(stmt) &lt;&lt; <span class="string">&quot;return statements can appear only at the end &quot;</span></span><br><span class="line">                                &lt;&lt; <span class="string">&quot;of the function body&quot;</span>;</span><br><span class="line">        <span class="keyword">break</span>;</span><br><span class="line">    &#125;</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>There are so many specialized emits, I will not go over these in detail one by one. I will only go deep into <code>emitSugaredExpr</code> as an example here. <code>emitSugaredExpr</code> is defined as follows:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// any expression that can produce a SugaredValue is handled here</span></span><br><span class="line"><span class="comment">// expressions that only return a single Value* are handled in emitSimpleExpr</span></span><br><span class="line"><span class="function">std::shared_ptr&lt;SugaredValue&gt; <span class="title">emitSugaredExpr</span><span class="params">(Expr tree, <span class="keyword">size_t</span> n_binders)</span> </span>&#123;</span><br><span class="line">  <span class="built_in"><span class="keyword">switch</span></span>(tree.<span class="built_in">kind</span>()) &#123;</span><br><span class="line">    <span class="keyword">case</span> TK_VAR:</span><br><span class="line">      <span class="keyword">return</span> environment_stack-&gt;<span class="built_in">getSugaredVar</span>(<span class="built_in">Var</span>(tree).<span class="built_in">name</span>());</span><br><span class="line">    <span class="keyword">case</span> <span class="string">&#x27;.&#x27;</span>: 
&#123;</span><br><span class="line">      <span class="keyword">auto</span> select = <span class="built_in">Select</span>(tree);</span><br><span class="line">      <span class="keyword">auto</span> sv = <span class="built_in">emitSugaredExpr</span>(select.<span class="built_in">value</span>(), <span class="number">1</span>);</span><br><span class="line">      <span class="keyword">return</span> sv-&gt;<span class="built_in">attr</span>(select.<span class="built_in">range</span>(), method, select.<span class="built_in">selector</span>().<span class="built_in">name</span>());</span><br><span class="line">    &#125;</span><br><span class="line">    <span class="keyword">case</span> TK_APPLY: &#123;</span><br><span class="line">      <span class="keyword">auto</span> apply = <span class="built_in">Apply</span>(tree);</span><br><span class="line">      <span class="keyword">auto</span> inputs = <span class="built_in">getNamedValues</span>(apply.<span class="built_in">inputs</span>(), <span class="literal">true</span>);</span><br><span class="line">      <span class="keyword">auto</span> attributes = <span class="built_in">fmap</span>(apply.<span class="built_in">attributes</span>(), [&amp;](<span class="keyword">const</span> Attribute&amp; attr) &#123;</span><br><span class="line">        <span class="keyword">return</span> <span class="built_in">NamedValue</span>(attr.<span class="built_in">range</span>(), attr.<span class="built_in">name</span>().<span class="built_in">name</span>(), <span class="built_in">emitExpr</span>(attr.<span class="built_in">value</span>()));</span><br><span class="line">      &#125;);</span><br><span class="line">      <span class="comment">// the apply is directly an identifier &#x27;foo&#x27;</span></span><br><span class="line">      <span class="keyword">if</span>(apply.<span class="built_in">callee</span>().<span class="built_in">kind</span>() == TK_VAR) &#123;</span><br><span class="line">        <span class="keyword">return</span> <span 
class="built_in">emitApplyIdent</span>(<span class="built_in">Var</span>(apply.<span class="built_in">callee</span>()).<span class="built_in">name</span>(), inputs, attributes, n_binders);</span><br><span class="line">      &#125;</span><br><span class="line">      <span class="keyword">return</span> <span class="built_in">emitApplyExpr</span>(apply.<span class="built_in">callee</span>(), inputs, attributes, n_binders);</span><br><span class="line">    &#125; <span class="keyword">break</span>;</span><br><span class="line">    <span class="keyword">default</span>:</span><br><span class="line">      <span class="keyword">return</span> std::make_shared&lt;SimpleValue&gt;(<span class="built_in">emitSimpleExpr</span>(tree));</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>What it does is basically: for cases that are guaranteed to produce a <code>SimpleValue</code>, we just call <code>emitSimpleExpr</code> to emit the code; otherwise the expression must be in one of the following three formats: <code>foo</code>, <code>foo.bar</code>, <code>foo(bar)</code>. For the <code>foo</code> case, we just look up <code>foo</code> in the symbol table; for the <code>foo.bar</code> case, we first emit <code>foo</code> and look up its attribute <code>bar</code>. For the <code>foo(bar)</code> case, depending on whether <code>foo</code> is an identifier or an expression, we invoke <code>emitApplyIdent</code> or <code>emitApplyExpr</code> correspondingly to emit the code.</p>
<p>The <code>self</code> argument of the method is handled a bit differently: there is a subclass of <code>SugaredValue</code> called <code>ModuleValue</code> defined in <code>torch/csrc/jit/script/init.cpp</code>; in its overridden method <code>attr</code>, we see:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">if</span>(NamedParameter* v = <span class="keyword">module</span>-&gt;<span class="built_in">find_parameter</span>(field)) &#123;</span><br><span class="line">  <span class="keyword">return</span> std::make_shared&lt;SimpleValue&gt;(m.<span class="built_in">get_or_add_parameter</span>(v-&gt;<span class="built_in">slot</span>()));</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>Where the <code>get_or_add_parameter</code> defined in <code>torch/csrc/jit/script/module.h</code> reads:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line"><span class="function">Value * <span class="title">get_or_add_parameter</span><span class="params">(at::Tensor* slot)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">auto</span> it = member_input_index.<span class="built_in">find</span>(slot);</span><br><span class="line">  <span class="keyword">if</span>(it != member_input_index.<span class="built_in">end</span>()) &#123;</span><br><span class="line">    <span class="keyword">return</span> <span class="built_in">graph</span>()-&gt;<span class="built_in">inputs</span>().<span class="built_in">at</span>(it-&gt;second);</span><br><span class="line">  &#125;</span><br><span class="line">  <span class="comment">// add it as a new parameter</span></span><br><span class="line">  member_inputs.<span class="built_in">push_back</span>(slot);</span><br><span class="line">  member_input_index[slot] = <span class="built_in">graph</span>()-&gt;<span class="built_in">inputs</span>().<span class="built_in">size</span>();</span><br><span class="line">  <span class="keyword">return</span> <span class="built_in">graph</span>()-&gt;<span class="built_in">addInput</span>();</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>That tells us: adding members as parameters of the graph actually happens during code emission of <code>self.bar</code>, when the <code>attr</code> method of <code>ModuleValue</code> is called.</p>
<h1 id="The-Graph-Executor"><a href="#The-Graph-Executor" class="headerlink" title="The Graph Executor"></a>The Graph Executor</h1><p>Now that we have seen how the compilation is done and what PyTorch JIT’s IR looks like, the thing left is how the IR is executed. From above we already know that the executor is obtained by invoking <code>Method::get_executor</code> and run by invoking <code>GraphExecutor::run</code>. Let’s first take a look at <code>Method::get_executor</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="function">GraphExecutor&amp; <span class="title">get_executor</span><span class="params">()</span> </span>&#123;</span><br><span class="line">  std::<span class="built_in">call_once</span>(executor_init, [&amp;]&#123;</span><br><span class="line">    executor = <span class="built_in">GraphExecutor</span>(<span class="built_in">graph</span>(), optimize);</span><br><span class="line">  &#125;);</span><br><span class="line">  <span class="keyword">return</span> executor;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We know that a graph executor is created from a graph, and does optimization if asked. It’s not hard to guess from the name that <code>GraphExecutor</code> is defined in <code>torch/csrc/jit/graph_executor.&#123;h, cpp&#125;</code>.</p>
<p>The constructor and <code>run</code> tells us that <code>GraphExecutor</code> is just a wrapper of <code>GraphExecutorImpl</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">GraphExecutor::<span class="built_in">GraphExecutor</span>(std::shared_ptr&lt;Graph&gt; graph, <span class="keyword">bool</span> optimize)</span><br><span class="line">: <span class="built_in">pImpl</span>(<span class="keyword">new</span> <span class="built_in">GraphExecutorImpl</span>(std::<span class="built_in">move</span>(graph), optimize)) &#123;&#125;</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">GraphExecutor::run</span><span class="params">(Stack &amp; inputs)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">return</span> pImpl-&gt;<span class="built_in">run</span>(inputs);</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>So let’s move on to <code>GraphExecutorImpl</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br></pre></td><td class="code"><pre><span class="line"><span class="built_in">GraphExecutorImpl</span>(std::shared_ptr&lt;Graph&gt; graph, <span class="keyword">bool</span> optimize)</span><br><span class="line">  : <span class="built_in">graph</span>(<span class="built_in">prepareGraph</span>(graph))</span><br><span class="line">  , <span class="built_in">optimize</span>(optimize)</span><br><span class="line">  , <span class="built_in">num_inputs</span>(<span class="keyword">this</span>-&gt;graph-&gt;<span class="built_in">inputs</span>().<span class="built_in">size</span>())</span><br><span class="line">  , <span class="built_in">num_flat_inputs</span>(<span class="built_in">countFlatInputs</span>(graph))</span><br><span class="line">  , <span class="built_in">num_outputs</span>(<span class="keyword">this</span>-&gt;graph-&gt;<span class="built_in">outputs</span>().<span class="built_in">size</span>()) &#123;&#125;</span><br><span class="line"></span><br><span class="line"><span class="comment">// entry point where execution begins</span></span><br><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">run</span><span class="params">(Stack &amp; stack)</span> </span>&#123;</span><br><span class="line">  <span class="built_in">AT_CHECK</span>(stack.<span class="built_in">size</span>() &gt;= num_inputs, 
<span class="string">&quot;expected &quot;</span>, num_inputs, <span class="string">&quot; inputs, but got only &quot;</span>, stack.<span class="built_in">size</span>());</span><br><span class="line"></span><br><span class="line">  <span class="keyword">if</span>(tracer::<span class="built_in">isTracing</span>()) &#123;</span><br><span class="line">    <span class="keyword">return</span> <span class="built_in">runTraced</span>(stack);</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  <span class="keyword">auto</span> &amp; execution_plan = optimize ? <span class="built_in">getOrCompile</span>(stack) : <span class="built_in">getOrCompileFallback</span>();</span><br><span class="line">  <span class="keyword">return</span> execution_plan.<span class="built_in">run</span>(stack);</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>We see that the graph is compiled the first time it runs to get an execution plan. The <code>run</code> method of the execution plan is called to run the graph. Compilation of the graph into an execution plan is done by <code>getOrCompile</code> or <code>getOrCompileFallback</code> depending on whether optimization is enabled. These two methods are copied below:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">const</span> ExecutionPlan &amp; <span class="title">getOrCompileFallback</span><span class="params">()</span> </span>&#123;</span><br><span class="line">  <span class="function">std::lock_guard&lt;std::mutex&gt; <span class="title">lock</span><span class="params">(compile_mutex)</span></span>;</span><br><span class="line">  <span class="keyword">if</span>(!fallback) &#123;</span><br><span class="line">    <span class="keyword">auto</span> graph_ = graph-&gt;<span class="built_in">copy</span>();</span><br><span class="line">    <span class="built_in">runRequiredPasses</span>(graph_);</span><br><span class="line">    fallback = <span class="built_in">ExecutionPlan</span>(graph_);</span><br><span class="line">  &#125;</span><br><span class="line">  <span class="keyword">return</span> fallback;</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">const</span> ExecutionPlan &amp; <span class="title">getOrCompile</span><span class="params">(<span class="keyword">const</span> 
Stack&amp; stack)</span> </span>&#123;</span><br><span class="line">  <span class="comment">// outside lock guard, to minimize the time holding the lock on the fast path</span></span><br><span class="line">  <span class="comment">// ArgumentSpec even computes its hashCode here.</span></span><br><span class="line">  <span class="function">ArgumentSpec <span class="title">spec</span><span class="params">(autograd::GradMode::is_enabled(), last(stack, num_inputs), num_flat_inputs)</span></span>;</span><br><span class="line">  &#123;</span><br><span class="line">    <span class="function">std::lock_guard&lt;std::mutex&gt; <span class="title">lock</span><span class="params">(compile_mutex)</span></span>;</span><br><span class="line">    <span class="keyword">auto</span> it = plan_cache.<span class="built_in">find</span>(spec);</span><br><span class="line">    <span class="keyword">if</span> (it != plan_cache.<span class="built_in">end</span>())</span><br><span class="line">      <span class="keyword">return</span> it-&gt;second;</span><br><span class="line">    <span class="keyword">auto</span> plan = <span class="built_in">compileSpec</span>(spec);</span><br><span class="line">    <span class="keyword">auto</span> r = plan_cache.<span class="built_in">emplace</span>(std::<span class="built_in">move</span>(spec), std::<span class="built_in">move</span>(plan));</span><br><span class="line">    <span class="keyword">return</span> r.first-&gt;second;</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>This code explains itself well: if optimization is turned off, then we only run the required passes and cache the result. Otherwise, depending on the characteristics of the inputs (<code>ArgumentSpec</code>), we run full optimization and cache the generated plan for each different <code>ArgumentSpec</code>. The plan is created by the constructor of <code>ExecutionPlan</code>.</p>
<p>It is worth taking a look at which passes are called:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span 
class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br><span class="line">68</span><br><span class="line">69</span><br></pre></td><td class="code"><pre><span class="line"><span class="function">ExecutionPlan <span class="title">compileSpec</span><span class="params">(<span class="keyword">const</span> ArgumentSpec &amp; spec)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">auto</span> opt_graph = graph-&gt;<span class="built_in">copy</span>();</span><br><span class="line">  <span class="built_in">setInputTypes</span>(*opt_graph, spec);</span><br><span class="line"></span><br><span class="line">  <span class="comment">// Phase 1. Specialize to input definedness (this is very important for</span></span><br><span class="line">  <span class="comment">//          gradient graphs), and run required passes to bring the graph</span></span><br><span class="line">  <span class="comment">//          to an executable form.</span></span><br><span class="line">  <span class="built_in">runRequiredPasses</span>(opt_graph);</span><br><span class="line"></span><br><span class="line">  <span class="comment">// Phase 2. 
Propagate detailed information about the spec through the</span></span><br><span class="line">  <span class="comment">//          graph (enabled more specializations in later passes).</span></span><br><span class="line">  <span class="comment">//          Shape propagation sometimes depends on certain arguments being</span></span><br><span class="line">  <span class="comment">//          constants, and constant propagation doesn&#x27;t need shape information</span></span><br><span class="line">  <span class="comment">//          anyway, so it&#x27;s better to run it first.</span></span><br><span class="line">  <span class="built_in">ConstantPropagation</span>(opt_graph);</span><br><span class="line">  <span class="built_in">PropagateInputShapes</span>(*opt_graph);</span><br><span class="line">  <span class="built_in">PropagateRequiresGrad</span>(opt_graph);</span><br><span class="line"></span><br><span class="line">  <span class="comment">// Phase 3. Run differentiable optimizations (i.e. simple graph rewrites that</span></span><br><span class="line">  <span class="comment">//          we can still execute using autograd).</span></span><br><span class="line">  <span class="built_in">runOptimization</span>(opt_graph, spec);</span><br><span class="line"></span><br><span class="line">  <span class="comment">// Phase 4. If this graph will be differentiated, we need to slice out the</span></span><br><span class="line">  <span class="comment">//          symbolically differentiable subgraphs for further optimizations.</span></span><br><span class="line">  <span class="comment">// Phase 5. 
Apply non-differentiable optimizations to the graphs we&#x27;ve found</span></span><br><span class="line">  <span class="comment">//          (or the whole grpah if we know we won&#x27;t need its derivative).</span></span><br><span class="line">  <span class="keyword">if</span> (<span class="built_in">needsGradient</span>(opt_graph)) &#123;</span><br><span class="line">    <span class="keyword">auto</span> diff_nodes = <span class="built_in">CreateAutodiffSubgraphs</span>(*opt_graph);</span><br><span class="line">    <span class="keyword">for</span> (Node * dnode : diff_nodes) &#123;</span><br><span class="line">      <span class="keyword">auto</span> diff_graph = std::<span class="built_in">move</span>(dnode-&gt;<span class="built_in">g</span>(attr::Subgraph));</span><br><span class="line">      Gradient gradient = <span class="built_in">differentiate</span>(diff_graph);</span><br><span class="line">      <span class="built_in">runNondiffOptimization</span>(gradient.f);</span><br><span class="line">      <span class="built_in">packGradient</span>(gradient, dnode);</span><br><span class="line">    &#125;</span><br><span class="line">    <span class="built_in">InlineAutodiffSubgraphs</span>(opt_graph);</span><br><span class="line">  &#125; <span class="keyword">else</span> &#123;</span><br><span class="line">    <span class="built_in">runNondiffOptimization</span>(opt_graph);</span><br><span class="line">  &#125;</span><br><span class="line">  <span class="comment">// Make sure there are no leftovers from any passes.</span></span><br><span class="line">  <span class="built_in">EliminateDeadCode</span>(opt_graph);</span><br><span class="line">  <span class="keyword">return</span> <span class="built_in">ExecutionPlan</span>(opt_graph);</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">runOptimization</span><span 
class="params">(std::shared_ptr&lt;Graph&gt;&amp; graph, <span class="keyword">const</span> ArgumentSpec&amp; spec)</span> </span>&#123;</span><br><span class="line">  <span class="built_in">EliminateDeadCode</span>(graph);</span><br><span class="line">  <span class="built_in">EliminateCommonSubexpression</span>(graph);</span><br><span class="line">  <span class="built_in">UnrollLoops</span>(graph);</span><br><span class="line">  <span class="built_in">PeepholeOptimize</span>(graph);</span><br><span class="line">  <span class="built_in">CheckInplace</span>(graph);</span><br><span class="line">  <span class="built_in">BatchMM</span>(graph);</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">runNondiffOptimization</span><span class="params">(std::shared_ptr&lt;Graph&gt;&amp; graph)</span> </span>&#123;</span><br><span class="line">  <span class="built_in">FuseGraph</span>(graph);</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line"><span class="comment">// ......</span></span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">runRequiredPasses</span><span class="params">(<span class="keyword">const</span> std::shared_ptr&lt;Graph&gt;&amp; g)</span>  </span>&#123;</span><br><span class="line">  <span class="built_in">specializeUndef</span>(*g);</span><br><span class="line">  <span class="built_in">LowerGradOf</span>(*g);</span><br><span class="line">  <span class="comment">// implicit inserted expand nodes are not necessarily always valid</span></span><br><span class="line">  <span class="comment">// when used inside script methods that might have unstable shapes</span></span><br><span class="line">  <span class="comment">// we remove the implicitly created ones, and have shape analysis</span></span><br><span class="line">  <span 
class="comment">// add valid expand nodes when the shapes are stable</span></span><br><span class="line">  <span class="built_in">RemoveExpands</span>(g);</span><br><span class="line">  <span class="built_in">CanonicalizeOps</span>(g);</span><br><span class="line">  <span class="built_in">EliminateDeadCode</span>(g);</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>I will not go deep into these passes here, interested readers can read them at <code>torch/csrc/jit/passes/</code>.</p>
<p>Now it’s time to look at <code>ExecutionPlan</code>:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br></pre></td><td class="code"><pre><span class="line"><span class="class"><span class="keyword">struct</span> <span class="title">ExecutionPlan</span> &#123;</span></span><br><span class="line">  <span class="built_in">ExecutionPlan</span>() = <span class="keyword">default</span>;</span><br><span class="line">  <span class="built_in">ExecutionPlan</span>(std::shared_ptr&lt;Graph&gt; graph)</span><br><span class="line">    : <span class="built_in">code</span>(graph)</span><br><span class="line">    , <span class="built_in">graph</span>(std::<span class="built_in">move</span>(graph)) &#123;&#125;</span><br><span class="line"></span><br><span class="line">  <span class="function"><span class="keyword">void</span> <span class="title">run</span><span class="params">(Stack&amp; stack)</span> <span class="keyword">const</span> </span>&#123;</span><br><span class="line">    <span class="keyword">return</span> <span class="built_in">InterpreterState</span>(code).<span class="built_in">runOneStage</span>(stack);</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  <span class="function"><span class="keyword">operator</span> 
<span class="title">bool</span><span class="params">()</span> <span class="keyword">const</span> </span>&#123;</span><br><span class="line">    <span class="keyword">return</span> <span class="keyword">static_cast</span>&lt;<span class="keyword">bool</span>&gt;(graph);</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  <span class="function">ExecutionPlanState <span class="title">getDebugState</span><span class="params">()</span> </span>&#123;</span><br><span class="line">    ExecutionPlanState state;</span><br><span class="line">    state.code = &amp;code;</span><br><span class="line">    state.graph = graph.<span class="built_in">get</span>();</span><br><span class="line">    <span class="keyword">return</span> state;</span><br><span class="line">  &#125;</span><br><span class="line"></span><br><span class="line">  Code code;</span><br><span class="line">  std::shared_ptr&lt;Graph&gt; graph;</span><br><span class="line">&#125;;</span><br></pre></td></tr></table></figure>

<p>It just converts the graph into an object of <code>Code</code>, and the actual running is done by <code>InterpreterState</code>.</p>
<h1 id="Compiling-to-Interpreter-Instructions"><a href="#Compiling-to-Interpreter-Instructions" class="headerlink" title="Compiling to Interpreter Instructions"></a>Compiling to Interpreter Instructions</h1><p><code>Code</code> and <code>InterpreterState</code> are defined in <code>torch/csrc/jit/interpreter.&#123;h,cpp&#125;</code>. These two classes are just a wrapper of its implementations:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br></pre></td><td class="code"><pre><span class="line">Code::<span class="built_in">Code</span>(std::shared_ptr&lt;Graph&gt;&amp; graph)</span><br><span class="line">    : <span class="built_in">pImpl</span>(<span class="keyword">new</span> <span class="built_in">CodeImpl</span>(graph)) &#123;&#125;</span><br><span class="line">Code::~<span class="built_in">Code</span>() = <span class="keyword">default</span>;</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">const</span> std::vector&lt;GraphExecutor*&gt;&amp; <span class="title">Code::grad_executors</span><span class="params">()</span> </span>&#123;</span><br><span class="line">  <span class="keyword">return</span> pImpl-&gt;<span class="built_in">grad_executors</span>();</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line">InterpreterState::<span class="built_in">InterpreterState</span>(<span class="keyword">const</span> Code &amp; code)</span><br><span class="line">  : <span class="built_in">pImpl</span>(<span class="keyword">new</span> <span class="built_in">InterpreterStateImpl</span>(code)) &#123;&#125;</span><br><span class="line">InterpreterState::~<span class="built_in">InterpreterState</span>() = <span class="keyword">default</span>;</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">void</span> <span 
class="title">InterpreterState::runOneStage</span><span class="params">(Stack &amp; stack)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">return</span> pImpl-&gt;<span class="built_in">runOneStage</span>(stack);</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p><code>CodeImpl</code> is a long struct, but quite logical. A selection of its fields is listed below:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">PreprocessGraph preprocess;</span><br><span class="line">std::vector&lt;Instruction&gt; instructions;</span><br></pre></td></tr></table></figure>

<p>Its constructor is:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="built_in">CodeImpl</span>(std::shared_ptr&lt;Graph&gt;&amp; graph_)</span><br><span class="line">    : <span class="built_in">preprocess</span>(*graph_) &#123;</span><br><span class="line">  graph = preprocess.graph;</span><br><span class="line">  <span class="comment">// std::cout &lt;&lt; &quot;into code graph:\n&quot; &lt;&lt; *graph &lt;&lt; &quot;\n&quot;;</span></span><br><span class="line">  <span class="built_in">insertNodesFromBlock</span>(graph-&gt;<span class="built_in">block</span>());</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>Clearly we can see what it does is: 1. preprocess the graph, and then 2. emit instructions for interpreter.</p>
<p>The preprocessing of the graph is very well explained at the beginning of the file:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// Before we translate to intepreter instructions, we do</span></span><br><span class="line"><span class="comment">// some preprocessing of the graph to turn it into a form that is closer</span></span><br><span class="line"><span class="comment">// to what the instructions will look like.</span></span><br><span class="line"><span class="comment">// In particular we:</span></span><br><span class="line"><span class="comment">// * (TODO) desugar Loop trip counts into c = 0, c += 1 instructions in the loop</span></span><br><span class="line"><span class="comment">// * flatten stages so that each stage starts with a load from the stack</span></span><br><span class="line"><span class="comment">//   and ends with a store to the stack</span></span><br><span class="line"><span class="comment">// *. 
computes move_flags (see Outputs), and inserts</span></span><br><span class="line"><span class="comment">// *  Drop nodes are inserted for any node that is unused to create a dummy use</span></span><br><span class="line"><span class="comment">//    that will cause the interpreter to free the node.</span></span><br><span class="line"><span class="comment">//    A drop node is just a node with no outputs that just pops its inputs off the stack,</span></span><br><span class="line"><span class="comment">//    to ensure the interpreter release references to nodes that are never used.</span></span><br><span class="line"><span class="comment">//    Drop nodes are also inserted when the last use of a node is in some conditionally</span></span><br><span class="line"><span class="comment">//    run control flow (e.g. one side of an If) and the interpreter must free</span></span><br><span class="line"><span class="comment">//    the node only after the control flow has reconverged</span></span><br><span class="line"><span class="comment">// Outputs are:</span></span><br><span class="line"><span class="comment">// * graph - the post processed copy of g</span></span><br><span class="line"><span class="comment">// * move_flags[n] - a list of booleans, one for each input,</span></span><br><span class="line"><span class="comment">//   indicating whether this is the last use of the value. The interpreter</span></span><br><span class="line"><span class="comment">//   should generate a move rather than a copy in this case.</span></span><br><span class="line"><span class="comment">// * stage_input_types: the type annotations on the inputs to each stage</span></span><br><span class="line"><span class="comment">//   these can be removed once the the backward tracer is no longer used</span></span><br></pre></td></tr></table></figure>

<p>as well as in its definition:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre></td><td class="code"><pre><span class="line"><span class="class"><span class="keyword">struct</span> <span class="title">PreprocessGraph</span> &#123;</span></span><br><span class="line">  <span class="built_in">PreprocessGraph</span>(Graph &amp; g)</span><br><span class="line">  : <span class="built_in">graph</span>(g.<span class="built_in">copy</span>()) &#123;</span><br><span class="line">    <span class="built_in">desugarTripCounts</span>(graph-&gt;<span class="built_in">block</span>());</span><br><span class="line">    stage_input_types = <span class="built_in">flattenStages</span>(*graph);</span><br><span class="line">    <span class="built_in">dropUnused</span>(graph-&gt;<span class="built_in">block</span>());</span><br><span class="line">    <span class="comment">// fill in move_flags by scanning blocks;</span></span><br><span class="line">    move_flags = <span class="built_in">findLastUses</span>(*graph);</span><br><span class="line">    <span class="comment">//<span class="doctag">TODO:</span> desugar Loop trip counts, for now we drop trip counts</span></span><br><span class="line">  &#125;</span><br><span class="line">  <span class="comment">// Outputs of the preprocessing:</span></span><br><span class="line">  std::shared_ptr&lt;Graph&gt; graph;</span><br><span class="line">  <span class="comment">// for each input, should we move rather than copy the 
inputs</span></span><br><span class="line">  std::unordered_map&lt;Node*, std::vector&lt;<span class="keyword">uint8_t</span>&gt;&gt; move_flags;</span><br><span class="line">  std::vector&lt;std::vector&lt;TypePtr&gt;&gt; stage_input_types;</span><br><span class="line"></span><br><span class="line">&#125;;</span><br></pre></td></tr></table></figure>

<p>The <code>insertNodesFromBlock</code> emits instructions. It is also very self-explanatory:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">insertNodesFromBlock</span><span class="params">(Block* block)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">for</span>(<span class="keyword">auto</span> 
node : block-&gt;<span class="built_in">nodes</span>()) &#123;</span><br><span class="line">    <span class="keyword">const</span> <span class="keyword">auto</span> &amp; source_location = node-&gt;<span class="built_in">getSourceLocation</span>();</span><br><span class="line">    <span class="built_in"><span class="keyword">switch</span></span>(node-&gt;<span class="built_in">kind</span>()) &#123;</span><br><span class="line">      <span class="keyword">case</span> prim::If: &#123;</span><br><span class="line">        <span class="comment">// x = if c:</span></span><br><span class="line">        <span class="comment">//   &lt;then_block&gt;</span></span><br><span class="line">        <span class="comment">//   -&gt; (vt)</span></span><br><span class="line">        <span class="comment">// else:</span></span><br><span class="line">        <span class="comment">//    &lt;else_block&gt;</span></span><br><span class="line">        <span class="comment">//   -&gt; (vf)</span></span><br><span class="line"></span><br><span class="line">        <span class="comment">// turns into:</span></span><br><span class="line">        <span class="comment">//   JumpNZ c, then</span></span><br><span class="line">        <span class="comment">//   &lt;else_block&gt;</span></span><br><span class="line">        <span class="comment">//   x = vf</span></span><br><span class="line">        <span class="comment">//   Jump end</span></span><br><span class="line">        <span class="comment">// then:</span></span><br><span class="line">        <span class="comment">//   &lt;then_block&gt;</span></span><br><span class="line">        <span class="comment">//   x = vt</span></span><br><span class="line">        <span class="comment">// end:</span></span><br><span class="line"></span><br><span class="line">        <span class="comment">// prim::Placeholder instructions are replaced with branch instructions</span></span><br><span class="line">        <span class="comment">// when the branch 
target locations are known</span></span><br><span class="line">        <span class="keyword">auto</span> cond_branch = <span class="built_in">insertInstruction</span>(prim::Placeholder, source_location, node-&gt;<span class="built_in">inputs</span>(), <span class="built_in">moveFlags</span>(node), &#123;&#125;);</span><br><span class="line">        <span class="keyword">auto</span> then_block = node-&gt;<span class="built_in">blocks</span>()[<span class="number">0</span>];</span><br><span class="line">        <span class="keyword">auto</span> else_block = node-&gt;<span class="built_in">blocks</span>()[<span class="number">1</span>];</span><br><span class="line">        <span class="built_in">insertNodesFromBlock</span>(else_block);</span><br><span class="line">        <span class="built_in">insertAssign</span>(source_location,else_block-&gt;<span class="built_in">outputs</span>(), <span class="built_in">moveFlags</span>(else_block), node-&gt;<span class="built_in">outputs</span>());</span><br><span class="line">        <span class="keyword">auto</span> jump = <span class="built_in">insertInstruction</span>(prim::Placeholder, source_location, &#123;&#125;, &#123;&#125;, &#123;&#125;);</span><br><span class="line">        <span class="keyword">auto</span> then_block_start = instructions.<span class="built_in">size</span>();</span><br><span class="line">        <span class="built_in">insertNodesFromBlock</span>(then_block);</span><br><span class="line">        <span class="built_in">insertAssign</span>(source_location, then_block-&gt;<span class="built_in">outputs</span>(), <span class="built_in">moveFlags</span>(then_block), node-&gt;<span class="built_in">outputs</span>());</span><br><span class="line">        <span class="built_in">createJump</span>(jump, instructions.<span class="built_in">size</span>());</span><br><span class="line">        <span class="built_in">createJumpNZ</span>(cond_branch, then_block_start);</span><br><span class="line">      &#125; <span 
class="keyword">break</span>;</span><br><span class="line">      <span class="keyword">case</span> prim::Loop: &#123;</span><br><span class="line">        <span class="comment">// omitted ......</span></span><br><span class="line">      &#125; <span class="keyword">break</span>;</span><br><span class="line">      <span class="keyword">default</span>: &#123;</span><br><span class="line">        <span class="built_in">insertInstruction</span>(node);</span><br><span class="line">      &#125; <span class="keyword">break</span>;</span><br><span class="line">    &#125;</span><br><span class="line">    <span class="comment">// each stage ends with a load instruction</span></span><br><span class="line">    <span class="comment">// we record where these instructions occur, and use them to</span></span><br><span class="line">    <span class="comment">// exit the interpreter</span></span><br><span class="line">    <span class="keyword">if</span>(node-&gt;<span class="built_in">kind</span>() == prim::Load) &#123;</span><br><span class="line">      stage_end.<span class="built_in">push_back</span>(instructions.<span class="built_in">size</span>());</span><br><span class="line">    &#125;</span><br><span class="line">  &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>Since the nodes are topologically sorted, we just need to iterate over the linked list and generate code for each node.</p>
<h1 id="The-Virtual-Machine"><a href="#The-Virtual-Machine" class="headerlink" title="The Virtual Machine"></a>The Virtual Machine</h1><p><code>InterpreterStateImpl</code> is the virtual machine that executes instructions.</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br></pre></td><td class="code"><pre><span class="line"><span class="built_in">InterpreterStateImpl</span>(<span class="keyword">const</span> Code &amp; code)</span><br><span class="line">: <span class="built_in">function</span>(code.pImpl),</span><br><span class="line">  <span class="built_in">int_data</span>(function-&gt;int_data.<span class="built_in">data</span>()),</span><br><span class="line">  <span class="built_in">bool_data</span>(function-&gt;bool_data),</span><br><span class="line">  <span class="built_in">registers</span>(function-&gt;register_size) &#123;</span><br><span class="line">&#125;</span><br><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">runOneStage</span><span class="params">(Stack 
&amp; stack)</span> </span>&#123;</span><br><span class="line">  <span class="comment">// std::cout &lt;&lt; &quot;running stage: &quot; &lt;&lt; current_stage &lt;&lt; &quot; of &quot; &lt;&lt; function-&gt;stage_end.size() &lt;&lt; &quot;\n&quot;;</span></span><br><span class="line">  <span class="comment">// std::cout &lt;&lt; *function-&gt;graph &lt;&lt; &quot;\n&quot;;</span></span><br><span class="line">  <span class="comment">// function-&gt;dump(std::cout);</span></span><br><span class="line">  <span class="keyword">size_t</span> pc = current_pc;</span><br><span class="line">  <span class="keyword">size_t</span> last = function-&gt;stage_end[current_stage];</span><br><span class="line">  <span class="keyword">auto</span> &amp; instructions = function-&gt;instructions;</span><br><span class="line">  <span class="keyword">while</span>(pc &lt; last) &#123;</span><br><span class="line">      <span class="comment">// std::cout &lt;&lt; &quot;executing &quot; &lt;&lt; pc &lt;&lt; &quot;: &quot;;</span></span><br><span class="line">      <span class="comment">// function-&gt;dumpInstruction(std::cout, pc);</span></span><br><span class="line">      <span class="comment">// std::cout &lt;&lt; &quot;\n&quot;;</span></span><br><span class="line">      <span class="keyword">try</span> &#123;</span><br><span class="line">        <span class="keyword">auto</span> &amp; inst = instructions[pc];</span><br><span class="line">        <span class="built_in">loadTensorsFromRegisters</span>(inst.inputs, stack);</span><br><span class="line">        <span class="keyword">size_t</span> new_pc = pc + <span class="number">1</span> + inst.<span class="built_in">callback</span>(stack);</span><br><span class="line">        <span class="keyword">for</span>(<span class="keyword">int</span> i = inst.outputs.size - <span class="number">1</span>; i &gt;= <span class="number">0</span>; i--) &#123;</span><br><span class="line">          <span class="keyword">int</span> reg = <span 
class="built_in">get</span>(inst.outputs,i);</span><br><span class="line">          registers[reg] = <span class="built_in">pop</span>(stack);</span><br><span class="line">          <span class="comment">// std::cout &lt;&lt; &quot;pop reg[&quot; &lt;&lt; reg &lt;&lt; &quot;];\n&quot; &lt;&lt; registers[reg].pImpl &lt;&lt; &quot;\n&quot;;</span></span><br><span class="line">        &#125;</span><br><span class="line">        pc = new_pc;</span><br><span class="line">      &#125; <span class="built_in"><span class="keyword">catch</span></span>(std::exception &amp; e) &#123;</span><br><span class="line">        <span class="keyword">if</span>(!instructions[pc].debug_location)</span><br><span class="line">          <span class="keyword">throw</span>; <span class="comment">// rethrow original exception</span></span><br><span class="line">        <span class="comment">// throw a new exception with enhanced debugging information</span></span><br><span class="line">        instructions[pc].debug_location-&gt;<span class="built_in">wrapAndRethrowException</span>(e, <span class="string">&quot;operation failed in interpreter&quot;</span>);</span><br><span class="line">      &#125;</span><br><span class="line">  &#125;</span><br><span class="line">  current_pc = pc;</span><br><span class="line">  current_stage++;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>There is nothing special, just mimicking the behavior of processors. We can easily tell from the above code that the action is defined at <code>Instruction::callback</code> and branching is implemented by returning a non-zero value from that callback function. Some of the callbacks are defined inside <code>CodeImpl</code>, such as:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// jump when input is not 0</span></span><br><span class="line"><span class="function"><span class="keyword">void</span> <span class="title">createJumpNZ</span><span class="params">(<span class="keyword">int</span> from_inst, <span class="keyword">int</span> to_inst)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">auto</span> &amp; inst = instructions[from_inst];</span><br><span class="line">  <span class="built_in">JIT_ASSERT</span>(inst.debug_name == prim::Placeholder);</span><br><span class="line">  <span class="keyword">auto</span> offset = <span class="built_in">relativeJump</span>(from_inst, to_inst);</span><br><span class="line">  inst.callback = [offset](Stack &amp; stack) &#123;</span><br><span class="line">    <span class="keyword">auto</span> t = <span class="built_in">pop</span>(stack).<span class="built_in">toInt</span>();</span><br><span class="line">    <span class="keyword">return</span> (t != <span class="number">0</span>) ? offset : <span class="number">0</span>;</span><br><span class="line">  &#125;;</span><br><span class="line">  inst.debug_name = prim::JumpNZ;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>while others are defined by their node kind:</p>
<figure class="highlight c++"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">size_t</span> <span class="title">insertInstruction</span><span class="params">(Node * n)</span> </span>&#123;</span><br><span class="line">  <span class="keyword">auto</span> inst = <span class="built_in">insertInstruction</span>(n-&gt;<span class="built_in">kind</span>(), n-&gt;<span class="built_in">getSourceLocation</span>(), n-&gt;<span class="built_in">inputs</span>(), <span class="built_in">moveFlags</span>(n) , n-&gt;<span class="built_in">outputs</span>());</span><br><span class="line">  instructions[inst].callback = <span class="built_in">getOperation</span>(n);</span><br><span class="line">  <span class="keyword">return</span> inst;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>where <code>getOperation</code> is defined in <code>torch/csrc/jit/operator.&#123;h, cpp&#125;</code>. Further reading through these two files, we can see that operations are registered by calling <code>registerOperator</code>, which is done through calling <code>RegisterOperators</code>. Using <code>grep RegisterOperators -r torch/csrc/</code>, we can locate the definition of all operations:</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br></pre></td><td class="code"><pre><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;generated&#x2F;register_aten_ops.cpp:RegisterOperators reg(&#123;</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;fusers&#x2F;common&#x2F;fusion_handle_impl.cpp:RegisterOperators reg_fused_operators(&#123;</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;custom_operator.h:&#x2F;&#x2F;&#x2F; so in the global scope when a &#96;RegisterOperators&#96; object is assigned to a</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;custom_operator.h:struct TORCH_API RegisterOperators &#123;</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;custom_operator.h:  RegisterOperators() &#x3D; default;</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;custom_operator.h:  RegisterOperators(std::vector&lt;Operator&gt; operators) &#123;</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;custom_operator.h:  RegisterOperators(const std::string&amp; name, Implementation&amp;&amp; implementation) &#123;</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;custom_operator.h:  RegisterOperators&amp; op(</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;Python_interpreter.cpp:RegisterOperators reg(&#123;</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;register_special_ops.cpp:RegisterOperators 
reg(&#123;</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;graph_executor.cpp:RegisterOperators reg_graph_executor_ops(&#123;</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;constants.cpp:RegisterOperators reg(&#123;</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;register_prim_ops.cpp:RegisterOperators reg(&#123;</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;register_prim_ops.cpp:RegisterOperators reg2(&#123;</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;test_jit.cpp:    RegisterOperators reg(&#123;createOperator(</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;test_jit.cpp:    RegisterOperators reg(&#123;createOperator(</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;test_jit.cpp:    RegisterOperators reg(&#123;createOperator(</span><br><span class="line">torch&#x2F;csrc&#x2F;jit&#x2F;test_jit.cpp:    RegisterOperators reg(</span><br></pre></td></tr></table></figure>

<p>At this point, we are done with getting the whole big picture of PyTorch’s JIT. It’s time to stop here, and interested readers can read the code by themselves for more details.</p>

    </div>

    
    
    
        

  <div class="followme">
    <p>Welcome to my other publishing channels</p>

    <div class="social-list">

        <div class="social-item">
          <a target="_blank" rel="noopener" class="social-link" href="https://twitter.com/gaoxiang_ai">
            <span class="icon">
              <i class="fab fa-twitter"></i>
            </span>

            <span class="label">Twitter</span>
          </a>
        </div>

        <div class="social-item">
          <a target="_blank" class="social-link" href="/atom.xml">
            <span class="icon">
              <i class="fa fa-rss"></i>
            </span>

            <span class="label">RSS</span>
          </a>
        </div>
    </div>
  </div>


      <footer class="post-footer">
          <div class="post-tags">
              <a href="/tags/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0/" rel="tag"># 机器学习</a>
              <a href="/tags/PyTorch/" rel="tag"># PyTorch</a>
              <a href="/tags/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0/" rel="tag"># 深度学习</a>
          </div>

        


        
    <div class="post-nav">
      <div class="post-nav-item">
    <a href="/2018/06/11/%E4%BB%8E%E5%A4%B4%E5%BC%80%E5%A7%8B%E9%98%85%E8%AF%BBPyTorch%E4%BB%A3%E7%A0%81%20--%20Operators%E7%AF%87/" rel="prev" title="从头开始阅读PyTorch代码 -- Operators篇">
      <i class="fa fa-chevron-left"></i> 从头开始阅读PyTorch代码 -- Operators篇
    </a></div>
      <div class="post-nav-item">
    <a href="/2018/11/09/GRE%E3%80%81Wireguard%E3%80%81%E7%BD%91%E6%A1%A5%E4%B8%8EIPv6/" rel="next" title="GRE、Wireguard、网桥与IPv6">
      GRE、Wireguard、网桥与IPv6 <i class="fa fa-chevron-right"></i>
    </a></div>
    </div>
      </footer>
    
  </article>
  
  
  



          </div>
          
    
  <div class="comments">
    <div id="disqus_thread">
      <noscript>Please enable JavaScript to view the comments powered by Disqus.</noscript>
    </div>
  </div>
  

<script>
  window.addEventListener('tabs:register', () => {
    let { activeClass } = CONFIG.comments;
    if (CONFIG.comments.storage) {
      activeClass = localStorage.getItem('comments_active') || activeClass;
    }
    if (activeClass) {
      let activeTab = document.querySelector(`a[href="#comment-${activeClass}"]`);
      if (activeTab) {
        activeTab.click();
      }
    }
  });
  if (CONFIG.comments.storage) {
    window.addEventListener('tabs:click', event => {
      if (!event.target.matches('.tabs-comment .tab-content .tab-pane')) return;
      let commentClass = event.target.classList[1];
      localStorage.setItem('comments_active', commentClass);
    });
  }
</script>

        </div>
          
  
  <div class="toggle sidebar-toggle">
    <span class="toggle-line toggle-line-first"></span>
    <span class="toggle-line toggle-line-middle"></span>
    <span class="toggle-line toggle-line-last"></span>
  </div>

  <aside class="sidebar">
    <div class="sidebar-inner">

      <ul class="sidebar-nav motion-element">
        <li class="sidebar-nav-toc">
          Table of Contents
        </li>
        <li class="sidebar-nav-overview">
          Overview
        </li>
      </ul>

      <!--noindex-->
      <div class="post-toc-wrap sidebar-panel">
          <div class="post-toc motion-element"><ol class="nav"><li class="nav-item nav-level-1"><a class="nav-link" href="#Starting-point-script-and-script-method"><span class="nav-number">1.</span> <span class="nav-text">Starting point: script and script_method</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#Scripting-a-function"><span class="nav-number">1.1.</span> <span class="nav-text">Scripting a function</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#Scripting-a-module"><span class="nav-number">1.2.</span> <span class="nav-text">Scripting a module</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#The-frontend"><span class="nav-number">2.</span> <span class="nav-text">The frontend</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#ScriptModule-and-ScriptMethod"><span class="nav-number">3.</span> <span class="nav-text">ScriptModule and ScriptMethod</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#From-Python-AST-to-PyTorch-IR-part-1"><span class="nav-number">4.</span> <span class="nav-text">From Python AST to PyTorch IR: part 1</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#The-PyTorch-IR"><span class="nav-number">5.</span> <span class="nav-text">The PyTorch IR</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#From-Python-AST-to-PyTorch-IR-part-2"><span class="nav-number">6.</span> <span class="nav-text">From Python AST to PyTorch IR: part 2</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#The-Graph-Executor"><span class="nav-number">7.</span> <span class="nav-text">The Graph Executor</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#Compiling-to-Interpreter-Instructions"><span class="nav-number">8.</span> <span class="nav-text">Compiling to Interpreter Instructions</span></a></li><li class="nav-item 
nav-level-1"><a class="nav-link" href="#The-Virtual-Machine"><span class="nav-number">9.</span> <span class="nav-text">The Virtual Machine</span></a></li></ol></div>
      </div>
      <!--/noindex-->

      <div class="site-overview-wrap sidebar-panel">
        <div class="site-author motion-element" itemprop="author" itemscope itemtype="http://schema.org/Person">
  <p class="site-author-name" itemprop="name">zasdfgbnm</p>
  <div class="site-description" itemprop="description"></div>
</div>
<div class="site-state-wrap motion-element">
  <nav class="site-state">
      <div class="site-state-item site-state-posts">
          <a href="/archives/">
        
          <span class="site-state-item-count">11</span>
          <span class="site-state-item-name">posts</span>
        </a>
      </div>
      <div class="site-state-item site-state-categories">
            <a href="/categories/">
          
        <span class="site-state-item-count">5</span>
        <span class="site-state-item-name">categories</span></a>
      </div>
      <div class="site-state-item site-state-tags">
            <a href="/tags/">
          
        <span class="site-state-item-count">56</span>
        <span class="site-state-item-name">tags</span></a>
      </div>
  </nav>
</div>
  <div class="links-of-author motion-element">
      <span class="links-of-author-item">
        <a href="https://github.com/zasdfgbnm" title="GitHub → https:&#x2F;&#x2F;github.com&#x2F;zasdfgbnm" rel="noopener" target="_blank"><i class="fab fa-github fa-fw"></i>GitHub</a>
      </span>
      <span class="links-of-author-item">
        <a href="https://twitter.com/gaoxiang_ai" title="Twitter → https:&#x2F;&#x2F;twitter.com&#x2F;gaoxiang_ai" rel="noopener" target="_blank"><i class="fab fa-twitter fa-fw"></i>Twitter</a>
      </span>
  </div>



      </div>

    </div>
  </aside>
  <div id="sidebar-dimmer"></div>


      </div>
    </main>

    <footer class="footer">
      <div class="footer-inner">
        

        

<div class="copyright">
  
  &copy; 
  <span itemprop="copyrightYear">2021</span>
  <span class="with-love">
    <i class="fa fa-heart"></i>
  </span>
  <span class="author" itemprop="copyrightHolder">zasdfgbnm</span>
</div>
  <div class="powered-by">Powered by <a href="https://hexo.io/" class="theme-link" rel="noopener" target="_blank">Hexo</a> & <a href="https://muse.theme-next.org/" class="theme-link" rel="noopener" target="_blank">NexT.Muse</a>
  </div>

        








      </div>
    </footer>
  </div>

  
  <script src="/lib/anime.min.js"></script>
  <script src="/lib/velocity/velocity.min.js"></script>
  <script src="/lib/velocity/velocity.ui.min.js"></script>

<script src="/js/utils.js"></script>

<script src="/js/motion.js"></script>


<script src="/js/schemes/muse.js"></script>


<script src="/js/next-boot.js"></script>




  
  <script>
    (function(){
      var canonicalURL, curProtocol;
      //Get the <link> tag
      var x=document.getElementsByTagName("link");
		//Find the last canonical URL
		if(x.length > 0){
			for (i=0;i<x.length;i++){
				if(x[i].rel.toLowerCase() == 'canonical' && x[i].href){
					canonicalURL=x[i].href;
				}
			}
		}
    //Get protocol
	    if (!canonicalURL){
	    	curProtocol = window.location.protocol.split(':')[0];
	    }
	    else{
	    	curProtocol = canonicalURL.split(':')[0];
	    }
      //Get current URL if the canonical URL does not exist
	    if (!canonicalURL) canonicalURL = window.location.href;
	    //Assign script content. Replace current URL with the canonical URL
      !function(){var e=/([http|https]:\/\/[a-zA-Z0-9\_\.]+\.baidu\.com)/gi,r=canonicalURL,t=document.referrer;if(!e.test(r)){var n=(String(curProtocol).toLowerCase() === 'https')?"https://sp0.baidu.com/9_Q4simg2RQJ8t7jm9iCKT-xh_/s.gif":"//api.share.baidu.com/s.gif";t?(n+="?r="+encodeURIComponent(document.referrer),r&&(n+="&l="+r)):r&&(n+="?l="+r);var i=new Image;i.src=n}}(window);})();
  </script>















  

  

  

<script>
  function loadCount() {
    var d = document, s = d.createElement('script');
    s.src = 'https://zasdfgbnm-github-io.disqus.com/count.js';
    s.id = 'dsq-count-scr';
    (d.head || d.body).appendChild(s);
  }
  // defer loading until the whole page loading is completed
  window.addEventListener('load', loadCount, false);
</script>
<script>
  var disqus_config = function() {
    this.page.url = "https://zasdfgbnm.github.io/2018/09/20/PyTorch-JIT-Source-Code-Read-Note/";
    this.page.identifier = "2018/09/20/PyTorch-JIT-Source-Code-Read-Note/";
    this.page.title = "PyTorch JIT Source Code Read Note";
    };
  NexT.utils.loadComments(document.querySelector('#disqus_thread'), () => {
    if (window.DISQUS) {
      DISQUS.reset({
        reload: true,
        config: disqus_config
      });
    } else {
      var d = document, s = d.createElement('script');
      s.src = 'https://zasdfgbnm-github-io.disqus.com/embed.js';
      s.setAttribute('data-timestamp', '' + +new Date());
      (d.head || d.body).appendChild(s);
    }
  });
</script>

</body>
</html>
