<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#222">
<meta name="generator" content="Hexo 4.2.0">
  <link rel="apple-touch-icon" sizes="180x180" href="/images/apple-touch-icon-next.png">
  <link rel="icon" type="image/png" sizes="32x32" href="/images/favicon-32x32-next.png">
  <link rel="icon" type="image/png" sizes="16x16" href="/images/favicon-16x16-next.png">
  <link rel="mask-icon" href="/images/logo.svg" color="#222">

<link rel="stylesheet" href="/css/main.css">


<link rel="stylesheet" href="/lib/font-awesome/css/font-awesome.min.css">


<script id="hexo-configurations">
  // Build-time theme configuration emitted by Hexo for the NexT theme.
  // The values below are read by the theme's client-side scripts
  // (search, sidebar, motion/animation, comments, back-to-top, etc.).
  var NexT = window.NexT || {};
  var CONFIG = {
    // Site origin info; used to tell internal links from external ones.
    hostname: new URL('http://yoursite.com').hostname,
    root: '/',
    // NexT scheme (layout variant) and theme version.
    scheme: 'Pisces',
    version: '7.6.0',
    exturl: false,
    // Sidebar placement/behavior (Pisces scheme shows it on the left).
    sidebar: {"position":"left","display":"post","padding":18,"offset":12,"onmobile":false},
    copycode: {"enable":false,"show_result":false,"style":null},
    back2top: {"enable":true,"sidebar":false,"scrollpercent":false},
    bookmark: {"enable":false,"color":"#222","save":"auto"},
    fancybox: false,
    mediumzoom: false,
    lazyload: false,
    pangu: false,
    // Comment-tab behavior; consumed by the tabs script near the end of the page.
    comments: {"style":"tabs","active":null,"storage":true,"lazyload":false,"nav":null},
    // Algolia search is wired up but unconfigured (empty credentials).
    algolia: {
      appID: '',
      apiKey: '',
      indexName: '',
      hits: {"per_page":10},
      labels: {"input_placeholder":"Search for Posts","hits_empty":"We didn't find any results for the search: ${query}","hits_stats":"${hits} results found in ${time} ms"}
    },
    localsearch: {"enable":false,"trigger":"auto","top_n_per_article":1,"unescape":false,"preload":false},
    path: '',
    // Page-transition animations used by the .use-motion styles/scripts.
    motion: {"enable":true,"async":false,"transition":{"post_block":"fadeIn","post_header":"slideDownIn","post_body":"slideDownIn","coll_header":"slideLeftIn","sidebar":"slideUpIn"}}
  };
</script>

  <meta name="description" content="Overview　　Guided face restoration network(GFRNet).包含两个网络，WarpNet和RecNet.WarpNet预测面部流场使得Guide face和degraded face有相同的姿势（对齐操作）。输入degrade face和warped guide face到RecNet中重建出高质量face.WarpNet中的loss是landmark los">
<meta property="og:type" content="article">
<meta property="og:title" content="GFRNet">
<meta property="og:url" content="http://yoursite.com/2019/12/27/GFRNet/">
<meta property="og:site_name" content="SRCNN">
<meta property="og:description" content="Overview　　Guided face restoration network(GFRNet).包含两个网络，WarpNet和RecNet.WarpNet预测面部流场使得Guide face和degraded face有相同的姿势（对齐操作）。输入degrade face和warped guide face到RecNet中重建出高质量face.WarpNet中的loss是landmark los">
<meta property="og:locale" content="en_US">
<meta property="og:image" content="http://yoursite.com/images/GFRNet_all.jpg">
<meta property="og:image" content="http://yoursite.com/images/GFRNet_warp.jpg">
<meta property="og:image" content="http://yoursite.com/images/GFRNet_Rec.jpg">
<meta property="og:image" content="http://yoursite.com/images/equation.jpg">
<meta property="article:published_time" content="2019-12-27T01:49:26.000Z">
<meta property="article:modified_time" content="2019-12-30T10:25:48.811Z">
<meta property="article:author" content="Z.J. Jiang">
<meta property="article:tag" content="SISR">
<meta property="article:tag" content=" FH">
<meta name="twitter:card" content="summary">
<meta name="twitter:image" content="http://yoursite.com/images/GFRNet_all.jpg">

<link rel="canonical" href="http://yoursite.com/2019/12/27/GFRNet/">


<script id="page-configurations">
  // https://hexo.io/docs/variables.html
  // Per-page flags consumed by the NexT theme scripts (sidebar display,
  // post-only features). This page is a single post, not the home page.
  CONFIG.page = {
    sidebar: "",
    isHome: false,
    isPost: true
  };
</script>

  <title>GFRNet | SRCNN</title>
  






  <noscript>
  <style>
  .use-motion .brand,
  .use-motion .menu-item,
  .sidebar-inner,
  .use-motion .post-block,
  .use-motion .pagination,
  .use-motion .comments,
  .use-motion .post-header,
  .use-motion .post-body,
  .use-motion .collection-header { opacity: initial; }

  .use-motion .site-title,
  .use-motion .site-subtitle {
    opacity: initial;
    top: initial;
  }

  .use-motion .logo-line-before i { left: initial; }
  .use-motion .logo-line-after i { right: initial; }
  </style>
</noscript>

</head>

<body itemscope itemtype="http://schema.org/WebPage">
  <div class="container use-motion">
    <div class="headband"></div>

    <header class="header" itemscope itemtype="http://schema.org/WPHeader">
      <div class="header-inner"><div class="site-brand-container">
  <div class="site-meta">

    <div>
      <a href="/" class="brand" rel="start">
        <span class="logo-line-before"><i></i></span>
        <span class="site-title">SRCNN</span>
        <span class="logo-line-after"><i></i></span>
      </a>
    </div>
        <p class="site-subtitle">SISR-FH</p>
  </div>

  <div class="site-nav-toggle">
    <div class="toggle" aria-label="Toggle navigation bar">
      <span class="toggle-line toggle-line-first"></span>
      <span class="toggle-line toggle-line-middle"></span>
      <span class="toggle-line toggle-line-last"></span>
    </div>
  </div>
</div>


<nav class="site-nav">
  
  <ul id="menu" class="menu">
        <li class="menu-item menu-item-home">

    <a href="/" rel="section"><i class="fa fa-fw fa-home"></i>Home</a>

  </li>
        <li class="menu-item menu-item-tags">

    <a href="/tags/" rel="section"><i class="fa fa-fw fa-tags"></i>Tags</a>

  </li>
        <li class="menu-item menu-item-categories">

    <a href="/categories/" rel="section"><i class="fa fa-fw fa-th"></i>Categories</a>

  </li>
        <li class="menu-item menu-item-archives">

    <a href="/archives/" rel="section"><i class="fa fa-fw fa-archive"></i>Archives</a>

  </li>
  </ul>

</nav>
</div>
    </header>

    
  <div class="back-to-top">
    <i class="fa fa-arrow-up"></i>
    <span>0%</span>
  </div>


    <main class="main">
      <div class="main-inner">
        <div class="content-wrap">
          

          <div class="content">
            

  <div class="posts-expand">
      
  
  
  <article itemscope itemtype="http://schema.org/Article" class="post-block " lang="en">
    <link itemprop="mainEntityOfPage" href="http://yoursite.com/2019/12/27/GFRNet/">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="image" content="/images/avatar.gif">
      <meta itemprop="name" content="Z.J. Jiang">
      <meta itemprop="description" content="about the single image super-resolution and face hallucination">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="SRCNN">
    </span>
      <header class="post-header">
        <h1 class="post-title" itemprop="name headline">
          GFRNet
        </h1>

        <div class="post-meta">
            <span class="post-meta-item">
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              <span class="post-meta-item-text">Posted on</span>

              <time title="Created: 2019-12-27 09:49:26" itemprop="dateCreated datePublished" datetime="2019-12-27T09:49:26+08:00">2019-12-27</time>
            </span>
              <span class="post-meta-item">
                <span class="post-meta-item-icon">
                  <i class="fa fa-calendar-check-o"></i>
                </span>
                <span class="post-meta-item-text">Edited on</span>
                <time title="Modified: 2019-12-30 18:25:48" itemprop="dateModified" datetime="2019-12-30T18:25:48+08:00">2019-12-30</time>
              </span>
            <span class="post-meta-item">
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              <span class="post-meta-item-text">In</span>
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/categories/FH/" itemprop="url" rel="index">
                    <span itemprop="name">FH</span>
                  </a>
                </span>
            </span>

          

        </div>
      </header>

    
    
    
    <div class="post-body" itemprop="articleBody">

      
        <h1 id="Overview"><a href="#Overview" class="headerlink" title="Overview"></a>Overview</h1><p>　　Guided face restoration network(GFRNet).包含两个网络，WarpNet和RecNet.WarpNet预测面部流场使得Guide face和degraded face有相同的姿势（对齐操作）。输入degrade face和warped guide face到RecNet中重建出高质量face.WarpNet中的loss是landmark loss和TV loss。退化模型进行了模糊、加噪、下采样、JPEG压缩操作。<br><a id="more"></a></p>
<h1 id="Model"><a href="#Model" class="headerlink" title="Model"></a>Model</h1><img src="/images/GFRNet_all.jpg" alt="GFRNet整体模型示意图">
<p>　　给定$I^{d}, I^{g}$， 大小为$256 \times 256$, 输出$\hat I$。整个模型函数关系可以表示为:</p>
<script type="math/tex; mode=display">\hat I = F(I^d, I^g)</script><h2 id="WarpNet"><a href="#WarpNet" class="headerlink" title="WarpNet"></a>WarpNet</h2><img src="/images/GFRNet_warp.jpg" alt="WarpNet模型示意图">
<ul>
<li>input encoder 从$I^d, I^g$提取特征，8个卷积层，kernel_size=4, stride=2.</li>
<li>flow decoder 预测密集流场（dense flow field）。使$I^g$完成姿势调整、对齐操作。<br>　　除了第一层和最后一层，每一层都是conv-BN-ReLU结构。整个WarpNet的作用用函数可以表示为:<script type="math/tex; mode=display">\Phi = F_{w}(I^d, I^g; \Theta_{w})</script>$\Theta_{w}$表示的是WarpNet的模型参数。</li>
</ul>
<h2 id="RecNet"><a href="#RecNet" class="headerlink" title="RecNet"></a>RecNet</h2><img src="/images/GFRNet_Rec.jpg" alt="RecNet模型示意图">
<p>　　对于RecNet来说，输入的$I^d, I^g$和输出的$\hat I$有相同的姿势。RecNet的encoder和decoder和WarpNet的结构是一样的。RecNet特殊的跳跃连接使得重建图片有丰富的细节。整个RecNet的作用用函数可以表示为:</p>
<script type="math/tex; mode=display">\hat I = F_{r}(I^d, I^w; \Theta_{r})</script><p>$\Theta_{r}$是RecNet模型参数。</p>
<h2 id="Degradation-Model-and-Synthetic-Training-Data"><a href="#Degradation-Model-and-Synthetic-Training-Data" class="headerlink" title="Degradation Model and Synthetic Training Data"></a>Degradation Model and Synthetic Training Data</h2><p>　　采用退化模型生成降质的图片$I^{d,s}$<br><img src="/images/equation.jpg" alt="退化模型公式"><br><!-- $$I^{d,s} = ((I + K_{\varrho})\downarrow_{s} + n_{\sigma})_{JPEG}$$ --><br>$k_{\varrho}$ 是标准差为 $\varrho$ 的高斯卷积核。 $\downarrow_{s}$ 下采样操作，s是缩放因子。$n_{\sigma}$是添加的噪声级别为$\sigma$的高斯白噪声。JPEG是质量因子为q的JPEG压缩操作。由于下采样改变了$I^{d,s}$的大小，导致与$I^{g}$的大小不一致所以还需要双立方差值进行上采样。$I^d = (I^{d,s}\uparrow_{s})$, 将$I^d, I^g$作为GFRNet的输入。</p>
<ul>
<li>Blur kernel: set $\varrho \in \{0, 1:0.1:3\}$</li>
<li>Downsampler: set $s \in \{1:0.1:8\}$</li>
<li>Noise: set $\sigma \in \{0:1:7\}$</li>
<li>JPEG compression: set $q \in \{0, 10:1:40\}$</li>
</ul>
<p>　　此外采用了人脸对齐方法(TCDCN)提取landmark。$I_i$的landmark为$\{(x_j^{I_i},y_j^{I_i})\}_{j=1}^{68}$, $I_i^g$的landmark为$\{(x_j^{I_i^g},y_j^{I_i^g})\}_{j=1}^{68}$。因此训练集可以表示为$X = \{(I_i, I_i^g, I_i^d, \{(x_j^{I_i},y_j^{I_i})\}_{j=1}^{68}, \{(x_j^{I_i^g},y_j^{I_i^g})\}_{j=1}^{68})\}_{i=1}^N$.</p>
<h1 id="Training-Detail"><a href="#Training-Detail" class="headerlink" title="Training Detail"></a>Training Detail</h1><h2 id="Losses-on-Restoration-Result-hat-I"><a href="#Losses-on-Restoration-Result-hat-I" class="headerlink" title="Losses on Restoration Result $\hat I$."></a>Losses on Restoration Result $\hat I$.</h2><p>　　adversarial loss 使得重建的模型有更好的视觉效果。reconstruction loss包括perceptual loss 和 MSELoss.</p>
<h3 id="Reconstruct-loss"><a href="#Reconstruct-loss" class="headerlink" title="Reconstruct loss"></a>Reconstruct loss</h3><script type="math/tex; mode=display">l_{r}^{0}(I, \hat I) = \parallel I-\hat I \parallel ^{2}</script><script type="math/tex; mode=display">l_{p}^{\varphi,l}(I, \hat I) = \frac{1}{C_{l}H_{l}W_{l}} \parallel \varphi_{l}(\hat I) - \varphi_{l}(I) \parallel_{2}^{2}</script><script type="math/tex; mode=display">L_{r}(I, \hat I) = \lambda_{r,0}l_{r}^{0}(I,\hat I) + \lambda_{r,l}l_{p}^{\varphi,l}(I,\hat I)</script><p>$\varphi$是VGG-Face网络。$\lambda_{r,0}$和$\lambda_{r,l}$是$l_2$loss和perceptual loss的系数。</p>
<h3 id="Adversarial-Loss"><a href="#Adversarial-Loss" class="headerlink" title="Adversarial Loss"></a>Adversarial Loss</h3><script type="math/tex; mode=display">L_{a} = \lambda_{a,g}l_{a,g} + \lambda_{a,l}l_{a,l}</script><p>$\lambda_{a,g}$和$\lambda_{a,l}$是全局对抗损失和局部对抗损失的系数。</p>
<h2 id="Losses-on-Flow-Field-Phi"><a href="#Losses-on-Flow-Field-Phi" class="headerlink" title="Losses on Flow Field $\Phi$"></a>Losses on Flow Field $\Phi$</h2><h3 id="Landmark-loss"><a href="#Landmark-loss" class="headerlink" title="Landmark loss"></a>Landmark loss</h3><p>　　stn不适合于流场，采用landmark来对齐。</p>
<script type="math/tex; mode=display">l_{lm} = \sum(\Phi_{x}(x_{i}^{I},y_{i}^{I}) - x_{i}^{I^{g}})^2 + (\Phi_y(x_{i}^{I},y_{i}^{I})-y_{i}^{I^g})^2</script><h3 id="TV-regulrization"><a href="#TV-regulrization" class="headerlink" title="TV regularization"></a>TV regularization</h3><script type="math/tex; mode=display">l_{TV} = \parallel \nabla_{x}\Phi_{x} \parallel^2 + \parallel \nabla_{y}\Phi_{x} \parallel^2 + \parallel \nabla_{x}\Phi_{y} \parallel^2 + \parallel \nabla_{y}\Phi_{y} \parallel^2</script><script type="math/tex; mode=display">L_{flow} = \lambda_{lm}l_{lm} + \lambda_{TV}l_{TV}</script><h2 id="Overall-Loss"><a href="#Overall-Loss" class="headerlink" title="Overall Loss"></a>Overall Loss</h2><script type="math/tex; mode=display">L = L_r + L_a + L_{flow}</script><h2 id="Parameters-setting"><a href="#Parameters-setting" class="headerlink" title="Parameters setting"></a>Parameters setting</h2><p><center>Adam with lr of $2 \times 10^{-4}, 2 \times 10^{-5}, 2 \times 10^{-6}$, $\beta_{1}=0.5$</center></p>
<script type="math/tex; mode=display">\lambda_{r,0} = 100, \lambda_{r,l}=0.001, \lambda_{a,g}=1, \lambda_{a,l}=0.5, \lambda_{lm}=10, \lambda_{TV}=1</script><script type="math/tex; mode=display">batchSize=1</script><script type="math/tex; mode=display">epochs=100</script><ul>
<li>模型使用Adam算法来训练，具体参数见文章。在每一个学习率上，模型会被训练直至reconstruction loss不会再下降。然后一个更小的学习率会被采用来精调模型。各个tradeoff参数见文章。</li>
<li>我们首先会预训练WarpNet，5个epochs（最小化flow loss），然后再通过最小化整体loss来以端到端的形式训练整个网络。</li>
<li>batch size为1且训练在100个epoch后停止。在训练过程中也采用了一些数据增广方式如翻转。</li>
</ul>
<h2 id="Dataset"><a href="#Dataset" class="headerlink" title="Dataset"></a>Dataset</h2><ul>
<li>采用CASIA-WebFace和VggFace2来构建训练集和测试集。WebFace包含10575个身份的人脸且每一个人大约有46张图片，大小均为256*256.VggFace2包含9131个身份的人脸（8631用来做训练，500用来测试）且每一个人平均有362张图片（不同大小的）。这些图片可以说比较全面的覆盖了各种姿态、年龄、光照强度和表情。</li>
<li>对于每一个人来说，最多会选择5张高质量图片，在这其中会选出一张正面睁开眼睛的图片作为guided image，其他的会被作为目标图片来生成低清图片。通过这种方式，我们构建了包含20273对目标图像和guided image的训练集（从VggFace2的训练集中得来）</li>
<li>我们的测试集包含两个子集：<ul>
<li>1005对来自VggFace2的的测试集；</li>
<li>1455对来自WebFace。</li>
</ul>
</li>
</ul>
<h1 id="Challenge"><a href="#Challenge" class="headerlink" title="Challenge"></a>Challenge</h1><p>　　WarpNet的训练。不使用stn，使用TCDCN寻找图片的特征点，引入landmark loss. 但是我没有找到TCDCN PyTorch 的pretrained model。TCDCN官方只提供Windows二进制文件和MATLAB实现版本。会是一个比较大的挑战。</p>
<p>　　经过进一步的尝试，由于还未研究人脸对齐，尝试使用dlib进行landmark检测，但是有些图片检测不出来landmark。论文也没有很详细的描述如何训练WarpNet.暂时放下这个研究。</p>
<h1 id="PyTorch-Code"><a href="#PyTorch-Code" class="headerlink" title="PyTorch Code"></a>PyTorch Code</h1><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span 
class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br><span class="line">68</span><br><span class="line">69</span><br><span class="line">70</span><br><span class="line">71</span><br><span class="line">72</span><br><span class="line">73</span><br><span class="line">74</span><br><span class="line">75</span><br><span class="line">76</span><br><span class="line">77</span><br><span class="line">78</span><br><span class="line">79</span><br><span class="line">80</span><br><span class="line">81</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> torch</span><br><span class="line"><span class="keyword">import</span> torch.nn <span class="keyword">as</span> nn</span><br><span class="line"></span><br><span class="line"><span class="keyword">from</span> torchvision.models.vgg <span class="keyword">import</span> vgg16</span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">Vgg_4</span><span class="params">(nn.Module)</span>:</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">__init__</span><span class="params">(self)</span>:</span></span><br><span class="line">        super(Vgg_4, self).__init__()</span><br><span class="line">        vgg = vgg16(pretrained=<span class="literal">True</span>)</span><br><span class="line">        vgg_4 = nn.Sequential(*list(vgg.features)[:<span class="number">8</span>]).eval()</span><br><span class="line">        <span class="keyword">for</span> param <span class="keyword">in</span> vgg_4:</span><br><span class="line">            param.requires_grad=<span class="literal">False</span></span><br><span class="line">        self.vgg_4 = 
vgg_4</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">forward</span><span class="params">(self, x)</span>:</span></span><br><span class="line">        <span class="keyword">return</span> self.vgg_4(x)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">WarpNet</span><span class="params">(nn.Module)</span>:</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">__init__</span><span class="params">(self)</span>:</span></span><br><span class="line">        super(WarpNet, self).__init__()</span><br><span class="line">        encoder = []</span><br><span class="line">        eight_layers = [<span class="number">64</span>, <span class="number">128</span>, <span class="number">256</span>, <span class="number">512</span>, <span class="number">1024</span>, <span class="number">1024</span>, <span class="number">1024</span>, <span class="number">1024</span>]</span><br><span class="line">        <span class="keyword">for</span> i <span class="keyword">in</span> range(<span class="number">1</span>, len(eight_layers)):</span><br><span class="line">            encoder.append(nn.Sequential(nn.Conv2d(eight_layers[i<span class="number">-1</span>], eight_layers[i], kernel_size=<span class="number">4</span>, stride=<span class="number">2</span>, padding=<span class="number">1</span>), nn.BatchNorm2d(eight_layers[i]), nn.ReLU(<span class="literal">True</span>)))</span><br><span class="line"></span><br><span class="line">        eight_layers.reverse()</span><br><span class="line">        decoder = []</span><br><span class="line">        <span class="keyword">for</span> i <span class="keyword">in</span> range(<span class="number">1</span>, len(eight_layers)):</span><br><span class="line">            
decoder.append(nn.Sequential(nn.ConvTranspose2d(eight_layers[i<span class="number">-1</span>], eight_layers[i], kernel_size=<span class="number">4</span>, stride=<span class="number">2</span>, padding=<span class="number">1</span>), nn.BatchNorm2d(eight_layers[i]), nn.ReLU(<span class="literal">True</span>)))</span><br><span class="line">        self.first = nn.Sequential(nn.Conv2d(<span class="number">6</span>, <span class="number">64</span>, kernel_size=<span class="number">4</span>, stride=<span class="number">2</span>, padding=<span class="number">1</span>), nn.ReLU(<span class="literal">True</span>))</span><br><span class="line">        self.encoder = nn.Sequential(*encoder)</span><br><span class="line">        self.decoder = nn.Sequential(*decoder)</span><br><span class="line">        self.last = nn.Sequential(nn.ConvTranspose2d(<span class="number">64</span>, <span class="number">2</span>, kernel_size=<span class="number">4</span>, stride=<span class="number">2</span>, padding=<span class="number">1</span>), nn.Tanh())</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">forward</span><span class="params">(self, blur, guide)</span>:</span></span><br><span class="line">        pair = torch.cat([blur, guide], <span class="number">1</span>)</span><br><span class="line">        grid = self.first(pair)</span><br><span class="line">        grid = self.encoder(grid)</span><br><span class="line">        grid = self.decoder(grid)</span><br><span class="line">        grid = self.last(grid)</span><br><span class="line">        <span class="keyword">return</span> grid</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">RecNet</span><span class="params">(nn.Module)</span>:</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span 
class="title">__init__</span><span class="params">(self)</span>:</span></span><br><span class="line">        super(RecNet, self).__init__()</span><br><span class="line">        encoder = []</span><br><span class="line">        eight_layers = [<span class="number">64</span>, <span class="number">128</span>, <span class="number">256</span>, <span class="number">512</span>, <span class="number">1024</span>, <span class="number">1024</span>, <span class="number">1024</span>, <span class="number">1024</span>]</span><br><span class="line">        <span class="keyword">for</span> i <span class="keyword">in</span> range(<span class="number">1</span>, len(eight_layers)):</span><br><span class="line">            encoder.append(nn.Sequential(nn.Conv2d(eight_layers[i<span class="number">-1</span>], eight_layers[i], kernel_size=<span class="number">4</span>, stride=<span class="number">2</span>, padding=<span class="number">1</span>), nn.BatchNorm2d(eight_layers[i]), nn.ReLU(<span class="literal">True</span>)))</span><br><span class="line"></span><br><span class="line">        decoder = []</span><br><span class="line">        decoder.append(nn.Sequential(nn.ConvTranspose2d(<span class="number">1024</span>, <span class="number">1024</span>, kernel_size=<span class="number">4</span>, stride=<span class="number">2</span>, padding=<span class="number">1</span>), nn.BatchNorm2d(<span class="number">1024</span>)))</span><br><span class="line">        decoder.append(nn.Sequential(nn.ConvTranspose2d(<span class="number">2048</span>, <span class="number">1024</span>, kernel_size=<span class="number">4</span>, stride=<span class="number">2</span>, padding=<span class="number">1</span>), nn.BatchNorm2d(<span class="number">1024</span>)))</span><br><span class="line">        decoder.append(nn.Sequential(nn.ConvTranspose2d(<span class="number">2048</span>, <span class="number">1024</span>, kernel_size=<span class="number">4</span>, stride=<span class="number">2</span>, padding=<span 
class="number">1</span>), nn.BatchNorm2d(<span class="number">1024</span>)))</span><br><span class="line">        decoder.append(nn.Sequential(nn.ConvTranspose2d(<span class="number">2048</span>, <span class="number">512</span>, kernel_size=<span class="number">4</span>, stride=<span class="number">2</span>, padding=<span class="number">1</span>), nn.BatchNorm2d(<span class="number">512</span>)))</span><br><span class="line">        decoder.append(nn.Sequential(nn.ConvTranspose2d(<span class="number">1024</span>, <span class="number">256</span>, kernel_size=<span class="number">4</span>, stride=<span class="number">2</span>, padding=<span class="number">1</span>), nn.BatchNorm2d(<span class="number">256</span>)))</span><br><span class="line">        decoder.append(nn.Sequential(nn.ConvTranspose2d(<span class="number">512</span>, <span class="number">128</span>, kernel_size=<span class="number">4</span>, stride=<span class="number">2</span>, padding=<span class="number">1</span>), nn.BatchNorm2d(<span class="number">128</span>)))</span><br><span class="line">        decoder.append(nn.Sequential(nn.ConvTranspose2d(<span class="number">256</span>, <span class="number">64</span>, kernel_size=<span class="number">4</span>, stride=<span class="number">2</span>, padding=<span class="number">1</span>), nn.BatchNorm2d(<span class="number">64</span>)))</span><br><span class="line">        <span class="comment"># the last layer</span></span><br><span class="line">        self.first = nn.Sequential(nn.Conv2d(<span class="number">6</span>, <span class="number">64</span>, kernel_size=<span class="number">4</span>, stride=<span class="number">2</span>, padding=<span class="number">1</span>), nn.ReLU(<span class="literal">True</span>))</span><br><span class="line">        self.encoder = nn.Sequential(*encoder)</span><br><span class="line">        self.decoder = nn.Sequential(*decoder)</span><br><span class="line">        self.last = nn.Sequential(nn.ConvTranspose2d(<span 
class="number">128</span>, <span class="number">3</span>, kernel_size=<span class="number">4</span>, stride=<span class="number">2</span>, padding=<span class="number">1</span>), nn.ReLU(<span class="literal">True</span>))</span><br><span class="line">        self.relu = nn.ReLU(<span class="literal">True</span>)</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">forward</span><span class="params">(self, x)</span>:</span></span><br><span class="line">        x = self.first(x)</span><br><span class="line">        skip_connect = []</span><br><span class="line">        <span class="keyword">for</span> i <span class="keyword">in</span> self.encoder:</span><br><span class="line">            skip_connect.append(x)</span><br><span class="line">            x = i(x)</span><br><span class="line">        <span class="keyword">for</span> j <span class="keyword">in</span> self.decoder:</span><br><span class="line">            x = j(x)</span><br><span class="line">            x = torch.cat([x, skip_connect.pop()], <span class="number">1</span>)</span><br><span class="line">            <span class="comment"># 级联之后在跟上一个激活函数</span></span><br><span class="line">            x = self.relu(x)</span><br><span class="line">        x = self.last(x)</span><br><span class="line">        <span class="keyword">return</span> x</span><br><span class="line"></span><br><span class="line"></span><br></pre></td></tr></table></figure>
<h1 id="Reference"><a href="#Reference" class="headerlink" title="Reference"></a>Reference</h1><p><a href="http://openaccess.thecvf.com/content_ECCV_2018/papers/Xiaoming_Li_Learning_Warped_Guidance_ECCV_2018_paper.pdf" target="_blank" rel="noopener">论文地址</a><br><a href="https://github.com/sonack/GFRNet_pytorch_new" target="_blank" rel="noopener">参考代码</a><br><a href="https://zhuanlan.zhihu.com/p/40601190" target="_blank" rel="noopener">https://zhuanlan.zhihu.com/p/40601190</a></p>

    </div>

    
    
    

      <footer class="post-footer">

        


        
    <div class="post-nav">
      <div class="post-nav-item">
    <a href="/2019/12/24/SRCNN/" rel="prev" title="SRCNN">
      <i class="fa fa-chevron-left"></i> SRCNN
    </a></div>
      <div class="post-nav-item">
    <a href="/2019/12/30/Super-FAN/" rel="next" title="Super-FAN">
      Super-FAN <i class="fa fa-chevron-right"></i>
    </a></div>
    </div>
      </footer>
    
  </article>
  
  
  

  </div>


          </div>
          

<script>
  window.addEventListener('tabs:register', () => {
    let activeClass = CONFIG.comments.activeClass;
    if (CONFIG.comments.storage) {
      activeClass = localStorage.getItem('comments_active') || activeClass;
    }
    if (activeClass) {
      let activeTab = document.querySelector(`a[href="#comment-${activeClass}"]`);
      if (activeTab) {
        activeTab.click();
      }
    }
  });
  if (CONFIG.comments.storage) {
    window.addEventListener('tabs:click', event => {
      if (!event.target.matches('.tabs-comment .tab-content .tab-pane')) return;
      let commentClass = event.target.classList[1];
      localStorage.setItem('comments_active', commentClass);
    });
  }
</script>

        </div>
          
  
  <div class="toggle sidebar-toggle">
    <span class="toggle-line toggle-line-first"></span>
    <span class="toggle-line toggle-line-middle"></span>
    <span class="toggle-line toggle-line-last"></span>
  </div>

  <aside class="sidebar">
    <div class="sidebar-inner">

      <ul class="sidebar-nav motion-element">
        <li class="sidebar-nav-toc">
          Table of Contents
        </li>
        <li class="sidebar-nav-overview">
          Overview
        </li>
      </ul>

      <!--noindex-->
      <!-- Auto-generated table of contents for this post (generated by Hexo,
           see the "generator" meta in <head>). Each nav-link href targets a
           heading id rendered in the post body, so the anchor fragments (and
           any typos they contain, e.g. "TV-regulrization") must not be edited
           by hand — they would stop matching their headings. Regenerate the
           page instead. -->
      <div class="post-toc-wrap sidebar-panel">
          <div class="post-toc motion-element"><ol class="nav"><li class="nav-item nav-level-1"><a class="nav-link" href="#Overview"><span class="nav-number">1.</span> <span class="nav-text">Overview</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#Model"><span class="nav-number">2.</span> <span class="nav-text">Model</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#WarpNet"><span class="nav-number">2.1.</span> <span class="nav-text">WarpNet</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#RecNet"><span class="nav-number">2.2.</span> <span class="nav-text">RecNet</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#Degradation-Model-and-Synthetic-Training-Data"><span class="nav-number">2.3.</span> <span class="nav-text">Degradation Model and Synthetic Training Data</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#Training-Detail"><span class="nav-number">3.</span> <span class="nav-text">Training Detail</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#Losses-on-Restoration-Result-hat-I"><span class="nav-number">3.1.</span> <span class="nav-text">Losses on Restoration Result $\hat I$.</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#Reconstruct-loss"><span class="nav-number">3.1.1.</span> <span class="nav-text">Reconstruct loss</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#Adversarial-Loss"><span class="nav-number">3.1.2.</span> <span class="nav-text">Adversarial Loss</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#Losses-on-Flow-Field-Phi"><span class="nav-number">3.2.</span> <span class="nav-text">Losses on Flow Field $\Phi$</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#Landmark-loss"><span 
class="nav-number">3.2.1.</span> <span class="nav-text">Landmark loss</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#TV-regulrization"><span class="nav-number">3.2.2.</span> <span class="nav-text">TV regulrization</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#Overall-Loss"><span class="nav-number">3.3.</span> <span class="nav-text">Overall Loss</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#Parameters-setting"><span class="nav-number">3.4.</span> <span class="nav-text">Parameters setting</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#Dataset"><span class="nav-number">3.5.</span> <span class="nav-text">Dataset</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#Challenge"><span class="nav-number">4.</span> <span class="nav-text">Challenge</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#PyTorch-Code"><span class="nav-number">5.</span> <span class="nav-text">PyTorch Code</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#Reference"><span class="nav-number">6.</span> <span class="nav-text">Reference</span></a></li></ol></div>
      </div>
      <!--/noindex-->

      <!-- Sidebar site-overview panel: author card (schema.org Person microdata),
           post/category/tag counters, author links, and blogroll. -->
      <div class="site-overview-wrap sidebar-panel">
        <div class="site-author motion-element" itemprop="author" itemscope itemtype="http://schema.org/Person">
          <p class="site-author-name" itemprop="name">Z.J. Jiang</p>
          <div class="site-description" itemprop="description">about the single image super-resolution and face hallucination</div>
        </div>
        <div class="site-state-wrap motion-element">
          <!-- aria-label distinguishes this nav from the page's other navigation landmarks -->
          <nav class="site-state" aria-label="Site statistics">
            <div class="site-state-item site-state-posts">
              <a href="/archives/">
                <span class="site-state-item-count">42</span>
                <span class="site-state-item-name">posts</span>
              </a>
            </div>
            <div class="site-state-item site-state-categories">
              <a href="/categories/">
                <span class="site-state-item-count">11</span>
                <span class="site-state-item-name">categories</span>
              </a>
            </div>
            <div class="site-state-item site-state-tags">
              <a href="/tags/">
                <span class="site-state-item-count">1</span>
                <span class="site-state-item-name">tags</span>
              </a>
            </div>
          </nav>
        </div>
        <div class="links-of-author motion-element">
          <span class="links-of-author-item">
            <!-- icons are decorative (link text carries the meaning), so hide them from AT -->
            <a href="https://github.com/jzijin" title="GitHub → https:&#x2F;&#x2F;github.com&#x2F;jzijin" rel="noopener" target="_blank"><i class="fa fa-fw fa-github" aria-hidden="true"></i>GitHub</a>
          </span>
          <span class="links-of-author-item">
            <a href="/atom.xml" title="RSS → &#x2F;atom.xml"><i class="fa fa-fw fa-rss" aria-hidden="true"></i>RSS</a>
          </span>
        </div>
        <div class="links-of-blogroll motion-element">
          <div class="links-of-blogroll-title">
            <i class="fa fa-fw fa-link" aria-hidden="true"></i>
            Links
          </div>
          <ul class="links-of-blogroll-list">
            <li class="links-of-blogroll-item">
              <a href="http://www.njust.edu.cn/" title="http:&#x2F;&#x2F;www.njust.edu.cn" rel="noopener" target="_blank">南京理工大学</a>
            </li>
            <li class="links-of-blogroll-item">
              <a href="http://ehall.njust.edu.cn/new/index.html" title="http:&#x2F;&#x2F;ehall.njust.edu.cn&#x2F;new&#x2F;index.html" rel="noopener" target="_blank">南京理工大学智慧服务</a>
            </li>
            <li class="links-of-blogroll-item">
              <a href="http://lib.njust.edu.cn/" title="http:&#x2F;&#x2F;lib.njust.edu.cn&#x2F;" rel="noopener" target="_blank">南京理工大学图书馆</a>
            </li>
          </ul>
        </div>
      </div>

    </div>
  </aside>
  <div id="sidebar-dimmer"></div>


      </div>
    </main>

    <!-- Page footer: copyright line plus generator/theme credits. -->
    <footer class="footer">
      <div class="footer-inner">
        <div class="copyright">
          &copy;
          <span itemprop="copyrightYear">2022</span>
          <span class="with-love">
            <!-- purely decorative icon; hidden from assistive technology -->
            <i class="fa fa-user" aria-hidden="true"></i>
          </span>
          <span class="author" itemprop="copyrightHolder">Z.J. Jiang</span>
        </div>
        <div class="powered-by">Powered by <a href="https://hexo.io/" class="theme-link" rel="noopener" target="_blank">Hexo</a> v4.2.0</div>
        <!-- visual separator only -->
        <span class="post-meta-divider" aria-hidden="true">|</span>
        <div class="theme-info">Theme – <a href="https://pisces.theme-next.org/" class="theme-link" rel="noopener" target="_blank">NexT.Pisces</a> v7.6.0</div>
      </div>
    </footer>
  </div>

  
  <!-- Theme JavaScript, placed at the end of <body>. These are classic
       (non-defer) scripts, so they execute in document order: animation
       libraries first, then the NexT utilities/motion/scheme scripts,
       with next-boot.js last. -->
  <script src="/lib/anime.min.js"></script>
  <script src="/lib/velocity/velocity.min.js"></script>
  <script src="/lib/velocity/velocity.ui.min.js"></script>

<script src="/js/utils.js"></script>

<script src="/js/motion.js"></script>


<script src="/js/schemes/pisces.js"></script>


<script src="/js/next-boot.js"></script>




  















  

  
      
<script type="text/x-mathjax-config">
  // MathJax v2 configuration. The "text/x-mathjax-config" type prevents the
  // browser from executing this block; MathJax reads and runs it when loaded.

  // Load the mhchem extension over explicit HTTPS (the original used a
  // protocol-relative "//" URL, which fails when the page is opened via file://).
  MathJax.Ajax.config.path['mhchem'] = 'https://cdn.jsdelivr.net/npm/mathjax-mhchem@3';

  MathJax.Hub.Config({
    tex2jax: {
      // Recognize $...$ and \( ... \) as inline math; processEscapes lets \$
      // produce a literal dollar sign. Skip tags whose content is never math.
      inlineMath: [ ['$', '$'], ['\\(', '\\)'] ],
      processEscapes: true,
      skipTags: ['script', 'noscript', 'style', 'textarea', 'pre', 'code']
    },
    TeX: {
      extensions: ['[mhchem]/mhchem.js'],
      equationNumbers: {
        autoNumber: 'AMS'   // number display equations AMS-style
      }
    }
  });

  // After the TeX input jax is ready, strip the <br> that immediately follows
  // a display equation (presumably inserted by the Markdown renderer — confirm),
  // so block math is not followed by a spurious blank line.
  MathJax.Hub.Register.StartupHook('TeX Jax Ready', function() {
    MathJax.InputJax.TeX.prefilterHooks.Add(function(data) {
      if (data.display) {
        var next = data.script.nextSibling;
        // Skip whitespace text nodes between the math script and the <br>.
        while (next && next.nodeName.toLowerCase() === '#text') {
          next = next.nextSibling;
        }
        if (next && next.nodeName.toLowerCase() === 'br') {
          next.parentNode.removeChild(next);
        }
      }
    });
  });

  // Tag the container of every typeset formula with .has-jax (lifted to the
  // parent list when the formula sits directly in an <li>) for CSS styling.
  MathJax.Hub.Queue(function() {
    var all = MathJax.Hub.getAllJax();
    // BUGFIX: `element` was previously assigned without `var`, creating an
    // implicit global; also use strict equality for the nodeName comparison.
    for (var i = 0; i < all.length; i += 1) {
      var element = document.getElementById(all[i].inputID + '-Frame').parentNode;
      if (element.nodeName.toLowerCase() === 'li') {
        element = element.parentNode;
      }
      element.classList.add('has-jax');
    }
  });
</script>
<script>
  // Fetch MathJax v2 over explicit HTTPS (the original protocol-relative "//"
  // URL fails when the page is opened via file://) and typeset once loaded.
  // NOTE(review): the third argument appears to make getScript skip the fetch
  // when window.MathJax already exists — confirm against /js/utils.js.
  NexT.utils.getScript('https://cdn.jsdelivr.net/npm/mathjax@2/MathJax.js?config=TeX-AMS-MML_HTMLorMML', () => {
    MathJax.Hub.Typeset();
  }, window.MathJax);
</script>

    

  

</body>
</html>
