<!DOCTYPE HTML>
<html lang="zh-CN">


<head>
    <meta charset="utf-8">
    <meta name="keywords" content="Transformer, VIT and TransGAN, study">
    <meta name="description" content="关于Transformer, VIT 和 TransGAN的内容整理。">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <!-- user-scalable=no removed: disabling pinch-zoom is a WCAG 1.4.4 accessibility failure -->
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <meta name="renderer" content="webkit|ie-stand|ie-comp">
    <meta name="mobile-web-app-capable" content="yes">
    <meta name="format-detection" content="telephone=no">
    <meta name="apple-mobile-web-app-capable" content="yes">
    <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
    <meta name="referrer" content="no-referrer-when-downgrade">
    <!-- Global site tag (gtag.js) - Google Analytics -->


    <title>Transformer, VIT and TransGAN | 今夕清欢</title>

    <!-- type="text/css" omitted on stylesheet links: it is the default and redundant -->
    <link rel="stylesheet" href="/libs/artitalk/artitalk.min.css">


    <link rel="icon" type="image/png" href="/favicon.png">

    <link rel="stylesheet" href="/libs/awesome/css/all.min.css">
    <link rel="stylesheet" href="/libs/materialize/materialize.min.css">
    <link rel="stylesheet" href="/libs/aos/aos.css">
    <link rel="stylesheet" href="/libs/animate/animate.min.css">
    <link rel="stylesheet" href="/libs/lightGallery/css/lightgallery.min.css">
    <link rel="stylesheet" href="/css/matery.css">
    <link rel="stylesheet" href="/css/my.css">

    <!-- jQuery loaded synchronously: theme scripts later in the page depend on it at parse time -->
    <script src="/libs/jquery/jquery-3.6.0.min.js"></script>

<meta name="generator" content="Hexo 5.4.0">
<style>.github-emoji { position: relative; display: inline-block; width: 1.2em; min-height: 1.2em; overflow: hidden; vertical-align: top; color: transparent; }  .github-emoji > span { position: relative; z-index: 10; }  .github-emoji img, .github-emoji .fancybox { margin: 0 !important; padding: 0 !important; border: none !important; outline: none !important; text-decoration: none !important; user-select: none !important; cursor: auto !important; }  .github-emoji img { height: 1.2em !important; width: 1.2em !important; position: absolute !important; left: 50% !important; top: 50% !important; transform: translate(-50%, -50%) !important; user-select: none !important; cursor: auto !important; } .github-emoji-fallback { color: inherit; } .github-emoji-fallback img { opacity: 0 !important; }</style>
<link rel="alternate" href="/atom.xml" title="今夕清欢" type="application/atom+xml">
</head>




<body>
    <header class="navbar-fixed">
    <nav id="headNav" class="bg-color nav-transparent">
        <div id="navContainer" class="nav-wrapper container">
            <div class="brand-logo">
                <a href="/" class="waves-effect waves-light">
                    
                    <img src="/medias/logo.png" class="logo-img" alt="LOGO">
                    
                    <span class="logo-span">今夕清欢</span>
                </a>
            </div>
            

<a href="#" data-target="mobile-nav" class="sidenav-trigger button-collapse"><i class="fas fa-bars"></i></a>
<ul class="right nav-menu">
  
  <li class="hide-on-med-and-down nav-item">
    
    <a href="/" class="waves-effect waves-light">
      
      <i class="fas fa-home" style="zoom: 0.6;"></i>
      
      <span>首页</span>
    </a>
    
  </li>
  
  <li class="hide-on-med-and-down nav-item">
    
    <a href="/tags" class="waves-effect waves-light">
      
      <i class="fas fa-tags" style="zoom: 0.6;"></i>
      
      <span>标签</span>
    </a>
    
  </li>
  
  <li class="hide-on-med-and-down nav-item">
    
    <a href="/categories" class="waves-effect waves-light">
      
      <i class="fas fa-bookmark" style="zoom: 0.6;"></i>
      
      <span>分类</span>
    </a>
    
  </li>
  
  <li class="hide-on-med-and-down nav-item">
    
    <a href="/archives" class="waves-effect waves-light">
      
      <i class="fas fa-archive" style="zoom: 0.6;"></i>
      
      <span>归档</span>
    </a>
    
  </li>
  
  <li class="hide-on-med-and-down nav-item">
    
    <a href="/about" class="waves-effect waves-light">
      
      <i class="fas fa-user-circle" style="zoom: 0.6;"></i>
      
      <span>关于</span>
    </a>
    
  </li>
  
  <li>
    <a href="#searchModal" class="modal-trigger waves-effect waves-light">
      <i id="searchIcon" class="fas fa-search" title="搜索" style="zoom: 0.85;"></i>
    </a>
  </li>
</ul>


<div id="mobile-nav" class="side-nav sidenav">

    <!-- Drawer header: blog identity (logo, name, tagline). -->
    <div class="mobile-head bg-color">
        
        <!-- alt added: image previously had no text alternative -->
        <img src="/medias/logo.png" class="logo-img circle responsive-img" alt="LOGO">
        
        <div class="logo-name">今夕清欢</div>
        <div class="logo-desc">
            
            Never really desperate, only the lost of the soul.
            
        </div>
    </div>

    <!-- Mobile navigation menu; mirrors the desktop nav items. -->
    <ul class="menu-list mobile-menu-list">
        
        <li class="m-nav-item">
	  
		<a href="/" class="waves-effect waves-light">
			
			    <i class="fa-fw fas fa-home"></i>
			
			首页
		</a>
          
        </li>
        
        <li class="m-nav-item">
	  
		<a href="/tags" class="waves-effect waves-light">
			
			    <i class="fa-fw fas fa-tags"></i>
			
			标签
		</a>
          
        </li>
        
        <li class="m-nav-item">
	  
		<a href="/categories" class="waves-effect waves-light">
			
			    <i class="fa-fw fas fa-bookmark"></i>
			
			分类
		</a>
          
        </li>
        
        <li class="m-nav-item">
	  
		<a href="/archives" class="waves-effect waves-light">
			
			    <i class="fa-fw fas fa-archive"></i>
			
			归档
		</a>
          
        </li>
        
        <li class="m-nav-item">
	  
		<a href="/about" class="waves-effect waves-light">
			
			    <i class="fa-fw fas fa-user-circle"></i>
			
			关于
		</a>
          
        </li>
        
        
        <li><div class="divider"></div></li>
        <li>
            <!-- rel="noopener noreferrer" added: external link opened in a new tab -->
            <a href="https://github.com/blinkfox/hexo-theme-matery" class="waves-effect waves-light" target="_blank" rel="noopener noreferrer">
                <i class="fab fa-github-square fa-fw"></i>Fork Me
            </a>
        </li>
        
    </ul>
</div>


        </div>

        
            <style>
    /* Hide the GitHub corner ribbon while the navbar is in its transparent
       (top-of-page) state; it reappears once the nav gains a background. */
    .nav-transparent .github-corner {
        display: none !important;
    }

    /* Pin the ribbon to the top-right corner of the fixed header. */
    .github-corner {
        position: absolute;
        z-index: 10;
        top: 0;
        right: 0;
        border: 0;
        transform: scale(1.1);
    }

    /* Ribbon colors: green octocat on a white triangle, 64px square. */
    .github-corner svg {
        color: #0f9d58;
        fill: #fff;
        height: 64px;
        width: 64px;
    }

    /* Play the arm-wave animation (keyframes "a" below) on hover only. */
    .github-corner:hover .octo-arm {
        animation: a 0.56s ease-in-out;
    }

    .github-corner .octo-arm {
        animation: none;
    }

    /* Arm wave: rock the octocat arm between -25deg and 10deg and settle at 0. */
    @keyframes a {
        0%,
        to {
            transform: rotate(0);
        }
        20%,
        60% {
            transform: rotate(-25deg);
        }
        40%,
        80% {
            transform: rotate(10deg);
        }
    }
</style>

<!-- GitHub corner ribbon. rel="noopener noreferrer" added for the _blank target;
     aria-label added because the svg is aria-hidden, leaving the link nameless to AT. -->
<a href="https://github.com/blinkfox/hexo-theme-matery" class="github-corner tooltipped hide-on-med-and-down" target="_blank"
   rel="noopener noreferrer" aria-label="Fork me on GitHub"
   data-tooltip="Fork Me" data-position="left" data-delay="50">
    <svg viewBox="0 0 250 250" aria-hidden="true">
        <path d="M0,0 L115,115 L130,115 L142,142 L250,250 L250,0 Z"></path>
        <path d="M128.3,109.0 C113.8,99.7 119.0,89.6 119.0,89.6 C122.0,82.7 120.5,78.6 120.5,78.6 C119.2,72.0 123.4,76.3 123.4,76.3 C127.3,80.9 125.5,87.3 125.5,87.3 C122.9,97.6 130.6,101.9 134.4,103.2"
              fill="currentColor" style="transform-origin: 130px 106px;" class="octo-arm"></path>
        <path d="M115.0,115.0 C114.9,115.1 118.7,116.5 119.8,115.4 L133.7,101.6 C136.9,99.2 139.9,98.4 142.2,98.6 C133.8,88.0 127.5,74.4 143.8,58.0 C148.5,53.4 154.0,51.2 159.7,51.0 C160.3,49.4 163.2,43.6 171.4,40.1 C171.4,40.1 176.1,42.5 178.8,56.2 C183.1,58.6 187.2,61.8 190.9,65.4 C194.5,69.0 197.7,73.2 200.1,77.6 C213.8,80.2 216.3,84.9 216.3,84.9 C212.7,93.1 206.9,96.0 205.4,96.6 C205.1,102.4 203.0,107.8 198.3,112.5 C181.9,128.9 168.3,122.5 157.7,114.1 C157.9,116.9 156.7,120.9 152.7,124.9 L141.0,136.5 C139.8,137.7 141.6,141.9 141.8,141.8 Z"
              fill="currentColor" class="octo-body"></path>
    </svg>
</a>
        
    </nav>

</header>

    



<div class="bg-cover pd-header post-cover" style="background-image: url('/medias/featureimages/15.jpg')">
    <div class="container" style="right: 0px;left: 0px;">
        <div class="row">
            <div class="col s12 m12 l12">
                <div class="brand">
                    <h1 class="description center-align post-title">Transformer, VIT and TransGAN</h1>
                </div>
            </div>
        </div>
    </div>
</div>




<main class="post-container content">

    
    <link rel="stylesheet" href="/libs/tocbot/tocbot.css">
<style>
    #articleContent h1::before,
    #articleContent h2::before,
    #articleContent h3::before,
    #articleContent h4::before,
    #articleContent h5::before,
    #articleContent h6::before {
        display: block;
        content: " ";
        height: 100px;
        margin-top: -100px;
        visibility: hidden;
    }

    #articleContent :focus {
        outline: none;
    }

    .toc-fixed {
        position: fixed;
        top: 64px;
    }

    .toc-widget {
        width: 345px;
        padding-left: 20px;
    }

    .toc-widget .toc-title {
        padding: 35px 0 15px 17px;
        font-size: 1.5rem;
        font-weight: bold;
        line-height: 1.5rem;
    }

    .toc-widget ol {
        padding: 0;
        list-style: none;
    }

    #toc-content {
        padding-bottom: 30px;
        overflow: auto;
    }

    #toc-content ol {
        padding-left: 10px;
    }

    #toc-content ol li {
        padding-left: 10px;
    }

    #toc-content .toc-link:hover {
        color: #42b983;
        font-weight: 700;
        text-decoration: underline;
    }

    #toc-content .toc-link::before {
        background-color: transparent;
        max-height: 25px;

        position: absolute;
        right: 23.5vw;
        display: block;
    }

    #toc-content .is-active-link {
        color: #42b983;
    }

    #floating-toc-btn {
        position: fixed;
        right: 15px;
        bottom: 76px;
        padding-top: 15px;
        margin-bottom: 0;
        z-index: 998;
    }

    #floating-toc-btn .btn-floating {
        width: 48px;
        height: 48px;
    }

    #floating-toc-btn .btn-floating i {
        line-height: 48px;
        font-size: 1.4rem;
    }
</style>
<div class="row">
    <div id="main-content" class="col s12 m12 l9">
        <!-- 文章内容详情 -->
<div id="artDetail">
    <div class="card">
        <div class="card-content article-info">
            <div class="row tag-cate">
                <div class="col s7">
                    
                    <div class="article-tag">
                        
                            <a href="/tags/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0/">
                                <span class="chip bg-color">深度学习</span>
                            </a>
                        
                            <a href="/tags/%E8%AE%A1%E7%AE%97%E6%9C%BA%E8%A7%86%E8%A7%89/">
                                <span class="chip bg-color">计算机视觉</span>
                            </a>
                        
                            <a href="/tags/Transformer/">
                                <span class="chip bg-color">Transformer</span>
                            </a>
                        
                    </div>
                    
                </div>
                <div class="col s5 right-align">
                    
                    <div class="post-cate">
                        <i class="fas fa-bookmark fa-fw icon-category"></i>
                        
                            <a href="/categories/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0/" class="post-category">
                                机器学习
                            </a>
                        
                    </div>
                    
                </div>
            </div>

            <div class="post-info">
                
                <div class="post-date info-break-policy">
                    <i class="far fa-calendar-minus fa-fw"></i>发布日期:&nbsp;&nbsp;
                    2021-11-20
                </div>
                

                
                <div class="post-date info-break-policy">
                    <i class="far fa-calendar-check fa-fw"></i>更新日期:&nbsp;&nbsp;
                    2021-12-01
                </div>
                

                

                

                
            </div>
        </div>
        <hr class="clearfix">

        
        <!-- 是否加载使用自带的 prismjs. -->
        <link rel="stylesheet" href="/libs/prism/prism.css">
        

        

        <div class="card-content article-card-content">
            <div id="articleContent">
                <h1 id="注意力机制"><a href="#注意力机制" class="headerlink" title="注意力机制"></a>注意力机制</h1><p><a target="_blank" rel="noopener" href="https://zh-v2.d2l.ai/chapter_attention-mechanisms/index.html">10. 注意力机制 — 动手学深度学习 2.0.0-alpha2 documentation (d2l.ai)</a></p>
<h1 id="Transformer"><a href="#Transformer" class="headerlink" title="Transformer"></a>Transformer</h1><h2 id="一-概览"><a href="#一-概览" class="headerlink" title="一. 概览"></a>一. 概览</h2><p><img src="http://image.lijitao.top//20211116115554.png" alt="Transformer和它的延伸网络"></p>
<p>基于Transformer的另外两个比较著名的工作是Bert和ViT，Bert采用了更深的编码器，ViT是专门用于CV领域的Transformer，还有一个就是曹越的Swin Transformer。</p>
<p>图像生成：iGPT,  TransGAN</p>
<p><img src="http://image.lijitao.top//202111161211249.jpg" alt="Transformer基本结构"></p>
<pre class="line-numbers language-python" data-language="python"><code class="language-python"><span class="token comment">## 1. 从整体网路结构来看，分为三个部分：编码层，解码层，输出层</span>
<span class="token keyword">class</span> <span class="token class-name">Transformer</span><span class="token punctuation">(</span>nn<span class="token punctuation">.</span>Module<span class="token punctuation">)</span><span class="token punctuation">:</span>
    <span class="token keyword">def</span> <span class="token function">__init__</span><span class="token punctuation">(</span>self<span class="token punctuation">)</span><span class="token punctuation">:</span>
        <span class="token builtin">super</span><span class="token punctuation">(</span>Transformer<span class="token punctuation">,</span> self<span class="token punctuation">)</span><span class="token punctuation">.</span>__init__<span class="token punctuation">(</span><span class="token punctuation">)</span>
        self<span class="token punctuation">.</span>encoder <span class="token operator">=</span> Encoder<span class="token punctuation">(</span><span class="token punctuation">)</span>  <span class="token comment">## 编码层</span>
        self<span class="token punctuation">.</span>decoder <span class="token operator">=</span> Decoder<span class="token punctuation">(</span><span class="token punctuation">)</span>  <span class="token comment">## 解码层</span>
        self<span class="token punctuation">.</span>projection <span class="token operator">=</span> nn<span class="token punctuation">.</span>Linear<span class="token punctuation">(</span>d_model<span class="token punctuation">,</span> tgt_vocab_size<span class="token punctuation">,</span> bias<span class="token operator">=</span><span class="token boolean">False</span><span class="token punctuation">)</span> <span class="token comment">## 输出层 d_model 是我们解码层每个token输出的维度大小，之后会做一个 tgt_vocab_size 大小的softmax</span>
    <span class="token keyword">def</span> <span class="token function">forward</span><span class="token punctuation">(</span>self<span class="token punctuation">,</span> enc_inputs<span class="token punctuation">,</span> dec_inputs<span class="token punctuation">)</span><span class="token punctuation">:</span>
        <span class="token comment">## 这里有两个数据进行输入，一个是enc_inputs 形状为[batch_size, src_len]，主要是作为编码段的输入，一个dec_inputs，形状为[batch_size, tgt_len]，主要是作为解码端的输入</span>

        <span class="token comment">## enc_inputs作为输入 形状为[batch_size, src_len]，输出由自己的函数内部指定，想要什么指定输出什么，可以是全部tokens的输出，可以是特定每一层的输出；也可以是中间某些参数的输出；</span>
        <span class="token comment">## enc_outputs就是主要的输出，enc_self_attns这里没记错的是QK转置相乘之后softmax之后的矩阵值，代表的是每个单词和其他单词相关性；</span>
        enc_outputs<span class="token punctuation">,</span> enc_self_attns <span class="token operator">=</span> self<span class="token punctuation">.</span>encoder<span class="token punctuation">(</span>enc_inputs<span class="token punctuation">)</span>

        <span class="token comment">## dec_outputs 是decoder主要输出，用于后续的linear映射； dec_self_attns类比于enc_self_attns 是查看每个单词对decoder中输入的其余单词的相关性；dec_enc_attns是decoder中每个单词对encoder中每个单词的相关性；</span>
        dec_outputs<span class="token punctuation">,</span> dec_self_attns<span class="token punctuation">,</span> dec_enc_attns <span class="token operator">=</span> self<span class="token punctuation">.</span>decoder<span class="token punctuation">(</span>dec_inputs<span class="token punctuation">,</span> enc_inputs<span class="token punctuation">,</span> enc_outputs<span class="token punctuation">)</span>

        <span class="token comment">## dec_outputs做映射到词表大小</span>
        dec_logits <span class="token operator">=</span> self<span class="token punctuation">.</span>projection<span class="token punctuation">(</span>dec_outputs<span class="token punctuation">)</span> <span class="token comment"># dec_logits : [batch_size x src_vocab_size x tgt_vocab_size]</span>
        <span class="token keyword">return</span> dec_logits<span class="token punctuation">.</span>view<span class="token punctuation">(</span><span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> dec_logits<span class="token punctuation">.</span>size<span class="token punctuation">(</span><span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">)</span><span class="token punctuation">)</span><span class="token punctuation">,</span> enc_self_attns<span class="token punctuation">,</span> dec_self_attns<span class="token punctuation">,</span> dec_enc_attns<span aria-hidden="true" class="line-numbers-rows"><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span></span></code></pre>





<h2 id="二-Encoder"><a href="#二-Encoder" class="headerlink" title="二. Encoder"></a>二. Encoder</h2><p><img src="http://image.lijitao.top//20211116122154.png" alt="Encoder"></p>
<h3 id="输入部分"><a href="#输入部分" class="headerlink" title="输入部分"></a>输入部分</h3><p>为什么需要位置编码？</p>
<p><img src="http://image.lijitao.top//20211116122721.png" alt="RNN的天然时序"></p>
<p>在RNN中U,W,V使用的是一套参数，并且按照时间线展开（不能并行计算），具有天然的时序关系。</p>
<p>而Transforms是一起处理的，可以进行并行计算，这样加快了处理的速度，但是牺牲了时序，因此需要添加位置编码。比如如果没有位置编码的话，“我打你”和“你打我”这样相反的两句话，会被网络视为一样的意思。</p>
<p><img src="http://image.lijitao.top//20211116123226.png" alt="位置编码公式"></p>
<p>比如对于爱这个单词，将它进行Embdding编码后，然后加上它的位置编码，作为模型的输入。</p>
<p><img src="http://image.lijitao.top//20211116123321.png" alt=" "></p>
<p>在李宏毅的课程中是这样讲的，位置编码是一个one_hot vector, 然后与输入做一个concat（拼接）。其中Wp是人为设定的。</p>
<p><img src="http://image.lijitao.top//20211116164848.png" alt=" "></p>
<p><img src="http://image.lijitao.top//20211116165112.png" alt=" "></p>
<h3 id="注意力机制-1"><a href="#注意力机制-1" class="headerlink" title="注意力机制"></a>注意力机制</h3><p><img src="http://image.lijitao.top//20211116122154.png" alt=" "></p>
<p>Attention的思想就是喂进去两个向量，然后给出这两个向量的match程度，评分。</p>
<p>$$ Attention(Q, K, V) = softmax(\frac{QK_{}^{T}}{\sqrt{d_k}})V $$</p>
<p><img src="http://image.lijitao.top//20211116124203.png" alt=" "></p>
<p>Query是我们要查询的目标，Key代表这样图片的不同区域，将婴儿分别于Key进行点乘，点乘的结果越大说明越相似，越关注此区域。然后与V相乘得出加权和。</p>
<p>在只有单词向量的情况下，如何获取QKV?</p>
<p><img src="http://image.lijitao.top//20211116125407.png" alt=" "></p>
<p>每个q分别与其他的K计算</p>
<p><img src="http://image.lijitao.top//20211116164237.png" alt=" "></p>
<p><img src="http://image.lijitao.top//20211116125517.png" alt=" "></p>
<p>除以$\sqrt{d_K}$的原因：$QK_{}^{T}$的值太大，softmax反向传播的时候值会特别小，容易导致梯度消失。</p>
<p>在实际计算中，会将所有的词表示为一个大矩阵，然后可以进行并行计算。</p>
<p><img src="http://image.lijitao.top//20211116130056.png" alt=" "></p>
<h3 id="多头注意力机制"><a href="#多头注意力机制" class="headerlink" title="多头注意力机制"></a>多头注意力机制</h3><p>多头相当于映射到不同的子空间，捕捉到不同的信息。将多个头合在一起进行输出：</p>
<p><img src="http://image.lijitao.top//20211116160315.png" alt=" "></p>
<p><img src="http://image.lijitao.top//20211116160226.png" alt=" "></p>
<h3 id="残差和LayNorm"><a href="#残差和LayNorm" class="headerlink" title="残差和LayNorm"></a>残差和LayNorm</h3><h4 id="残差"><a href="#残差" class="headerlink" title="残差"></a>残差</h4><p><img src="http://image.lijitao.top//20211116160542.png" alt=" "></p>
<p><img src="http://image.lijitao.top//20211116160800.png" alt=" "></p>
<p>梯度消失一般情况下是由于连乘产生的，但是在残差网络中即使连乘再多，前边的1可以缓解梯度消失的出现。 </p>
<h4 id="Layer-Normalization"><a href="#Layer-Normalization" class="headerlink" title="Layer Normalization"></a>Layer Normalization</h4><p>BN的主要思想是: <strong>在每一层的每一批数据(一个batch里的同一通道)上进行归一化</strong><br>LN的主要思想是:<strong>是在每一个样本(一个样本里的不同通道)上计算均值和方差</strong>，而不是 BN 那种在批方向计算均值和方差！</p>
<p><strong>BN</strong></p>
<p><img src="http://image.lijitao.top//20211116161201.png" alt=" "></p>
<p>BN的优点：</p>
<ul>
<li>可以解决内部协变量偏移</li>
<li>缓解了梯度饱和问题（如果使用sigmoid激活函数的话），加快收敛。</li>
</ul>
<p>BN的缺点：</p>
<ul>
<li><p>batch_size比较小的时候效果较差</p>
</li>
<li><p>在RNN中效果较差（如下图：RNN的输入是动态，不能有效的得到整体的均值和方差）</p>
<p><img src="http://image.lijitao.top//20211116161524.png" alt=" "></p>
</li>
</ul>
<p><strong>LN</strong></p>
<p>当我们使用梯度下降法做优化时，随着网络深度的增加，数据的分布会不断发生变化,为了保证数据特征分布的稳定性，我们加入Layer Normalization，这样可以加速模型的收敛速度</p>
<h4 id="为什么要告诉后面模型哪些位置被PAD填充？"><a href="#为什么要告诉后面模型哪些位置被PAD填充？" class="headerlink" title="为什么要告诉后面模型哪些位置被PAD填充？"></a>为什么要告诉后面模型哪些位置被PAD填充？</h4><p>pad和pad计算相似度也会很高。</p>
<pre class="line-numbers language-python" data-language="python"><code class="language-python"><span class="token keyword">class</span> <span class="token class-name">MultiHeadAttention</span><span class="token punctuation">(</span>nn<span class="token punctuation">.</span>Module<span class="token punctuation">)</span><span class="token punctuation">:</span>
    <span class="token keyword">def</span> <span class="token function">__init__</span><span class="token punctuation">(</span>self<span class="token punctuation">)</span><span class="token punctuation">:</span>
        <span class="token builtin">super</span><span class="token punctuation">(</span>MultiHeadAttention<span class="token punctuation">,</span> self<span class="token punctuation">)</span><span class="token punctuation">.</span>__init__<span class="token punctuation">(</span><span class="token punctuation">)</span>
        <span class="token comment">## 输入进来的QKV是相等的，我们会使用映射linear做一个映射得到参数矩阵Wq, Wk,Wv</span>
        self<span class="token punctuation">.</span>W_Q <span class="token operator">=</span> nn<span class="token punctuation">.</span>Linear<span class="token punctuation">(</span>d_model<span class="token punctuation">,</span> d_k <span class="token operator">*</span> n_heads<span class="token punctuation">)</span>
        self<span class="token punctuation">.</span>W_K <span class="token operator">=</span> nn<span class="token punctuation">.</span>Linear<span class="token punctuation">(</span>d_model<span class="token punctuation">,</span> d_k <span class="token operator">*</span> n_heads<span class="token punctuation">)</span>
        self<span class="token punctuation">.</span>W_V <span class="token operator">=</span> nn<span class="token punctuation">.</span>Linear<span class="token punctuation">(</span>d_model<span class="token punctuation">,</span> d_v <span class="token operator">*</span> n_heads<span class="token punctuation">)</span>
        self<span class="token punctuation">.</span>linear <span class="token operator">=</span> nn<span class="token punctuation">.</span>Linear<span class="token punctuation">(</span>n_heads <span class="token operator">*</span> d_v<span class="token punctuation">,</span> d_model<span class="token punctuation">)</span>
        self<span class="token punctuation">.</span>layer_norm <span class="token operator">=</span> nn<span class="token punctuation">.</span>LayerNorm<span class="token punctuation">(</span>d_model<span class="token punctuation">)</span>

    <span class="token keyword">def</span> <span class="token function">forward</span><span class="token punctuation">(</span>self<span class="token punctuation">,</span> Q<span class="token punctuation">,</span> K<span class="token punctuation">,</span> V<span class="token punctuation">,</span> attn_mask<span class="token punctuation">)</span><span class="token punctuation">:</span>

        <span class="token comment">## 这个多头分为这几个步骤，首先映射分头，然后计算atten_scores，然后计算atten_value;</span>
        <span class="token comment">##输入进来的数据形状： Q: [batch_size x len_q x d_model], K: [batch_size x len_k x d_model], V: [batch_size x len_k x d_model]</span>
        residual<span class="token punctuation">,</span> batch_size <span class="token operator">=</span> Q<span class="token punctuation">,</span> Q<span class="token punctuation">.</span>size<span class="token punctuation">(</span><span class="token number">0</span><span class="token punctuation">)</span>
        <span class="token comment"># (B, S, D) -proj-&gt; (B, S, D) -split-&gt; (B, S, H, W) -trans-&gt; (B, H, S, W)</span>

        <span class="token comment">##下面这个就是先映射，后分头；一定要注意的是q和k分头之后维度是一致额，所以一看这里都是dk</span>
        q_s <span class="token operator">=</span> self<span class="token punctuation">.</span>W_Q<span class="token punctuation">(</span>Q<span class="token punctuation">)</span><span class="token punctuation">.</span>view<span class="token punctuation">(</span>batch_size<span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> n_heads<span class="token punctuation">,</span> d_k<span class="token punctuation">)</span><span class="token punctuation">.</span>transpose<span class="token punctuation">(</span><span class="token number">1</span><span class="token punctuation">,</span><span class="token number">2</span><span class="token punctuation">)</span>  <span class="token comment"># q_s: [batch_size x n_heads x len_q x d_k]</span>
        k_s <span class="token operator">=</span> self<span class="token punctuation">.</span>W_K<span class="token punctuation">(</span>K<span class="token punctuation">)</span><span class="token punctuation">.</span>view<span class="token punctuation">(</span>batch_size<span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> n_heads<span class="token punctuation">,</span> d_k<span class="token punctuation">)</span><span class="token punctuation">.</span>transpose<span class="token punctuation">(</span><span class="token number">1</span><span class="token punctuation">,</span><span class="token number">2</span><span class="token punctuation">)</span>  <span class="token comment"># k_s: [batch_size x n_heads x len_k x d_k]</span>
        v_s <span class="token operator">=</span> self<span class="token punctuation">.</span>W_V<span class="token punctuation">(</span>V<span class="token punctuation">)</span><span class="token punctuation">.</span>view<span class="token punctuation">(</span>batch_size<span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> n_heads<span class="token punctuation">,</span> d_v<span class="token punctuation">)</span><span class="token punctuation">.</span>transpose<span class="token punctuation">(</span><span class="token number">1</span><span class="token punctuation">,</span><span class="token number">2</span><span class="token punctuation">)</span>  <span class="token comment"># v_s: [batch_size x n_heads x len_k x d_v]</span>

        <span class="token comment">## 输入进行的attn_mask形状是 batch_size x len_q x len_k，然后经过下面这个代码得到 新的attn_mask : [batch_size x n_heads x len_q x len_k]，就是把pad信息重复了n个头上</span>
        attn_mask <span class="token operator">=</span> attn_mask<span class="token punctuation">.</span>unsqueeze<span class="token punctuation">(</span><span class="token number">1</span><span class="token punctuation">)</span><span class="token punctuation">.</span>repeat<span class="token punctuation">(</span><span class="token number">1</span><span class="token punctuation">,</span> n_heads<span class="token punctuation">,</span> <span class="token number">1</span><span class="token punctuation">,</span> <span class="token number">1</span><span class="token punctuation">)</span>


        <span class="token comment">##然后我们计算 ScaledDotProductAttention 这个函数，去7.看一下</span>
        <span class="token comment">## 得到的结果有两个：context: [batch_size x n_heads x len_q x d_v], attn: [batch_size x n_heads x len_q x len_k]</span>
        context<span class="token punctuation">,</span> attn <span class="token operator">=</span> ScaledDotProductAttention<span class="token punctuation">(</span><span class="token punctuation">)</span><span class="token punctuation">(</span>q_s<span class="token punctuation">,</span> k_s<span class="token punctuation">,</span> v_s<span class="token punctuation">,</span> attn_mask<span class="token punctuation">)</span>
        context <span class="token operator">=</span> context<span class="token punctuation">.</span>transpose<span class="token punctuation">(</span><span class="token number">1</span><span class="token punctuation">,</span> <span class="token number">2</span><span class="token punctuation">)</span><span class="token punctuation">.</span>contiguous<span class="token punctuation">(</span><span class="token punctuation">)</span><span class="token punctuation">.</span>view<span class="token punctuation">(</span>batch_size<span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> n_heads <span class="token operator">*</span> d_v<span class="token punctuation">)</span> <span class="token comment"># context: [batch_size x len_q x n_heads * d_v]</span>
        output <span class="token operator">=</span> self<span class="token punctuation">.</span>linear<span class="token punctuation">(</span>context<span class="token punctuation">)</span>
        <span class="token keyword">return</span> self<span class="token punctuation">.</span>layer_norm<span class="token punctuation">(</span>output <span class="token operator">+</span> residual<span class="token punctuation">)</span><span class="token punctuation">,</span> attn <span class="token comment"># output: [batch_size x len_q x d_model]</span>

<span class="token comment">## 4. get_attn_pad_mask</span>

<span class="token comment">## 比如说，我现在的句子长度是5，在后面注意力机制的部分，我们在计算出来QK转置除以根号之后，softmax之前，我们得到的形状</span>
<span class="token comment">## len_input * len_input  代表每个单词对其余包含自己的单词的影响力</span>

<span class="token comment">## 所以这里我需要有一个同等大小形状的矩阵，告诉我哪个位置是PAD部分，之后在计算计算softmax之前会把这里置为无穷大；</span>

<span class="token comment">## 一定需要注意的是这里得到的矩阵形状是batch_size x len_q x len_k，我们是对k中的pad符号进行标识，并没有对q中的做标识，因为没必要</span>

<span class="token comment">## seq_q 和 seq_k 不一定一致，在交互注意力，q来自解码端，k来自编码端，所以告诉模型编码这边pad符号信息就可以，解码端的pad信息在交互注意力层是没有用到的；</span>

<span class="token keyword">def</span> <span class="token function">get_attn_pad_mask</span><span class="token punctuation">(</span>seq_q<span class="token punctuation">,</span> seq_k<span class="token punctuation">)</span><span class="token punctuation">:</span>
    batch_size<span class="token punctuation">,</span> len_q <span class="token operator">=</span> seq_q<span class="token punctuation">.</span>size<span class="token punctuation">(</span><span class="token punctuation">)</span>
    batch_size<span class="token punctuation">,</span> len_k <span class="token operator">=</span> seq_k<span class="token punctuation">.</span>size<span class="token punctuation">(</span><span class="token punctuation">)</span>
    <span class="token comment"># eq(zero) is PAD token</span>
    pad_attn_mask <span class="token operator">=</span> seq_k<span class="token punctuation">.</span>data<span class="token punctuation">.</span>eq<span class="token punctuation">(</span><span class="token number">0</span><span class="token punctuation">)</span><span class="token punctuation">.</span>unsqueeze<span class="token punctuation">(</span><span class="token number">1</span><span class="token punctuation">)</span>  <span class="token comment"># batch_size x 1 x len_k, one is masking</span>
    <span class="token keyword">return</span> pad_attn_mask<span class="token punctuation">.</span>expand<span class="token punctuation">(</span>batch_size<span class="token punctuation">,</span> len_q<span class="token punctuation">,</span> len_k<span class="token punctuation">)</span>  <span class="token comment"># batch_size x len_q x len_k</span>


<span class="token comment">## 3. PositionalEncoding 代码实现</span>
<span class="token keyword">class</span> <span class="token class-name">PositionalEncoding</span><span class="token punctuation">(</span>nn<span class="token punctuation">.</span>Module<span class="token punctuation">)</span><span class="token punctuation">:</span>
    <span class="token keyword">def</span> <span class="token function">__init__</span><span class="token punctuation">(</span>self<span class="token punctuation">,</span> d_model<span class="token punctuation">,</span> dropout<span class="token operator">=</span><span class="token number">0.1</span><span class="token punctuation">,</span> max_len<span class="token operator">=</span><span class="token number">5000</span><span class="token punctuation">)</span><span class="token punctuation">:</span>
        <span class="token builtin">super</span><span class="token punctuation">(</span>PositionalEncoding<span class="token punctuation">,</span> self<span class="token punctuation">)</span><span class="token punctuation">.</span>__init__<span class="token punctuation">(</span><span class="token punctuation">)</span>

        <span class="token comment">## 位置编码的实现其实很简单，直接对照着公式去敲代码就可以，下面这个代码只是其中一种实现方式；</span>
        <span class="token comment">## 从理解来讲，需要注意的就是偶数和奇数在公式上有一个共同部分，我们使用log函数把次方拿下来，方便计算；</span>
        <span class="token comment">## pos代表的是单词在句子中的索引，这点需要注意；比如max_len是128个，那么索引就是从0，1，2，...,127</span>
        <span class="token comment">## 假设我的d_model是512，2i那个符号中i从0取到了255，那么2i对应取值就是0,2,4...510</span>
        self<span class="token punctuation">.</span>dropout <span class="token operator">=</span> nn<span class="token punctuation">.</span>Dropout<span class="token punctuation">(</span>p<span class="token operator">=</span>dropout<span class="token punctuation">)</span>

        pe <span class="token operator">=</span> torch<span class="token punctuation">.</span>zeros<span class="token punctuation">(</span>max_len<span class="token punctuation">,</span> d_model<span class="token punctuation">)</span>
        position <span class="token operator">=</span> torch<span class="token punctuation">.</span>arange<span class="token punctuation">(</span><span class="token number">0</span><span class="token punctuation">,</span> max_len<span class="token punctuation">,</span> dtype<span class="token operator">=</span>torch<span class="token punctuation">.</span><span class="token builtin">float</span><span class="token punctuation">)</span><span class="token punctuation">.</span>unsqueeze<span class="token punctuation">(</span><span class="token number">1</span><span class="token punctuation">)</span>
        div_term <span class="token operator">=</span> torch<span class="token punctuation">.</span>exp<span class="token punctuation">(</span>torch<span class="token punctuation">.</span>arange<span class="token punctuation">(</span><span class="token number">0</span><span class="token punctuation">,</span> d_model<span class="token punctuation">,</span> <span class="token number">2</span><span class="token punctuation">)</span><span class="token punctuation">.</span><span class="token builtin">float</span><span class="token punctuation">(</span><span class="token punctuation">)</span> <span class="token operator">*</span> <span class="token punctuation">(</span><span class="token operator">-</span>math<span class="token punctuation">.</span>log<span class="token punctuation">(</span><span class="token number">10000.0</span><span class="token punctuation">)</span> <span class="token operator">/</span> d_model<span class="token punctuation">)</span><span class="token punctuation">)</span>
        pe<span class="token punctuation">[</span><span class="token punctuation">:</span><span class="token punctuation">,</span> <span class="token number">0</span><span class="token punctuation">:</span><span class="token punctuation">:</span><span class="token number">2</span><span class="token punctuation">]</span> <span class="token operator">=</span> torch<span class="token punctuation">.</span>sin<span class="token punctuation">(</span>position <span class="token operator">*</span> div_term<span class="token punctuation">)</span><span class="token comment">## 这里需要注意的是pe[:, 0::2]这个用法，就是从0开始到最后面，步长为2，其实代表的就是偶数位置</span>
        pe<span class="token punctuation">[</span><span class="token punctuation">:</span><span class="token punctuation">,</span> <span class="token number">1</span><span class="token punctuation">:</span><span class="token punctuation">:</span><span class="token number">2</span><span class="token punctuation">]</span> <span class="token operator">=</span> torch<span class="token punctuation">.</span>cos<span class="token punctuation">(</span>position <span class="token operator">*</span> div_term<span class="token punctuation">)</span><span class="token comment">##这里需要注意的是pe[:, 1::2]这个用法，就是从1开始到最后面，步长为2，其实代表的就是奇数位置</span>
        <span class="token comment">## 上面代码获取之后得到的pe:[max_len*d_model]</span>

        <span class="token comment">## 下面这个代码之后，我们得到的pe形状是：[max_len*1*d_model]</span>
        pe <span class="token operator">=</span> pe<span class="token punctuation">.</span>unsqueeze<span class="token punctuation">(</span><span class="token number">0</span><span class="token punctuation">)</span><span class="token punctuation">.</span>transpose<span class="token punctuation">(</span><span class="token number">0</span><span class="token punctuation">,</span> <span class="token number">1</span><span class="token punctuation">)</span>

        self<span class="token punctuation">.</span>register_buffer<span class="token punctuation">(</span><span class="token string">'pe'</span><span class="token punctuation">,</span> pe<span class="token punctuation">)</span>  <span class="token comment">## 定一个缓冲区，其实简单理解为这个参数不更新就可以</span>

    <span class="token keyword">def</span> <span class="token function">forward</span><span class="token punctuation">(</span>self<span class="token punctuation">,</span> x<span class="token punctuation">)</span><span class="token punctuation">:</span>
        <span class="token triple-quoted-string string">"""
        x: [seq_len, batch_size, d_model]
        """</span>
        x <span class="token operator">=</span> x <span class="token operator">+</span> self<span class="token punctuation">.</span>pe<span class="token punctuation">[</span><span class="token punctuation">:</span>x<span class="token punctuation">.</span>size<span class="token punctuation">(</span><span class="token number">0</span><span class="token punctuation">)</span><span class="token punctuation">,</span> <span class="token punctuation">:</span><span class="token punctuation">]</span>
        <span class="token keyword">return</span> self<span class="token punctuation">.</span>dropout<span class="token punctuation">(</span>x<span class="token punctuation">)</span>


<span class="token comment">## 5. EncoderLayer ：包含两个部分，多头注意力机制和前馈神经网络</span>
<span class="token keyword">class</span> <span class="token class-name">EncoderLayer</span><span class="token punctuation">(</span>nn<span class="token punctuation">.</span>Module<span class="token punctuation">)</span><span class="token punctuation">:</span>
    <span class="token keyword">def</span> <span class="token function">__init__</span><span class="token punctuation">(</span>self<span class="token punctuation">)</span><span class="token punctuation">:</span>
        <span class="token builtin">super</span><span class="token punctuation">(</span>EncoderLayer<span class="token punctuation">,</span> self<span class="token punctuation">)</span><span class="token punctuation">.</span>__init__<span class="token punctuation">(</span><span class="token punctuation">)</span>
        self<span class="token punctuation">.</span>enc_self_attn <span class="token operator">=</span> MultiHeadAttention<span class="token punctuation">(</span><span class="token punctuation">)</span>
        self<span class="token punctuation">.</span>pos_ffn <span class="token operator">=</span> PoswiseFeedForwardNet<span class="token punctuation">(</span><span class="token punctuation">)</span>

    <span class="token keyword">def</span> <span class="token function">forward</span><span class="token punctuation">(</span>self<span class="token punctuation">,</span> enc_inputs<span class="token punctuation">,</span> enc_self_attn_mask<span class="token punctuation">)</span><span class="token punctuation">:</span>
        <span class="token comment">## 下面这个就是做自注意力层，输入是enc_inputs，形状是[batch_size x seq_len_q x d_model] 需要注意的是最初始的QKV矩阵是等同于这个输入的，去看一下enc_self_attn函数 6.</span>
        enc_outputs<span class="token punctuation">,</span> attn <span class="token operator">=</span> self<span class="token punctuation">.</span>enc_self_attn<span class="token punctuation">(</span>enc_inputs<span class="token punctuation">,</span> enc_inputs<span class="token punctuation">,</span> enc_inputs<span class="token punctuation">,</span> enc_self_attn_mask<span class="token punctuation">)</span> <span class="token comment"># enc_inputs to same Q,K,V</span>
        enc_outputs <span class="token operator">=</span> self<span class="token punctuation">.</span>pos_ffn<span class="token punctuation">(</span>enc_outputs<span class="token punctuation">)</span> <span class="token comment"># enc_outputs: [batch_size x len_q x d_model]</span>
        <span class="token keyword">return</span> enc_outputs<span class="token punctuation">,</span> attn


<span class="token comment">## 2. Encoder 部分包含三个部分：词向量embedding，位置编码部分，注意力层及后续的前馈神经网络</span>
<span class="token keyword">class</span> <span class="token class-name">Encoder</span><span class="token punctuation">(</span>nn<span class="token punctuation">.</span>Module<span class="token punctuation">)</span><span class="token punctuation">:</span>
    <span class="token keyword">def</span> <span class="token function">__init__</span><span class="token punctuation">(</span>self<span class="token punctuation">)</span><span class="token punctuation">:</span>
        <span class="token builtin">super</span><span class="token punctuation">(</span>Encoder<span class="token punctuation">,</span> self<span class="token punctuation">)</span><span class="token punctuation">.</span>__init__<span class="token punctuation">(</span><span class="token punctuation">)</span>
        self<span class="token punctuation">.</span>src_emb <span class="token operator">=</span> nn<span class="token punctuation">.</span>Embedding<span class="token punctuation">(</span>src_vocab_size<span class="token punctuation">,</span> d_model<span class="token punctuation">)</span>  <span class="token comment">## 这个其实就是去定义生成一个矩阵，大小是 src_vocab_size * d_model</span>
        self<span class="token punctuation">.</span>pos_emb <span class="token operator">=</span> PositionalEncoding<span class="token punctuation">(</span>d_model<span class="token punctuation">)</span> <span class="token comment">## 位置编码情况，这里是固定的正余弦函数，也可以使用类似词向量的nn.Embedding获得一个可以更新学习的位置编码</span>
        self<span class="token punctuation">.</span>layers <span class="token operator">=</span> nn<span class="token punctuation">.</span>ModuleList<span class="token punctuation">(</span><span class="token punctuation">[</span>EncoderLayer<span class="token punctuation">(</span><span class="token punctuation">)</span> <span class="token keyword">for</span> _ <span class="token keyword">in</span> <span class="token builtin">range</span><span class="token punctuation">(</span>n_layers<span class="token punctuation">)</span><span class="token punctuation">]</span><span class="token punctuation">)</span> <span class="token comment">## 使用ModuleList对多个encoder进行堆叠，因为后续的encoder并没有使用词向量和位置编码，所以抽离出来；</span>

    <span class="token keyword">def</span> <span class="token function">forward</span><span class="token punctuation">(</span>self<span class="token punctuation">,</span> enc_inputs<span class="token punctuation">)</span><span class="token punctuation">:</span>
        <span class="token comment">## 这里我们的 enc_inputs 形状是： [batch_size x source_len]</span>

        <span class="token comment">## 下面这个代码通过src_emb，进行索引定位，enc_outputs输出形状是[batch_size, src_len, d_model]</span>
        enc_outputs <span class="token operator">=</span> self<span class="token punctuation">.</span>src_emb<span class="token punctuation">(</span>enc_inputs<span class="token punctuation">)</span>

        <span class="token comment">## 这里就是位置编码，把两者相加放入到了这个函数里面，从这里可以去看一下位置编码函数的实现；3.</span>
        enc_outputs <span class="token operator">=</span> self<span class="token punctuation">.</span>pos_emb<span class="token punctuation">(</span>enc_outputs<span class="token punctuation">.</span>transpose<span class="token punctuation">(</span><span class="token number">0</span><span class="token punctuation">,</span> <span class="token number">1</span><span class="token punctuation">)</span><span class="token punctuation">)</span><span class="token punctuation">.</span>transpose<span class="token punctuation">(</span><span class="token number">0</span><span class="token punctuation">,</span> <span class="token number">1</span><span class="token punctuation">)</span>

        <span class="token comment">##get_attn_pad_mask是为了得到句子中pad的位置信息，给到模型后面，在计算自注意力和交互注意力的时候去掉pad符号的影响，去看一下这个函数 4.</span>
        enc_self_attn_mask <span class="token operator">=</span> get_attn_pad_mask<span class="token punctuation">(</span>enc_inputs<span class="token punctuation">,</span> enc_inputs<span class="token punctuation">)</span>
        enc_self_attns <span class="token operator">=</span> <span class="token punctuation">[</span><span class="token punctuation">]</span>
        <span class="token keyword">for</span> layer <span class="token keyword">in</span> self<span class="token punctuation">.</span>layers<span class="token punctuation">:</span>
            <span class="token comment">## 去看EncoderLayer 层函数 5.</span>
            enc_outputs<span class="token punctuation">,</span> enc_self_attn <span class="token operator">=</span> layer<span class="token punctuation">(</span>enc_outputs<span class="token punctuation">,</span> enc_self_attn_mask<span class="token punctuation">)</span>
            enc_self_attns<span class="token punctuation">.</span>append<span class="token punctuation">(</span>enc_self_attn<span class="token punctuation">)</span>
        <span class="token keyword">return</span> enc_outputs<span class="token punctuation">,</span> enc_self_attns
<span aria-hidden="true" class="line-numbers-rows"><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span></span></code></pre>





<h2 id="三、Decoder"><a href="#三、Decoder" class="headerlink" title="三、Decoder"></a>三、Decoder</h2><p><img src="http://image.lijitao.top//20211119124026.png" alt=" "></p>
<h3 id="Masked-Multi-Head-Attention"><a href="#Masked-Multi-Head-Attention" class="headerlink" title="Masked Multi-Head Attention"></a>Masked Multi-Head Attention</h3><p><img src="http://image.lijitao.top//20211116162119.png" alt=" "></p>
<p>需要对当前单词和之后的单词做mask</p>
<p>为什么？</p>
<p><img src="http://image.lijitao.top//20211116162317.png" alt=" "></p>
<p>训练过程和预测过程不匹配，实际预测的时候是看不到后边的单词的，这样训练和测试就会产生较大的gap，最终效果不好。mask实际操作会给后边的信息赋值一个特别小的数，比如-1e9, 这样在softmax的时候它的值就接近于0。</p>
<h3 id="交互层"><a href="#交互层" class="headerlink" title="交互层"></a>交互层</h3><p><img src="http://image.lijitao.top//20211116162711.png" alt=" "></p>
<p>-<img src="http://image.lijitao.top//20211116163003.png" alt=" "></p>
<p><img src="http://image.lijitao.top//20211116163057.png" alt=" "></p>
<p><img src="http://image.lijitao.top//20211116163314.png" alt=" "></p>
<pre class="line-numbers language-python" data-language="python"><code class="language-python"><span class="token comment">## 10.</span>
<span class="token keyword">class</span> <span class="token class-name">DecoderLayer</span><span class="token punctuation">(</span>nn<span class="token punctuation">.</span>Module<span class="token punctuation">)</span><span class="token punctuation">:</span>
    <span class="token keyword">def</span> <span class="token function">__init__</span><span class="token punctuation">(</span>self<span class="token punctuation">)</span><span class="token punctuation">:</span>
        <span class="token builtin">super</span><span class="token punctuation">(</span>DecoderLayer<span class="token punctuation">,</span> self<span class="token punctuation">)</span><span class="token punctuation">.</span>__init__<span class="token punctuation">(</span><span class="token punctuation">)</span>
        self<span class="token punctuation">.</span>dec_self_attn <span class="token operator">=</span> MultiHeadAttention<span class="token punctuation">(</span><span class="token punctuation">)</span>
        self<span class="token punctuation">.</span>dec_enc_attn <span class="token operator">=</span> MultiHeadAttention<span class="token punctuation">(</span><span class="token punctuation">)</span>
        self<span class="token punctuation">.</span>pos_ffn <span class="token operator">=</span> PoswiseFeedForwardNet<span class="token punctuation">(</span><span class="token punctuation">)</span>

    <span class="token keyword">def</span> <span class="token function">forward</span><span class="token punctuation">(</span>self<span class="token punctuation">,</span> dec_inputs<span class="token punctuation">,</span> enc_outputs<span class="token punctuation">,</span> dec_self_attn_mask<span class="token punctuation">,</span> dec_enc_attn_mask<span class="token punctuation">)</span><span class="token punctuation">:</span>
        dec_outputs<span class="token punctuation">,</span> dec_self_attn <span class="token operator">=</span> self<span class="token punctuation">.</span>dec_self_attn<span class="token punctuation">(</span>dec_inputs<span class="token punctuation">,</span> dec_inputs<span class="token punctuation">,</span> dec_inputs<span class="token punctuation">,</span> dec_self_attn_mask<span class="token punctuation">)</span>
        dec_outputs<span class="token punctuation">,</span> dec_enc_attn <span class="token operator">=</span> self<span class="token punctuation">.</span>dec_enc_attn<span class="token punctuation">(</span>dec_outputs<span class="token punctuation">,</span> enc_outputs<span class="token punctuation">,</span> enc_outputs<span class="token punctuation">,</span> dec_enc_attn_mask<span class="token punctuation">)</span>
        dec_outputs <span class="token operator">=</span> self<span class="token punctuation">.</span>pos_ffn<span class="token punctuation">(</span>dec_outputs<span class="token punctuation">)</span>
        <span class="token keyword">return</span> dec_outputs<span class="token punctuation">,</span> dec_self_attn<span class="token punctuation">,</span> dec_enc_attn

<span class="token comment">## 9. Decoder</span>

<span class="token keyword">class</span> <span class="token class-name">Decoder</span><span class="token punctuation">(</span>nn<span class="token punctuation">.</span>Module<span class="token punctuation">)</span><span class="token punctuation">:</span>
    <span class="token keyword">def</span> <span class="token function">__init__</span><span class="token punctuation">(</span>self<span class="token punctuation">)</span><span class="token punctuation">:</span>
        <span class="token builtin">super</span><span class="token punctuation">(</span>Decoder<span class="token punctuation">,</span> self<span class="token punctuation">)</span><span class="token punctuation">.</span>__init__<span class="token punctuation">(</span><span class="token punctuation">)</span>
        self<span class="token punctuation">.</span>tgt_emb <span class="token operator">=</span> nn<span class="token punctuation">.</span>Embedding<span class="token punctuation">(</span>tgt_vocab_size<span class="token punctuation">,</span> d_model<span class="token punctuation">)</span>
        self<span class="token punctuation">.</span>pos_emb <span class="token operator">=</span> PositionalEncoding<span class="token punctuation">(</span>d_model<span class="token punctuation">)</span>
        self<span class="token punctuation">.</span>layers <span class="token operator">=</span> nn<span class="token punctuation">.</span>ModuleList<span class="token punctuation">(</span><span class="token punctuation">[</span>DecoderLayer<span class="token punctuation">(</span><span class="token punctuation">)</span> <span class="token keyword">for</span> _ <span class="token keyword">in</span> <span class="token builtin">range</span><span class="token punctuation">(</span>n_layers<span class="token punctuation">)</span><span class="token punctuation">]</span><span class="token punctuation">)</span>

    <span class="token keyword">def</span> <span class="token function">forward</span><span class="token punctuation">(</span>self<span class="token punctuation">,</span> dec_inputs<span class="token punctuation">,</span> enc_inputs<span class="token punctuation">,</span> enc_outputs<span class="token punctuation">)</span><span class="token punctuation">:</span> <span class="token comment"># dec_inputs : [batch_size x target_len]</span>
        dec_outputs <span class="token operator">=</span> self<span class="token punctuation">.</span>tgt_emb<span class="token punctuation">(</span>dec_inputs<span class="token punctuation">)</span>  <span class="token comment"># [batch_size, tgt_len, d_model]</span>
        dec_outputs <span class="token operator">=</span> self<span class="token punctuation">.</span>pos_emb<span class="token punctuation">(</span>dec_outputs<span class="token punctuation">.</span>transpose<span class="token punctuation">(</span><span class="token number">0</span><span class="token punctuation">,</span> <span class="token number">1</span><span class="token punctuation">)</span><span class="token punctuation">)</span><span class="token punctuation">.</span>transpose<span class="token punctuation">(</span><span class="token number">0</span><span class="token punctuation">,</span> <span class="token number">1</span><span class="token punctuation">)</span> <span class="token comment"># [batch_size, tgt_len, d_model]</span>

        <span class="token comment">## get_attn_pad_mask 自注意力层的时候的pad 部分</span>
        dec_self_attn_pad_mask <span class="token operator">=</span> get_attn_pad_mask<span class="token punctuation">(</span>dec_inputs<span class="token punctuation">,</span> dec_inputs<span class="token punctuation">)</span>

        <span class="token comment">## get_attn_subsequent_mask 这个做的是自注意层的mask部分，就是当前单词之后看不到，使用一个上三角为1的矩阵</span>
        dec_self_attn_subsequent_mask <span class="token operator">=</span> get_attn_subsequent_mask<span class="token punctuation">(</span>dec_inputs<span class="token punctuation">)</span>

        <span class="token comment">## 两个矩阵相加，大于0的为1，不大于0的为0，为1的在之后就会被fill到无限小</span>
        dec_self_attn_mask <span class="token operator">=</span> torch<span class="token punctuation">.</span>gt<span class="token punctuation">(</span><span class="token punctuation">(</span>dec_self_attn_pad_mask <span class="token operator">+</span> dec_self_attn_subsequent_mask<span class="token punctuation">)</span><span class="token punctuation">,</span> <span class="token number">0</span><span class="token punctuation">)</span>

        <span class="token comment">## 这个做的是交互注意力机制中的mask矩阵，enc的输入是k，我去看这个k里面哪些是pad符号，给到后面的模型；注意哦，我q肯定也是有pad符号，但是这里我不在意的，之前说了好多次了哈</span>
        dec_enc_attn_mask <span class="token operator">=</span> get_attn_pad_mask<span class="token punctuation">(</span>dec_inputs<span class="token punctuation">,</span> enc_inputs<span class="token punctuation">)</span>

        dec_self_attns<span class="token punctuation">,</span> dec_enc_attns <span class="token operator">=</span> <span class="token punctuation">[</span><span class="token punctuation">]</span><span class="token punctuation">,</span> <span class="token punctuation">[</span><span class="token punctuation">]</span>
        <span class="token keyword">for</span> layer <span class="token keyword">in</span> self<span class="token punctuation">.</span>layers<span class="token punctuation">:</span>
            dec_outputs<span class="token punctuation">,</span> dec_self_attn<span class="token punctuation">,</span> dec_enc_attn <span class="token operator">=</span> layer<span class="token punctuation">(</span>dec_outputs<span class="token punctuation">,</span> enc_outputs<span class="token punctuation">,</span> dec_self_attn_mask<span class="token punctuation">,</span> dec_enc_attn_mask<span class="token punctuation">)</span>
            dec_self_attns<span class="token punctuation">.</span>append<span class="token punctuation">(</span>dec_self_attn<span class="token punctuation">)</span>
            dec_enc_attns<span class="token punctuation">.</span>append<span class="token punctuation">(</span>dec_enc_attn<span class="token punctuation">)</span>
        <span class="token keyword">return</span> dec_outputs<span class="token punctuation">,</span> dec_self_attns<span class="token punctuation">,</span> dec_enc_attns<span aria-hidden="true" class="line-numbers-rows"><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span><span></span></span></code></pre>











<h2 id="四、参考"><a href="#四、参考" class="headerlink" title="四、参考"></a>四、参考</h2><ol>
<li><p> <a target="_blank" rel="noopener" href="https://www.bilibili.com/video/BV1dR4y1E7aL?p=2&amp;spm_id_from=pageDriver">Transformer代码从零解读(Pytorch版本）_哔哩哔哩_bilibili</a></p>
</li>
<li><p> bilibili.com</p>
</li>
<li><p> youtube 李宏毅</p>
</li>
</ol>
<h1 id="Vision-Transformer-ICLR-2021"><a href="#Vision-Transformer-ICLR-2021" class="headerlink" title="Vision Transformer (ICLR 2021)"></a>Vision Transformer (ICLR 2021)</h1><p><img src="http://image.lijitao.top//202111212037233.gif" alt="vit"></p>
<h2 id="一、概览"><a href="#一、概览" class="headerlink" title="一、概览"></a>一、概览</h2><p> 在NLP任务中，将汉字转化为数字，然后embedding成矩阵放到Encoder中进行训练。图片的思路应该是类似的，也就是说将图片转化为一个一个的token。</p>
<p><strong>大部分人的思路</strong></p>
<p><img src="http://image.lijitao.top//20211118172117.png" alt=" "></p>
<p><strong>复杂度问题</strong></p>
<p>224 * 224 = 50176</p>
<p>BERT的最大长度为512，相当于100倍</p>
<p>解决方法：局部注意力机制、改进Attention公式等，但是实现起来比较复杂。</p>
<p><img src="http://image.lijitao.top//20211118203912.png" alt=" "></p>
<p><strong>VIT模型架构</strong></p>
<p>下图是原论文中给出的关于Vision Transformer(ViT)的模型框架。简单而言，模型由三个模块组成：</p>
<ul>
<li>Linear Projection of Flattened Patches(Embedding层)</li>
<li>Transformer Encoder(图右侧有给出更加详细的结构)</li>
<li>MLP Head（最终用于分类的层结构）</li>
</ul>
<p><img src="http://image.lijitao.top//20211118211910.png" alt=" "></p>
<p><img src="http://image.lijitao.top//20211118204040.png" alt=" "></p>
<h3 id="位置编码"><a href="#位置编码" class="headerlink" title="位置编码"></a>位置编码</h3><p><strong>为什么需要位置编码？</strong></p>
<p>分patch会损失二维空间信息，patch是有顺序的。</p>
<p><strong>如何优雅的避开位置编码？</strong></p>
<p><a target="_blank" rel="noopener" href="https://www.zhihu.com/question/453193028/answer/1837974538">(3 封私信) 视觉Transformer如何优雅地避开位置编码？ - 知乎 (zhihu.com)</a></p>
<p><img src="http://image.lijitao.top//20211118145932.png" alt=" "></p>
<p><strong>为什么需要位置编码？</strong></p>
<p>没有特别好的解释，大部分都是以果推因。对于Position Embedding作者也有做一系列对比试验，<strong>在源码中默认使用的是<code>1D Pos. Emb.</code></strong>，对比不使用Position Embedding准确率提升了大概3个点，和<code>2D Pos. Emb.</code>比起来没太大差别。</p>
<p><img src="http://image.lijitao.top//20211118205625.png" alt=" "></p>
<h2 id="二、Encoder和MLP-Block"><a href="#二、Encoder和MLP-Block" class="headerlink" title="二、Encoder和MLP Block"></a>二、Encoder和MLP Block</h2><h4 id="Encoder"><a href="#Encoder" class="headerlink" title="Encoder"></a>Encoder</h4><p><img src="http://image.lijitao.top//20211118210020.png" alt=" "></p>
<p>Norm提前、没有pad符号</p>
<p><img src="http://image.lijitao.top//20211118210824.png" alt=" "></p>
<p>Transformer Encoder其实就是重复堆叠Encoder Block N次，主要由以下几部分组成：</p>
<p> <img src="http://image.lijitao.top//20211118213427.png" alt=" "></p>
<ul>
<li><p>Layer Norm，这种Normalization方法主要是针对NLP领域提出的，这里是对每个token进行Norm处理</p>
</li>
<li><p>Multi-Head Attention，这个结构之前在讲Transformer中很详细的讲过，不再赘述</p>
</li>
<li><p>Dropout/DropPath，在原论文的代码中是直接使用的Dropout层，但在rwightman实现的代码中使用的是DropPath（stochastic depth），可能后者会更好一点。主要用在残差网络中，因为网络的有些部分是冗余的。</p>
</li>
<li><p>MLP Block，就是全连接+GELU激活函数+Dropout组成，也非常简单，需要注意的是第一个全连接层会把输入节点个数翻4倍<code>[197, 768] -&gt; [197, 3072]</code>，第二个全连接层会还原回原节点个数<code>[197, 3072] -&gt; [197, 768]</code></p>
</li>
</ul>
<h4 id="MLP-Head"><a href="#MLP-Head" class="headerlink" title="MLP Head"></a>MLP Head</h4><p>上面通过Transformer Encoder后输出的shape和输入的shape是保持不变的，以ViT-B/16为例，输入的是<code>[197, 768]</code>输出的还是<code>[197, 768]</code>。注意，在Transformer Encoder后其实还有一个Layer Norm没有画出来，后面有我自己画的ViT的模型可以看到详细结构。这里我们只是需要分类的信息，所以我们只需要提取出[class]token生成的对应结果就行，即<code>[197, 768]</code>中抽取出[class]token对应的<code>[1, 768]</code>。接着我们通过MLP Head得到我们最终的分类结果。MLP Head原论文中说在训练ImageNet21K时是由Linear+tanh激活函数+Linear组成。但是迁移到ImageNet1K上或者你自己的数据上时，只用一个Linear即可。</p>
<p><img src="https://img-blog.csdnimg.cn/20210626143208683.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzM3NTQxMDk3,size_16,color_FFFFFF,t_70#pic_center" alt=" "></p>
<p><img src="http://image.lijitao.top//20211118213945.png" alt=" "></p>
<p><strong>九个token输出来的结果都不用吗，那它存在的意义是什么?</strong></p>
<p>存在的意义在于帮助学习网络权重。帮助的方式就是以自监督的方式预测掩盖的token。 真正inference用的时候不用掩盖，也就不用管它了。</p>
<h2 id="三、结果"><a href="#三、结果" class="headerlink" title="三、结果"></a>三、结果</h2><p><img src="http://image.lijitao.top//20211119130840.png" alt="VIT的不同参数网络"></p>
<p>这个表是不同网络结构的参数。</p>
<p><img src="http://image.lijitao.top//20211118211638.png" alt="VIT结果"></p>
<p>Transformer最初提出是针对NLP领域的，并且在NLP领域大获成功。这篇论文也是受到其启发，尝试将Transformer应用到CV领域。关于Transformer的部分理论之前的博文中有讲，链接，这里不再赘述。通过这篇文章的实验，给出的最佳模型在ImageNet1K上能够达到88.55%的准确率（先在Google自家的JFT数据集上进行了预训练），说明Transformer在CV领域确实是有效的，而且效果还挺惊人。</p>
<h1 id="TransGAN-Two-Transformer-GAN-Make-One-Strong-GAN"><a href="#TransGAN-Two-Transformer-GAN-Make-One-Strong-GAN" class="headerlink" title="TransGAN: Two Transformer GAN Make One Strong GAN"></a>TransGAN: Two Transformer GAN Make One Strong GAN</h1><h2 id="一、概述"><a href="#一、概述" class="headerlink" title="一、概述"></a>一、概述</h2><p>TransGAN没有卷积，是首个基于纯Transformer的GAN网络。</p>
<p>最近对Transformer的爆炸性兴趣表明，它们有潜力成为计算机视觉任务（例如分类，检测和分割）的强大“通用”模型。但是，Transformer还能走多远？他们准备好解决一些更难的视觉任务，例如生成对抗网络（GAN）吗？</p>
<p>在这种好奇心的驱使下，这篇论文进行了第一个试点研究，即仅使用基于Transformer的架构来构建GAN ，完全没有卷积！</p>
<p>GAN架构称为TransGAN，由一个内存友好（memory-friendly）的基于Transformer的生成器和一个基于Transformer的patch-level鉴别器组成，该生成器在减小嵌入尺寸的同时逐步提高特征分辨率。</p>
<p><img src="http://image.lijitao.top//202111221114848.png" alt="image-20211122111411755"></p>
<p>总的来说，论文的核心是：</p>
<ol>
<li>新的GAN网络结构，仅使用Transformer不包含任何卷积</li>
</ol>
<blockquote>
<p>内存友好的生成器：逐渐增加特征分辨率，同时减少embedding维度，pixelshuffle module<br>patch-level判别器</p>
</blockquote>
<ol start="2">
<li>训练技巧</li>
</ol>
<blockquote>
<p>数据增强<br>生成器的自监督多任务协同训练<br>自注意力局部初始化， 强调自然图像的邻域平滑度</p>
</blockquote>
<ol start="3">
<li>表现</li>
</ol>
<blockquote>
<p>STL-10 新SOTA，CIFAR-10仅略逊StyleGAN v2</p>
</blockquote>
<h2 id="二、模型"><a href="#二、模型" class="headerlink" title="二、模型"></a>二、模型</h2><p>略微修改的Transformer encoder作为<strong>基础模块</strong>。模块由两部分组成。下半部分是multi-head self-attention模块。上半部分是前向MLP使用GELU激活函数。在两部分前都进行layer normalization，且使用残差连接。</p>
<h3 id="内存友好的生成器"><a href="#内存友好的生成器" class="headerlink" title="内存友好的生成器"></a>内存友好的生成器</h3><p>如果使用Transformer逐个像素生成图片，即使32x32分辨率的图像也会产生一个长度为1024的序列。并且自注意力的成本更是爆炸的序列长度的平方。</p>
<p>受CNN启发，TransGAN generator选择<strong>分阶段迭代提高分辨率</strong>。</p>
<p>上图左侧， 随机噪声作为输入，经过MLP产生{8 x 8 x C}维度的向量。64个C维度的tokens和可学习的positional encoding作为1D序列输入到Encoder中。</p>
<p><a href="https://link.zhihu.com/?target=https://arxiv.org/abs/1609.05158">Pixelshuffle module</a>：一个column的features展开得到高分辨率图像。</p>
<p><img src="http://image.lijitao.top//202111221136411.png" alt=" "></p>
<p>上采样先把1D的tokens嵌入2D特征图 $X_0 \in R^{H \times W \times C}$，之后使用pixelshuffle module增加特征分辨率，同时减少embedding维度得到 $X_0^{\prime} \in R^{2H \times 2W \times C/4}$。再将2D特征图嵌入1D序列的tokens。此权衡减轻了内存和计算压力。重复多个阶段最终得到 $Y \in R^{H_T \times W_T \times 3}$ 的RGB图像。</p>
<h3 id="Patch-level判别器"><a href="#Patch-level判别器" class="headerlink" title="Patch-level判别器"></a>Patch-level判别器</h3><p>判别器同VIT结构一样，将生成的图片$Y \in R^{H \times W \times 3}$分割成8×8个patch作为输入。每个patch经过网络压缩成64个C维tokens, 然后和可学习的positional encoding组成序列，再在序列的第一个位置附上[cls]token。通过transformer encoders， 用[cls] token的输出做分类。之后作者尝试了将TransGAN和AutoGAN（含卷积）的D和G交叉结合。</p>
<p><img src="http://image.lijitao.top//202111221550895.png" alt="image-20211122155025801"></p>
<h2 id="三、训练技巧"><a href="#三、训练技巧" class="headerlink" title="三、训练技巧"></a>三、训练技巧</h2><h3 id="数据增强"><a href="#数据增强" class="headerlink" title="数据增强"></a>数据增强</h3><p>从上节表中可以看出，无论是基于CNN还是Transformer的生成器，TransGAN的<strong>判别器</strong>训练的效果都不好。由于Transformer是一个去除人为设计偏差的通用模型，只有数据量特别大的时候才能超越CNN的表现。</p>
<p><img src="http://image.lijitao.top//202111221714253.jpeg" alt="img"></p>
<p>通常GAN是不使用数据增强的。但受 “few-shot”训练GAN的启发，作者认为不同类型的强大数据增强可以帮助有效训练ViT。本文使用的数据增强方法是<strong><a href="https://link.zhihu.com/?target=https://arxiv.org/abs/2006.10738">DiffAug</a></strong>，即反向传播update生成器的时候数据增强T的梯度也会计算在内（T必须可微）。</p>
<p>作者在多个SOTA GAN中尝试了DiffAug。显然数据量对Transformer有一个<strong>很大的提升</strong>。</p>
<p><img src="http://image.lijitao.top//202111211559348.png" alt=" "></p>
<h3 id="生成器的自监督多任务协同训练"><a href="#生成器的自监督多任务协同训练" class="headerlink" title="生成器的自监督多任务协同训练"></a>生成器的自监督多任务协同训练</h3><p><img src="http://image.lijitao.top//202111221722576.jpg" alt=" "></p>
<p>超分辨率辅助任务，作者将真实图像视为高分辨率，并将其缩小以获得对应的低分辨率图片。$\lambda * L_{SR}$ 被添加到<strong>GAN loss</strong>中（λ设为50，$L_{SR}$ 是MSE)。提升了上采样的能力。</p>
<p><img src="http://image.lijitao.top//202111211612436.png" alt=" "></p>
<h3 id="自注意力局部初始化"><a href="#自注意力局部初始化" class="headerlink" title="自注意力局部初始化"></a>自注意力局部初始化</h3><p><img src="http://image.lijitao.top//202111211601134.png" alt=" "></p>
<p>CNN架构具有内置的自然图像平滑度，据信是有助于自然图像的产生。这是具有完全学习灵活性的Transformer架构所缺乏的。 作者提出了一个mask，每个query只允许与其局部不被masked邻居互动。在训练过程中，逐渐减少mask直至消失。最终自注意力是全局的。</p>
<p>作者：卷积神经网络经常关注局部感受问题，比如我们使用maxpooling 方法来提高感受野等。但是transformer的一个特点是“天涯若比邻”，已经不需要讨论感受野的问题了。每个token会与其他所有token交互。但是当我想生成狗的鼻子的是时候，我更关注的是鼻子附近的区域，其他位置比如草地、天空，和该任务关系不大。</p>
<p>虽然不存在感受野的问题，但是它缺失了CNN的关注局部的特性。</p>
<p>但是使用局部限制策略，又会影响它的全局优势。所以一开始使用mask，使它更关注局部。随着训练的进行，增大mask的windows size，关注的范围越来越大。直到训练结束，会达到全局。</p>
<h3 id="更大的模型"><a href="#更大的模型" class="headerlink" title="更大的模型"></a>更大的模型</h3><p><img src="http://image.lijitao.top//202111211615151.png" alt=" "></p>
<p>对D进行加深，没有得到很好的提升，这一点和biggan是相同的，提升主要来源于G。</p>
<p><img src="http://image.lijitao.top//202111211616266.png" alt=" "></p>
<h1 id="TransUnet：-Transformer和Unet的强强联合"><a href="#TransUnet：-Transformer和Unet的强强联合" class="headerlink" title="TransUnet： Transformer和Unet的强强联合"></a>TransUnet： Transformer和Unet的强强联合</h1><p>论文地址：<a target="_blank" rel="noopener" href="https://arxiv.org/abs/2102.04306">https://arxiv.org/abs/2102.04306</a></p>
<p>开源代码：<a target="_blank" rel="noopener" href="https://github.com/Beckschen/TransUNet.git">https://github.com/Beckschen/TransUNet.git</a></p>
<p>3D实现及其他：<a target="_blank" rel="noopener" href="https://github.com/The-AI-Summer/self-attention-cv.git">https://github.com/The-AI-Summer/self-attention-cv.git</a></p>
<h2 id="一、Introduction"><a href="#一、Introduction" class="headerlink" title="一、Introduction"></a>一、Introduction</h2><ul>
<li>Unet是目前语义分割网络常用的网络架构，但是由于卷积固有的局部性特征，基于卷积的方法不能有效建模长程关系。</li>
<li>Transformers是为sequence to sequence的预测设计的方法，不仅对于全文建模有很大的优势，而且在大规模的预训练中，它还表现出对下游任务的优越的可转移性。</li>
<li>文章提出Unet和Transformers结合的TransUNet。</li>
</ul>
<p><img src="http://image.lijitao.top//202111251439979.png" alt="image-20211125143919927"></p>

                
            </div>
            <hr/>

            

    <div class="reprint" id="reprint-statement">
        
            <div class="reprint__author">
                <span class="reprint-meta" style="font-weight: bold;">
                    <i class="fas fa-user">
                        文章作者:
                    </i>
                </span>
                <span class="reprint-info">
                    <a href="/about" rel="external nofollow noreferrer">清欢</a>
                </span>
            </div>
            <div class="reprint__type">
                <span class="reprint-meta" style="font-weight: bold;">
                    <i class="fas fa-link">
                        文章链接:
                    </i>
                </span>
                <span class="reprint-info">
                    <a href="https://lijitao.top/2021/11/20/transformer/">https://lijitao.top/2021/11/20/transformer/</a>
                </span>
            </div>
            <div class="reprint__notice">
                <span class="reprint-meta" style="font-weight: bold;">
                    <i class="fas fa-copyright">
                        版权声明:
                    </i>
                </span>
                <span class="reprint-info">
                    本博客所有文章除特別声明外，均采用
                    <a href="https://creativecommons.org/licenses/by/4.0/deed.zh" rel="external nofollow noreferrer" target="_blank">CC BY 4.0</a>
                    许可协议。转载请注明来源
                    <a href="/about" target="_blank">清欢</a>
                    !
                </span>
            </div>
        
    </div>

    <script async defer>
      document.addEventListener("copy", function (e) {
        let toastHTML = '<span>复制成功，请遵循本文的转载规则</span><button class="btn-flat toast-action" onclick="navToReprintStatement()" style="font-size: smaller">查看</a>';
        M.toast({html: toastHTML})
      });

      function navToReprintStatement() {
        $("html, body").animate({scrollTop: $("#reprint-statement").offset().top - 80}, 800);
      }
    </script>



            <div class="tag_share" style="display: block;">
                <div class="post-meta__tag-list" style="display: inline-block;">
                    
                        <div class="article-tag">
                            
                                <a href="/tags/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0/">
                                    <span class="chip bg-color">深度学习</span>
                                </a>
                            
                                <a href="/tags/%E8%AE%A1%E7%AE%97%E6%9C%BA%E8%A7%86%E8%A7%89/">
                                    <span class="chip bg-color">计算机视觉</span>
                                </a>
                            
                                <a href="/tags/Transformer/">
                                    <span class="chip bg-color">Transformer</span>
                                </a>
                            
                        </div>
                    
                </div>
                <div class="post_share" style="zoom: 80%; width: fit-content; display: inline-block; float: right; margin: -0.15rem 0;">
                    <link rel="stylesheet" type="text/css" href="/libs/share/css/share.min.css">
<div id="article-share">

    
    <div class="social-share" data-sites="twitter,facebook,google,qq,qzone,wechat,weibo,douban,linkedin" data-wechat-qrcode-helper="<p>微信扫一扫即可分享！</p>"></div>
    <script src="/libs/share/js/social-share.min.js"></script>
    

    

</div>

                </div>
            </div>
            
                <style>
    #reward {
        margin: 40px 0;
        text-align: center;
    }

    #reward .reward-link {
        font-size: 1.4rem;
        line-height: 38px;
    }

    #reward .btn-floating:hover {
        box-shadow: 0 6px 12px rgba(0, 0, 0, 0.2), 0 5px 15px rgba(0, 0, 0, 0.2);
    }

    #rewardModal {
        width: 320px;
        height: 350px;
    }

    #rewardModal .reward-title {
        margin: 15px auto;
        padding-bottom: 5px;
    }

    #rewardModal .modal-content {
        padding: 10px;
    }

    #rewardModal .close {
        position: absolute;
        right: 15px;
        top: 15px;
        color: rgba(0, 0, 0, 0.5);
        font-size: 1.3rem;
        line-height: 20px;
        cursor: pointer;
    }

    #rewardModal .close:hover {
        color: #ef5350;
        transform: scale(1.3);
        -moz-transform:scale(1.3);
        -webkit-transform:scale(1.3);
        -o-transform:scale(1.3);
    }

    #rewardModal .reward-tabs {
        margin: 0 auto;
        width: 210px;
    }

    .reward-tabs .tabs {
        height: 38px;
        margin: 10px auto;
        padding-left: 0;
    }

    .reward-content ul {
        padding-left: 0 !important;
    }

    .reward-tabs .tabs .tab {
        height: 38px;
        line-height: 38px;
    }

    .reward-tabs .tab a {
        color: #fff;
        background-color: #ccc;
    }

    .reward-tabs .tab a:hover {
        background-color: #ccc;
        color: #fff;
    }

    .reward-tabs .wechat-tab .active {
        color: #fff !important;
        background-color: #22AB38 !important;
    }

    .reward-tabs .alipay-tab .active {
        color: #fff !important;
        background-color: #019FE8 !important;
    }

    .reward-tabs .reward-img {
        width: 210px;
        height: 210px;
    }
</style>

<div id="reward">
    <a href="#rewardModal" class="reward-link modal-trigger btn-floating btn-medium waves-effect waves-light red">赏</a>

    <!-- Modal Structure -->
    <div id="rewardModal" class="modal">
        <div class="modal-content">
            <a class="close modal-close"><i class="fas fa-times"></i></a>
            <h4 class="reward-title">你的赏识是我前进的动力</h4>
            <div class="reward-content">
                <div class="reward-tabs">
                    <ul class="tabs row">
                        <li class="tab col s6 alipay-tab waves-effect waves-light"><a href="#alipay">支付宝</a></li>
                        <li class="tab col s6 wechat-tab waves-effect waves-light"><a href="#wechat">微 信</a></li>
                    </ul>
                    <div id="alipay">
                        <img src="/medias/reward/alipay.jpg" class="reward-img" alt="支付宝打赏二维码">
                    </div>
                    <div id="wechat">
                        <img src="/medias/reward/wechat.png" class="reward-img" alt="微信打赏二维码">
                    </div>
                </div>
            </div>
        </div>
    </div>
</div>

<script>
    $(function () {
        $('.tabs').tabs();
    });
</script>

            
        </div>
    </div>

    

    

    

    

    

    

    

    

    

<article id="prenext-posts" class="prev-next articles">
    <div class="row article-row">
        
        <div class="article col s12 m6" data-aos="fade-up">
            <div class="article-badge left-badge text-color">
                <i class="fas fa-chevron-left"></i>&nbsp;上一篇</div>
            <div class="card">
                <a href="/2021/11/29/python-xue-xi-bi-ji/">
                    <div class="card-image">
                        
                        
                        <img src="/medias/featureimages/22.jpg" class="responsive-img" alt="Python学习笔记">
                        
                        <span class="card-title">Python学习笔记</span>
                    </div>
                </a>
                <div class="card-content article-content">
                    <div class="summary block-with-text">
                        
                            Python学习笔记记录
                        
                    </div>
                    <div class="publish-info">
                        <span class="publish-date">
                            <i class="far fa-clock fa-fw icon-date"></i>2021-11-29
                        </span>
                        <span class="publish-author">
                            
                            <i class="fas fa-bookmark fa-fw icon-category"></i>
                            
                            <a href="/categories/Python/" class="post-category">
                                    Python
                                </a>
                            
                            
                        </span>
                    </div>
                </div>
                
                <div class="card-action article-tags">
                    
                    <a href="/tags/Python/">
                        <span class="chip bg-color">Python</span>
                    </a>
                    
                </div>
                
            </div>
        </div>
        
        
        <div class="article col s12 m6" data-aos="fade-up">
            <div class="article-badge right-badge text-color">
                下一篇&nbsp;<i class="fas fa-chevron-right"></i>
            </div>
            <div class="card">
                <a href="/2021/11/13/wu-lian-wang-ji-zhu-zai-nong-ye-zhong-de-ying-yong/">
                    <div class="card-image">
                        
                        
                        <img src="/medias/featureimages/0.jpg" class="responsive-img" alt="">
                        
                        <span class="card-title"></span>
                    </div>
                </a>
                <div class="card-content article-content">
                    <div class="summary block-with-text">
                        
                            
                        
                    </div>
                    <div class="publish-info">
                            <span class="publish-date">
                                <i class="far fa-clock fa-fw icon-date"></i>2021-11-13
                            </span>
                        <span class="publish-author">
                            
                            <i class="fas fa-user fa-fw"></i>
                            清欢
                            
                        </span>
                    </div>
                </div>
                
            </div>
        </div>
        
    </div>
</article>

</div>



<!-- 代码块功能依赖 -->
<script type="text/javascript" src="/libs/codeBlock/codeBlockFuction.js"></script>

<!-- 代码语言 -->

<script type="text/javascript" src="/libs/codeBlock/codeLang.js"></script>


<!-- 代码块复制 -->

<script type="text/javascript" src="/libs/codeBlock/codeCopy.js"></script>


<!-- 代码块收缩 -->

<script type="text/javascript" src="/libs/codeBlock/codeShrink.js"></script>


    </div>
    <div id="toc-aside" class="expanded col l3 hide-on-med-and-down">
        <div class="toc-widget card" style="background-color: white;">
            <div class="toc-title"><i class="far fa-list-alt"></i>&nbsp;&nbsp;目录</div>
            <div id="toc-content"></div>
        </div>
    </div>
</div>

<!-- TOC 悬浮按钮. -->

<div id="floating-toc-btn" class="hide-on-med-and-down">
    <a class="btn-floating btn-large bg-color">
        <i class="fas fa-list-ul"></i>
    </a>
</div>


<script src="/libs/tocbot/tocbot.min.js"></script>
<script>
    // Build the article table of contents with tocbot, keep anchors working for
    // Chinese headings, pin the TOC while scrolling, and wire the floating
    // show/hide TOC button.
    $(function () {
        tocbot.init({
            tocSelector: '#toc-content',
            contentSelector: '#articleContent',
            // Negative offset keeps the active heading highlighted while it sits
            // below the fixed header (40% of viewport height minus 45px).
            headingsOffset: -($(window).height() * 0.4 - 45),
            collapseDepth: Number('0'),
            headingSelector: 'h2, h3, h4, h5, h6'
        });

        // modify the toc link href to support Chinese: replace each generated
        // anchor with a sequential ASCII id (toc-heading-1, toc-heading-2, ...).
        let i = 0;
        let tocHeading = 'toc-heading-';
        $('#toc-content a').each(function () {
            $(this).attr('href', '#' + tocHeading + (++i));
        });

        // modify the heading title id to support Chinese: the counters restart
        // from 0 so heading ids line up 1:1 with the TOC links above.
        i = 0;
        $('#articleContent').children('h2, h3, h4, h5, h6').each(function () {
            $(this).attr('id', tocHeading + (++i));
        });

        // Set scroll toc fixed: once the page scrolls past ~40% of the viewport
        // height (minus 64px), pin the TOC widget via the .toc-fixed class.
        let tocHeight = parseInt($(window).height() * 0.4 - 64);
        let $tocWidget = $('.toc-widget');
        $(window).scroll(function () {
            let scroll = $(window).scrollTop();
            /* add post toc fixed. */
            if (scroll > tocHeight) {
                $tocWidget.addClass('toc-fixed');
            } else {
                $tocWidget.removeClass('toc-fixed');
            }
        });

        
        /* Fix the width of the article card div: copy the source div's width to
           the target, plus a width-dependent fudge — presumably compensating for
           card padding/margins at each breakpoint; TODO confirm against matery.css. */
        let fixPostCardWidth = function (srcId, targetId) {
            let srcDiv = $('#' + srcId);
            if (srcDiv.length === 0) {
                return;
            }

            let w = srcDiv.width();
            if (w >= 450) {
                w = w + 21;
            } else if (w >= 350 && w < 450) {
                w = w + 18;
            } else if (w >= 300 && w < 350) {
                w = w + 16;
            } else {
                w = w + 14;
            }
            $('#' + targetId).width(w);
        };

        // Toggle the TOC sidebar open/closed from the floating button; the main
        // column gains/loses the l9 grid class, so the prev/next cards must be
        // re-measured afterwards.
        const expandedClass = 'expanded';
        let $tocAside = $('#toc-aside');
        let $mainContent = $('#main-content');
        $('#floating-toc-btn .btn-floating').click(function () {
            if ($tocAside.hasClass(expandedClass)) {
                $tocAside.removeClass(expandedClass).hide();
                $mainContent.removeClass('l9');
            } else {
                $tocAside.addClass(expandedClass).show();
                $mainContent.addClass('l9');
            }
            fixPostCardWidth('artDetail', 'prenext-posts');
        });
        
    });
</script>

    

</main>


<script src="https://cdn.bootcss.com/mathjax/2.7.5/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
<script>
    MathJax.Hub.Config({
        tex2jax: {inlineMath: [['$', '$'], ['\\(', '\\)']]}
    });
</script>



    <footer class="page-footer bg-color">
    

    <div class="container row center-align"
         style="margin-bottom: 15px !important;">
        <div class="col s12 m8 l8 copy-right">
            Copyright&nbsp;&copy;
            
                <span id="year">2020-2022</span>
            
            <a href="/about" target="_blank">清欢</a>
            |&nbsp;Powered by&nbsp;<a href="https://hexo.io/" target="_blank">Hexo</a>
            |&nbsp;Theme&nbsp;<a href="https://github.com/blinkfox/hexo-theme-matery" target="_blank">Matery</a>
            <br>
            
            
            
                
            
            
                <span id="busuanzi_container_site_pv">
                &nbsp;|&nbsp;<i class="far fa-eye"></i>&nbsp;总访问量:&nbsp;
                    <span id="busuanzi_value_site_pv" class="white-color"></span>
            </span>
            
            
                <span id="busuanzi_container_site_uv">
                &nbsp;|&nbsp;<i class="fas fa-users"></i>&nbsp;总访问人数:&nbsp;
                    <span id="busuanzi_value_site_uv" class="white-color"></span>
            </span>
            
            <br>

            <!-- 运行天数提醒. -->
            
                <span id="sitetime"> Loading ...</span>
                <script>
                    var calcSiteTime = function () {
                        var seconds = 1000;
                        var minutes = seconds * 60;
                        var hours = minutes * 60;
                        var days = hours * 24;
                        var years = days * 365;
                        var today = new Date();
                        var startYear = "2020";
                        var startMonth = "11";
                        var startDate = "21";
                        var startHour = "0";
                        var startMinute = "0";
                        var startSecond = "0";
                        var todayYear = today.getFullYear();
                        var todayMonth = today.getMonth() + 1;
                        var todayDate = today.getDate();
                        var todayHour = today.getHours();
                        var todayMinute = today.getMinutes();
                        var todaySecond = today.getSeconds();
                        var t1 = Date.UTC(startYear, startMonth, startDate, startHour, startMinute, startSecond);
                        var t2 = Date.UTC(todayYear, todayMonth, todayDate, todayHour, todayMinute, todaySecond);
                        var diff = t2 - t1;
                        var diffYears = Math.floor(diff / years);
                        var diffDays = Math.floor((diff / days) - diffYears * 365);

                        // 区分是否有年份.
                        var language = 'zh-CN';
                        if (startYear === String(todayYear)) {
                            document.getElementById("year").innerHTML = todayYear;
                            var daysTip = 'This site has been running for ' + diffDays + ' days';
                            if (language === 'zh-CN') {
                                daysTip = '本站已运行 ' + diffDays + ' 天';
                            } else if (language === 'zh-HK') {
                                daysTip = '本站已運行 ' + diffDays + ' 天';
                            }
                            document.getElementById("sitetime").innerHTML = daysTip;
                        } else {
                            document.getElementById("year").innerHTML = startYear + " - " + todayYear;
                            var yearsAndDaysTip = 'This site has been running for ' + diffYears + ' years and '
                                + diffDays + ' days';
                            if (language === 'zh-CN') {
                                yearsAndDaysTip = '本站已运行 ' + diffYears + ' 年 ' + diffDays + ' 天';
                            } else if (language === 'zh-HK') {
                                yearsAndDaysTip = '本站已運行 ' + diffYears + ' 年 ' + diffDays + ' 天';
                            }
                            document.getElementById("sitetime").innerHTML = yearsAndDaysTip;
                        }
                    }

                    calcSiteTime();
                </script>
            
            <br>
            
                <span id="icp"><img src="/medias/icp.png"
                                    style="vertical-align: text-bottom;"/>
                <a href="http://beian.miit.gov.cn/" target="_blank">鲁ICP备19005295号-2</a>
            </span>
            
        </div>
        <div class="col s12 m4 l4 social-link social-statis">
    <a href="https://github.com/jinxiqinghuan" class="tooltipped" target="_blank" data-tooltip="访问我的GitHub" data-position="top" data-delay="50">
        <i class="fab fa-github"></i>
    </a>



    <a href="mailto:1196680037@qq.com" class="tooltipped" target="_blank" data-tooltip="邮件联系我" data-position="top" data-delay="50">
        <i class="fas fa-envelope-open"></i>
    </a>





    <a href="https://twitter.com/realljt" class="tooltipped" target="_blank" data-tooltip="关注我的Twitter: https://twitter.com/realljt" data-position="top" data-delay="50">
        <i class="fab fa-twitter"></i>
    </a>



    <a href="tencent://AddContact/?fromId=50&fromSubId=1&subcmd=all&uin=1196680037" class="tooltipped" target="_blank" data-tooltip="QQ联系我: 1196680037" data-position="top" data-delay="50">
        <i class="fab fa-qq"></i>
    </a>







    <a href="/atom.xml" class="tooltipped" target="_blank" data-tooltip="RSS 订阅" data-position="top" data-delay="50">
        <i class="fas fa-rss"></i>
    </a>

</div>
    </div>
</footer>

<div class="progress-bar"></div>


    <!-- 搜索遮罩框 -->
<div id="searchModal" class="modal">
    <div class="modal-content">
        <div class="search-header">
            <span class="title"><i class="fas fa-search"></i>&nbsp;&nbsp;搜索</span>
            <input type="search" id="searchInput" name="s" placeholder="请输入搜索的关键字"
                   class="search-input">
        </div>
        <div id="searchResult"></div>
    </div>
</div>

<script type="text/javascript">
$(function () {
    var searchFunc = function (path, search_id, content_id) {
        'use strict';
        $.ajax({
            url: path,
            dataType: "xml",
            success: function (xmlResponse) {
                // get the contents from search data
                var datas = $("entry", xmlResponse).map(function () {
                    return {
                        title: $("title", this).text(),
                        content: $("content", this).text(),
                        url: $("url", this).text()
                    };
                }).get();
                var $input = document.getElementById(search_id);
                var $resultContent = document.getElementById(content_id);
                $input.addEventListener('input', function () {
                    var str = '<ul class=\"search-result-list\">';
                    var keywords = this.value.trim().toLowerCase().split(/[\s\-]+/);
                    $resultContent.innerHTML = "";
                    if (this.value.trim().length <= 0) {
                        return;
                    }
                    // perform local searching
                    datas.forEach(function (data) {
                        var isMatch = true;
                        var data_title = data.title.trim().toLowerCase();
                        var data_content = data.content.trim().replace(/<[^>]+>/g, "").toLowerCase();
                        var data_url = data.url;
                        data_url = data_url.indexOf('/') === 0 ? data.url : '/' + data_url;
                        var index_title = -1;
                        var index_content = -1;
                        var first_occur = -1;
                        // only match artiles with not empty titles and contents
                        if (data_title !== '' && data_content !== '') {
                            keywords.forEach(function (keyword, i) {
                                index_title = data_title.indexOf(keyword);
                                index_content = data_content.indexOf(keyword);
                                if (index_title < 0 && index_content < 0) {
                                    isMatch = false;
                                } else {
                                    if (index_content < 0) {
                                        index_content = 0;
                                    }
                                    if (i === 0) {
                                        first_occur = index_content;
                                    }
                                }
                            });
                        }
                        // show search results
                        if (isMatch) {
                            str += "<li><a href='" + data_url + "' class='search-result-title'>" + data_title + "</a>";
                            var content = data.content.trim().replace(/<[^>]+>/g, "");
                            if (first_occur >= 0) {
                                // cut out 100 characters
                                var start = first_occur - 20;
                                var end = first_occur + 80;
                                if (start < 0) {
                                    start = 0;
                                }
                                if (start === 0) {
                                    end = 100;
                                }
                                if (end > content.length) {
                                    end = content.length;
                                }
                                var match_content = content.substr(start, end);
                                // highlight all keywords
                                keywords.forEach(function (keyword) {
                                    var regS = new RegExp(keyword, "gi");
                                    match_content = match_content.replace(regS, "<em class=\"search-keyword\">" + keyword + "</em>");
                                });

                                str += "<p class=\"search-result\">" + match_content + "...</p>"
                            }
                            str += "</li>";
                        }
                    });
                    str += "</ul>";
                    $resultContent.innerHTML = str;
                });
            }
        });
    };

    searchFunc('/search.xml', 'searchInput', 'searchResult');
});
</script>

    <!-- 回到顶部按钮 -->
<div id="backTop" class="top-scroll">
    <a class="btn-floating btn-large waves-effect waves-light" href="#!">
        <i class="fas fa-arrow-up"></i>
    </a>
</div>


    <script src="/libs/materialize/materialize.min.js"></script>
    <script src="/libs/masonry/masonry.pkgd.min.js"></script>
    <script src="/libs/aos/aos.js"></script>
    <script src="/libs/scrollprogress/scrollProgress.min.js"></script>
    <script src="/libs/lightGallery/js/lightgallery-all.min.js"></script>
    <script src="/js/matery.js"></script>

    

    

    <!-- 雪花特效 -->
     
        <script type="text/javascript">
            // 只在桌面版网页启用特效
            var windowWidth = $(window).width();
            if (windowWidth > 768) {
                document.write('<script type="text/javascript" src="/libs/others/snow.js"><\/script>');
            }
        </script>
    

    <!-- 鼠标星星特效 -->
    

     
        <script src="https://ssl.captcha.qq.com/TCaptcha.js"></script>
        <script src="/libs/others/TencentCaptcha.js"></script>
        <button id="TencentCaptcha" data-appid="xxxxxxxxxx" data-cbfn="callback" type="button" hidden></button>
    

    <!-- Baidu Analytics -->

    <!-- Baidu Push -->

<script>
    (function () {
        var bp = document.createElement('script');
        var curProtocol = window.location.protocol.split(':')[0];
        if (curProtocol === 'https') {
            bp.src = 'https://zz.bdstatic.com/linksubmit/push.js';
        } else {
            bp.src = 'http://push.zhanzhang.baidu.com/push.js';
        }
        var s = document.getElementsByTagName("script")[0];
        s.parentNode.insertBefore(bp, s);
    })();
</script>

    
    <script src="/libs/others/clicklove.js" async="async"></script>
    
    
    <script async src="/libs/others/busuanzi.pure.mini.js"></script>
    

    

    

    <!--腾讯兔小巢-->
    
    

    

    

    
    <script src="/libs/instantpage/instantpage.js" type="module"></script>
    

</body>

</html>
