<!DOCTYPE html>



  


<html class="theme-next gemini use-motion" lang="zh-CN">
<head>
  <meta charset="UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1"/>
<meta name="theme-color" content="#222">









<meta http-equiv="Cache-Control" content="no-transform" />
<meta http-equiv="Cache-Control" content="no-siteapp" />















  
  
  <link href="/lib/fancybox/source/jquery.fancybox.css?v=2.1.5" rel="stylesheet" type="text/css" />




  
  
  
  

  
    
    
  

  
    
      
    

    
  

  
    
      
    

    
  

  
    
      
    

    
  

  
    
      
    

    
  

  
    
    
    <link href="//fonts.googleapis.com/css?family=Microsoft YaHei:300,300italic,400,400italic,700,700italic|Microsoft YaHei:300,300italic,400,400italic,700,700italic|Microsoft YaHei:300,300italic,400,400italic,700,700italic|Microsoft YaHei:300,300italic,400,400italic,700,700italic|Inziu Iosevka Slab SC:300,300italic,400,400italic,700,700italic&subset=latin,latin-ext" rel="stylesheet" type="text/css">
  






<link href="/lib/font-awesome/css/font-awesome.min.css?v=4.6.2" rel="stylesheet" type="text/css" />

<link href="/css/main.css?v=5.1.2" rel="stylesheet" type="text/css" />


  <meta name="keywords" content="Keras," />








  <link rel="shortcut icon" type="image/x-icon" href="/favicon.ico?v=5.1.2" />






<meta name="description" content="Keras">
<meta name="keywords" content="Keras">
<meta property="og:type" content="article">
<meta property="og:title" content="Keras Notes">
<meta property="og:url" content="http://idmk.oschina.io/2017/09/22/Keras-Notes/index.html">
<meta property="og:site_name" content="苦舟">
<meta property="og:description" content="Keras">
<meta property="og:locale" content="zh-CN">
<meta property="og:image" content="http://idmk.oschina.io/2017/09/22/Keras-Notes/markdown-img-paste-20170923095039320.png">
<meta property="og:image" content="http://idmk.oschina.io/2017/09/22/Keras-Notes/markdown-img-paste-20170923093334884.png">
<meta property="og:updated_time" content="2017-11-22T15:33:54.031Z">
<meta name="twitter:card" content="summary">
<meta name="twitter:title" content="Keras Notes">
<meta name="twitter:description" content="Keras">
<meta name="twitter:image" content="http://idmk.oschina.io/2017/09/22/Keras-Notes/markdown-img-paste-20170923095039320.png">



<script type="text/javascript" id="hexo.configurations">
  var NexT = window.NexT || {};
  var CONFIG = {
    root: '/',
    scheme: 'Gemini',
    sidebar: {"position":"left","display":"hide","offset":12,"offset_float":12,"b2t":false,"scrollpercent":false,"onmobile":false},
    fancybox: true,
    tabs: true,
    motion: true,
    duoshuo: {
      userId: '0',
      author: '博主'
    },
    algolia: {
      applicationID: '',
      apiKey: '',
      indexName: '',
      hits: {"per_page":10},
      labels: {"input_placeholder":"Search for Posts","hits_empty":"We didn't find any results for the search: ${query}","hits_stats":"${hits} results found in ${time} ms"}
    }
  };
</script>



  <link rel="canonical" href="http://idmk.oschina.io/2017/09/22/Keras-Notes/"/>





  <title>Keras Notes | 苦舟</title>
  














</head>

<body itemscope itemtype="http://schema.org/WebPage" lang="zh-CN">

  
  
    
  

  <div class="container sidebar-position-left page-post-detail ">
    <div class="headband"></div>

    <header id="header" class="header" itemscope itemtype="http://schema.org/WPHeader">
      <div class="header-inner"><div class="site-brand-wrapper">
  <div class="site-meta ">
    

    <div class="custom-logo-site-title">
      <a href="/"  class="brand" rel="start">
        <span class="logo-line-before"><i></i></span>
        <span class="site-title">苦舟</span>
        <span class="logo-line-after"><i></i></span>
      </a>
    </div>
      
        <p class="site-subtitle">学海无涯，吾将上下求索。</p>
      
  </div>

  <div class="site-nav-toggle">
    <button>
      <span class="btn-bar"></span>
      <span class="btn-bar"></span>
      <span class="btn-bar"></span>
    </button>
  </div>
</div>

<nav class="site-nav">
  

  
    <ul id="menu" class="menu">
      
        
        <li class="menu-item menu-item-home">
          <a href="/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-home"></i> <br />
            
            首页
          </a>
        </li>
      
        
        <li class="menu-item menu-item-categories">
          <a href="/categories/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-th"></i> <br />
            
            分类
          </a>
        </li>
      
        
        <li class="menu-item menu-item-about">
          <a href="/about/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-user"></i> <br />
            
            关于
          </a>
        </li>
      
        
        <li class="menu-item menu-item-archives">
          <a href="/archives/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-archive"></i> <br />
            
            归档
          </a>
        </li>
      
        
        <li class="menu-item menu-item-tags">
          <a href="/tags/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-tags"></i> <br />
            
            标签
          </a>
        </li>
      
        
        <li class="menu-item menu-item-commonweal">
          <a href="/404.html" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-heartbeat"></i> <br />
            
            公益404
          </a>
        </li>
      

      
        <li class="menu-item menu-item-search">
          
            <a href="javascript:;" class="popup-trigger">
          
            
              <i class="menu-item-icon fa fa-search fa-fw"></i> <br />
            
            搜索
          </a>
        </li>
      
    </ul>
  

  
    <div class="site-search">
      
  <div class="popup search-popup local-search-popup">
  <div class="local-search-header clearfix">
    <span class="search-icon">
      <i class="fa fa-search"></i>
    </span>
    <span class="popup-btn-close">
      <i class="fa fa-times-circle"></i>
    </span>
    <div class="local-search-input-wrapper">
      <input autocomplete="off"
             placeholder="搜索..." spellcheck="false"
             type="text" id="local-search-input">
    </div>
  </div>
  <div id="local-search-result"></div>
</div>



    </div>
  
</nav>



 </div>
    </header>

    <main id="main" class="main">
      <div class="main-inner">
        <div class="content-wrap">
          <div id="content" class="content">
            

  <div id="posts" class="posts-expand">
    

  

  
  
  

  <article class="post post-type-normal" itemscope itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="http://idmk.oschina.io/2017/09/22/Keras-Notes/">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="name" content="东木金">
      <meta itemprop="description" content="">
      <meta itemprop="image" content="/uploads/avatar.jpg">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="苦舟">
    </span>

    
      <header class="post-header">

        
        
          <h1 class="post-title" itemprop="name headline">Keras Notes</h1>
        

        <div class="post-meta">
          <span class="post-time">
            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              
              <time title="创建于" itemprop="dateCreated datePublished" datetime="2017-09-22T09:59:07+08:00">
                2017-09-22
              </time>
            

            

            
          </span>

          
            <span class="post-category" >
            
              <span class="post-meta-divider">|</span>
            
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              
                <span class="post-meta-item-text">分类于</span>
              
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/categories/DL/" itemprop="url" rel="index">
                    <span itemprop="name">DL</span>
                  </a>
                </span>

                
                
              
            </span>
          

          
            
          

          
          

          

          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        <p>Keras<br><a id="more"></a></p>
<p>keras.models.Sequential ?<br>keras.layers.Dense, keras.layers.Dropout ?<br>keras.optimizers.RMSprop ?</p>
<h2 id="utils"><a href="#utils" class="headerlink" title="utils"></a>utils</h2><p><strong>to_categorical</strong><br><code>keras.utils.to_categorical(y_test, num_classes)</code><br><figure class="highlight plain"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div><div class="line">5</div><div class="line">6</div><div class="line">7</div><div class="line">8</div></pre></td><td class="code"><pre><div class="line">In [15]: y_test.shape</div><div class="line">Out[15]: (10000,)</div><div class="line"></div><div class="line">In [16]: num_classes</div><div class="line">Out[16]: 10</div><div class="line"></div><div class="line">In [17]: y_test = keras.utils.to_categorical(y_test, num_classes); y_test.shape</div><div class="line">Out[17]: (10000, 10)</div></pre></td></tr></table></figure></p>
<h2 id="Layers"><a href="#Layers" class="headerlink" title="Layers"></a>Layers</h2><h3 id="Dense"><a href="#Dense" class="headerlink" title="Dense"></a>Dense</h3><p><a href="https://keras.io/layers/core/#dense" target="_blank" rel="external">https://keras.io/layers/core/#dense</a><br>Just your regular densely-connected NN layer.<br><code>keras.layers.core.Dense(units, activation=None, use_bias=True, kernel_initializer=&#39;glorot_uniform&#39;, bias_initializer=&#39;zeros&#39;, kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)</code></p>
<p><code>Dense</code> implements the operation: <code>output = activation(dot(input, kernel) + bias)</code> where <code>activation</code> is the element-wise activation function passed as the <code>activation</code> argument, <code>kernel</code> is <em>a weights matrix created by the layer</em>, and <code>bias</code> is <em>a bias vector created by the layer</em> (only applicable if use_bias is True).</p>
<p>Note: if the input to the layer has a rank greater than 2, then it is flattened prior to the initial dot product with kernel.</p>
<p><strong>Example</strong><br><figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div><div class="line">5</div><div class="line">6</div><div class="line">7</div><div class="line">8</div></pre></td><td class="code"><pre><div class="line"><span class="comment"># as first layer in a sequential model:</span></div><div class="line">model = Sequential()</div><div class="line">model.add(Dense(<span class="number">32</span>, input_shape=(<span class="number">16</span>, )))</div><div class="line"><span class="comment"># now the model will take as input arrays of shape (*, 16)</span></div><div class="line"><span class="comment"># and output arrays of shape (*, 32)</span></div><div class="line"></div><div class="line"><span class="comment"># after the first layer, you don't need to specify the size of the input anymore:</span></div><div class="line">model.add(Dense(<span class="number">32</span>))</div></pre></td></tr></table></figure></p>
<p><strong>Arguments</strong></p>
<ul>
<li>units: Positive integer, dimensionality of the output space.</li>
<li>activation: Activation function to use (see activations). If you don’t specify anything, no activation is applied (ie. “linear” activation: a(x) = x).</li>
<li>use_bias: Boolean, whether the layer uses a bias vector.</li>
<li>kernel_initializer: Initializer for the kernel weights matrix (see initializers).</li>
<li>bias_initializer: Initializer for the bias vector (see initializers).</li>
<li>kernel_regularizer: Regularizer function applied to the kernel weights matrix (see regularizer).</li>
<li>bias_regularizer: Regularizer function applied to the bias vector (see regularizer).</li>
<li>activity_regularizer: Regularizer function applied to the output of the layer (its “activation”). (see regularizer).</li>
<li>kernel_constraint: Constraint function applied to the kernel weights matrix (see constraints).</li>
<li>bias_constraint: Constraint function applied to the bias vector (see constraints).</li>
</ul>
<p><strong>Input shape</strong><br>nD tensor with shape: (batch_size, …, input_dim). The most common situation would be a 2D input with shape (batch_size, input_dim).</p>
<p><strong>Output shape</strong><br>nD tensor with shape: (batch_size, …, units). For instance, for a 2D input with shape  (batch_size, input_dim), the output would have shape (batch_size, units).</p>
<h3 id="Activation"><a href="#Activation" class="headerlink" title="Activation"></a>Activation</h3><p><code>keras.layers.core.Activation(activation)</code>: activation: name of activation function to use (see: activations), or alternatively, a Theano or TensorFlow operation.<br>Applies an activation function to an output.</p>
<h3 id="Dropout"><a href="#Dropout" class="headerlink" title="Dropout"></a>Dropout</h3><p><code>keras.layers.core.Dropout(rate, noise_shape=None, seed=None)</code><br>Applies Dropout to the input.</p>
<p>Dropout consists in randomly setting a fraction rate of input units to 0 at each update during training time, which helps prevent overfitting.</p>
<p><strong>Arguments</strong></p>
<ul>
<li>rate: float between 0 and 1. Fraction of the input units to drop.</li>
<li>noise_shape: 1D integer tensor representing the shape of the binary dropout mask that will be multiplied with the input. For instance, if your inputs have shape  (batch_size, timesteps, features) and you want the dropout mask to be the same for all timesteps, you can use noise_shape=(batch_size, 1, features).</li>
<li>seed: A Python integer to use as random seed.</li>
</ul>
<h2 id="Models"><a href="#Models" class="headerlink" title="Models"></a>Models</h2><h3 id="Sequential"><a href="#Sequential" class="headerlink" title="Sequential"></a>Sequential</h3><h4 id="构造模型"><a href="#构造模型" class="headerlink" title="构造模型"></a>构造模型</h4><p><code>add(self, layer)</code><br>向模型中添加一个层，layer: Layer 对象<br><code>pop(self)</code><br>弹出模型最后的一层，无返回值</p>
<h4 id="compile"><a href="#compile" class="headerlink" title="compile"></a>compile</h4><p>Configures the learning process.<br><code>compile(self, optimizer, loss, metrics=None, sample_weight_mode=None, weighted_metrics=None)</code></p>
<p><strong>Example</strong></p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div></pre></td><td class="code"><pre><div class="line">model = Sequential()</div><div class="line">model.add(Dense(<span class="number">32</span>, input_shape=(<span class="number">500</span>,)))</div><div class="line">model.add(Dense(<span class="number">10</span>, activation=<span class="string">'softmax'</span>))</div><div class="line">model.compile(optimizer=<span class="string">'rmsprop'</span>, loss=<span class="string">'categorical_crossentropy'</span>, metrics=[<span class="string">'accuracy'</span>])</div></pre></td></tr></table></figure>
<p>网络第一层需要指定 input_shape ，参数类型为 tuple ，值为样本特征的大小，不需要传入样本的数量。</p>
<p><strong>Arguments</strong></p>
<ul>
<li>optimizer: str (name of optimizer) or optimizer object. See optimizers.</li>
<li>loss: str (name of objective function) or objective function. See losses.</li>
<li>metrics: list of metrics to be evaluated by the model during training and testing. Typically you will use  metrics=[‘accuracy’]. See metrics.</li>
<li>sample_weight_mode: if you need to do timestep-wise sample weighting (2D weights), set this to “temporal”. “None” defaults to sample-wise weights (1D).</li>
<li>weighted_metrics: list of metrics to be evaluated and weighted by sample_weight or class_weight during training and testing</li>
<li>**kwargs: for Theano/CNTK backends, these are passed into K.function. When using the TensorFlow backend, these are passed into tf.Session.run.</li>
</ul>
<h4 id="fit"><a href="#fit" class="headerlink" title="fit"></a>fit</h4><p>Trains the model for a fixed number of epochs.<br><code>fit(self, x, y, batch_size=32, epochs=10, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0)</code></p>
<p><strong>Arguments</strong><br>x: input data, as a Numpy array or list of Numpy arrays (if the model has multiple inputs).<br>y: labels, as a Numpy array.<br>batch_size: integer. Number of samples per gradient update.<br>epochs: integer, the number of epochs to train the model.<br>verbose: 0 for no logging to stdout, 1 for progress bar logging, 2 for one log line per epoch.<br>callbacks: list of keras.callbacks.Callback instances. List of callbacks to apply during training. See callbacks.<br>validation_split: float (0. &lt; x &lt; 1). Fraction of the data to use as held-out validation data.<br>validation_data: tuple (x_val, y_val) or tuple (x_val, y_val, val_sample_weights) to be used as held-out validation data. Will override validation_split.<br>shuffle: boolean or str (for ‘batch’). Whether to shuffle the samples at each epoch. ‘batch’ is a special option for dealing with the limitations of HDF5 data; it shuffles in batch-sized chunks.<br>class_weight: dictionary mapping classes to a weight value, used for scaling the loss function (during training only).<br>sample_weight: Numpy array of weights for the training samples, used for scaling the loss function (during training only). You can either pass a flat (1D) Numpy array with the same length as the input samples (1:1 mapping between weights and samples), or in the case of temporal data, you can pass a 2D array with shape (samples, sequence_length), to apply a different weight to every timestep of every sample. In this case you should make sure to specify sample_weight_mode=”temporal” in compile().<br>initial_epoch: epoch at which to start training (useful for resuming a previous training run)</p>
<p><strong>Returns</strong><br>A History object. Its History.history attribute is a record of training loss values and metrics values at successive epochs, as well as validation loss values and validation metrics values (if applicable).</p>
<p><strong>Raises</strong><br>RuntimeError: if the model was never compiled.</p>
<h4 id="evaluate"><a href="#evaluate" class="headerlink" title="evaluate"></a>evaluate</h4><p>Computes the loss on some input data, batch by batch.<br><code>evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None)</code></p>
<p><strong>Arguments</strong></p>
<ul>
<li>x: input data, as a Numpy array or list of Numpy arrays (if the model has multiple inputs).</li>
<li>y: labels, as a Numpy array.</li>
<li>batch_size: integer. Number of samples per gradient update.</li>
<li>verbose: verbosity mode, 0 or 1.</li>
<li>sample_weight: sample weights, as a Numpy array.</li>
</ul>
<p><strong>Returns</strong><br>Scalar test loss (if the model has no metrics) or list of scalars (if the model computes other metrics). The attribute  <code>model.metrics_names</code> will give you the display labels for the scalar outputs.</p>
<h4 id="predict"><a href="#predict" class="headerlink" title="predict"></a>predict</h4><p>Generates output predictions for the input samples.<br><code>predict(self, x, batch_size=32, verbose=0)</code><br>The input samples are processed batch by batch.</p>
<h4 id="train-on-batch"><a href="#train-on-batch" class="headerlink" title="train_on_batch"></a>train_on_batch</h4><p><code>train_on_batch(self, x, y, class_weight=None, sample_weight=None)</code><br>本函数在一个 batch 的数据上进行一次参数更新</p>
<p>函数返回训练误差的标量值或标量值的 list，与 evaluate 的情形相同。</p>
<h4 id="test-on-batch"><a href="#test-on-batch" class="headerlink" title="test_on_batch"></a>test_on_batch</h4><p><code>test_on_batch(self, x, y, sample_weight=None)</code><br>本函数在一个 batch 的样本上对模型进行评估</p>
<p>函数的返回与 evaluate 的情形相同</p>
<h4 id="predict-on-batch"><a href="#predict-on-batch" class="headerlink" title="predict_on_batch"></a>predict_on_batch</h4><p><code>predict_on_batch(self, x)</code><br>本函数在一个 batch 的样本上对模型进行测试</p>
<p>函数返回模型在一个 batch 上的预测结果</p>
<h4 id="fit-generator"><a href="#fit-generator" class="headerlink" title="fit_generator"></a>fit_generator</h4><p><code>fit_generator(self, generator, steps_per_epoch, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, class_weight=None, max_q_size=10, workers=1, pickle_safe=False, initial_epoch=0)</code></p>
<h3 id="Functional"><a href="#Functional" class="headerlink" title="Functional"></a>Functional</h3><p>它的类名是 Model，因此我们有时候也用 Model 来代表函数式模型。<br>Keras 函数式模型接口是用户定义多输出模型、非循环有向模型或具有共享层的模型等复杂模型的途径。一句话，只要你的模型不是类似 VGG 一样一条路走到黑的模型，或者你的模型需要多于一个的输出，那么你总应该选择函数式模型。函数式模型是最广泛的一类模型，序贯模型（Sequential）只是它的一种特殊情况。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div><div class="line">5</div><div class="line">6</div><div class="line">7</div><div class="line">8</div></pre></td><td class="code"><pre><div class="line"><span class="keyword">from</span> keras.models <span class="keyword">import</span> Model</div><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> Input, Dense</div><div class="line"></div><div class="line">a = Input(shape=(<span class="number">32</span>,))</div><div class="line">b = Dense(<span class="number">32</span>)(a)</div><div class="line">model = Model(inputs=a, outputs=b)</div><div class="line"><span class="comment"># 在这里，我们的模型以 a 为输入，以 b 为输出，同样我们可以构造拥有多输入和多输出的模型</span></div><div class="line">model = Model(inputs=[a1, a2], outputs=[b1, b3, b3])</div></pre></td></tr></table></figure>
<p>常用 Model 属性<br><code>model.layers</code>：组成模型图的各个层<br><code>model.inputs</code>：模型的输入张量列表<br><code>model.outputs</code>：模型的输出张量列表</p>
<h4 id="全连接网络"><a href="#全连接网络" class="headerlink" title="全连接网络"></a>全连接网络</h4><p>Sequential 当然是实现全连接网络的最好方式，但我们从简单的全连接网络开始，有助于我们学习这部分的内容。在开始前，有几个概念需要澄清：</p>
<p>层对象接受张量为参数，返回一个张量。<br>输入是张量，输出也是张量的一个框架就是一个模型，通过 Model 定义。<br>这样的模型可以被像 Keras 的 Sequential 一样被训练</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div><div class="line">5</div><div class="line">6</div><div class="line">7</div><div class="line">8</div><div class="line">9</div><div class="line">10</div><div class="line">11</div><div class="line">12</div><div class="line">13</div><div class="line">14</div><div class="line">15</div><div class="line">16</div></pre></td><td class="code"><pre><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> Input, Dense</div><div class="line"><span class="keyword">from</span> keras.models <span class="keyword">import</span> Model</div><div class="line"></div><div class="line"><span class="comment"># This returns a tensor</span></div><div class="line">inputs = Input(shape=(<span class="number">784</span>,))</div><div class="line"></div><div class="line"><span class="comment"># a layer instance is callable on a tensor, and returns a tensor</span></div><div class="line">x = Dense(<span class="number">64</span>, activation=<span class="string">'relu'</span>)(inputs)</div><div class="line">x = Dense(<span class="number">64</span>, activation=<span class="string">'relu'</span>)(x)</div><div class="line">predictions = Dense(<span class="number">10</span>, activation=<span class="string">'softmax'</span>)(x)</div><div class="line"></div><div class="line"><span class="comment"># This creates a model that includes</span></div><div class="line"><span class="comment"># the Input layer and three Dense layers</span></div><div class="line">model = Model(inputs=inputs, outputs=predictions)</div><div class="line">model.compile(optimizer=<span class="string">'rmsprop'</span>, loss=<span class="string">'categorical_crossentropy'</span>, metrics=[<span class="string">'accuracy'</span>])</div><div class="line">model.fit(data, labels)  <span class="comment"># starts 
training</span></div></pre></td></tr></table></figure>
<h4 id="所有的模型都是可调用的，就像层一样"><a href="#所有的模型都是可调用的，就像层一样" class="headerlink" title="所有的模型都是可调用的，就像层一样"></a>所有的模型都是可调用的，就像层一样</h4><p>利用函数式模型的接口，我们可以很容易的重用已经训练好的模型：你可以把模型当作一个层一样，通过提供一个 tensor 来调用它。注意当你调用一个模型时，你不仅仅重用了它的结构，也重用了它的权重。<br><figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div></pre></td><td class="code"><pre><div class="line">x = Input(shape=(<span class="number">784</span>,))</div><div class="line"><span class="comment"># This works, and returns the 10-way softmax we defined above.</span></div><div class="line">y = model(x)</div></pre></td></tr></table></figure></p>
<p>这种方式可以允许你快速的创建能处理序列信号的模型，你可以很快将一个图像分类的模型变为一个对视频分类的模型，只需要一行代码：<br><figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div><div class="line">5</div><div class="line">6</div><div class="line">7</div><div class="line">8</div><div class="line">9</div><div class="line">10</div></pre></td><td class="code"><pre><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> TimeDistributed</div><div class="line"></div><div class="line"><span class="comment"># Input tensor for sequences of 20 timesteps,</span></div><div class="line"><span class="comment"># each containing a 784-dimensional vector</span></div><div class="line">input_sequences = Input(shape=(<span class="number">20</span>, <span class="number">784</span>))</div><div class="line"></div><div class="line"><span class="comment"># This applies our previous model to every timestep in the input sequences.</span></div><div class="line"><span class="comment"># the output of the previous model was a 10-way softmax,</span></div><div class="line"><span class="comment"># so the output of the layer below will be a sequence of 20 vectors of size 10.</span></div><div class="line">processed_sequences = TimeDistributed(model)(input_sequences)</div></pre></td></tr></table></figure></p>
<h4 id="多输入和多输出模型"><a href="#多输入和多输出模型" class="headerlink" title="多输入和多输出模型"></a>多输入和多输出模型</h4><p>使用函数式模型的一个典型场景是搭建多输入、多输出的模型。</p>
<p>考虑这样一个模型。我们希望预测 Twitter 上一条新闻会被转发和点赞多少次。模型的主要输入是新闻本身，也就是一个词语的序列。但我们还可以拥有额外的输入，如新闻发布的日期等。这个模型的损失函数将由两部分组成，辅助的损失函数评估仅仅基于新闻本身做出预测的情况，主损失函数评估基于新闻和额外信息的预测的情况，即使来自主损失函数的梯度发生弥散，来自辅助损失函数的信息也能够训练 Embeddding 和 LSTM 层。在模型中早点使用主要的损失函数是对于深度网络的一个良好的正则方法。总而言之，该模型框图如下：</p>
<img src="/2017/09/22/Keras-Notes/markdown-img-paste-20170923095039320.png" alt="markdown-img-paste-20170923095039320.png" title="">
<figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div><div class="line">5</div><div class="line">6</div><div class="line">7</div><div class="line">8</div><div class="line">9</div><div class="line">10</div><div class="line">11</div><div class="line">12</div><div class="line">13</div><div class="line">14</div><div class="line">15</div><div class="line">16</div></pre></td><td class="code"><pre><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> Input, Embedding, LSTM, Dense</div><div class="line"><span class="keyword">from</span> keras.models <span class="keyword">import</span> Model</div><div class="line"></div><div class="line"><span class="comment"># 主要的输入接收新闻本身，即一个整数的序列（每个整数编码了一个词）。这些整数位于 1 到 10，000 之间（即我们的字典有 10，000 个词）。这个序列有 100 个单词</span></div><div class="line"><span class="comment"># Headline input: meant to receive sequences of 100 integers, between 1 and 10000.</span></div><div class="line"><span class="comment"># Note that we can name any layer by passing it a "name" argument.</span></div><div class="line">main_input = Input(shape=(<span class="number">100</span>,), dtype=<span class="string">'int32'</span>, name=<span class="string">'main_input'</span>)</div><div class="line"><span class="comment"># This embedding layer will encode the input sequence</span></div><div class="line"><span class="comment"># into a sequence of dense 512-dimensional vectors.</span></div><div class="line">embedding_1 = Embedding(output_dim=<span class="number">512</span>, input_dim=<span class="number">10000</span>, input_length=<span class="number">100</span>)(main_input)</div><div class="line"><span class="comment"># A LSTM will transform the vector sequence into a single vector,</span></div><div class="line"><span class="comment"># containing information about the entire sequence</span></div><div class="line">lstm1_1 
= LSTM(<span class="number">32</span>)(embedding_1)</div><div class="line"></div><div class="line"><span class="comment"># 然后，我们插入一个额外的损失，使得即使在主损失很高的情况下，LSTM 和 Embedding 层也可以平滑的训练。</span></div><div class="line">auxiliary_output = Dense(<span class="number">1</span>, activation=<span class="string">'sigmoid'</span>, name=<span class="string">'aux_output'</span>)(lstm_out)</div></pre></td></tr></table></figure>
<h4 id="共享层"><a href="#共享层" class="headerlink" title="共享层"></a>共享层</h4><p>另一个使用函数式模型的场合是使用共享层的时候。<br>‘’<br>考虑微博数据，我们希望建立模型来判别两条微博是否是来自同一个用户，这个需求同样可以用来判断一个用户的两条微博的相似性。</p>
<p>一种实现方式是，我们建立一个模型，它分别将两条微博的数据映射到两个特征向量上，然后将特征向量串联并加一个 logistic 回归层，输出它们来自同一个用户的概率。这种模型的训练数据是一对对的微博。</p>
<p>因为这个问题是对称的，所以处理第一条微博的模型当然也能重用于处理第二条微博。所以这里我们使用一个共享的 LSTM 层来进行映射。</p>
<p>首先，我们将微博的数据转为（140，256）的矩阵，即每条微博有 140 个字符，每个单词的特征由一个 256 维的词向量表示，向量的每个元素为 1 表示某个字符出现，为 0 表示不出现，这是一个 one-hot 编码。</p>
<p>之所以是（140，256）是因为一条微博最多有 140 个字符，而扩展的 ASCII 码表编码了常见的 256 个字符。原文中此处为 Tweet，所以对外国人而言这是合理的。如果考虑中文字符，那一个单词的词向量就不止 256 了。<br><figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div><div class="line">5</div><div class="line">6</div></pre></td><td class="code"><pre><div class="line"><span class="keyword">import</span> keras</div><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> Input, LSTM, Dense</div><div class="line"><span class="keyword">from</span> keras.models <span class="keyword">import</span> Model</div><div class="line"></div><div class="line">tweet_a = Input(shape=(<span class="number">140</span>, <span class="number">256</span>))</div><div class="line">tweet_b = Input(shape=(<span class="number">140</span>, <span class="number">256</span>))</div></pre></td></tr></table></figure></p>
<p>若要对不同的输入共享同一层，就初始化该层一次，然后多次调用它<br><figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div><div class="line">5</div><div class="line">6</div><div class="line">7</div><div class="line">8</div><div class="line">9</div><div class="line">10</div><div class="line">11</div><div class="line">12</div><div class="line">13</div><div class="line">14</div><div class="line">15</div><div class="line">16</div><div class="line">17</div><div class="line">18</div><div class="line">19</div><div class="line">20</div><div class="line">21</div><div class="line">22</div><div class="line">23</div><div class="line">24</div><div class="line">25</div></pre></td><td class="code"><pre><div class="line"><span class="comment"># This layer can take as input a matrix</span></div><div class="line"><span class="comment"># and will return a vector of size 64</span></div><div class="line">shared_lstm = LSTM(<span class="number">64</span>)</div><div class="line"></div><div class="line"><span class="comment"># When we reuse the same layer instance</span></div><div class="line"><span class="comment"># multiple times, the weights of the layer</span></div><div class="line"><span class="comment"># are also being reused</span></div><div class="line"><span class="comment"># (it is effectively *the same* layer)</span></div><div class="line">encoded_a = shared_lstm(tweet_a)</div><div class="line">encoded_b = shared_lstm(tweet_b)</div><div class="line"></div><div class="line"><span class="comment"># We can then concatenate the two vectors:</span></div><div class="line">merged_vector = keras.layers.concatenate([encoded_a, encoded_b], axis=<span class="number">-1</span>)</div><div class="line"></div><div class="line"><span class="comment"># And add a logistic regression on top</span></div><div class="line">predictions = Dense(<span class="number">1</span>, activation=<span 
class="string">'sigmoid'</span>)(merged_vector)</div><div class="line"></div><div class="line"><span class="comment"># We define a trainable model linking the</span></div><div class="line"><span class="comment"># tweet inputs to the predictions</span></div><div class="line">model = Model(inputs=[tweet_a, tweet_b], outputs=predictions)</div><div class="line"></div><div class="line">model.compile(optimizer=<span class="string">'rmsprop'</span>,</div><div class="line">              loss=<span class="string">'binary_crossentropy'</span>,</div><div class="line">              metrics=[<span class="string">'accuracy'</span>])</div><div class="line">model.fit([data_a, data_b], labels, epochs=<span class="number">10</span>)</div></pre></td></tr></table></figure></p>
<h2 id="Template"><a href="#Template" class="headerlink" title="Template"></a>Template</h2><h3 id="基于多层感知器的-softmax-多分类："><a href="#基于多层感知器的-softmax-多分类：" class="headerlink" title="基于多层感知器的 softmax 多分类："></a>基于多层感知器的 softmax 多分类：</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div><div class="line">5</div><div class="line">6</div><div class="line">7</div><div class="line">8</div><div class="line">9</div><div class="line">10</div><div class="line">11</div><div class="line">12</div><div class="line">13</div><div class="line">14</div><div class="line">15</div><div class="line">16</div><div class="line">17</div><div class="line">18</div><div class="line">19</div><div class="line">20</div><div class="line">21</div><div class="line">22</div><div class="line">23</div><div class="line">24</div><div class="line">25</div><div class="line">26</div></pre></td><td class="code"><pre><div class="line"><span class="keyword">from</span> keras.models <span class="keyword">import</span> Sequential</div><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> Dense, Dropout, Activation</div><div class="line"><span class="keyword">from</span> keras.optimizers <span class="keyword">import</span> SGD</div><div class="line"></div><div class="line"><span class="comment"># Generate dummy data</span></div><div class="line"><span class="keyword">import</span> numpy <span class="keyword">as</span> np</div><div class="line">x_train = np.random.random((<span class="number">1000</span>, <span class="number">20</span>))</div><div class="line">y_train = keras.utils.to_categorical(np.random.randint(<span class="number">10</span>, size=(<span class="number">1000</span>, <span class="number">1</span>)), num_classes=<span class="number">10</span>)</div><div class="line">x_test = np.random.random((<span class="number">100</span>, <span 
class="number">20</span>))</div><div class="line">y_test = keras.utils.to_categorical(np.random.randint(<span class="number">10</span>, size=(<span class="number">100</span>, <span class="number">1</span>)), num_classes=<span class="number">10</span>)</div><div class="line"></div><div class="line">model = Sequential()</div><div class="line"><span class="comment"># Dense(64) is a fully-connected layer with 64 hidden units.</span></div><div class="line"><span class="comment"># in the first layer, you must specify the expected input data shape:</span></div><div class="line"><span class="comment"># here, 20-dimensional vectors.</span></div><div class="line">model.add(Dense(<span class="number">64</span>, activation=<span class="string">'relu'</span>, input_dim=<span class="number">20</span>))</div><div class="line">model.add(Dropout(<span class="number">0.5</span>))</div><div class="line">model.add(Dense(<span class="number">64</span>, activation=<span class="string">'relu'</span>))</div><div class="line">model.add(Dropout(<span class="number">0.5</span>))</div><div class="line">model.add(Dense(<span class="number">10</span>, activation=<span class="string">'softmax'</span>))</div><div class="line"></div><div class="line">sgd = SGD(lr=<span class="number">0.01</span>, decay=<span class="number">1e-6</span>, momentum=<span class="number">0.9</span>, nesterov=<span class="keyword">True</span>)</div><div class="line">model.compile(loss=<span class="string">'categorical_crossentropy'</span>, optimizer=sgd, metrics=[<span class="string">'accuracy'</span>])</div><div class="line"></div><div class="line">model.fit(x_train, y_train, epochs=<span class="number">20</span>, batch_size=<span class="number">128</span>)</div><div class="line">score = model.evaluate(x_test, y_test, batch_size=<span class="number">128</span>)</div></pre></td></tr></table></figure>
<h3 id="MLP-的二分类："><a href="#MLP-的二分类：" class="headerlink" title="MLP 的二分类："></a>MLP 的二分类：</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div><div class="line">5</div><div class="line">6</div><div class="line">7</div><div class="line">8</div><div class="line">9</div><div class="line">10</div><div class="line">11</div><div class="line">12</div><div class="line">13</div><div class="line">14</div><div class="line">15</div><div class="line">16</div><div class="line">17</div><div class="line">18</div><div class="line">19</div><div class="line">20</div></pre></td><td class="code"><pre><div class="line"><span class="keyword">import</span> numpy <span class="keyword">as</span> np</div><div class="line"><span class="keyword">from</span> keras.models <span class="keyword">import</span> Sequential</div><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> Dense, Dropout</div><div class="line"></div><div class="line"><span class="comment"># Generate dummy data</span></div><div class="line">x_train = np.random.random((<span class="number">1000</span>, <span class="number">20</span>))</div><div class="line">y_train = np.random.randint(<span class="number">2</span>, size=(<span class="number">1000</span>, <span class="number">1</span>))</div><div class="line">x_test = np.random.random((<span class="number">100</span>, <span class="number">20</span>))</div><div class="line">y_test = np.random.randint(<span class="number">2</span>, size=(<span class="number">100</span>, <span class="number">1</span>))</div><div class="line"></div><div class="line">model = Sequential()</div><div class="line">model.add(Dense(<span class="number">64</span>, input_dim=<span class="number">20</span>, activation=<span class="string">'relu'</span>))</div><div class="line">model.add(Dropout(<span class="number">0.5</span>))</div><div 
class="line">model.add(Dense(<span class="number">64</span>, activation=<span class="string">'relu'</span>))</div><div class="line">model.add(Dropout(<span class="number">0.5</span>))</div><div class="line">model.add(Dense(<span class="number">1</span>, activation=<span class="string">'sigmoid'</span>))</div><div class="line"></div><div class="line">model.compile(loss=<span class="string">'binary_crossentropy'</span>, optimizer=<span class="string">'rmsprop'</span>, metrics=[<span class="string">'accuracy'</span>])</div><div class="line">model.fit(x_train, y_train, epochs=<span class="number">20</span>, batch_size=<span class="number">128</span>)</div><div class="line">score = model.evaluate(x_test, y_test, batch_size=<span class="number">128</span>)</div></pre></td></tr></table></figure>
<h3 id="类似-VGG-的卷积神经网络："><a href="#类似-VGG-的卷积神经网络：" class="headerlink" title="类似 VGG 的卷积神经网络："></a>类似 VGG 的卷积神经网络：</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div><div class="line">5</div><div class="line">6</div><div class="line">7</div><div class="line">8</div><div class="line">9</div><div class="line">10</div><div class="line">11</div><div class="line">12</div><div class="line">13</div><div class="line">14</div><div class="line">15</div><div class="line">16</div><div class="line">17</div><div class="line">18</div><div class="line">19</div><div class="line">20</div><div class="line">21</div><div class="line">22</div><div class="line">23</div><div class="line">24</div><div class="line">25</div><div class="line">26</div><div class="line">27</div><div class="line">28</div><div class="line">29</div><div class="line">30</div><div class="line">31</div><div class="line">32</div><div class="line">33</div><div class="line">34</div><div class="line">35</div><div class="line">36</div></pre></td><td class="code"><pre><div class="line"><span class="keyword">import</span> numpy <span class="keyword">as</span> np</div><div class="line"><span class="keyword">import</span> keras</div><div class="line"><span class="keyword">from</span> keras.models <span class="keyword">import</span> Sequential</div><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> Dense, Dropout, Flatten</div><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> Conv2D, MaxPooling2D</div><div class="line"><span class="keyword">from</span> keras.optimizers <span class="keyword">import</span> SGD</div><div class="line"></div><div class="line"><span class="comment"># Generate dummy data</span></div><div class="line">x_train = np.random.random((<span class="number">100</span>, <span 
class="number">100</span>, <span class="number">100</span>, <span class="number">3</span>))</div><div class="line">y_train = keras.utils.to_categorical(np.random.randint(<span class="number">10</span>, size=(<span class="number">100</span>, <span class="number">1</span>)), num_classes=<span class="number">10</span>)</div><div class="line">x_test = np.random.random((<span class="number">20</span>, <span class="number">100</span>, <span class="number">100</span>, <span class="number">3</span>))</div><div class="line">y_test = keras.utils.to_categorical(np.random.randint(<span class="number">10</span>, size=(<span class="number">20</span>, <span class="number">1</span>)), num_classes=<span class="number">10</span>)</div><div class="line"></div><div class="line">model = Sequential()</div><div class="line"><span class="comment"># input: 100x100 images with 3 channels -&gt; (100, 100, 3) tensors.</span></div><div class="line"><span class="comment"># this applies 32 convolution filters of size 3x3 each.</span></div><div class="line">model.add(Conv2D(<span class="number">32</span>, (<span class="number">3</span>, <span class="number">3</span>), activation=<span class="string">'relu'</span>, input_shape=(<span class="number">100</span>, <span class="number">100</span>, <span class="number">3</span>)))</div><div class="line">model.add(Conv2D(<span class="number">32</span>, (<span class="number">3</span>, <span class="number">3</span>), activation=<span class="string">'relu'</span>))</div><div class="line">model.add(MaxPooling2D(pool_size=(<span class="number">2</span>, <span class="number">2</span>)))</div><div class="line">model.add(Dropout(<span class="number">0.25</span>))</div><div class="line"></div><div class="line">model.add(Conv2D(<span class="number">64</span>, (<span class="number">3</span>, <span class="number">3</span>), activation=<span class="string">'relu'</span>))</div><div class="line">model.add(Conv2D(<span class="number">64</span>, (<span 
class="number">3</span>, <span class="number">3</span>), activation=<span class="string">'relu'</span>))</div><div class="line">model.add(MaxPooling2D(pool_size=(<span class="number">2</span>, <span class="number">2</span>)))</div><div class="line">model.add(Dropout(<span class="number">0.25</span>))</div><div class="line"></div><div class="line">model.add(Flatten())</div><div class="line">model.add(Dense(<span class="number">256</span>, activation=<span class="string">'relu'</span>))</div><div class="line">model.add(Dropout(<span class="number">0.5</span>))</div><div class="line">model.add(Dense(<span class="number">10</span>, activation=<span class="string">'softmax'</span>))</div><div class="line"></div><div class="line">sgd = SGD(lr=<span class="number">0.01</span>, decay=<span class="number">1e-6</span>, momentum=<span class="number">0.9</span>, nesterov=<span class="keyword">True</span>)</div><div class="line">model.compile(loss=<span class="string">'categorical_crossentropy'</span>, optimizer=sgd)</div><div class="line"></div><div class="line">model.fit(x_train, y_train, batch_size=<span class="number">32</span>, epochs=<span class="number">10</span>)</div><div class="line">score = model.evaluate(x_test, y_test, batch_size=<span class="number">32</span>)</div></pre></td></tr></table></figure>
<h3 id="使用-LSTM-的序列分类"><a href="#使用-LSTM-的序列分类" class="headerlink" title="使用 LSTM 的序列分类"></a>使用 LSTM 的序列分类</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div><div class="line">5</div><div class="line">6</div><div class="line">7</div><div class="line">8</div><div class="line">9</div><div class="line">10</div><div class="line">11</div><div class="line">12</div><div class="line">13</div><div class="line">14</div><div class="line">15</div></pre></td><td class="code"><pre><div class="line"><span class="keyword">from</span> keras.models <span class="keyword">import</span> Sequential</div><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> Dense, Dropout</div><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> Embedding</div><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> LSTM</div><div class="line"></div><div class="line">model = Sequential()</div><div class="line">model.add(Embedding(max_features, output_dim=<span class="number">256</span>))</div><div class="line">model.add(LSTM(<span class="number">128</span>))</div><div class="line">model.add(Dropout(<span class="number">0.5</span>))</div><div class="line">model.add(Dense(<span class="number">1</span>, activation=<span class="string">'sigmoid'</span>))</div><div class="line"></div><div class="line">model.compile(loss=<span class="string">'binary_crossentropy'</span>, optimizer=<span class="string">'rmsprop'</span>, metrics=[<span class="string">'accuracy'</span>])</div><div class="line"></div><div class="line">model.fit(x_train, y_train, batch_size=<span class="number">16</span>, epochs=<span class="number">10</span>)</div><div class="line">score = model.evaluate(x_test, y_test, batch_size=<span 
class="number">16</span>)</div></pre></td></tr></table></figure>
<h3 id="使用-1D-卷积的序列分类"><a href="#使用-1D-卷积的序列分类" class="headerlink" title="使用 1D 卷积的序列分类"></a>使用 1D 卷积的序列分类</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div><div class="line">5</div><div class="line">6</div><div class="line">7</div><div class="line">8</div><div class="line">9</div><div class="line">10</div><div class="line">11</div><div class="line">12</div><div class="line">13</div><div class="line">14</div><div class="line">15</div><div class="line">16</div><div class="line">17</div><div class="line">18</div><div class="line">19</div></pre></td><td class="code"><pre><div class="line"><span class="keyword">from</span> keras.models <span class="keyword">import</span> Sequential</div><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> Dense, Dropout</div><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> Embedding</div><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> Conv1D, GlobalAveragePooling1D, MaxPooling1D</div><div class="line"></div><div class="line">model = Sequential()</div><div class="line">model.add(Conv1D(<span class="number">64</span>, <span class="number">3</span>, activation=<span class="string">'relu'</span>, input_shape=(seq_length, <span class="number">100</span>)))</div><div class="line">model.add(Conv1D(<span class="number">64</span>, <span class="number">3</span>, activation=<span class="string">'relu'</span>))</div><div class="line">model.add(MaxPooling1D(<span class="number">3</span>))</div><div class="line">model.add(Conv1D(<span class="number">128</span>, <span class="number">3</span>, activation=<span class="string">'relu'</span>))</div><div class="line">model.add(Conv1D(<span class="number">128</span>, <span class="number">3</span>, activation=<span 
class="string">'relu'</span>))</div><div class="line">model.add(GlobalAveragePooling1D())</div><div class="line">model.add(Dropout(<span class="number">0.5</span>))</div><div class="line">model.add(Dense(<span class="number">1</span>, activation=<span class="string">'sigmoid'</span>))</div><div class="line"></div><div class="line">model.compile(loss=<span class="string">'binary_crossentropy'</span>, optimizer=<span class="string">'rmsprop'</span>, metrics=[<span class="string">'accuracy'</span>])</div><div class="line"></div><div class="line">model.fit(x_train, y_train, batch_size=<span class="number">16</span>, epochs=<span class="number">10</span>)</div><div class="line">score = model.evaluate(x_test, y_test, batch_size=<span class="number">16</span>)</div></pre></td></tr></table></figure>
<h3 id="用于序列分类的栈式-LSTM"><a href="#用于序列分类的栈式-LSTM" class="headerlink" title="用于序列分类的栈式 LSTM"></a>用于序列分类的栈式 LSTM</h3><p>在该模型中，我们将三个 LSTM 堆叠在一起，是该模型能够学习更高层次的时域特征表示。</p>
<p>开始的两层 LSTM 返回其全部输出序列，而第三层 LSTM 只返回其输出序列的最后一步结果，从而其时域维度降低（即将输入序列转换为单个向量）</p>
<img src="/2017/09/22/Keras-Notes/markdown-img-paste-20170923093334884.png" alt="markdown-img-paste-20170923093334884.png" title="">
<figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div><div class="line">5</div><div class="line">6</div><div class="line">7</div><div class="line">8</div><div class="line">9</div><div class="line">10</div><div class="line">11</div><div class="line">12</div><div class="line">13</div><div class="line">14</div><div class="line">15</div><div class="line">16</div><div class="line">17</div><div class="line">18</div><div class="line">19</div><div class="line">20</div><div class="line">21</div><div class="line">22</div><div class="line">23</div><div class="line">24</div><div class="line">25</div><div class="line">26</div></pre></td><td class="code"><pre><div class="line"><span class="keyword">from</span> keras.models <span class="keyword">import</span> Sequential</div><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> LSTM, Dense</div><div class="line"><span class="keyword">import</span> numpy <span class="keyword">as</span> np</div><div class="line"></div><div class="line">data_dim = <span class="number">16</span></div><div class="line">timesteps = <span class="number">8</span></div><div class="line">num_classes = <span class="number">10</span></div><div class="line"></div><div class="line"><span class="comment"># expected input data shape: (batch_size, timesteps, data_dim)</span></div><div class="line">model = Sequential()</div><div class="line">model.add(LSTM(<span class="number">32</span>, return_sequences=<span class="keyword">True</span>, input_shape=(timesteps, data_dim)))  <span class="comment"># returns a sequence of vectors of dimension 32</span></div><div class="line">model.add(LSTM(<span class="number">32</span>, return_sequences=<span class="keyword">True</span>))  <span class="comment"># returns a sequence of vectors of dimension 32</span></div><div class="line">model.add(LSTM(<span 
class="number">32</span>))  <span class="comment"># return a single vector of dimension 32</span></div><div class="line">model.add(Dense(<span class="number">10</span>, activation=<span class="string">'softmax'</span>))</div><div class="line"></div><div class="line">model.compile(loss=<span class="string">'categorical_crossentropy'</span>, optimizer=<span class="string">'rmsprop'</span>, metrics=[<span class="string">'accuracy'</span>])</div><div class="line"></div><div class="line"><span class="comment"># Generate dummy training data</span></div><div class="line">x_train = np.random.random((<span class="number">1000</span>, timesteps, data_dim))</div><div class="line">y_train = np.random.random((<span class="number">1000</span>, num_classes))</div><div class="line"></div><div class="line"><span class="comment"># Generate dummy validation data</span></div><div class="line">x_val = np.random.random((<span class="number">100</span>, timesteps, data_dim))</div><div class="line">y_val = np.random.random((<span class="number">100</span>, num_classes))</div><div class="line"></div><div class="line">model.fit(x_train, y_train, batch_size=<span class="number">64</span>, epochs=<span class="number">5</span>, validation_data=(x_val, y_val))</div></pre></td></tr></table></figure>
<h3 id="采用-stateful-LSTM-的相同模型"><a href="#采用-stateful-LSTM-的相同模型" class="headerlink" title="采用 stateful LSTM 的相同模型"></a>采用 stateful LSTM 的相同模型</h3><p>stateful LSTM 的特点是，在处理过一个 batch 的训练数据后，其内部状态（记忆）会被作为下一个 batch 的训练数据的初始状态。状态 LSTM 使得我们可以在合理的计算复杂度内处理较长序列</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><div class="line">1</div><div class="line">2</div><div class="line">3</div><div class="line">4</div><div class="line">5</div><div class="line">6</div><div class="line">7</div><div class="line">8</div><div class="line">9</div><div class="line">10</div><div class="line">11</div><div class="line">12</div><div class="line">13</div><div class="line">14</div><div class="line">15</div><div class="line">16</div><div class="line">17</div><div class="line">18</div><div class="line">19</div><div class="line">20</div><div class="line">21</div><div class="line">22</div><div class="line">23</div><div class="line">24</div><div class="line">25</div><div class="line">26</div><div class="line">27</div><div class="line">28</div><div class="line">29</div><div class="line">30</div></pre></td><td class="code"><pre><div class="line"><span class="keyword">from</span> keras.models <span class="keyword">import</span> Sequential</div><div class="line"><span class="keyword">from</span> keras.layers <span class="keyword">import</span> LSTM, Dense</div><div class="line"><span class="keyword">import</span> numpy <span class="keyword">as</span> np</div><div class="line"></div><div class="line">data_dim = <span class="number">16</span></div><div class="line">timesteps = <span class="number">8</span></div><div class="line">num_classes = <span class="number">10</span></div><div class="line">batch_size = <span class="number">32</span></div><div class="line"></div><div class="line"><span class="comment"># Expected input batch shape: (batch_size, timesteps, data_dim)</span></div><div class="line"><span class="comment"># Note that we have to provide the full batch_input_shape since the network is stateful.</span></div><div class="line"><span class="comment"># the sample of index i in batch k is the follow-up for the sample i in batch k-1.</span></div><div class="line">model = Sequential()</div><div class="line">model.add(LSTM(<span 
class="number">32</span>, return_sequences=<span class="keyword">True</span>, stateful=<span class="keyword">True</span>,</div><div class="line">               batch_input_shape=(batch_size, timesteps, data_dim)))</div><div class="line">model.add(LSTM(<span class="number">32</span>, return_sequences=<span class="keyword">True</span>, stateful=<span class="keyword">True</span>))</div><div class="line">model.add(LSTM(<span class="number">32</span>, stateful=<span class="keyword">True</span>))</div><div class="line">model.add(Dense(<span class="number">10</span>, activation=<span class="string">'softmax'</span>))</div><div class="line"></div><div class="line">model.compile(loss=<span class="string">'categorical_crossentropy'</span>, optimizer=<span class="string">'rmsprop'</span>, metrics=[<span class="string">'accuracy'</span>])</div><div class="line"></div><div class="line"><span class="comment"># Generate dummy training data</span></div><div class="line">x_train = np.random.random((batch_size * <span class="number">10</span>, timesteps, data_dim))</div><div class="line">y_train = np.random.random((batch_size * <span class="number">10</span>, num_classes))</div><div class="line"></div><div class="line"><span class="comment"># Generate dummy validation data</span></div><div class="line">x_val = np.random.random((batch_size * <span class="number">3</span>, timesteps, data_dim))</div><div class="line">y_val = np.random.random((batch_size * <span class="number">3</span>, num_classes))</div><div class="line"></div><div class="line">model.fit(x_train, y_train, batch_size=batch_size, epochs=<span class="number">5</span>, shuffle=<span class="keyword">False</span>, validation_data=(x_val, y_val))</div></pre></td></tr></table></figure>

      
    </div>
    
    
    

    

    

    

    <footer class="post-footer">
      
        <div class="post-tags">
          
            <a href="/tags/Keras/" rel="tag"># Keras</a>
          
        </div>
      

      
      
      

      
        <div class="post-nav">
          <div class="post-nav-next post-nav-item">
            
              <a href="/2017/09/22/A-Few-Useful-Things-to-Know-about-Machine-Learning/" rel="next" title="A Few Useful Things to Know About Machine Learning">
                <i class="fa fa-chevron-left"></i> A Few Useful Things to Know About Machine Learning
              </a>
            
          </div>

          <span class="post-nav-divider"></span>

          <div class="post-nav-prev post-nav-item">
            
              <a href="/2017/09/24/Getting-started-with-the-Keras-Sequential-model/" rel="prev" title="Getting Started With the Keras Sequential Model">
                Getting Started With the Keras Sequential Model <i class="fa fa-chevron-right"></i>
              </a>
            
          </div>
        </div>
      

      
      
    </footer>
  </div>
  
  
  
  </article>



    <div class="post-spread">
      
    </div>
  </div>


          </div>
          


          
  <div class="comments" id="comments">
    
  </div>


        </div>
        
          
  
  <div class="sidebar-toggle">
    <div class="sidebar-toggle-line-wrap">
      <span class="sidebar-toggle-line sidebar-toggle-line-first"></span>
      <span class="sidebar-toggle-line sidebar-toggle-line-middle"></span>
      <span class="sidebar-toggle-line sidebar-toggle-line-last"></span>
    </div>
  </div>

  <aside id="sidebar" class="sidebar">
    
    <div class="sidebar-inner">

      

      
        <ul class="sidebar-nav motion-element">
          <li class="sidebar-nav-toc sidebar-nav-active" data-target="post-toc-wrap" >
            文章目录
          </li>
          <li class="sidebar-nav-overview" data-target="site-overview">
            站点概览
          </li>
        </ul>
      

      <section class="site-overview sidebar-panel">
        <div class="site-author motion-element" itemprop="author" itemscope itemtype="http://schema.org/Person">
          <img class="site-author-image" itemprop="image"
               src="/uploads/avatar.jpg"
               alt="东木金" />
          <p class="site-author-name" itemprop="name">东木金</p>
           
              <p class="site-description motion-element" itemprop="description">正在学习机器学习，希望能变得很强！</p>
          
        </div>
        <nav class="site-state motion-element">

          
            <div class="site-state-item site-state-posts">
              <a href="/archives/">
                <span class="site-state-item-count">162</span>
                <span class="site-state-item-name">日志</span>
              </a>
            </div>
          

          
            
            
            <div class="site-state-item site-state-categories">
              <a href="/categories/index.html">
                <span class="site-state-item-count">18</span>
                <span class="site-state-item-name">分类</span>
              </a>
            </div>
          

          
            
            
            <div class="site-state-item site-state-tags">
              <a href="/tags/index.html">
                <span class="site-state-item-count">42</span>
                <span class="site-state-item-name">标签</span>
              </a>
            </div>
          

        </nav>

        

        <div class="links-of-author motion-element">
          
            
              <span class="links-of-author-item">
                <a href="https://github.com/bdmk" target="_blank" title="GitHub">
                  
                    <i class="fa fa-fw fa-github"></i>
                  
                    
                      GitHub
                    
                </a>
              </span>
            
              <span class="links-of-author-item">
                <a href="mailto:catcherchan94@outlook.com" target="_blank" title="E-Mail">
                  
                    <i class="fa fa-fw fa-envelope"></i>
                  
                    
                      E-Mail
                    
                </a>
              </span>
            
          
        </div>

        
        

        
        

        


      </section>

      
      <!--noindex-->
        <section class="post-toc-wrap motion-element sidebar-panel sidebar-panel-active">
          <div class="post-toc">

            
              
            

            
              <div class="post-toc-content"><ol class="nav"><li class="nav-item nav-level-2"><a class="nav-link" href="#utils"><span class="nav-number">1.</span> <span class="nav-text">utils</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#Layers"><span class="nav-number">2.</span> <span class="nav-text">Layers</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#Dense"><span class="nav-number">2.1.</span> <span class="nav-text">Dense</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#Activation"><span class="nav-number">2.2.</span> <span class="nav-text">Activation</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#Dropout"><span class="nav-number">2.3.</span> <span class="nav-text">Dropout</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#Models"><span class="nav-number">3.</span> <span class="nav-text">Models</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#Sequential"><span class="nav-number">3.1.</span> <span class="nav-text">Sequential</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#构造模型"><span class="nav-number">3.1.1.</span> <span class="nav-text">构造模型</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#compile"><span class="nav-number">3.1.2.</span> <span class="nav-text">compile</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#fit"><span class="nav-number">3.1.3.</span> <span class="nav-text">fit</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#evaluate"><span class="nav-number">3.1.4.</span> <span class="nav-text">evaluate</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#predict"><span class="nav-number">3.1.5.</span> <span class="nav-text">predict</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" 
href="#train-on-batch"><span class="nav-number">3.1.6.</span> <span class="nav-text">train_on_batch</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#test-on-batch"><span class="nav-number">3.1.7.</span> <span class="nav-text">test_on_batch</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#predict-on-batch"><span class="nav-number">3.1.8.</span> <span class="nav-text">predict_on_batch</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#fit-generator"><span class="nav-number">3.1.9.</span> <span class="nav-text">fit_generator</span></a></li></ol></li><li class="nav-item nav-level-3"><a class="nav-link" href="#Functional"><span class="nav-number">3.2.</span> <span class="nav-text">Functional</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#全连接网络"><span class="nav-number">3.2.1.</span> <span class="nav-text">全连接网络</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#所有的模型都是可调用的，就像层一样"><span class="nav-number">3.2.2.</span> <span class="nav-text">所有的模型都是可调用的，就像层一样</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#多输入和多输出模型"><span class="nav-number">3.2.3.</span> <span class="nav-text">多输入和多输出模型</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#共享层"><span class="nav-number">3.2.4.</span> <span class="nav-text">共享层</span></a></li></ol></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#Template"><span class="nav-number">4.</span> <span class="nav-text">Template</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#基于多层感知器的-softmax-多分类："><span class="nav-number">4.1.</span> <span class="nav-text">基于多层感知器的 softmax 多分类：</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#MLP-的二分类："><span class="nav-number">4.2.</span> <span class="nav-text">MLP 的二分类：</span></a></li><li class="nav-item nav-level-3"><a 
class="nav-link" href="#类似-VGG-的卷积神经网络："><span class="nav-number">4.3.</span> <span class="nav-text">类似 VGG 的卷积神经网络：</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#使用-LSTM-的序列分类"><span class="nav-number">4.4.</span> <span class="nav-text">使用 LSTM 的序列分类</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#使用-1D-卷积的序列分类"><span class="nav-number">4.5.</span> <span class="nav-text">使用 1D 卷积的序列分类</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#用于序列分类的栈式-LSTM"><span class="nav-number">4.6.</span> <span class="nav-text">用于序列分类的栈式 LSTM</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#采用-stateful-LSTM-的相同模型"><span class="nav-number">4.7.</span> <span class="nav-text">采用 stateful LSTM 的相同模型</span></a></li></ol></li></ol></div>
            

          </div>
        </section>
      <!--/noindex-->
      

      

    </div>
  </aside>


        
      </div>
    </main>

    <footer id="footer" class="footer">
      <div class="footer-inner">
        <div class="copyright" >
  
  &copy;  2017 - 
  <span itemprop="copyrightYear">2018</span>
  <span class="with-love">
    <i class="fa fa-heart"></i>
  </span>
  <span class="author" itemprop="copyrightHolder">东木金</span>
</div>


<div class="powered-by">
  由 <a class="theme-link" href="https://hexo.io">Hexo</a> 强力驱动
</div>

<div class="theme-info">
  主题 -
  <a class="theme-link" href="https://github.com/iissnan/hexo-theme-next">
    NexT.Gemini
  </a>
</div>


        

        
      </div>
    </footer>

    
      <div class="back-to-top">
        <i class="fa fa-arrow-up"></i>
        
      </div>
    

  </div>

  

<script type="text/javascript">
  // Null out any non-native (or missing) Promise so that a polyfill
  // loaded later can install a working implementation instead.
  (function (tag) {
    if (tag !== '[object Function]') {
      window.Promise = null;
    }
  })(Object.prototype.toString.call(window.Promise));
</script>









  












  
  <script type="text/javascript" src="/lib/jquery/index.js?v=2.1.3"></script>

  
  <script type="text/javascript" src="/lib/fastclick/lib/fastclick.min.js?v=1.0.6"></script>

  
  <script type="text/javascript" src="/lib/jquery_lazyload/jquery.lazyload.js?v=1.9.7"></script>

  
  <script type="text/javascript" src="/lib/velocity/velocity.min.js?v=1.2.1"></script>

  
  <script type="text/javascript" src="/lib/velocity/velocity.ui.min.js?v=1.2.1"></script>

  
  <script type="text/javascript" src="/lib/fancybox/source/jquery.fancybox.pack.js?v=2.1.5"></script>


  


  <script type="text/javascript" src="/js/src/utils.js?v=5.1.2"></script>

  <script type="text/javascript" src="/js/src/motion.js?v=5.1.2"></script>



  
  


  <script type="text/javascript" src="/js/src/affix.js?v=5.1.2"></script>

  <script type="text/javascript" src="/js/src/schemes/pisces.js?v=5.1.2"></script>



  
  <script type="text/javascript" src="/js/src/scrollspy.js?v=5.1.2"></script>
<script type="text/javascript" src="/js/src/post-details.js?v=5.1.2"></script>



  


  <script type="text/javascript" src="/js/src/bootstrap.js?v=5.1.2"></script>



  


  




	





  





  






  

  <script type="text/javascript">
    // Popup Window;
    // State for the NexT local-search popup: whether the search index has
    // been downloaded yet, and whether it is served as XML or JSON.
    var isfetched = false;
    var isXml = true;
    // Search DB path;
    // Template-injected value; the empty-string branch below is a
    // generator-time fallback and is dead when a path was configured.
    var search_path = "search.xml";
    if (search_path.length === 0) {
      search_path = "search.xml";
    } else if (/json$/i.test(search_path)) {
      isXml = false;
    }
    var path = "/" + search_path;
    // monitor main search box;

    // Close the search popup: hide it, clear the input and any rendered
    // results, drop the page overlay, and restore body scrolling.
    // (The event argument is accepted but unused.)
    var onPopupClose = function (e) {
      $('.popup').hide();
      $('#local-search-input').val('');
      $('.search-result-list').remove();
      $('#no-result').remove();
      $(".local-search-pop-overlay").remove();
      $('body').css('overflow', '');
    }

    // Open the search popup: add a click-to-close page overlay, lock body
    // scrolling, and focus the input with mobile auto-capitalize/correct
    // disabled.
    function proceedsearch() {
      $("body")
        .append('<div class="search-popup-overlay local-search-pop-overlay"></div>')
        .css('overflow', 'hidden');
      $('.search-popup-overlay').click(onPopupClose);
      $('.popup').toggle();
      var $localSearchInput = $('#local-search-input');
      $localSearchInput.attr("autocapitalize", "none");
      $localSearchInput.attr("autocorrect", "off");
      $localSearchInput.focus();
    }

    // search function;
    // Download the search database from `path`, build an in-memory list of
    // {title, content, url} entries, and bind a keyword-search handler to
    // the input element `search_id` that renders results into `content_id`.
    var searchFunc = function(path, search_id, content_id) {
      'use strict';

      // start loading animation
      $("body")
        .append('<div class="search-popup-overlay local-search-pop-overlay">' +
          '<div id="search-loading-icon">' +
          '<i class="fa fa-spinner fa-pulse fa-5x fa-fw"></i>' +
          '</div>' +
          '</div>')
        .css('overflow', 'hidden');
      $("#search-loading-icon").css('margin', '20% auto 0 auto').css('text-align', 'center');

      $.ajax({
        url: path,
        dataType: isXml ? "xml" : "json",
        async: true,
        success: function(res) {
          // get the contents from search data
          isfetched = true;
          $('.popup').detach().appendTo('.header-inner');
          // XML index: map each <entry> to a plain object; a JSON index is
          // assumed to already be an array of such objects.
          var datas = isXml ? $("entry", res).map(function() {
            return {
              title: $("title", this).text(),
              content: $("content",this).text(),
              url: $("url" , this).text()
            };
          }).get() : res;
          var input = document.getElementById(search_id);
          var resultContent = document.getElementById(content_id);
          // Runs one search over `datas` using the current input value and
          // rewrites the result-list HTML.
          var inputEventFunction = function() {
            var searchText = input.value.trim().toLowerCase();
            // Split on whitespace/hyphens; for multi-word queries the full
            // phrase is kept as an extra keyword.
            var keywords = searchText.split(/[\s\-]+/);
            if (keywords.length > 1) {
              keywords.push(searchText);
            }
            var resultItems = [];
            if (searchText.length > 0) {
              // perform local searching
              datas.forEach(function(data) {
                var isMatch = false;
                var hitCount = 0;
                var searchTextCount = 0;
                var title = data.title.trim();
                var titleInLowerCase = title.toLowerCase();
                // Strip HTML tags from the stored content before matching.
                var content = data.content.trim().replace(/<[^>]+>/g,"");
                var contentInLowerCase = content.toLowerCase();
                var articleUrl = decodeURIComponent(data.url);
                var indexOfTitle = [];
                var indexOfContent = [];
                // only match articles with not empty titles
                if(title != '') {
                  keywords.forEach(function(keyword) {
                    // Return every occurrence of `word` in `text` as a list
                    // of {position, word} records; lowercases both unless
                    // caseSensitive is set.
                    function getIndexByWord(word, text, caseSensitive) {
                      var wordLen = word.length;
                      if (wordLen === 0) {
                        return [];
                      }
                      var startPosition = 0, position = [], index = [];
                      if (!caseSensitive) {
                        text = text.toLowerCase();
                        word = word.toLowerCase();
                      }
                      while ((position = text.indexOf(word, startPosition)) > -1) {
                        index.push({position: position, word: word});
                        startPosition = position + wordLen;
                      }
                      return index;
                    }

                    indexOfTitle = indexOfTitle.concat(getIndexByWord(keyword, titleInLowerCase, false));
                    indexOfContent = indexOfContent.concat(getIndexByWord(keyword, contentInLowerCase, false));
                  });
                  if (indexOfTitle.length > 0 || indexOfContent.length > 0) {
                    isMatch = true;
                    hitCount = indexOfTitle.length + indexOfContent.length;
                  }
                }

                // show search results

                if (isMatch) {
                  // sort index by position of keyword

                  // Descending by position (ties: shorter word first) so
                  // mergeIntoSlice can consume hits from the array's end
                  // in ascending position order.
                  [indexOfTitle, indexOfContent].forEach(function (index) {
                    index.sort(function (itemLeft, itemRight) {
                      if (itemRight.position !== itemLeft.position) {
                        return itemRight.position - itemLeft.position;
                      } else {
                        return itemLeft.word.length - itemRight.word.length;
                      }
                    });
                  });

                  // merge hits into slices

                  // Destructively pops hits off the end of `index` that fit
                  // within [start, end); hits overlapping an accepted hit are
                  // discarded. Counts occurrences of the exact full query and
                  // accumulates them into the enclosing `searchTextCount`.
                  function mergeIntoSlice(text, start, end, index) {
                    var item = index[index.length - 1];
                    var position = item.position;
                    var word = item.word;
                    var hits = [];
                    var searchTextCountInSlice = 0;
                    while (position + word.length <= end && index.length != 0) {
                      if (word === searchText) {
                        searchTextCountInSlice++;
                      }
                      hits.push({position: position, length: word.length});
                      var wordEnd = position + word.length;

                      // move to next position of hit

                      index.pop();
                      while (index.length != 0) {
                        item = index[index.length - 1];
                        position = item.position;
                        word = item.word;
                        if (wordEnd > position) {
                          index.pop();
                        } else {
                          break;
                        }
                      }
                    }
                    searchTextCount += searchTextCountInSlice;
                    return {
                      hits: hits,
                      start: start,
                      end: end,
                      searchTextCount: searchTextCountInSlice
                    };
                  }

                  var slicesOfTitle = [];
                  if (indexOfTitle.length != 0) {
                    slicesOfTitle.push(mergeIntoSlice(title, 0, title.length, indexOfTitle));
                  }

                  var slicesOfContent = [];
                  while (indexOfContent.length != 0) {
                    var item = indexOfContent[indexOfContent.length - 1];
                    var position = item.position;
                    var word = item.word;
                    // cut out 100 characters
                    var start = position - 20;
                    var end = position + 80;
                    if(start < 0){
                      start = 0;
                    }
                    if (end < position + word.length) {
                      end = position + word.length;
                    }
                    if(end > content.length){
                      end = content.length;
                    }
                    slicesOfContent.push(mergeIntoSlice(content, start, end, indexOfContent));
                  }

                  // sort slices in content by search text's count and hits' count

                  slicesOfContent.sort(function (sliceLeft, sliceRight) {
                    if (sliceLeft.searchTextCount !== sliceRight.searchTextCount) {
                      return sliceRight.searchTextCount - sliceLeft.searchTextCount;
                    } else if (sliceLeft.hits.length !== sliceRight.hits.length) {
                      return sliceRight.hits.length - sliceLeft.hits.length;
                    } else {
                      return sliceLeft.start - sliceRight.start;
                    }
                  });

                  // select top N slices in content

                  // Template-injected limit: here 1 content excerpt per post.
                  var upperBound = parseInt('1');
                  if (upperBound >= 0) {
                    slicesOfContent = slicesOfContent.slice(0, upperBound);
                  }

                  // highlight title and content

                  // Rebuild `text` within [slice.start, slice.end), wrapping
                  // each hit in a <b class="search-keyword"> element.
                  function highlightKeyword(text, slice) {
                    var result = '';
                    var prevEnd = slice.start;
                    slice.hits.forEach(function (hit) {
                      result += text.substring(prevEnd, hit.position);
                      var end = hit.position + hit.length;
                      result += '<b class="search-keyword">' + text.substring(hit.position, end) + '</b>';
                      prevEnd = end;
                    });
                    result += text.substring(prevEnd, slice.end);
                    return result;
                  }

                  var resultItem = '';

                  if (slicesOfTitle.length != 0) {
                    resultItem += "<li><a href='" + articleUrl + "' class='search-result-title'>" + highlightKeyword(title, slicesOfTitle[0]) + "</a>";
                  } else {
                    resultItem += "<li><a href='" + articleUrl + "' class='search-result-title'>" + title + "</a>";
                  }

                  slicesOfContent.forEach(function (slice) {
                    resultItem += "<a href='" + articleUrl + "'>" +
                      "<p class=\"search-result\">" + highlightKeyword(content, slice) +
                      "...</p>" + "</a>";
                  });

                  resultItem += "</li>";
                  resultItems.push({
                    item: resultItem,
                    searchTextCount: searchTextCount,
                    hitCount: hitCount,
                    id: resultItems.length
                  });
                }
              })
            };
            // Empty query: show a search icon; no matches: show a frown;
            // otherwise sort results by relevance and render them.
            if (keywords.length === 1 && keywords[0] === "") {
              resultContent.innerHTML = '<div id="no-result"><i class="fa fa-search fa-5x" /></div>'
            } else if (resultItems.length === 0) {
              resultContent.innerHTML = '<div id="no-result"><i class="fa fa-frown-o fa-5x" /></div>'
            } else {
              resultItems.sort(function (resultLeft, resultRight) {
                if (resultLeft.searchTextCount !== resultRight.searchTextCount) {
                  return resultRight.searchTextCount - resultLeft.searchTextCount;
                } else if (resultLeft.hitCount !== resultRight.hitCount) {
                  return resultRight.hitCount - resultLeft.hitCount;
                } else {
                  return resultRight.id - resultLeft.id;
                }
              });
              var searchResultList = '<ul class=\"search-result-list\">';
              resultItems.forEach(function (result) {
                searchResultList += result.item;
              })
              searchResultList += "</ul>";
              resultContent.innerHTML = searchResultList;
            }
          }

          // Trigger mode is template-configured; since 'auto' !== 'manual'
          // here, searching runs on icon click or the Enter key (keyCode 13).
          if ('auto' === 'manual') {
            input.addEventListener('input', inputEventFunction);
          } else {
            $('.search-icon').click(inputEventFunction);
            input.addEventListener('keypress', function (event) {
              if (event.keyCode === 13) {
                inputEventFunction();
              }
            });
          }

          // remove loading animation
          $(".local-search-pop-overlay").remove();
          $('body').css('overflow', '');

          proceedsearch();
        }
      });
    }

    // handle and trigger popup window;
    // The first trigger click downloads the search index lazily; later
    // clicks just reopen the already-populated popup.
    $('.popup-trigger').click(function(e) {
      e.stopPropagation();
      if (isfetched === false) {
        searchFunc(path, 'local-search-input', 'local-search-result');
      } else {
        proceedsearch();
      };
    });

    $('.popup-btn-close').click(onPopupClose);
    // Clicks inside the popup must not bubble out to the overlay/document,
    // which would close it.
    $('.popup').click(function(e){
      e.stopPropagation();
    });
    // Dismiss the popup with the Escape key (which === 27).
    $(document).on('keyup', function (event) {
      var shouldDismissSearchPopup = event.which === 27 &&
        $('.search-popup').is(':visible');
      if (shouldDismissSearchPopup) {
        onPopupClose();
      }
    });
  </script>





  

  

  

  
  
    <script type="text/x-mathjax-config">
      // MathJax tex2jax preprocessor: treat $...$ and \(...\) as inline
      // math, honour \$ escapes, and skip code-like container elements.
      MathJax.Hub.Config({
        tex2jax: {
          inlineMath: [ ['$','$'], ["\\(","\\)"]  ],
          processEscapes: true,
          skipTags: ['script', 'noscript', 'style', 'textarea', 'pre', 'code']
        }
      });
    </script>

    <script type="text/x-mathjax-config">
      // After typesetting, tag each math element's parent node with the
      // 'has-jax' class so the theme CSS can style rendered formulas.
      MathJax.Hub.Queue(function() {
        var all = MathJax.Hub.getAllJax(), i;
        for (i=0; i < all.length; i += 1) {
          all[i].SourceElement().parentNode.className += ' has-jax';
        }
      });
    </script>
    <script type="text/javascript" src="//cdn.bootcss.com/mathjax/2.7.1/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
  


  

  

</body>
</html>
