<!DOCTYPE html>



  


<html class="theme-next gemini use-motion" lang="zh-CN">
<head>
  <meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#222">









<meta http-equiv="Cache-Control" content="no-transform" />
<meta http-equiv="Cache-Control" content="no-siteapp" />















  
  
  <link href="/lib/fancybox/source/jquery.fancybox.css?v=2.1.5" rel="stylesheet">




  
  
  
  

  
    
    
  

  
    
      
    

    
  

  
    
      
    

    
  

  
    
      
    

    
  

  
    
      
    

    
  

  
    
    
    <link href="https://fonts.googleapis.com/css?family=Microsoft+YaHei:300,300italic,400,400italic,700,700italic|Inziu+Iosevka+Slab+SC:300,300italic,400,400italic,700,700italic&subset=latin,latin-ext" rel="stylesheet">
  






<link href="/lib/font-awesome/css/font-awesome.min.css?v=4.6.2" rel="stylesheet">

<link href="/css/main.css?v=5.1.2" rel="stylesheet">


  <meta name="keywords" content="Hexo, NexT" />








  <link rel="icon" type="image/x-icon" href="/favicon.ico?v=5.1.2">






<meta name="description" content="Everything about Machine Learning">
<meta property="og:type" content="article">
<meta property="og:title" content="Machine Learning">
<meta property="og:url" content="http://idmk.oschina.io/2017/11/26/Machine-Learning/index.html">
<meta property="og:site_name" content="苦舟">
<meta property="og:description" content="Everything about Machine Learning">
<meta property="og:locale" content="zh-CN">
<meta property="og:image" content="http://idmk.oschina.io/2017/11/26/Machine-Learning/markdown-img-paste-20171126185139332.png">
<meta property="og:image" content="http://idmk.oschina.io/2017/11/26/Machine-Learning/markdown-img-paste-20171126213555811.png">
<meta property="og:image" content="http://idmk.oschina.io/2017/11/26/Machine-Learning/2018-03-10-11-18-54.png">
<meta property="og:updated_time" content="2017-11-26T10:07:39.000Z">
<meta name="twitter:card" content="summary">
<meta name="twitter:title" content="Machine Learning">
<meta name="twitter:description" content="Everything about Machine Learning">
<meta name="twitter:image" content="http://idmk.oschina.io/2017/11/26/Machine-Learning/markdown-img-paste-20171126185139332.png">



<script type="text/javascript" id="hexo.configurations">
  // Global runtime configuration consumed by the NexT theme's scripts.
  var NexT = window.NexT || {};
  var CONFIG = {
    root: '/',
    scheme: 'Gemini',
    // Sidebar placement and behaviour flags.
    sidebar: {
      position: 'left',
      display: 'hide',
      offset: 12,
      offset_float: 12,
      b2t: false,
      scrollpercent: false,
      onmobile: false
    },
    fancybox: true,
    tabs: true,
    motion: true,
    // Duoshuo comment-system identity.
    duoshuo: {
      userId: '0',
      author: '博主'
    },
    // Algolia search settings (empty credentials leave the widget unconfigured).
    algolia: {
      applicationID: '',
      apiKey: '',
      indexName: '',
      hits: { per_page: 10 },
      labels: {
        input_placeholder: 'Search for Posts',
        hits_empty: "We didn't find any results for the search: ${query}",
        hits_stats: '${hits} results found in ${time} ms'
      }
    }
  };
</script>



  <link rel="canonical" href="http://idmk.oschina.io/2017/11/26/Machine-Learning/"/>





  <title>Machine Learning | 苦舟</title>
  














</head>

<body itemscope itemtype="http://schema.org/WebPage" lang="zh-CN">

  
  
    
  

  <div class="container sidebar-position-left page-post-detail ">
    <div class="headband"></div>

    <header id="header" class="header" itemscope itemtype="http://schema.org/WPHeader">
      <div class="header-inner"><div class="site-brand-wrapper">
  <div class="site-meta ">
    

    <div class="custom-logo-site-title">
      <a href="/"  class="brand" rel="start">
        <span class="logo-line-before"><i></i></span>
        <span class="site-title">苦舟</span>
        <span class="logo-line-after"><i></i></span>
      </a>
    </div>
      
        <p class="site-subtitle">学海无涯，吾将上下求索。</p>
      
  </div>

  <div class="site-nav-toggle">
    <button>
      <span class="btn-bar"></span>
      <span class="btn-bar"></span>
      <span class="btn-bar"></span>
    </button>
  </div>
</div>

<nav class="site-nav">
  

  
    <ul id="menu" class="menu">
      
        
        <li class="menu-item menu-item-home">
          <a href="/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-home"></i> <br />
            
            首页
          </a>
        </li>
      
        
        <li class="menu-item menu-item-categories">
          <a href="/categories/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-th"></i> <br />
            
            分类
          </a>
        </li>
      
        
        <li class="menu-item menu-item-about">
          <a href="/about/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-user"></i> <br />
            
            关于
          </a>
        </li>
      
        
        <li class="menu-item menu-item-archives">
          <a href="/archives/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-archive"></i> <br />
            
            归档
          </a>
        </li>
      
        
        <li class="menu-item menu-item-tags">
          <a href="/tags/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-tags"></i> <br />
            
            标签
          </a>
        </li>
      
        
        <li class="menu-item menu-item-commonweal">
          <a href="/404.html" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-heartbeat"></i> <br />
            
            公益404
          </a>
        </li>
      

      
        <li class="menu-item menu-item-search">
          
            <a href="javascript:;" class="popup-trigger">
          
            
              <i class="menu-item-icon fa fa-search fa-fw"></i> <br />
            
            搜索
          </a>
        </li>
      
    </ul>
  

  
    <div class="site-search">
      
  <div class="popup search-popup local-search-popup">
  <div class="local-search-header clearfix">
    <span class="search-icon">
      <i class="fa fa-search"></i>
    </span>
    <span class="popup-btn-close">
      <i class="fa fa-times-circle"></i>
    </span>
    <div class="local-search-input-wrapper">
      <input autocomplete="off"
             placeholder="搜索..." spellcheck="false"
             type="text" id="local-search-input">
    </div>
  </div>
  <div id="local-search-result"></div>
</div>



    </div>
  
</nav>



 </div>
    </header>

    <main id="main" class="main">
      <div class="main-inner">
        <div class="content-wrap">
          <div id="content" class="content">
            

  <div id="posts" class="posts-expand">
    

  

  
  
  

  <article class="post post-type-normal" itemscope itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="http://idmk.oschina.io/2017/11/26/Machine-Learning/">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="name" content="东木金">
      <meta itemprop="description" content="">
      <meta itemprop="image" content="/uploads/avatar.jpg">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="苦舟">
    </span>

    
      <header class="post-header">

        
        
          <h1 class="post-title" itemprop="name headline">Machine Learning</h1>
        

        <div class="post-meta">
          <span class="post-time">
            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              
              <time title="创建于" itemprop="dateCreated datePublished" datetime="2017-11-26T18:07:39+08:00">
                2017-11-26
              </time>
            

            

            
          </span>

          

          
            
          

          
          

          

          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        <p>Everything about Machine Learning<br><a id="more"></a></p>
<h1 id="The-Discipline-of-Machine-Learning"><a href="#The-Discipline-of-Machine-Learning" class="headerlink" title="The Discipline of Machine Learning"></a>The Discipline of Machine Learning</h1><p>涉及概率论、统计学、逼近论、凸分析、算法复杂度理论等多门学科。</p>
<h2 id="Defining-Questions"><a href="#Defining-Questions" class="headerlink" title="Defining Questions"></a>Defining Questions</h2><p>A scientific field is best defined by the central question it studies. The field of Machine Learning seeks to answer the question<br>    “How can we build computer systems that <strong>automatically improve with experience</strong>, and what are <strong>the fundamental laws that govern all learning processes</strong>?”</p>
<h1 id="A-Few-Useful-Things-to-Know-about-Machine-Learning"><a href="#A-Few-Useful-Things-to-Know-about-Machine-Learning" class="headerlink" title="A Few Useful Things to Know about Machine Learning"></a>A Few Useful Things to Know about Machine Learning</h1><p>Developing successful machine learning applications requires a substantial amount of “ black art ” that is hard to find in textbooks. This article summarizes twelve key lessons that machine learning researchers and practitioners have learned. These include pitfalls to avoid, important issues to focus on, and answers to common questions.</p>
<p>Several fine textbooks are available to interested practitioners and researchers (e.g, [17, 25]). However, much of the “ folk knowledge ” that is needed to successfully develop machine learning applications is not readily available in them. As a result, many machine learning projects take much longer than necessary or wind up producing less than-ideal results. Yet much of this folk knowledge is fairly easy to communicate. This is the purpose of this article.</p>
<p>Many different types of machine learning exist, but for illustration purposes I will focus on the most mature and widely used one: classification. Nevertheless, the issues I will discuss apply across all of machine learning. A classifier is a system that inputs (typically) a vector of discrete and/or continuous feature values and outputs a single discrete value, the class. For example, a spam filter classifies email messages into “spam” or “not spam,” and its input may be a Boolean vector x = (x<sub>1</sub>, …, x<sub>j</sub>, …, x<sub>d</sub>), where x<sub>j</sub> = 1 if the j-th word in the dictionary appears in the email and x<sub>j</sub> = 0 otherwise. A learner inputs a training set of examples (x<sub>i</sub>, y<sub>i</sub>), where x<sub>i</sub> = (x<sub>i,1</sub>, …, x<sub>i,d</sub>) is an observed input and y<sub>i</sub> is the corresponding output, and outputs a classifier.<br>The test of the learner is whether this classifier produces the correct output y<sub>t</sub> for future examples x<sub>t</sub> (e.g., whether the spam filter correctly classifies previously unseen emails as spam or not spam).</p>
<h2 id="LEARNING-REPRESENTATION-EVALUATION-OPTIMIZATION"><a href="#LEARNING-REPRESENTATION-EVALUATION-OPTIMIZATION" class="headerlink" title="LEARNING = REPRESENTATION + EVALUATION + OPTIMIZATION"></a>LEARNING = REPRESENTATION + EVALUATION + OPTIMIZATION</h2><p>Suppose you have an application that you think machine learning might be good for. The first problem facing you is the bewildering variety of learning algorithms available.<br>Which one to use? There are literally thousands available, and hundreds more are published each year. The key to not getting lost in this huge space is to realize that it consists of combinations of just three components. The components are:</p>
<h3 id="Representation"><a href="#Representation" class="headerlink" title="Representation"></a>Representation</h3><p>A classifier must be represented in some formal language that the computer can handle. Conversely, choosing a representation for a learner is tantamount to choosing the set of classifiers that it can possibly learn. This set is called the hypothesis space of the learner. If a classifier is not in the hypothesis space, it cannot be learned. A related question, which we will address in a later section, is how to represent the input, i.e., what features to use.</p>
<h3 id="Evaluation"><a href="#Evaluation" class="headerlink" title="Evaluation"></a>Evaluation</h3><p>An evaluation function (also called objective function or scoring function) is needed to distinguish good classifiers from bad ones. The evaluation function used internally by the algorithm may differ from the external one that we want the classifier to optimize, for ease of optimization (see below) and due to the issues discussed in the next section.</p>
<h3 id="Optimization"><a href="#Optimization" class="headerlink" title="Optimization"></a>Optimization</h3><p>Finally, we need a method to search among the classifiers in the language for the highest-scoring one. The choice of optimization technique is key to the efficiency of the learner, and also helps determine the classifier produced if the evaluation function has more than one optimum. It is common for new learners to start out using off-the-shelf optimizers, which are later replaced by custom-designed ones.</p>
<p>Table 1: The three components of learning algorithms.<br><img src="/2017/11/26/Machine-Learning/markdown-img-paste-20171126185139332.png" alt="markdown-img-paste-20171126185139332.png" title=""></p>
<p>Of course, not all combinations of one component from each column of Table 1 make equal sense. For example, discrete representations naturally go with combinatorial optimization, and continuous ones with continuous optimization. Nevertheless, many learners have both discrete and continuous components, and in fact the day may not be far when every single possible combination has appeared in some learner!</p>
<p>Most textbooks are organized by representation, and it ’ s easy to overlook the fact that the other components are equally important. There is no simple recipe for choosing each component, but the next sections touch on some of the key issues. And, as we will see below, some choices in a machine learning project may be even more important than the choice of learner.</p>
<h2 id="IT-’-S-GENERALIZATION-THAT-COUNTS"><a href="#IT-’-S-GENERALIZATION-THAT-COUNTS" class="headerlink" title="IT ’ S GENERALIZATION THAT COUNTS"></a>IT ’ S GENERALIZATION THAT COUNTS</h2><p>Of course, holding out data reduces the amount available for training. This can be mitigated by doing cross-validation: randomly dividing your training data into (say) ten subsets, holding out each one while training on the rest, testing each learned classifier on the examples it did not see, and averaging the results to see how well the particular parameter setting does.</p>
<h2 id="DATA-ALONE-IS-NOT-ENOUGH"><a href="#DATA-ALONE-IS-NOT-ENOUGH" class="headerlink" title="DATA ALONE IS NOT ENOUGH"></a>DATA ALONE IS NOT ENOUGH</h2><p>Generalization being the goal has another major consequence: data alone is not enough, no matter how much of it you have.<br>Consider learning a Boolean function of (say) 100 variables from a million examples. There are 2<sup>100</sup> − 10<sup>6</sup> examples whose classes you don’t know. How do you figure out what those classes are? In the absence of further information, there is just no way to do this that beats flipping a coin. This observation was first made (in somewhat different form) by the philosopher David Hume over 200 years ago, but even today many mistakes in machine learning stem from failing to appreciate it. Every learner must embody some knowledge or assumptions beyond the data it’s given in order to generalize beyond it. This was formalized by Wolpert in his famous “no free lunch” theorems, according to which no learner can beat random guessing over all possible functions to be learned [26].</p>
<p>This seems like rather depressing news. How then can we ever hope to learn anything? Luckily, the functions we want to learn in the real world are not drawn uniformly from the set of all mathematically possible functions! In fact, very general assumptions — like smoothness, similar examples having similar classes, limited dependences, or limited complexity — are often enough to do very well, and this is a large part of why machine learning has been so successful.<br>Like deduction, induction (what learners do) is a knowledge lever: it turns a small amount of input knowledge into a large amount of output knowledge. Induction is a vastly more powerful lever than deduction, requiring much less input knowledge to produce useful results, but it still needs more than zero input knowledge to work. And, as with any lever, the more we put in, the more we can get out.<br>A corollary of this is that one of the key criteria for choosing a representation is which kinds of knowledge are easily expressed in it. For example, if we have a lot of knowledge about what makes examples similar in our domain, instance-based methods may be a good choice. If we have knowledge about probabilistic dependencies, graphical models are a good fit. And if we have knowledge about what kinds of preconditions are required by each class, “IF … THEN …” rules may be the best option. The most useful learners in this regard are those that don’t just have assumptions hard-wired into them, but allow us to state them explicitly, vary them widely, and incorporate them automatically into the learning (e.g., using first-order logic [22] or grammars [6]).</p>
<h2 id="OVERFITTING-HAS-MANY-FACES"><a href="#OVERFITTING-HAS-MANY-FACES" class="headerlink" title="OVERFITTING HAS MANY FACES"></a>OVERFITTING HAS MANY FACES</h2><img src="/2017/11/26/Machine-Learning/markdown-img-paste-20171126213555811.png" alt="markdown-img-paste-20171126213555811.png" title="">
<h2 id="INTUITION-FAILS-IN-HIGH-DIMENSIONS"><a href="#INTUITION-FAILS-IN-HIGH-DIMENSIONS" class="headerlink" title="INTUITION FAILS IN HIGH DIMENSIONS"></a>INTUITION FAILS IN HIGH DIMENSIONS</h2><p>Many algorithms that work fine in low dimensions become intractable when the input is high-dimensional.<br>But in machine learning it refers to much more. Generalizing correctly becomes exponentially harder as the dimensionality (number of features) of the examples grows, because a fixed-size training set covers a dwindling fraction of the input space. Even with a moderate dimension of 100 and a huge training set of a trillion examples, the latter covers only a fraction of about 10<sup>−18</sup> of the input space. This is what makes machine learning both necessary and hard.</p>
<h2 id="THEORETICAL-GUARANTEES-ARE-NOT-WHAT-THEY-SEEM"><a href="#THEORETICAL-GUARANTEES-ARE-NOT-WHAT-THEY-SEEM" class="headerlink" title="THEORETICAL GUARANTEES ARE NOT WHAT THEY SEEM"></a>THEORETICAL GUARANTEES ARE NOT WHAT THEY SEEM</h2><p>Machine learning papers are full of theoretical guarantees.<br>The most common type is a bound on the number of examples needed to ensure good generalization. What should you make of these guarantees? First of all, it’s remarkable that they are even possible. Induction is traditionally contrasted with deduction: in deduction you can guarantee that the conclusions are correct; in induction all bets are off. Or such was the conventional wisdom for many centuries. One of the major developments of recent decades has been the realization that in fact we can have guarantees on the results of induction, particularly if we’re willing to settle for probabilistic guarantees.</p>
<h2 id="FEATURE-ENGINEERING-IS-THE-KEY"><a href="#FEATURE-ENGINEERING-IS-THE-KEY" class="headerlink" title="FEATURE ENGINEERING IS THE KEY"></a>FEATURE ENGINEERING IS THE KEY</h2><p>First-timers are often surprised by how little time in a machine learning project is spent actually doing machine learning. But it makes sense if you consider how time-consuming it is to gather data, integrate it, clean it and pre-process it, and how much trial and error can go into feature design.<br>Also, machine learning is not a one-shot process of building a data set and running a learner, but rather an iterative process of running the learner, analyzing the results, modifying the data and/or the learner, and repeating. Learning is often the quickest part of this, but that’s because we’ve already mastered it pretty well! Feature engineering is more difficult because it’s domain-specific, while learners can be largely general-purpose. However, there is no sharp frontier between the two, and this is another reason the most useful learners are those that facilitate incorporating knowledge.<br>Of course, one of the holy grails of machine learning is to automate more and more of the feature engineering process.<br>One way this is often done today is by automatically generating large numbers of candidate features and selecting the best by (say) their information gain with respect to the class. But bear in mind that features that look irrelevant in isolation may be relevant in combination. For example, if the class is an XOR of k input features, each of them by itself carries no information about the class. (If you want to annoy machine learners, bring up XOR.) On the other hand, running a learner with a very large number of features to find out which ones are useful in combination may be too time-consuming, or cause overfitting. So there is ultimately no replacement for the smarts you put into feature engineering.</p>
<h2 id="MORE-DATA-BEATS-A-CLEVERER-ALGORITHM"><a href="#MORE-DATA-BEATS-A-CLEVERER-ALGORITHM" class="headerlink" title="MORE DATA BEATS A CLEVERER ALGORITHM"></a>MORE DATA BEATS A CLEVERER ALGORITHM</h2><p>Suppose you’ve constructed the best set of features you can, but the classifiers you’re getting are still not accurate enough. What can you do now?</p>
<h2 id="LEARN-MANY-MODELS-NOT-JUST-ONE"><a href="#LEARN-MANY-MODELS-NOT-JUST-ONE" class="headerlink" title="LEARN MANY MODELS, NOT JUST ONE"></a>LEARN MANY MODELS, NOT JUST ONE</h2><p>But then researchers noticed that, if instead of selecting the best variation found, we combine many variations, the results are better — often much better — and at little extra effort for the user.</p>
<h2 id="SIMPLICITY-DOES-NOT-IMPLY-ACCURACY"><a href="#SIMPLICITY-DOES-NOT-IMPLY-ACCURACY" class="headerlink" title="SIMPLICITY DOES NOT IMPLY ACCURACY"></a>SIMPLICITY DOES NOT IMPLY ACCURACY</h2><p>pass</p>
<h1 id="A-Few-Useful-Things-to-Know-about-Machine-Learning-1"><a href="#A-Few-Useful-Things-to-Know-about-Machine-Learning-1" class="headerlink" title="A Few Useful Things to Know about Machine Learning"></a>A Few Useful Things to Know about Machine Learning</h1><h2 id="概述"><a href="#概述" class="headerlink" title="概述"></a>概述</h2><p>机器学习被普遍认为任何一个 IT 民工必备的工具之一，和学习任何语言工具一样，简单的体验一下或许非常简单。但这并不代表你能真正驾驭它，如果想深入不是一年两年能搞得定事情，本文作者总结了对于刚入门机器学习者需要注意一些事项。<br>什么是学习</p>
<p>机器学习包括无监督、半监督、监督、强化学习等，但是对于很多初入门的 MLer 可能就是监督学习，即给定训练样本和标签，学习出某个模型进行新样本预测。</p>
<p>学习 = 表示 + 评估 + 优化</p>
<pre><code>表示：表示的过程也是模型选择过程，也是假设空间确定过程，也是特征选择过程。也是从给定训练数据中找到一个合适形式表示数据，从而能够进行泛化。也有人认为学习过程也是一种记忆和推理过程，根据已有事实进行新样本推理。
评估：模型的泛化能力，好的模型不仅仅能够对训练样本能够有效预测，还能够对未知样本进行有效预测。评估过程提供一个评价函数对模型进行量化。
优化：寻找最优表示的过程，确定表示参数的过程。不同的优化算法从假设空间遍历的过程都是不同的。
</code></pre><h2 id="泛化很重要"><a href="#泛化很重要" class="headerlink" title="泛化很重要"></a>泛化很重要</h2><p>由于我们不可能获取到全部训练样本，即使能也可能存储不下全部样本，这样必须有效提供模型泛化能力。<br>一个好的模型不仅仅是在训练数据上表现出好的效果，而是对于不可见样本也能表现出好的效果。因此训练时常将训练数据划分为测试集合和训练集合，通过训练集合进行模型学习，测试集合进行模型效果验证。对于模型参数的选择可以通过交叉验证（CV）进行选择。</p>
<h2 id="仅仅有数据是不够的"><a href="#仅仅有数据是不够的" class="headerlink" title="仅仅有数据是不够的"></a>仅仅有数据是不够的</h2><p>由于机器学习目标是泛化，一切不以泛化为目的的模型选择都是胡搞。因此需要对数据进行合理假设和推理演绎。<br>好在在统计领域有大量工具能够发现数据一些本质，如何有效分析数据能够找到数据合理假设是关键。</p>
<h2 id="过拟合问题"><a href="#过拟合问题" class="headerlink" title="过拟合问题"></a>过拟合问题</h2><p>机器学习非常大两个问题过拟合和欠拟合，过拟合一个常见表现为对于训练数据有非常好效果，对于测试数据效果一般或者非常差。<br>过拟合问题常常被表示为偏差和方差问题，任何机器学习书籍都会介绍这个分解过程。<br>关键是如何发现并解决过拟合问题。</p>
<h2 id="维度灾难"><a href="#维度灾难" class="headerlink" title="维度灾难"></a>维度灾难</h2><p>很多模型在低维空间能够有效表示数据，但是高维空间效果非常差，因此模型对于维度的支持能力也是 MLer 需要考虑的。</p>
<h2 id="理论保证不总是可信"><a href="#理论保证不总是可信" class="headerlink" title="理论保证不总是可信"></a>理论保证不总是可信</h2><p>理论保证不总是可信的，理论边界都是一个极限过程，在实际中这种极限不总是可达的，关键是一般情况下的适用能力。</p>
<h2 id="特征工程是关键"><a href="#特征工程是关键" class="headerlink" title="特征工程是关键"></a>特征工程是关键</h2><p>特征工程有时候能够使得模型发挥最大作用，然而发现特征过程一般都是痛苦的，而且是领域相关的。<br>深度学习出现能够有效减少特征工程消耗时间，然而一个好的特征仍然非常重要</p>
<h2 id="更多的数据胜过更聪明算法"><a href="#更多的数据胜过更聪明算法" class="headerlink" title="更多的数据胜过更聪明算法"></a>更多的数据胜过更聪明算法</h2><p>数据比模型重要，越多数据越能揭露数据本质。因此遇到模型效果不好时，先考虑如何扩充训练数据。在训练数据固定时，才去寻找更好的表示</p>
<h2 id="学习多个模型而不是一个"><a href="#学习多个模型而不是一个" class="headerlink" title="学习多个模型而不是一个"></a>学习多个模型而不是一个</h2><p>由于训练数据并不总是独立同分布（IID）任何一份训练数据都有可能是偏置的，因此模型集成是一个非常重要的手段。常见的集成方法包括 bagging 和 boosting</p>
<h2 id="简单并不意味着精确"><a href="#简单并不意味着精确" class="headerlink" title="简单并不意味着精确"></a>简单并不意味着精确</h2><p>简单是为了提高泛化能力，如何同时提高泛化和精确才是深入考虑的。</p>
<h2 id="可表示并不意味可学习"><a href="#可表示并不意味可学习" class="headerlink" title="可表示并不意味可学习"></a>可表示并不意味可学习</h2><p>pass</p>
<h2 id="相关并不意味因果"><a href="#相关并不意味因果" class="headerlink" title="相关并不意味因果"></a>相关并不意味因果</h2><p>pass</p>
<h2 id="总结"><a href="#总结" class="headerlink" title="总结"></a>总结</h2><p>一个好的机器学习模型能够解决实际应用，而不是机器学习多高级，能够解决问题的算法都是好模型。<br>“天下没有白吃午餐”：任何模型都不是通吃的，在应用中寻找最合适的算法而不是最好的算法。</p>
<h1 id="Evaluation-1"><a href="#Evaluation-1" class="headerlink" title="Evaluation"></a>Evaluation</h1><p>【准确率】 accuracy：正确分类的样本 / 总样本，(TP+TN)/(ALL)<br>在不平衡分类问题中难以准确度量：比如 98% 的正样本只需全部预测为正即可获得 98% 准确率</p>
<p>【精确率】【查准率】 precision：TP/(TP+FP)，在你预测为 1 的样本中实际为 1 的概率<br>查准率在检索系统中：检出的相关文献与检出的全部文献的百分比，衡量检索的信噪比</p>
<p>【召回率】【查全率】 recall：TP/(TP+FN)，在实际为 1 的样本中你预测为 1 的概率<br>查全率在检索系统中：检出的相关文献与全部相关文献的百分比，衡量检索的覆盖率</p>
<p>【 ROC 】：常被用来评价一个二值分类器的优劣<br><img src="/2017/11/26/Machine-Learning/2018-03-10-11-18-54.png" alt="2018-03-10-11-18-54.png" title=""><br>ROC 曲线的横坐标为 false positive rate（FPR）：FP/(FP+TN)<br>假阳性率，即实际无病，但根据筛检被判为有病的百分比。<br>在实际为 0 的样本中你预测为 1 的概率<br>纵坐标为 true positive rate（TPR）：TP/(TP+FN)<br>真阳性率，即实际有病，但根据筛检被判为有病的百分比。<br>在实际为 1 的样本中你预测为 1 的概率，此处即【召回率】【查全率】 recall</p>
<p>接下来我们考虑 ROC 曲线图中的四个点和一条线。<br>第一个点，(0,1)，即 FPR=0,TPR=1，这意味着无病的没有被误判，有病的都全部检测到，这是一个完美的分类器，它将所有的样本都正确分类。<br>第二个点，(1,0)，即 FPR=1，TPR=0，类似地分析可以发现这是一个最糟糕的分类器，因为它成功避开了所有的正确答案。<br>第三个点，(0,0)，即 FPR=TPR=0，即 FP（false positive）=TP（true positive）=0，没病的没有被误判但有病的全都没被检测到，即全部选 0<br>类似的，第四个点（1,1），分类器实际上预测所有的样本都为 1。<br>经过以上的分析可得到：ROC 曲线越接近左上角，该分类器的性能越好。</p>

      
    </div>
    
    
    

    

    

    

    <footer class="post-footer">
      

      
      
      

      
        <div class="post-nav">
          <div class="post-nav-next post-nav-item">
            
              <a href="/2017/11/22/How-to-Draw-a-Dynamic-Graph/" rel="next" title="How to Draw a Dynamic Graph">
                <i class="fa fa-chevron-left"></i> How to Draw a Dynamic Graph
              </a>
            
          </div>

          <span class="post-nav-divider"></span>

          <div class="post-nav-prev post-nav-item">
            
              <a href="/2017/11/26/Deep-Learning-Tutorial-Hung-yi-Lee/" rel="prev" title="Deep Learning Tutorial Hung-Yi Lee">
                Deep Learning Tutorial Hung-Yi Lee <i class="fa fa-chevron-right"></i>
              </a>
            
          </div>
        </div>
      

      
      
    </footer>
  </div>
  
  
  
  </article>



    <div class="post-spread">
      
    </div>
  </div>


          </div>
          


          
  <div class="comments" id="comments">
    
  </div>


        </div>
        
          
  
  <div class="sidebar-toggle">
    <div class="sidebar-toggle-line-wrap">
      <span class="sidebar-toggle-line sidebar-toggle-line-first"></span>
      <span class="sidebar-toggle-line sidebar-toggle-line-middle"></span>
      <span class="sidebar-toggle-line sidebar-toggle-line-last"></span>
    </div>
  </div>

  <aside id="sidebar" class="sidebar">
    
    <div class="sidebar-inner">

      

      
        <ul class="sidebar-nav motion-element">
          <li class="sidebar-nav-toc sidebar-nav-active" data-target="post-toc-wrap" >
            文章目录
          </li>
          <li class="sidebar-nav-overview" data-target="site-overview">
            站点概览
          </li>
        </ul>
      

      <section class="site-overview sidebar-panel">
        <div class="site-author motion-element" itemprop="author" itemscope itemtype="http://schema.org/Person">
          <img class="site-author-image" itemprop="image"
               src="/uploads/avatar.jpg"
               alt="东木金" />
          <p class="site-author-name" itemprop="name">东木金</p>
           
              <p class="site-description motion-element" itemprop="description">正在学习机器学习，希望能变得很强！</p>
          
        </div>
        <nav class="site-state motion-element">

          
            <div class="site-state-item site-state-posts">
              <a href="/archives/">
                <span class="site-state-item-count">162</span>
                <span class="site-state-item-name">日志</span>
              </a>
            </div>
          

          
            
            
            <div class="site-state-item site-state-categories">
              <a href="/categories/index.html">
                <span class="site-state-item-count">18</span>
                <span class="site-state-item-name">分类</span>
              </a>
            </div>
          

          
            
            
            <div class="site-state-item site-state-tags">
              <a href="/tags/index.html">
                <span class="site-state-item-count">42</span>
                <span class="site-state-item-name">标签</span>
              </a>
            </div>
          

        </nav>

        

        <div class="links-of-author motion-element">
          
            
              <span class="links-of-author-item">
                <a href="https://github.com/bdmk" target="_blank" title="GitHub">
                  
                    <i class="fa fa-fw fa-github"></i>
                  
                    
                      GitHub
                    
                </a>
              </span>
            
              <span class="links-of-author-item">
                <a href="mailto:catcherchan94@outlook.com" target="_blank" title="E-Mail">
                  
                    <i class="fa fa-fw fa-envelope"></i>
                  
                    
                      E-Mail
                    
                </a>
              </span>
            
          
        </div>

        
        

        
        

        


      </section>

      
      <!--noindex-->
        <section class="post-toc-wrap motion-element sidebar-panel sidebar-panel-active">
          <div class="post-toc">

            
              
            

            
              <div class="post-toc-content"><ol class="nav"><li class="nav-item nav-level-1"><a class="nav-link" href="#The-Discipline-of-Machine-Learning"><span class="nav-number">1.</span> <span class="nav-text">The Discipline of Machine Learning</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#Defining-Questions"><span class="nav-number">1.1.</span> <span class="nav-text">Defining Questions</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#A-Few-Useful-Things-to-Know-about-Machine-Learning"><span class="nav-number">2.</span> <span class="nav-text">A Few Useful Things to Know about Machine Learning</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#LEARNING-REPRESENTATION-EVALUATION-OPTIMIZATION"><span class="nav-number">2.1.</span> <span class="nav-text">LEARNING = REPRESENTATION + EVALUATION + OPTIMIZATION</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#Representation"><span class="nav-number">2.1.1.</span> <span class="nav-text">Representation</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#Evaluation"><span class="nav-number">2.1.2.</span> <span class="nav-text">Evaluation</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#Optimization"><span class="nav-number">2.1.3.</span> <span class="nav-text">Optimization</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#IT-’-S-GENERALIZATION-THAT-COUNTS"><span class="nav-number">2.2.</span> <span class="nav-text">IT ’ S GENERALIZATION THAT COUNTS</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#DATA-ALONE-IS-NOT-ENOUGH"><span class="nav-number">2.3.</span> <span class="nav-text">DATA ALONE IS NOT ENOUGH</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#OVERFITTING-HAS-MANY-FACES"><span class="nav-number">2.4.</span> <span 
class="nav-text">OVERFITTING HAS MANY FACES</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#INTUITION-FAILS-IN-HIGH-DIMENSIONS"><span class="nav-number">2.5.</span> <span class="nav-text">INTUITION FAILS IN HIGH DIMENSIONS</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#THEORETICAL-GUARANTEES-ARE-NOT-WHAT-THEY-SEEM"><span class="nav-number">2.6.</span> <span class="nav-text">THEORETICAL GUARANTEES ARE NOT WHAT THEY SEEM</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#FEATURE-ENGINEERING-IS-THE-KEY"><span class="nav-number">2.7.</span> <span class="nav-text">FEATURE ENGINEERING IS THE KEY</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#MORE-DATA-BEATS-A-CLEVERER-ALGORITHM"><span class="nav-number">2.8.</span> <span class="nav-text">MORE DATA BEATS A CLEVERER ALGORITHM</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#LEARN-MANY-MODELS-NOT-JUST-ONE"><span class="nav-number">2.9.</span> <span class="nav-text">LEARN MANY MODELS, NOT JUST ONE</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#SIMPLICITY-DOES-NOT-IMPLY-ACCURACY"><span class="nav-number">2.10.</span> <span class="nav-text">SIMPLICITY DOES NOT IMPLY ACCURACY</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#A-Few-Useful-Things-to-Know-about-Machine-Learning-1"><span class="nav-number">3.</span> <span class="nav-text">A Few Useful Things to Know about Machine Learning</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#概述"><span class="nav-number">3.1.</span> <span class="nav-text">概述</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#泛化很重要"><span class="nav-number">3.2.</span> <span class="nav-text">泛化很重要</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#仅仅有数据是不够的"><span class="nav-number">3.3.</span> <span 
class="nav-text">仅仅有数据是不够的</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#过拟合问题"><span class="nav-number">3.4.</span> <span class="nav-text">过拟合问题</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#维度灾难"><span class="nav-number">3.5.</span> <span class="nav-text">维度灾难</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#理论保证不总是可信"><span class="nav-number">3.6.</span> <span class="nav-text">理论保证不总是可信</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#特征工程是关键"><span class="nav-number">3.7.</span> <span class="nav-text">特征工程是关键</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#更多的数据胜过更聪明算法"><span class="nav-number">3.8.</span> <span class="nav-text">更多的数据胜过更聪明算法</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#学习多个模型而不是一个"><span class="nav-number">3.9.</span> <span class="nav-text">学习多个模型而不是一个</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#简单并不意味着精确"><span class="nav-number">3.10.</span> <span class="nav-text">简单并不意味着精确</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#可表示并不意味可学习"><span class="nav-number">3.11.</span> <span class="nav-text">可表示并不意味可学习</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#相关并不意味因果"><span class="nav-number">3.12.</span> <span class="nav-text">相关并不意味因果</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#总结"><span class="nav-number">3.13.</span> <span class="nav-text">总结</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#Evaluation-1"><span class="nav-number">4.</span> <span class="nav-text">Evaluation</span></a></li></ol></div>
            

          </div>
        </section>
      <!--/noindex-->
      

      

    </div>
  </aside>


        
      </div>
    </main>

    <footer id="footer" class="footer">
      <div class="footer-inner">
        <div class="copyright">
  
  &copy;  2017 - 
  <span itemprop="copyrightYear">2018</span>
  <span class="with-love">
    <i class="fa fa-heart"></i>
  </span>
  <span class="author" itemprop="copyrightHolder">东木金</span>
</div>


<div class="powered-by">
  由 <a class="theme-link" href="https://hexo.io">Hexo</a> 强力驱动
</div>

<div class="theme-info">
  主题 -
  <a class="theme-link" href="https://github.com/iissnan/hexo-theme-next">
    NexT.Gemini
  </a>
</div>


        

        
      </div>
    </footer>

    
      <div class="back-to-top">
        <i class="fa fa-arrow-up"></i>
        
      </div>
    

  </div>

  

<script type="text/javascript">
  // Null out missing or non-native Promise implementations so a later
  // polyfill can install a working one (some scripts feature-detect Promise).
  (function () {
    var promiseTag = Object.prototype.toString.call(window.Promise);
    if (promiseTag !== '[object Function]') {
      window.Promise = null;
    }
  })();
</script>









  












  
  <script type="text/javascript" src="/lib/jquery/index.js?v=2.1.3"></script>

  
  <script type="text/javascript" src="/lib/fastclick/lib/fastclick.min.js?v=1.0.6"></script>

  
  <script type="text/javascript" src="/lib/jquery_lazyload/jquery.lazyload.js?v=1.9.7"></script>

  
  <script type="text/javascript" src="/lib/velocity/velocity.min.js?v=1.2.1"></script>

  
  <script type="text/javascript" src="/lib/velocity/velocity.ui.min.js?v=1.2.1"></script>

  
  <script type="text/javascript" src="/lib/fancybox/source/jquery.fancybox.pack.js?v=2.1.5"></script>


  


  <script type="text/javascript" src="/js/src/utils.js?v=5.1.2"></script>

  <script type="text/javascript" src="/js/src/motion.js?v=5.1.2"></script>



  
  


  <script type="text/javascript" src="/js/src/affix.js?v=5.1.2"></script>

  <script type="text/javascript" src="/js/src/schemes/pisces.js?v=5.1.2"></script>



  
  <script type="text/javascript" src="/js/src/scrollspy.js?v=5.1.2"></script>
<script type="text/javascript" src="/js/src/post-details.js?v=5.1.2"></script>



  


  <script type="text/javascript" src="/js/src/bootstrap.js?v=5.1.2"></script>



  


  




	





  





  






  

  <script type="text/javascript">
    // Local search (NexT theme): shared state for the popup handlers below.
    var isfetched = false;   // true once the search DB has been fetched
    var isXml = true;        // DB format flag: XML entries vs JSON array
    // Search DB path; the literal appears to be injected by the theme's
    // template, so the empty-string fallback below is generated-code residue
    // (never taken for this page) — NOTE(review): confirm against the template.
    var search_path = "search.xml";
    if (search_path.length === 0) {
      search_path = "search.xml";
    } else if (/json$/i.test(search_path)) {
      isXml = false;
    }
    var path = "/" + search_path;
    // monitor main search box;

    // Dismiss the search popup: hide it, clear the query box, drop rendered
    // results and the page overlay, and re-enable page scrolling.
    var onPopupClose = function (event) {
      $('.popup').hide();
      $('#local-search-input').val('');
      ['.search-result-list', '#no-result', '.local-search-pop-overlay'].forEach(function (selector) {
        $(selector).remove();
      });
      $('body').css('overflow', '');
    }

    // Show the search popup: add a full-page overlay (clicking it closes the
    // popup), lock page scrolling, and focus the query input with mobile
    // auto-capitalize/auto-correct disabled.
    function proceedsearch() {
      var $body = $("body");
      $body.append('<div class="search-popup-overlay local-search-pop-overlay"></div>');
      $body.css('overflow', 'hidden');
      $('.search-popup-overlay').click(onPopupClose);
      $('.popup').toggle();
      $('#local-search-input')
        .attr('autocapitalize', 'none')
        .attr('autocorrect', 'off')
        .focus();
    }

    // searchFunc: fetch the search database (XML or JSON) once via AJAX, then
    // wire up a local full-text search over post titles and contents.
    //   path       - URL of the search database (e.g. "/search.xml")
    //   search_id  - DOM id of the text input that receives the query
    //   content_id - DOM id of the container that receives the result markup
    var searchFunc = function(path, search_id, content_id) {
      'use strict';

      // start loading animation
      $("body")
        .append('<div class="search-popup-overlay local-search-pop-overlay">' +
          '<div id="search-loading-icon">' +
          '<i class="fa fa-spinner fa-pulse fa-5x fa-fw"></i>' +
          '</div>' +
          '</div>')
        .css('overflow', 'hidden');
      $("#search-loading-icon").css('margin', '20% auto 0 auto').css('text-align', 'center');

      $.ajax({
        url: path,
        dataType: isXml ? "xml" : "json",
        async: true,
        success: function(res) {
          // get the contents from search data
          isfetched = true;
          $('.popup').detach().appendTo('.header-inner');
          // Normalize the database into [{title, content, url}, ...]; XML
          // entries are mapped field by field, JSON is assumed to already
          // have that shape.
          var datas = isXml ? $("entry", res).map(function() {
            return {
              title: $("title", this).text(),
              content: $("content",this).text(),
              url: $("url" , this).text()
            };
          }).get() : res;
          var input = document.getElementById(search_id);
          var resultContent = document.getElementById(content_id);
          // Runs one search pass over the whole database and renders results.
          var inputEventFunction = function() {
            var searchText = input.value.trim().toLowerCase();
            // Split the query into keywords; for multi-word queries the full
            // phrase is also matched as an extra keyword.
            var keywords = searchText.split(/[\s\-]+/);
            if (keywords.length > 1) {
              keywords.push(searchText);
            }
            var resultItems = [];
            if (searchText.length > 0) {
              // perform local searching
              datas.forEach(function(data) {
                var isMatch = false;
                var hitCount = 0;
                var searchTextCount = 0;
                var title = data.title.trim();
                var titleInLowerCase = title.toLowerCase();
                // Strip HTML tags so matching runs over plain text only.
                var content = data.content.trim().replace(/<[^>]+>/g,"");
                var contentInLowerCase = content.toLowerCase();
                var articleUrl = decodeURIComponent(data.url);
                var indexOfTitle = [];
                var indexOfContent = [];
                // only match articles with not empty titles
                if(title != '') {
                  keywords.forEach(function(keyword) {
                    // Returns every occurrence of `word` in `text` as
                    // {position, word} records; an empty word matches nothing.
                    function getIndexByWord(word, text, caseSensitive) {
                      var wordLen = word.length;
                      if (wordLen === 0) {
                        return [];
                      }
                      var startPosition = 0, position = [], index = [];
                      if (!caseSensitive) {
                        text = text.toLowerCase();
                        word = word.toLowerCase();
                      }
                      while ((position = text.indexOf(word, startPosition)) > -1) {
                        index.push({position: position, word: word});
                        startPosition = position + wordLen;
                      }
                      return index;
                    }

                    indexOfTitle = indexOfTitle.concat(getIndexByWord(keyword, titleInLowerCase, false));
                    indexOfContent = indexOfContent.concat(getIndexByWord(keyword, contentInLowerCase, false));
                  });
                  if (indexOfTitle.length > 0 || indexOfContent.length > 0) {
                    isMatch = true;
                    hitCount = indexOfTitle.length + indexOfContent.length;
                  }
                }

                // show search results

                if (isMatch) {
                  // sort index by position of keyword
                  // (descending position so hits can be consumed by popping
                  // from the array end; position ties put the longer word last)

                  [indexOfTitle, indexOfContent].forEach(function (index) {
                    index.sort(function (itemLeft, itemRight) {
                      if (itemRight.position !== itemLeft.position) {
                        return itemRight.position - itemLeft.position;
                      } else {
                        return itemLeft.word.length - itemRight.word.length;
                      }
                    });
                  });

                  // merge hits into slices

                  // Consumes hits from `index` (sorted by descending position,
                  // popped from the end) that fit inside [start, end] and
                  // merges them into one excerpt "slice"; hits overlapping an
                  // already-accepted hit are discarded. Also counts how often
                  // the full search text occurs in the slice.
                  function mergeIntoSlice(text, start, end, index) {
                    var item = index[index.length - 1];
                    var position = item.position;
                    var word = item.word;
                    var hits = [];
                    var searchTextCountInSlice = 0;
                    while (position + word.length <= end && index.length != 0) {
                      if (word === searchText) {
                        searchTextCountInSlice++;
                      }
                      hits.push({position: position, length: word.length});
                      var wordEnd = position + word.length;

                      // move to next position of hit

                      index.pop();
                      while (index.length != 0) {
                        item = index[index.length - 1];
                        position = item.position;
                        word = item.word;
                        if (wordEnd > position) {
                          index.pop();
                        } else {
                          break;
                        }
                      }
                    }
                    searchTextCount += searchTextCountInSlice;
                    return {
                      hits: hits,
                      start: start,
                      end: end,
                      searchTextCount: searchTextCountInSlice
                    };
                  }

                  var slicesOfTitle = [];
                  if (indexOfTitle.length != 0) {
                    slicesOfTitle.push(mergeIntoSlice(title, 0, title.length, indexOfTitle));
                  }

                  var slicesOfContent = [];
                  while (indexOfContent.length != 0) {
                    var item = indexOfContent[indexOfContent.length - 1];
                    var position = item.position;
                    var word = item.word;
                    // cut out 100 characters
                    var start = position - 20;
                    var end = position + 80;
                    if(start < 0){
                      start = 0;
                    }
                    if (end < position + word.length) {
                      end = position + word.length;
                    }
                    if(end > content.length){
                      end = content.length;
                    }
                    slicesOfContent.push(mergeIntoSlice(content, start, end, indexOfContent));
                  }

                  // sort slices in content by search text's count and hits' count

                  slicesOfContent.sort(function (sliceLeft, sliceRight) {
                    if (sliceLeft.searchTextCount !== sliceRight.searchTextCount) {
                      return sliceRight.searchTextCount - sliceLeft.searchTextCount;
                    } else if (sliceLeft.hits.length !== sliceRight.hits.length) {
                      return sliceRight.hits.length - sliceLeft.hits.length;
                    } else {
                      return sliceLeft.start - sliceRight.start;
                    }
                  });

                  // select top N slices in content
                  // (the '1' literal appears to be injected by the theme's
                  // template config — TODO confirm)

                  var upperBound = parseInt('1');
                  if (upperBound >= 0) {
                    slicesOfContent = slicesOfContent.slice(0, upperBound);
                  }

                  // highlight title and content

                  // Wraps every hit in `slice` with <b class="search-keyword">.
                  function highlightKeyword(text, slice) {
                    var result = '';
                    var prevEnd = slice.start;
                    slice.hits.forEach(function (hit) {
                      result += text.substring(prevEnd, hit.position);
                      var end = hit.position + hit.length;
                      result += '<b class="search-keyword">' + text.substring(hit.position, end) + '</b>';
                      prevEnd = end;
                    });
                    result += text.substring(prevEnd, slice.end);
                    return result;
                  }

                  var resultItem = '';

                  if (slicesOfTitle.length != 0) {
                    resultItem += "<li><a href='" + articleUrl + "' class='search-result-title'>" + highlightKeyword(title, slicesOfTitle[0]) + "</a>";
                  } else {
                    resultItem += "<li><a href='" + articleUrl + "' class='search-result-title'>" + title + "</a>";
                  }

                  slicesOfContent.forEach(function (slice) {
                    resultItem += "<a href='" + articleUrl + "'>" +
                      "<p class=\"search-result\">" + highlightKeyword(content, slice) +
                      "...</p>" + "</a>";
                  });

                  resultItem += "</li>";
                  resultItems.push({
                    item: resultItem,
                    searchTextCount: searchTextCount,
                    hitCount: hitCount,
                    id: resultItems.length
                  });
                }
              })
            };
            if (keywords.length === 1 && keywords[0] === "") {
              resultContent.innerHTML = '<div id="no-result"><i class="fa fa-search fa-5x" /></div>'
            } else if (resultItems.length === 0) {
              resultContent.innerHTML = '<div id="no-result"><i class="fa fa-frown-o fa-5x" /></div>'
            } else {
              // Rank results: search-text count desc, then total hit count
              // desc, then higher insertion id first.
              resultItems.sort(function (resultLeft, resultRight) {
                if (resultLeft.searchTextCount !== resultRight.searchTextCount) {
                  return resultRight.searchTextCount - resultLeft.searchTextCount;
                } else if (resultLeft.hitCount !== resultRight.hitCount) {
                  return resultRight.hitCount - resultLeft.hitCount;
                } else {
                  return resultRight.id - resultLeft.id;
                }
              });
              var searchResultList = '<ul class=\"search-result-list\">';
              resultItems.forEach(function (result) {
                searchResultList += result.item;
              })
              searchResultList += "</ul>";
              resultContent.innerHTML = searchResultList;
            }
          }

          // The 'auto' literal appears to be the theme's configured trigger
          // mode — TODO confirm. When it equals 'manual' the search runs on
          // every input event; otherwise on icon click or the Enter key.
          if ('auto' === 'manual') {
            input.addEventListener('input', inputEventFunction);
          } else {
            $('.search-icon').click(inputEventFunction);
            input.addEventListener('keypress', function (event) {
              if (event.keyCode === 13) {
                inputEventFunction();
              }
            });
          }

          // remove loading animation
          $(".local-search-pop-overlay").remove();
          $('body').css('overflow', '');

          proceedsearch();
        }
      });
    }

    // Wire up the popup: the trigger lazily fetches the search DB on first
    // open; afterwards it just re-shows the popup.
    $('.popup-trigger').click(function (event) {
      event.stopPropagation();
      if (isfetched) {
        proceedsearch();
      } else {
        searchFunc(path, 'local-search-input', 'local-search-result');
      }
    });

    $('.popup-btn-close').click(onPopupClose);
    // Clicks inside the popup must not bubble to the overlay, which closes it.
    $('.popup').click(function (event) {
      event.stopPropagation();
    });
    // Esc dismisses the popup, but only while it is visible.
    $(document).on('keyup', function (event) {
      if (event.which === 27 && $('.search-popup').is(':visible')) {
        onPopupClose();
      }
    });
  </script>





  

  

  

  
  
    <script type="text/x-mathjax-config">
      // MathJax v2 tex2jax settings: treat $...$ and \(...\) as inline math,
      // honor \$ escapes, and skip code-like containers when scanning.
      MathJax.Hub.Config({
        tex2jax: {
          inlineMath: [ ['$','$'], ["\\(","\\)"]  ],
          processEscapes: true,
          skipTags: ['script', 'noscript', 'style', 'textarea', 'pre', 'code']
        }
      });
    </script>

    <script type="text/x-mathjax-config">
      // After typesetting, tag each formula's container element with the
      // "has-jax" class so the theme's CSS can style rendered math.
      MathJax.Hub.Queue(function () {
        var jaxElements = MathJax.Hub.getAllJax();
        for (var idx = 0; idx < jaxElements.length; idx++) {
          jaxElements[idx].SourceElement().parentNode.className += ' has-jax';
        }
      });
    </script>
    <script type="text/javascript" src="https://cdn.bootcss.com/mathjax/2.7.1/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
  


  

  

</body>
</html>
