<!DOCTYPE html>



  


<html class="theme-next gemini use-motion" lang="zh-Hans">
<head>
  <meta charset="UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="theme-color" content="#222">









<meta http-equiv="Cache-Control" content="no-transform" />
<meta http-equiv="Cache-Control" content="no-siteapp" />
















  
  
  <link href="/blog/lib/fancybox/source/jquery.fancybox.css?v=2.1.5" rel="stylesheet" type="text/css" />







<link href="/blog/lib/font-awesome/css/font-awesome.min.css?v=4.6.2" rel="stylesheet" type="text/css" />

<link href="/blog/css/main.css?v=5.1.4" rel="stylesheet" type="text/css" />


  <link rel="apple-touch-icon" sizes="180x180" href="/blog/images/apple-touch-icon-next.png?v=5.1.4">


  <link rel="icon" type="image/png" sizes="32x32" href="/blog/images/favicon-32x32-next.png?v=5.1.4">


  <link rel="icon" type="image/png" sizes="16x16" href="/blog/images/favicon-16x16-next.png?v=5.1.4">


  <link rel="mask-icon" href="/blog/images/logo.svg?v=5.1.4" color="#222">





  <meta name="keywords" content="贝叶斯" />





  <link rel="alternate" href="/blog/atom.xml" title="稻草人的编程之路" type="application/atom+xml" />






<meta name="description" content="贝叶斯算法这篇博客介绍机器学习中非常常见的贝叶斯算法 前言贝叶斯算法要解决的是什么问题？ 正向概率: 假设袋子里面有N个白球，M个黑球，你伸手进去摸一把，摸出黑球的概率是多大？ 逆向概率: 如果我们事先并不知道袋子里黑白球的比例，而是闭着眼睛摸出一个（或者好几个）球，观察这些取出来的球的颜色之后，那么我们可以就此对袋子里的黑白球的比例作出什么样的推测？ 有没有疑惑，我们事先就知道了白球、黑球的概率">
<meta name="keywords" content="贝叶斯">
<meta property="og:type" content="article">
<meta property="og:title" content="机器学习之贝叶斯算法">
<meta property="og:url" content="https://wangxiaochuang.github.io/2018/11/04/2.html">
<meta property="og:site_name" content="稻草人的编程之路">
<meta property="og:description" content="贝叶斯算法这篇博客介绍机器学习中非常常见的贝叶斯算法 前言贝叶斯算法要解决的是什么问题？ 正向概率: 假设袋子里面有N个白球，M个黑球，你伸手进去摸一把，摸出黑球的概率是多大？ 逆向概率: 如果我们事先并不知道袋子里黑白球的比例，而是闭着眼睛摸出一个（或者好几个）球，观察这些取出来的球的颜色之后，那么我们可以就此对袋子里的黑白球的比例作出什么样的推测？ 有没有疑惑，我们事先就知道了白球、黑球的概率">
<meta property="og:locale" content="zh-Hans">
<meta property="og:updated_time" content="2019-01-07T22:36:29.352Z">
<meta name="twitter:card" content="summary">
<meta name="twitter:title" content="机器学习之贝叶斯算法">
<meta name="twitter:description" content="贝叶斯算法这篇博客介绍机器学习中非常常见的贝叶斯算法 前言贝叶斯算法要解决的是什么问题？ 正向概率: 假设袋子里面有N个白球，M个黑球，你伸手进去摸一把，摸出黑球的概率是多大？ 逆向概率: 如果我们事先并不知道袋子里黑白球的比例，而是闭着眼睛摸出一个（或者好几个）球，观察这些取出来的球的颜色之后，那么我们可以就此对袋子里的黑白球的比例作出什么样的推测？ 有没有疑惑，我们事先就知道了白球、黑球的概率">



<script type="text/javascript" id="hexo.configurations">
  // NexT theme runtime configuration (emitted by Hexo at build time).
  // The theme's bundled scripts read the global CONFIG object on load, so the
  // key names and values here must stay in sync with what the theme expects.
  // Reuse the window.NexT namespace if an earlier script already created it.
  var NexT = window.NexT || {};
  var CONFIG = {
    // Site root path; theme asset URLs are resolved relative to this.
    root: '/blog/',
    // Active NexT scheme name (matches the "gemini" class on <html>).
    scheme: 'Gemini',
    version: '5.1.4',
    // Sidebar layout options (position, when to display, scroll behaviour).
    sidebar: {"position":"left","display":"post","offset":12,"b2t":false,"scrollpercent":false,"onmobile":false},
    fancybox: true,
    tabs: true,
    // Entrance animations; "transition" maps page regions to animation names.
    motion: {"enable":true,"async":false,"transition":{"post_block":"fadeIn","post_header":"slideDownIn","post_body":"slideDownIn","coll_header":"slideLeftIn","sidebar":"slideUpIn"}},
    // Duoshuo comment-system identity.
    duoshuo: {
      userId: '0',
      author: '博主'
    },
    // Algolia search settings; credentials are empty here, so search is
    // presumably not active — confirm against the theme config if enabling.
    algolia: {
      applicationID: '',
      apiKey: '',
      indexName: '',
      hits: {"per_page":10},
      labels: {"input_placeholder":"Search for Posts","hits_empty":"We didn't find any results for the search: ${query}","hits_stats":"${hits} results found in ${time} ms"}
    }
  };
</script>



  <link rel="canonical" href="https://wangxiaochuang.github.io/2018/11/04/2.html"/>





  <title>机器学习之贝叶斯算法 | 稻草人的编程之路</title>
  








</head>

<body itemscope itemtype="http://schema.org/WebPage" lang="zh-Hans">

  
  
    
  

  <div class="container sidebar-position-left page-post-detail">
    <div class="headband"></div>

    <header id="header" class="header" itemscope itemtype="http://schema.org/WPHeader">
      <div class="header-inner"><div class="site-brand-wrapper">
  <div class="site-meta ">
    

    <div class="custom-logo-site-title">
      <a href="/blog/"  class="brand" rel="start">
        <span class="logo-line-before"><i></i></span>
        <span class="site-title">稻草人的编程之路</span>
        <span class="logo-line-after"><i></i></span>
      </a>
    </div>
      
        <p class="site-subtitle">众人皆醉我独醒 举世皆浊我独清</p>
      
  </div>

  <div class="site-nav-toggle">
    <button>
      <span class="btn-bar"></span>
      <span class="btn-bar"></span>
      <span class="btn-bar"></span>
    </button>
  </div>
</div>

<nav class="site-nav">
  

  
    <ul id="menu" class="menu">
      
        
        <li class="menu-item menu-item-home">
          <a href="/blog/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-home"></i> <br />
            
            首页
          </a>
        </li>
      
        
        <li class="menu-item menu-item-archives">
          <a href="/blog/archives/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-archive"></i> <br />
            
            归档
          </a>
        </li>
      

      
    </ul>
  

  
</nav>



 </div>
    </header>

    <main id="main" class="main">
      <div class="main-inner">
        <div class="content-wrap">
          <div id="content" class="content">
            

  <div id="posts" class="posts-expand">
    

  

  
  
  

  <article class="post post-type-normal" itemscope itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="https://wangxiaochuang.github.io/blog/2018/11/04/2.html">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="name" content="jackstraw">
      <meta itemprop="description" content="">
      <meta itemprop="image" content="/blog/images/avatar.jpg">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="稻草人的编程之路">
    </span>

    
      <header class="post-header">

        
        
          <h1 class="post-title" itemprop="name headline">机器学习之贝叶斯算法</h1>
        

        <div class="post-meta">
          <span class="post-time">
            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              
              <time title="创建于" itemprop="dateCreated datePublished" datetime="2018-11-04T12:25:24+00:00">
                2018-11-04
              </time>
            

            

            
          </span>

          
            <span class="post-category" >
            
              <span class="post-meta-divider">|</span>
            
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              
                <span class="post-meta-item-text">分类于</span>
              
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/" itemprop="url" rel="index">
                    <span itemprop="name">机器学习</span>
                  </a>
                </span>

                
                
                  ， 
                
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/贝叶斯/" itemprop="url" rel="index">
                    <span itemprop="name">贝叶斯</span>
                  </a>
                </span>

                
                
              
            </span>
          

          
            
              <span class="post-comments-count">
                <span class="post-meta-divider">|</span>
                <span class="post-meta-item-icon">
                  <i class="fa fa-comment-o"></i>
                </span>
                <a href="/blog/2018/11/04/2.html#comments" itemprop="discussionUrl">
                  <span class="post-comments-count valine-comment-count" data-xid="/blog/2018/11/04/2.html" itemprop="commentCount"></span>
                </a>
              </span>
            
          

          
          
             <span id="/blog/2018/11/04/2.html" class="leancloud_visitors" data-flag-title="机器学习之贝叶斯算法">
               <span class="post-meta-divider">|</span>
               <span class="post-meta-item-icon">
                 <i class="fa fa-eye"></i>
               </span>
               
                 <span class="post-meta-item-text">阅读次数&#58;</span>
               
                 <span class="leancloud-visitors-count"></span>
             </span>
          

          

          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        <h1 id="贝叶斯算法"><a href="#贝叶斯算法" class="headerlink" title="贝叶斯算法"></a>贝叶斯算法</h1><p>这篇博客介绍机器学习中非常常见的贝叶斯算法</p>
<h2 id="前言"><a href="#前言" class="headerlink" title="前言"></a>前言</h2><p>贝叶斯算法要解决的是什么问题？</p>
<p><strong>正向概率:</strong> 假设袋子里面有N个白球，M个黑球，你伸手进去摸一把，摸出黑球的概率是多大？</p>
<p><strong>逆向概率:</strong> 如果我们事先并不知道袋子里黑白球的比例，而是闭着眼睛摸出一个（或者好几个）球，观察这些取出来的球的颜色之后，那么我们可以就此对袋子里的黑白球的比例作出什么样的推测？</p>
<p>有没有疑惑，我们事先就知道了白球、黑球的概率，为什么还要反着来呢？</p>
<p>答案是：现实世界本身就是不确定的，人类的观察能力是有局限性的，对于某些问题，我们可能根本就不可能知道黑球、白球的比例</p>
<h2 id="公式推导"><a href="#公式推导" class="headerlink" title="公式推导"></a>公式推导</h2><p>这里还是拿一个跟生活比较贴近的例子。</p>
<blockquote>
<p>一个学校男生女生的比例分别是60%与40%。男生总是穿长裤，女生则一半穿长裤一半穿裙子<br>正向概率：随机选取一个学生，他（她）穿长裤的概率和穿裙子的概率是多大？<br>逆向概率：迎面走来一个穿长裤的学生，无法确定性别的情况下，你能推断出他（她）是女生的概率是多大么？</p>
</blockquote>
<ol>
<li>假设学校里面人的总数是U个</li>
<li><p>穿长裤的（男生）：$U \times P(Boy) \times P(Pants|Boy)$</p>
<ul>
<li>P(Boy)是男生的概率 = 60%</li>
<li>P(Pants|Boy) 是条件概率，即在Boy这个条件下穿长裤的概率是多大，这里是100%</li>
</ul>
</li>
<li><p>穿长裤的（女生）：</p>
<script type="math/tex; mode=display">
 U \times P(Girl) \times P(Pants|Girl)</script></li>
<li><p>我们可以得到穿长裤的总数：</p>
<script type="math/tex; mode=display">
 SUM = U \times P(Boy) \times P(Pants|Boy) + U \times P(Girl) \times P(Pants|Girl)</script></li>
<li><p>我们的求解目标是穿长裤的女生的概率：</p>
<script type="math/tex; mode=display">
 \begin{equation}\begin{split}
 P(Girl|Pants) &= \frac {U \times P(Girl) \times P(Pants|Girl)} {SUM} \\
 &= \frac {P(Pants, Girl)} {P(Pants)} \\
 \end{split}\end{equation}</script><p> 分子就是穿裤子的女孩子的概率，分母就是穿裤子的概率</p>
</li>
<li><p>贝叶斯公式</p>
<script type="math/tex; mode=display">
 P(A|B) = \frac {P(B|A)P(A)} {P(B)}</script><p> 当在B条件下不好求A，但是反过来在条件A下B的概率的情况下，就可以用贝叶斯算法</p>
</li>
</ol>
<h2 id="模型比较理论"><a href="#模型比较理论" class="headerlink" title="模型比较理论"></a>模型比较理论</h2><ol>
<li><p>最大似然：最符合观测数据的（即 P(D|h) 最大的）最有优势</p>
<p> 我们之前讨论的机器学习算法都是基于最大似然估计来做的</p>
</li>
<li><p>奥卡姆剃刀：P(h)较大的模型有较大的优势</p>
<p> 如果平面上有N个点，近似构成一条直线，但绝不精确的位于一条直线。这时候我们可以用直线来拟合（1阶）、也可以用曲线（n阶）去拟合所有的点，但通常我们会用低阶的多项式去拟合，因为越是高阶的多项式越是不常见</p>
</li>
</ol>
<h2 id="应用实例"><a href="#应用实例" class="headerlink" title="应用实例"></a>应用实例</h2><h3 id="拼写纠正实例"><a href="#拼写纠正实例" class="headerlink" title="拼写纠正实例"></a>拼写纠正实例</h3><p>当我们看到用户输入了一个不在字典中的单词，我们需要去猜测：“这个家伙到底真正想要输入的单词是什么呢？”</p>
<p>P(我们猜测他想输入的单词|他实际输入的单词)，比如用户输入了tha，我们需要猜测他想输入the呢还是than</p>
<ol>
<li>用户实际输入的单词记为D</li>
<li>猜测1：P(h1|D)，猜测2：P(h2|D)，猜测3：P(h3|D)；统一为：P(h|D)</li>
<li>$P(h|D) = \frac {P(h) * P(D|h)}{P(D)}$</li>
<li>p(h)为先验概率，我们会根据一个语料库计算出这些单词的出现概率</li>
<li>P(D|h)表示正确单词输错为了D的概率</li>
<li>这里的P(D)表示输入单词D的概率，这个一般都会被约分掉，我们是比较的概率大小，并不需要得出实际的值</li>
<li>P(h|D) 正比于 P(h) * P(D|h)，对于给定的观测数据，一个猜测是好是坏，取决于“这个猜测本身独立的可能性大小（先验概率，Prior）”和“这个猜测生成我们观测到的数据的可能性大小”</li>
<li>这个的先验概率可以使用语料库计算单词h出现的概率，这里的 P(D|h)根据衡量的指标不同而不同，我们可以按键盘上的概率计算也可以按单词的距离计算等等</li>
<li>比如用户输入tlp，那到底是top还是tip？这个时候，当最大似然不能做出决定性的判断时，先验概率就可以插手进来给出指示—“既然你无法决定，那么我告诉你，一般来说top出现的程度要高许多，所以更可能他想打的是top”</li>
</ol>
<h3 id="实现一个简单的拼写检查器"><a href="#实现一个简单的拼写检查器" class="headerlink" title="实现一个简单的拼写检查器"></a>实现一个简单的拼写检查器</h3><ol>
<li><p>我们的求解的目标：$argmaxc P(c|w) \to argmaxc \frac{P(w|c) \times P(c)}{P(w)}$</p>
<ul>
<li>P(c)，文章中一个正确拼写词c的概率，也就是说，在英语文章中，c出现的概率有多大</li>
<li>P(w|c)，在用户想键入c的情况下敲成了w的概率，因为这个是代表用户会以多大的概率把c敲错成w</li>
<li>argmaxc，用来枚举所有可能的c并且选取概率最大的</li>
</ul>
</li>
<li><p>把语料中的单词全部抽取出来，转成小写，并且去除单词中间的特殊符号</p>
 <div><div class="fold_hider"><div class="close hider_title">  </div></div><div class="fold">
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> re</span><br><span class="line"><span class="keyword">import</span> collections</span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">words</span><span class="params">(text)</span>:</span> <span class="keyword">return</span> re.findall(<span class="string">'[a-z]+'</span>, text.lower())</span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">train</span><span class="params">(features)</span>:</span></span><br><span class="line">    <span class="comment"># 默认单词出现一次，如果为0不好计算</span></span><br><span class="line">    model = collections.defaultdict(<span class="keyword">lambda</span>: <span class="number">1</span>)</span><br><span class="line">    <span class="keyword">for</span> f <span class="keyword">in</span> features:</span><br><span class="line">        model[f] += <span class="number">1</span></span><br><span class="line">    <span class="keyword">return</span> model</span><br><span class="line">NWORDS = train(words(open(<span class="string">'big.txt'</span>).read()))</span><br></pre></td></tr></table></figure>

</div></div></li>
<li><p>返回所有单词编辑距离为1的单词集合</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line">alphabet = <span class="string">'abcdefghijklmnopqrstuvwxyz'</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">edits1</span><span class="params">(word)</span>:</span></span><br><span class="line">    n = len(word)</span><br><span class="line">    <span class="keyword">return</span> set(</span><br><span class="line">        [word[<span class="number">0</span>:i] + word[i+<span class="number">1</span>:] <span class="keyword">for</span> i <span class="keyword">in</span> range(n)] +</span><br><span class="line">        [word[<span class="number">0</span>:i] + word[i+<span class="number">1</span>] + word[i] +  word[i+<span class="number">2</span>:] <span class="keyword">for</span> i <span class="keyword">in</span> range(n<span class="number">-1</span>)] +</span><br><span class="line">        [word[<span class="number">0</span>:i] + c + word[i+<span class="number">1</span>:] <span class="keyword">for</span> i <span class="keyword">in</span> range(n) <span class="keyword">for</span> c <span class="keyword">in</span> alphabet] +</span><br><span class="line">        [word[<span class="number">0</span>:i] + c + word[i:] <span class="keyword">for</span> i <span class="keyword">in</span> range(n+<span class="number">1</span>) <span class="keyword">for</span> c <span class="keyword">in</span> alphabet])</span><br></pre></td></tr></table></figure>
</li>
<li><p>返回所有单词编辑距离为2的单词集合</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">edits2</span><span class="params">(word)</span>:</span></span><br><span class="line">    <span class="keyword">return</span> set(e2 <span class="keyword">for</span> e1 <span class="keyword">in</span> edits1(word) <span class="keyword">for</span> e2 <span class="keyword">in</span> edits1(e1))</span><br></pre></td></tr></table></figure>
</li>
<li><p>为了简单起见，我们认为编辑距离为1的正确单词比编辑距离为2的正确单词的概率要高，编辑距离为0的单词比编辑距离为1的单词的概率高</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">known</span><span class="params">(words)</span>:</span> <span class="keyword">return</span> set(w <span class="keyword">for</span> w <span class="keyword">in</span> words <span class="keyword">if</span> w <span class="keyword">in</span> NWORDS)</span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">correct</span><span class="params">(word)</span>:</span></span><br><span class="line">    candidates = known([word]) <span class="keyword">or</span> known(edits1(word)) <span class="keyword">or</span> known(edits2(word)) <span class="keyword">or</span> [word]</span><br><span class="line">    <span class="keyword">return</span> max(candidates, key=<span class="keyword">lambda</span> w: NWORDS[w])</span><br></pre></td></tr></table></figure>
</li>
<li><p>使用correct试一下效果吧</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">correct(<span class="string">"tlp"</span>)</span><br></pre></td></tr></table></figure>
</li>
</ol>
<h3 id="垃圾邮件过滤实例"><a href="#垃圾邮件过滤实例" class="headerlink" title="垃圾邮件过滤实例"></a>垃圾邮件过滤实例</h3><p>给定一封邮件，判定它是否属于垃圾邮件</p>
<p>D来表示这封邮件，注意D由N个单词组成。我们用h+来表示垃圾邮件，h-表示正常邮件</p>
<script type="math/tex; mode=display">
\begin{equation}\begin{split}
P(h+|D) &= \frac {P(h+) \times P(D|h+)} {P(D)} \\
P(h-|D) &= \frac {P(h-) \times P(D|h-)} {P(D)} \\
\end{split}\end{equation}</script><p>先验概率：P(h+)和P(h-)这两个先验概率都是很容易求出来的。只需要计算一个邮件库里面垃圾邮件和正常邮件的比例就行了</p>
<p>D里面含有N个单词d1，d2，d3，dn，P(D|h+)=P(d1,d2,…,dn|h+)</p>
<p>P(d1,d2,…,dn|h+)的意思是说在垃圾邮件中出现跟我们当前这封邮件一模一样的一封邮件的概率是多大，这个概率就太小了吧，一般来说是不可能的，这种情况我们认为只要这封邮件与垃圾邮件大致相同就认为是垃圾邮件吧，所以就可以进行如下的扩展</p>
<p>P(d1,d2,…,dn|h+)扩展为：P(d1|h+) <em> P(d2|d1,h+) </em> P(d3|d2,d1,h+) * … ，怎么理解呢？我们依次求概率的乘积，垃圾邮件中出现d1的概率乘以垃圾邮件中包含d1单词的情况下出现d2的概率再乘以垃圾邮件中包含d1、d2单词的情况下出现d3的概率 。。。</p>
<p>现在假设di与di-1是完全条件无关的（朴素贝叶斯假设特征之间是独立且互不影响的），因此就可以简化为 P(d1|h+) <em> P(d2|h+) </em> P(d3|h+) * …</p>
<p>对于P(d1|h+) <em> P(d2|h+) </em> P(d3|h+) * … 只要统计di这个单词在垃圾邮件中出现的频率即可</p>
<p>关于这个假设的问题，两个单词之间肯定是有关系的吧，那是不是假设就错了，结果就有问题呢？其实也不是，我们的假设的目的是为了简化运算，如果这个假设对我们的影响不那么大，我们的这个假设就是有意义的</p>
<h3 id="中文新闻分类实例"><a href="#中文新闻分类实例" class="headerlink" title="中文新闻分类实例"></a>中文新闻分类实例</h3><p>这里我们会给一个新闻数据集，一条新闻包含了类别、主题、URL、内容四项内容</p>
<h4 id="停用词"><a href="#停用词" class="headerlink" title="停用词"></a>停用词</h4><p>对于语言内容的数据的分类，我们需要提取出关键词才能大概知道文本的内容。而对于这种新闻类的数据中会包含很多对数据分析没有任何作用的词，这类词就是停用词，比如：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line">！  一下</span><br><span class="line">“   一个</span><br><span class="line">&amp;   一些</span><br><span class="line">￥  一切</span><br><span class="line">（  一天</span><br><span class="line">）  一定</span><br><span class="line">‘   一方面</span><br><span class="line">。。。 。。。</span><br></pre></td></tr></table></figure>
<p>对于停用词表，网络上有一大堆，可以自行搜索后直接使用。</p>
<p>当我们在进行中文分类的时候，就将内容中的停用词都给去掉，只留下精简有意义的内容</p>
<h4 id="TF-IDF：关键词提取"><a href="#TF-IDF：关键词提取" class="headerlink" title="TF-IDF：关键词提取"></a>TF-IDF：关键词提取</h4><p>举个例子来说明这是个什么东西。比如我们要对《中国的蜜蜂养殖》进行词频统计，这个就称为TF（Term Frequency）</p>
<script type="math/tex; mode=display">
词频(TF) = \frac{某个词在文章中的出现次数}{该文档的总单词数量}</script><p>当我们去掉其中的停用词后，现在出现了三个词（中国、蜜蜂、养殖），他们出现的次数都是一样多的，这样能说明这三个词的重要性都是一样的么？</p>
<p>“中国”是个很常见的词，这篇叫《中国的蜜蜂养殖》、另一篇可能叫《中国的美食》等等；然后蜜蜂和养殖可能就没有那么常见了，我们要找的是不那么常见的词在当前这篇文章中出现又比较频繁的词，这就是逆文档频率（IDF）</p>
<script type="math/tex; mode=display">
逆文档频率（IDF）=log(\frac{语料库的文档总数}{包含该词的文档数+1})</script><p>在计算某个词是否为关键词使用的公式：</p>
<script type="math/tex; mode=display">
TF-IDF = 词频（TF） \times 逆文档频率</script><p>例如：《中国的蜜蜂养殖》鉴定该文长度为1000个词，“中国“、“蜜蜂”、“养殖”各出现20次，则这三个词的“词频”（TF）都为0.02；搜索Google关键词“的”的网页共有250亿张，假定这就是中文网页总数。包含“中国”的网页共有62.3亿张，包含“蜜蜂”的网页为0.484亿张，包含“养殖”的网页为0.973亿张</p>
<div class="table-container">
<table>
<thead>
<tr>
<th></th>
<th>包含该词的文档数（亿）</th>
<th>IDF</th>
<th>TF-IDF</th>
</tr>
</thead>
<tbody>
<tr>
<td>中国</td>
<td>62.3</td>
<td>0.603</td>
<td>0.0121</td>
</tr>
<tr>
<td>蜜蜂</td>
<td>0.484</td>
<td>2.713</td>
<td>0.0543</td>
</tr>
<tr>
<td>养殖</td>
<td>0.973</td>
<td>2.410</td>
<td>0.0482</td>
</tr>
</tbody>
</table>
</div>
<p>TF-IDF 越大，关键词的重要程度越高 </p>
<h4 id="LDA-主题模型"><a href="#LDA-主题模型" class="headerlink" title="LDA 主题模型"></a>LDA 主题模型</h4><p>拿新闻的例子。现在有一堆新闻数据，我们要将其分成N个主题，比如军事、文化、娱乐等。具体细节后续补充</p>
<p>一个比较好的用于LDA模型分析的库 <code>gensim</code></p>
<h4 id="相似度"><a href="#相似度" class="headerlink" title="相似度"></a>相似度</h4><p>现在有两句话，如果让你去计算他们的相似度，你会如何去计算呢？对于计算机来说，就是个字符串，什么也做不了，我们需要转换成计算机能够识别的样子，这里看个例子：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br></pre></td><td class="code"><pre><span class="line">句子A：我喜欢看电视，不喜欢看电影</span><br><span class="line">句子B：我不喜欢看电视，也不喜欢看电影</span><br><span class="line"></span><br><span class="line">分词：</span><br><span class="line">句子A：我/喜欢/看/电视，不/喜欢/看/电影</span><br><span class="line">句子B：我/不/喜欢/看/电视，也/不/喜欢/看/电影</span><br><span class="line"></span><br><span class="line">语料库：[我, 喜欢, 看, 电视, 电影, 不, 也]</span><br><span class="line"></span><br><span class="line">词频：</span><br><span class="line">句子A：我 1, 喜欢 2, 看 2, 电视 1, 电影 1, 不 1, 也 0</span><br><span class="line">句子B：我 1, 喜欢 2, 看 2, 电视 1, 电影 1, 不 2, 也 1</span><br><span class="line"></span><br><span class="line">词频向量：</span><br><span class="line">句子A：[1, 2, 2, 1, 1, 1, 0]</span><br><span class="line">句子B：[1, 2, 2, 1, 1, 2, 1]</span><br><span class="line"></span><br><span class="line">相似度计算：</span><br><span class="line">有了词频向量后就有很多种方式来计算相似度了，比较常见的是余弦相似度计算</span><br></pre></td></tr></table></figure>
<p>余弦相似度</p>
<script type="math/tex; mode=display">
cos\theta = \frac{\sum_{i=1}^n (A_i \times B_i)}{\sqrt{\sum_{i=1}^n (A_i)^2} \times \sqrt{\sum_{i=1}^n{B_i}^2}} = \frac{A \cdot B}{|A| \times |B|}</script><p>进行相似度计算的时候，数据的预处理是很重要的，需要花大量的时间去做数据预处理</p>
<h4 id="使用python完成新闻分类"><a href="#使用python完成新闻分类" class="headerlink" title="使用python完成新闻分类"></a>使用python完成新闻分类</h4><p>数据来源: <a href="http://www.sogou.com/labs/resource/ca.php">搜狗实验室</a></p>
<ol>
<li><p>搜狗实验室下载的数据为xml格式，需要自行转换为pandas能读的格式，假设已经处理好</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> pandas <span class="keyword">as</span> pd</span><br><span class="line">df_news = pd.read_table(<span class="string">'./data/val.txt'</span>, names=[<span class="string">'category'</span>, <span class="string">'theme'</span>, <span class="string">'URL'</span>, <span class="string">'content'</span>],encoding=<span class="string">'utf-8'</span>)</span><br><span class="line">df_news = df_news.dropna()</span><br></pre></td></tr></table></figure>
</li>
<li><p>分词：使用结巴分词器</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment"># pip install jieba</span></span><br><span class="line"><span class="keyword">import</span> jieba</span><br><span class="line">content = df_news.content.values.tolist()</span><br><span class="line"><span class="comment"># print(content[1000])</span></span><br><span class="line"></span><br><span class="line"><span class="comment"># 将内容分词后存为list of list格式</span></span><br><span class="line">content_S = []</span><br><span class="line"><span class="keyword">for</span> line <span class="keyword">in</span> content:</span><br><span class="line">    current_segment = jieba.lcut(line)</span><br><span class="line">    <span class="keyword">if</span> len(current_segment) &gt; <span class="number">1</span> <span class="keyword">and</span> current_segment != <span class="string">'\r\n'</span>:</span><br><span class="line">        content_S.append(current_segment)</span><br><span class="line"></span><br><span class="line">df_content = pd.DataFrame(&#123;<span class="string">'content_S'</span>: content_S&#125;)</span><br><span class="line"><span class="comment"># print(df_content[1000])</span></span><br></pre></td></tr></table></figure>
</li>
<li><p>清洗：使用停用词表过滤内容</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br></pre></td><td class="code"><pre><span class="line">stopwords = pd.read_csv(<span class="string">"stopwords.txt"</span>, index_col=<span class="keyword">False</span>, sep=<span class="string">"\t"</span>, quoting=<span class="number">3</span>, names=[<span class="string">'stopword'</span>], encoding=<span class="string">'utf-8'</span>)</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">drop_stopwords</span><span class="params">(contents, stopwords)</span>:</span></span><br><span class="line">    contents_clean = []</span><br><span class="line">    all_words = []</span><br><span class="line">    <span class="keyword">for</span> line <span class="keyword">in</span> contents:</span><br><span class="line">        line_clean = []</span><br><span class="line">        <span class="keyword">for</span> word <span class="keyword">in</span> line:</span><br><span class="line">            <span class="keyword">if</span> word <span class="keyword">in</span> stopwords:</span><br><span class="line">                <span class="keyword">continue</span></span><br><span class="line">            <span class="comment"># 将过滤后的word放入line_clean中</span></span><br><span class="line">            line_clean.append(word)</span><br><span class="line">            <span class="comment"># 将所有单词都放到all_words列表，后面会使用词云展示</span></span><br><span class="line">            all_words.append(str(word))</span><br><span class="line">        contents_clean.append(line_clean)</span><br><span class="line">    <span class="keyword">return</span> contents_clean, all_words</span><br><span class="line">contents = df_content.content_S.values.tolist()</span><br><span class="line">stopwords = stopwords.stopword.values.tolist()</span><br><span class="line">contents_clean, all_words = drop_stopwords(contents, stopwords)</span><br><span class="line"><span class="comment"># print(contents_clean[1000])</span></span><br></pre></td></tr></table></figure>
</li>
<li><p>将所有的单词的列表进行统计计数，画出词云图（分分类没有关系，可选）</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br></pre></td><td class="code"><pre><span class="line">df_all_words = pd.DataFrame(&#123;<span class="string">'all_words'</span>: all_words&#125;)</span><br><span class="line"><span class="comment"># print(df_all_words.head())</span></span><br><span class="line"><span class="keyword">import</span> numpy <span class="keyword">as</span> np</span><br><span class="line">words_count = df_all_words.groupby(by=[<span class="string">'all_words'</span>])[<span class="string">'all_words'</span>].agg(&#123;<span class="string">"count"</span>: np.size&#125;)</span><br><span class="line">words_count = words_count.reset_index().sort_values(by=[<span class="string">"count"</span>],ascending=<span class="keyword">False</span>)</span><br><span class="line"><span class="comment"># print(words_count.head())</span></span><br><span class="line"></span><br><span class="line"><span class="comment"># pip install wordcloud</span></span><br><span class="line"><span class="comment"># https://github.com/amueller/word_cloud</span></span><br><span class="line"><span class="keyword">from</span> wordcloud <span class="keyword">import</span> WordCloud</span><br><span class="line"><span class="keyword">import</span> matplotlib.pyplot <span class="keyword">as</span> plt</span><br><span class="line">%matplotlib inline</span><br><span 
class="line"><span class="keyword">import</span> matplotlib</span><br><span class="line">matplotlib.rcParams[<span class="string">'figure.figsize'</span>] = (<span class="number">10.0</span>, <span class="number">5.0</span>)</span><br><span class="line"></span><br><span class="line">wordcloud = WordCloud(font_path=<span class="string">"./data/simhei.ttf"</span>, background_color=<span class="string">"white"</span>, max_font_size=<span class="number">80</span>)</span><br><span class="line">word_frequence = &#123;x[<span class="number">0</span>]:x[<span class="number">1</span>] <span class="keyword">for</span> x <span class="keyword">in</span> words_count.head(<span class="number">100</span>).values&#125;</span><br><span class="line">wordcloud = wordcloud.fit_words(word_frequence)</span><br><span class="line">plt.imshow(wordcloud)</span><br></pre></td></tr></table></figure>
</li>
<li><p>使用 TF-IDF 提取关键词</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> jieba.analyse</span><br><span class="line">index = <span class="number">1000</span></span><br><span class="line">print(df_news[<span class="string">'content'</span>][index])</span><br><span class="line">content_S_str = <span class="string">""</span>.join(content_S[index])</span><br><span class="line"><span class="comment"># topK: 返回前K个词</span></span><br><span class="line">print(<span class="string">" "</span>.join(jieba.analyse.extract_tags(content_S_str, topK=<span class="number">5</span>, withWeight=<span class="keyword">False</span>)))</span><br></pre></td></tr></table></figure>
</li>
<li><p>使用gensim进行LDA主题模型分析</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> gensim</span><br><span class="line"><span class="keyword">from</span> gensim <span class="keyword">import</span> corpora, models, similarities</span><br><span class="line"></span><br><span class="line"><span class="comment"># 制作为字典映射表</span></span><br><span class="line">dictionary = corpora.Dictionary(contents_clean)</span><br><span class="line"><span class="comment"># 每一个新闻都做词袋模型</span></span><br><span class="line">corpus = [dictionary.doc2bow(sentence) <span class="keyword">for</span> sentence <span class="keyword">in</span> contents_clean]</span><br><span class="line"></span><br><span class="line"><span class="comment">#  num_topics 主题数</span></span><br><span class="line">lda = models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=<span class="number">20</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 打印第一个主题中topK的关键词</span></span><br><span class="line">print(lda.print_topic(<span class="number">1</span>, topn=<span class="number">5</span>))</span><br><span class="line"></span><br><span class="line"><span class="comment"># 打印每个topic的关键词及权重</span></span><br><span class="line"><span class="keyword">for</span> topic <span class="keyword">in</span> lda.print_topics(num_topics=<span class="number">20</span>, num_words=<span 
class="number">5</span>):</span><br><span class="line">    print(topic[<span class="number">1</span>])</span><br></pre></td></tr></table></figure>
</li>
<li><p>使用贝叶斯完成新闻的分类</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br></pre></td><td class="code"><pre><span class="line">df_train = pd.DataFrame(&#123;<span class="string">'contents_clean'</span>: contents_clean, <span class="string">'label'</span>: df_news[<span class="string">'category'</span>]&#125;)</span><br><span class="line"><span 
class="comment"># print(df_train.tail())</span></span><br><span class="line">df_train.label.unique()</span><br><span class="line"></span><br><span class="line"><span class="comment"># label替换为数字</span></span><br><span class="line">label_mapping = &#123;<span class="string">'汽车'</span>: <span class="number">1</span>, <span class="string">'财经'</span>: <span class="number">2</span>, <span class="string">'科技'</span>: <span class="number">3</span>, <span class="string">'健康'</span>: <span class="number">4</span>, <span class="string">'体育'</span>: <span class="number">5</span>, <span class="string">'教育'</span>: <span class="number">6</span>, <span class="string">'文化'</span>: <span class="number">7</span>, <span class="string">'军事'</span>: <span class="number">8</span>, <span class="string">'娱乐'</span>: <span class="number">9</span>, <span class="string">'时尚'</span>: <span class="number">0</span>&#125;</span><br><span class="line">df_train[<span class="string">'label'</span>] = df_train[<span class="string">'label'</span>].map(label_mapping)</span><br><span class="line">print(df_train.head())</span><br><span class="line"></span><br><span class="line"><span class="comment"># 将数据拆分为训练集与测试集</span></span><br><span class="line"><span class="keyword">from</span> sklearn.model_selection <span class="keyword">import</span> train_test_split</span><br><span class="line">x_train, x_test, y_train, y_test = train_test_split(df_train[<span class="string">'contents_clean'</span>].values, df_train[<span class="string">'label'</span>].values, random_state=<span class="number">1</span>)</span><br><span class="line"><span class="comment"># 将数据转换为sklearn.naive_bayes 要求的格式: 列表的每项为一个字符串，是单词的集合，以空格分割</span></span><br><span class="line">words = []</span><br><span class="line"><span class="keyword">for</span> line_index <span class="keyword">in</span> range(len(x_train)):</span><br><span class="line">    <span class="keyword">try</span>:</span><br><span class="line">        words.append(<span 
class="string">' '</span>.join(x_train[line_index]))</span><br><span class="line">    <span class="keyword">except</span>:</span><br><span class="line">        print(line_index, word_index)</span><br><span class="line">words[<span class="number">0</span>]</span><br><span class="line"></span><br><span class="line"><span class="comment"># 使用sklearn构造向量，这个构造原理见下一节</span></span><br><span class="line"><span class="keyword">from</span> sklearn.feature_extraction.text <span class="keyword">import</span> CountVectorizer</span><br><span class="line">vec = CountVectorizer(analyzer=<span class="string">'word'</span>, max_features=<span class="number">4000</span>, lowercase=<span class="keyword">False</span>)</span><br><span class="line">vec.fit(words)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 上面的words就是构造好的向量值，在这里进行bayes的分类</span></span><br><span class="line"><span class="keyword">from</span> sklearn.naive_bayes <span class="keyword">import</span> MultinomialNB</span><br><span class="line">classifier = MultinomialNB()</span><br><span class="line">classifier.fit(vec.transform(words), y_train)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 对测试集执行同样的预处理</span></span><br><span class="line">test_words = []</span><br><span class="line"><span class="keyword">for</span> line_index <span class="keyword">in</span> range(len(x_test)):</span><br><span class="line">    <span class="keyword">try</span>:</span><br><span class="line">        test_words.append(<span class="string">' '</span>.join(x_test[line_index]))</span><br><span class="line">    <span class="keyword">except</span>:</span><br><span class="line">        print(line_index, word_index)</span><br><span class="line">test_words[<span class="number">0</span>]</span><br><span class="line"></span><br><span class="line"><span class="comment"># 看看使用贝叶斯分类的效果，这里是精度值</span></span><br><span class="line">classifier.score(vec.transform(test_words), 
y_test)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="comment"># 上面是基于词频进行分类的，我们还可以根据TF-IDF进行分类，与基于词频的类似</span></span><br><span class="line"><span class="keyword">from</span> sklearn.feature_extraction.text <span class="keyword">import</span> TfidfVectorizer</span><br><span class="line"><span class="keyword">from</span> sklearn.naive_bayes <span class="keyword">import</span> MultinomialNB</span><br><span class="line">vectorizer = TfidfVectorizer(analyzer=<span class="string">'word'</span>, max_features=<span class="number">4000</span>, lowercase=<span class="keyword">False</span>)</span><br><span class="line">vectorizer.fit(words)</span><br><span class="line">classifier = MultinomialNB()</span><br><span class="line">classifier.fit(vectorizer.transform(words), y_train)</span><br><span class="line">classifier.score(vectorizer.transform(test_words), y_test)</span><br></pre></td></tr></table></figure>
</li>
<li><p>sklearn构造向量</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> sklearn.feature_extraction.text <span class="keyword">import</span> CountVectorizer</span><br><span class="line">texts = [<span class="string">"dog cat fish"</span>, <span class="string">"dog cat cat"</span>, <span class="string">"fish bird"</span>, <span class="string">"bird"</span>]</span><br><span class="line">cv = CountVectorizer()</span><br><span class="line">cv_fit = cv.fit_transform(texts)</span><br><span class="line"></span><br><span class="line">print(cv.get_feature_names())</span><br><span class="line">print(cv_fit.toarray())</span><br><span class="line">print(cv_fit.toarray().sum(axis=<span class="number">0</span>))</span><br></pre></td></tr></table></figure>
<p> texts有4个元素，每个元素都是以空格分割的字符串，所以放在一起去重后就是可使用的单词集合，一共有4个单词”dog cat fish bird”, 那么构造出来的向量就有4列，每个元素表示对应单词出现的次数</p>
</li>
</ol>

      
    </div>
    
    
    

    

    

    

    <footer class="post-footer">
      
        <div class="post-tags">
          
            <a href="/blog/tags/贝叶斯/" rel="tag"># 贝叶斯</a>
          
        </div>
      

      
      
      

      
        <div class="post-nav">
          <div class="post-nav-next post-nav-item">
            
              <a href="/blog/2018/11/04/1.html" rel="next" title="机器学习之集成算法">
                <i class="fa fa-chevron-left"></i> 机器学习之集成算法
              </a>
            
          </div>

          <span class="post-nav-divider"></span>

          <div class="post-nav-prev post-nav-item">
            
              <a href="/blog/2018/11/18/1.html" rel="prev" title="计算机的负数表示详解">
                计算机的负数表示详解 <i class="fa fa-chevron-right"></i>
              </a>
            
          </div>
        </div>
      

      
      
    </footer>
  </div>
  
  
  
  </article>



    <div class="post-spread">
      
    </div>
  </div>


          </div>
          


          

  
    <div class="comments" id="comments">
    </div>
  



        </div>
        
          
  
  <div class="sidebar-toggle">
    <div class="sidebar-toggle-line-wrap">
      <span class="sidebar-toggle-line sidebar-toggle-line-first"></span>
      <span class="sidebar-toggle-line sidebar-toggle-line-middle"></span>
      <span class="sidebar-toggle-line sidebar-toggle-line-last"></span>
    </div>
  </div>

  <aside id="sidebar" class="sidebar">
    
    <div class="sidebar-inner">

      

      
        <ul class="sidebar-nav motion-element">
          <li class="sidebar-nav-toc sidebar-nav-active" data-target="post-toc-wrap">
            文章目录
          </li>
          <li class="sidebar-nav-overview" data-target="site-overview-wrap">
            站点概览
          </li>
        </ul>
      

      <section class="site-overview-wrap sidebar-panel">
        <div class="site-overview">
          <div class="site-author motion-element" itemprop="author" itemscope itemtype="http://schema.org/Person">
            
              <img class="site-author-image" itemprop="image"
                src="/blog/images/avatar.jpg"
                alt="jackstraw" />
            
              <p class="site-author-name" itemprop="name">jackstraw</p>
              <p class="site-description motion-element" itemprop="description">人生的意义，不在于最终获得了什么，而在于曾经努力追求过什么</p>
          </div>

          <nav class="site-state motion-element">

            
              <div class="site-state-item site-state-posts">
              
                <a href="/blog/archives/">
              
                  <span class="site-state-item-count">19</span>
                  <span class="site-state-item-name">日志</span>
                </a>
              </div>
            

            
              
              
              <div class="site-state-item site-state-categories">
                
                  <span class="site-state-item-count">22</span>
                  <span class="site-state-item-name">分类</span>
                
              </div>
            

            
              
              
              <div class="site-state-item site-state-tags">
                
                  <span class="site-state-item-count">22</span>
                  <span class="site-state-item-name">标签</span>
                
              </div>
            

          </nav>

          
            <div class="feed-link motion-element">
              <a href="/blog/atom.xml" rel="alternate">
                <i class="fa fa-rss"></i>
                RSS
              </a>
            </div>
          

          

          
          

          
          

          

        </div>
      </section>

      
      <!--noindex-->
        <section class="post-toc-wrap motion-element sidebar-panel sidebar-panel-active">
          <div class="post-toc">

            
              
            

            
              <div class="post-toc-content"><ol class="nav"><li class="nav-item nav-level-1"><a class="nav-link" href="#贝叶斯算法"><span class="nav-number">1.</span> <span class="nav-text">贝叶斯算法</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#前言"><span class="nav-number">1.1.</span> <span class="nav-text">前言</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#公式推导"><span class="nav-number">1.2.</span> <span class="nav-text">公式推导</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#模型比较理论"><span class="nav-number">1.3.</span> <span class="nav-text">模型比较理论</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#应用实例"><span class="nav-number">1.4.</span> <span class="nav-text">应用实例</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#拼写纠正实例"><span class="nav-number">1.4.1.</span> <span class="nav-text">拼写纠正实例</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#实现一个简单的拼写检查器"><span class="nav-number">1.4.2.</span> <span class="nav-text">实现一个简单的拼写检查器</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#垃圾邮件过滤实例"><span class="nav-number">1.4.3.</span> <span class="nav-text">垃圾邮件过滤实例</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#中文新闻分类实例"><span class="nav-number">1.4.4.</span> <span class="nav-text">中文新闻分类实例</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#停用词"><span class="nav-number">1.4.4.1.</span> <span class="nav-text">停用词</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#TF-IDF：关键词提取"><span class="nav-number">1.4.4.2.</span> <span class="nav-text">TF-IDF：关键词提取</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#LDA-主题模型"><span class="nav-number">1.4.4.3.</span> <span class="nav-text">LDA 主题模型</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" 
href="#相似度"><span class="nav-number">1.4.4.4.</span> <span class="nav-text">相似度</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#使用python完成新闻分类"><span class="nav-number">1.4.4.5.</span> <span class="nav-text">使用python完成新闻分类</span></a></li></ol></li></ol></li></ol></li></ol></div>
            

          </div>
        </section>
      <!--/noindex-->
      

      

    </div>
  </aside>


        
      </div>
    </main>

    <footer id="footer" class="footer">
      <div class="footer-inner">
        <div class="copyright">&copy; <span itemprop="copyrightYear">2019</span>
  <span class="with-love">
    <i class="fa fa-user"></i>
  </span>
  <span class="author" itemprop="copyrightHolder">jackstraw</span>

  
</div>









        
<div class="busuanzi-count">
  <script async src="https://busuanzi.ibruce.info/busuanzi/2.3/busuanzi.pure.mini.js"></script>

  
    <span class="site-uv">
      <i class="fa fa-user"></i>访问人数
      <span class="busuanzi-value" id="busuanzi_value_site_uv"></span>
      人
    </span>
  

  
    <span class="site-pv">
      <i class="fa fa-eye"></i>总访问量
      <span class="busuanzi-value" id="busuanzi_value_site_pv"></span>
      次
    </span>
  
</div>








        
      </div>
    </footer>

    
      <div class="back-to-top">
        <i class="fa fa-arrow-up"></i>
        
      </div>
    

    

  </div>

  

<script type="text/javascript">
  // Detect a native Promise implementation via its toString tag; downstream
  // loaders treat a null window.Promise as "needs a polyfill".
  var promiseTag = Object.prototype.toString.call(window.Promise);
  if (promiseTag !== '[object Function]') {
    window.Promise = null;
  }
</script>









  












  
  
    <script type="text/javascript" src="/blog/lib/jquery/index.js?v=2.1.3"></script>
  

  
  
    <script type="text/javascript" src="/blog/lib/fastclick/lib/fastclick.min.js?v=1.0.6"></script>
  

  
  
    <script type="text/javascript" src="/blog/lib/jquery_lazyload/jquery.lazyload.js?v=1.9.7"></script>
  

  
  
    <script type="text/javascript" src="/blog/lib/velocity/velocity.min.js?v=1.2.1"></script>
  

  
  
    <script type="text/javascript" src="/blog/lib/velocity/velocity.ui.min.js?v=1.2.1"></script>
  

  
  
    <script type="text/javascript" src="/blog/lib/fancybox/source/jquery.fancybox.pack.js?v=2.1.5"></script>
  


  


  <script type="text/javascript" src="/blog/js/src/utils.js?v=5.1.4"></script>

  <script type="text/javascript" src="/blog/js/src/motion.js?v=5.1.4"></script>



  
  


  <script type="text/javascript" src="/blog/js/src/affix.js?v=5.1.4"></script>

  <script type="text/javascript" src="/blog/js/src/schemes/pisces.js?v=5.1.4"></script>



  
  <script type="text/javascript" src="/blog/js/src/scrollspy.js?v=5.1.4"></script>
<script type="text/javascript" src="/blog/js/src/post-details.js?v=5.1.4"></script>



  


  <script type="text/javascript" src="/blog/js/src/bootstrap.js?v=5.1.4"></script>



  


  




	





  





  










  <script src="//cdn1.lncld.net/static/js/3.0.4/av-min.js"></script>
  <script src="//unpkg.com/valine/dist/Valine.min.js"></script>
  
  <script type="text/javascript">
    // Fields Valine supports for commenter info.
    var GUEST = ['nick','mail','link'];
    // Fields enabled on this site, validated against the supported list.
    var guest = 'nick,mail';
    guest = guest.split(',').filter(item=>{
      return GUEST.indexOf(item)>-1;
    });
    new Valine({
        el: '#comments' ,
        verify: true,
        notify: false,
        appId: 'lQM75w94ggNR0TjX61NLerrg-gzGzoHsz',
        appKey: 'I2wtQ2rd9KtoJmcEOiYG9zqT',
        placeholder: '如需帮助，请留下邮箱',
        avatar:'mm',
        guest_info:guest,
        // Was `'10' || 10`, whose right operand is dead code (always the
        // string '10'); pass the intended page size as a number.
        pageSize: 10,
    });
    // Remove the info node Valine injects under #comments, if present.
    // NOTE: childNodes is a *live* NodeList — removing while iterating it
    // skips every other node, so snapshot it into a plain array first.
    var infoEle = document.querySelector('#comments .info');
    if (infoEle) {
      Array.prototype.slice.call(infoEle.childNodes).forEach(function(item) {
        item.parentNode.removeChild(item);
      });
    }
  </script>



  





  

  
  <script src="https://cdn1.lncld.net/static/js/av-core-mini-0.6.4.js"></script>
  <script>AV.initialize("lQM75w94ggNR0TjX61NLerrg-gzGzoHsz", "I2wtQ2rd9KtoJmcEOiYG9zqT");</script>
  <script>
    // Index-page mode: fetch the stored visit count ("time") for every
    // .leancloud_visitors badge on the page and render it; badges with no
    // stored record are shown as 0.
    function showTime(Counter) {
      var COUNT_CONTAINER_REF = '.leancloud-visitors-count';
      var $badges = $(".leancloud_visitors");

      // Each badge's id attribute is the post URL used as the lookup key.
      var urls = $badges.map(function () {
        return $(this).attr("id").trim();
      }).get();

      var query = new AV.Query(Counter);
      query.containedIn('url', urls);
      query.find()
        .done(function (results) {
          if (results.length === 0) {
            // Nothing stored yet for any badge: zero them all.
            $badges.find(COUNT_CONTAINER_REF).text(0);
            return;
          }

          // Write each stored count into its matching element.
          for (var i = 0; i < results.length; i++) {
            var record = results[i];
            var node = document.getElementById(record.get('url'));
            $(node).find(COUNT_CONTAINER_REF).text(record.get('time'));
          }

          // Any badge still blank had no record — display 0.
          for (var j = 0; j < urls.length; j++) {
            var $span = $(document.getElementById(urls[j])).find(COUNT_CONTAINER_REF);
            if ($span.text() == '') {
              $span.text(0);
            }
          }
        })
        .fail(function (object, error) {
          console.log("Error: " + error.code + " " + error.message);
        });
    }

    // Post-page mode: increment the visit counter for the current post,
    // creating a publicly writable record (time = 1) on first visit.
    function addCount(Counter) {
      var $visitors = $(".leancloud_visitors");
      var pageUrl = $visitors.attr('id').trim();
      var pageTitle = $visitors.attr('data-flag-title').trim();

      // Shared success path: write the saved count back into the badge.
      function render(record) {
        var $el = $(document.getElementById(pageUrl));
        $el.find('.leancloud-visitors-count').text(record.get('time'));
      }

      var query = new AV.Query(Counter);
      query.equalTo("url", pageUrl);
      query.find({
        success: function (results) {
          if (results.length > 0) {
            // Existing record: bump its "time" field.
            var counter = results[0];
            counter.fetchWhenSave(true);
            counter.increment("time");
            counter.save(null, {
              success: render,
              error: function (counter, error) {
                console.log('Failed to save Visitor num, with error message: ' + error.message);
              }
            });
          } else {
            // First visit: create the record with public read/write ACL.
            var newcounter = new Counter();
            var acl = new AV.ACL();
            acl.setPublicReadAccess(true);
            acl.setPublicWriteAccess(true);
            newcounter.setACL(acl);
            newcounter.set("title", pageTitle);
            newcounter.set("url", pageUrl);
            newcounter.set("time", 1);
            newcounter.save(null, {
              success: render,
              error: function (newcounter, error) {
                console.log('Failed to create');
              }
            });
          }
        },
        error: function (error) {
          console.log('Error:' + error.code + " " + error.message);
        }
      });
    }

    // On DOM ready pick the mode: exactly one visitor badge means we are on
    // a post page (record a visit); several post links means an index page
    // (display stored counts).
    $(function () {
      var Counter = AV.Object.extend("Counter");
      var badgeTotal = $('.leancloud_visitors').length;
      if (badgeTotal == 1) {
        addCount(Counter);
      } else if ($('.post-title-link').length > 1) {
        showTime(Counter);
      }
    });
  </script>



  

  

  
  

  
  
    <script type="text/x-mathjax-config">
      // MathJax tex2jax preprocessor settings for this page.
      MathJax.Hub.Config({
        tex2jax: {
          // Recognize both $...$ and \( ... \) as inline math delimiters.
          inlineMath: [ ['$','$'], ["\\(","\\)"]  ],
          // Let \$ in the text produce a literal dollar sign.
          processEscapes: true,
          // Never typeset inside these tags (keeps code blocks untouched).
          skipTags: ['script', 'noscript', 'style', 'textarea', 'pre', 'code']
        }
      });
    </script>

    <script type="text/x-mathjax-config">
      // After typesetting, add the 'has-jax' class to the parent of every
      // math source element so CSS can target rendered math containers.
      MathJax.Hub.Queue(function() {
        var all = MathJax.Hub.getAllJax(), i;
        for (i=0; i < all.length; i += 1) {
          all[i].SourceElement().parentNode.className += ' has-jax';
        }
      });
    </script>
    <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
  


  

  

</body>
</html>
