<!DOCTYPE html>
<html lang="zh">
<head>
  <meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#222">
<meta name="generator" content="Hexo 5.3.0">


  <link rel="apple-touch-icon" sizes="180x180" href="/yuwanzi.io/images/apple-touch-icon-next.png">
  <link rel="icon" type="image/png" sizes="32x32" href="/yuwanzi.io/images/favicon-32x32-next.png">
  <link rel="icon" type="image/png" sizes="16x16" href="/yuwanzi.io/images/favicon-16x16-next.png">
  <link rel="mask-icon" href="/yuwanzi.io/images/logo.svg" color="#222">

<link rel="stylesheet" href="/yuwanzi.io/css/main.css">



<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@fortawesome/fontawesome-free@5.15.1/css/all.min.css">
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/animate.css@3.1.1/animate.min.css">

<script class="hexo-configurations">
    var NexT = window.NexT || {};
    var CONFIG = {"hostname":"suyuhuan.gitee.io","root":"/yuwanzi.io/","images":"/yuwanzi.io/images","scheme":"Muse","version":"8.2.0","exturl":false,"sidebar":{"position":"left","display":"post","padding":18,"offset":12},"copycode":false,"bookmark":{"enable":false,"color":"#222","save":"auto"},"fancybox":false,"mediumzoom":false,"lazyload":false,"pangu":false,"comments":{"style":"tabs","active":null,"storage":true,"lazyload":false,"nav":null},"motion":{"enable":true,"async":false,"transition":{"post_block":"fadeIn","post_header":"fadeInDown","post_body":"fadeInDown","coll_header":"fadeInLeft","sidebar":"fadeInUp"}},"prism":false,"i18n":{"placeholder":"Suche...","empty":"We didn't find any results for the search: ${query}","hits_time":"${hits} results found in ${time} ms","hits":"${hits} results found"}};
  </script>
<meta name="description" content="在机器学习领域中，朴素贝叶斯是一种基于贝叶斯定理的简单概率分类器（分类又被称为监督式学习，所谓监督式学习即从已知样本数据中的特征信息去推测可能出现的输出以完成分类，反之聚类问题被称为非监督式学习），朴素贝叶斯在处理文本数据时可以得到较好的分类结果，所以它被广泛应用于文本分类&#x2F;垃圾邮件过滤&#x2F;自然语言处理等场景。 朴素贝叶斯假设了样本的每个特征之间是互相独立、互不影响的，比方说，如果有一个水果是红色的">
<meta property="og:type" content="article">
<meta property="og:title" content="朴素贝叶斯的那点事儿">
<meta property="og:url" content="https://suyuhuan.gitee.io/yuwanzi.io/2017/12/20/2017-12-20-naive_bayes/index.html">
<meta property="og:site_name" content="玉丸子 | Blog">
<meta property="og:description" content="在机器学习领域中，朴素贝叶斯是一种基于贝叶斯定理的简单概率分类器（分类又被称为监督式学习，所谓监督式学习即从已知样本数据中的特征信息去推测可能出现的输出以完成分类，反之聚类问题被称为非监督式学习），朴素贝叶斯在处理文本数据时可以得到较好的分类结果，所以它被广泛应用于文本分类&#x2F;垃圾邮件过滤&#x2F;自然语言处理等场景。 朴素贝叶斯假设了样本的每个特征之间是互相独立、互不影响的，比方说，如果有一个水果是红色的">
<meta property="og:locale" content="zh_CN">
<meta property="og:image" content="http://wx2.sinaimg.cn/large/63503acbly1fmnd8qww3lj20d308fdfw.jpg">
<meta property="og:image" content="http://wx1.sinaimg.cn/large/63503acbly1fmne20wdcpj20a8078q2w.jpg">
<meta property="og:image" content="http://wx3.sinaimg.cn/large/63503acbly1fmne218l47j209l0723yg.jpg">
<meta property="og:image" content="http://wx2.sinaimg.cn/large/63503acbly1fmne21knj0j20a3071q2w.jpg">
<meta property="og:image" content="http://wx4.sinaimg.cn/large/63503acbly1fmnd8rfet4j20ka0imdgo.jpg">
<meta property="og:image" content="http://wx4.sinaimg.cn/large/63503acbly1fmrz54fusig20ge01hdfm.gif">
<meta property="og:image" content="http://wx3.sinaimg.cn/large/63503acbly1fmpnscvyl1j20np0sfaeh.jpg">
<meta property="og:image" content="http://wx1.sinaimg.cn/large/63503acbly1fmrz54ux0tg20480130nd.gif">
<meta property="og:image" content="http://wx1.sinaimg.cn/large/63503acbly1fmrz55ag8ng205j0130sh.gif">
<meta property="article:published_time" content="2017-12-20T10:00:00.000Z">
<meta property="article:modified_time" content="2020-11-07T00:58:17.000Z">
<meta property="article:author" content="玉丸子">
<meta property="article:tag" content="2017">
<meta property="article:tag" content="机器学习">
<meta property="article:tag" content="监督式学习">
<meta property="article:tag" content="贝叶斯">
<meta name="twitter:card" content="summary">
<meta name="twitter:image" content="http://wx2.sinaimg.cn/large/63503acbly1fmnd8qww3lj20d308fdfw.jpg">


<link rel="canonical" href="https://suyuhuan.gitee.io/yuwanzi.io/2017/12/20/2017-12-20-naive_bayes/">


<script class="page-configurations">
  // https://hexo.io/docs/variables.html
  CONFIG.page = {
    sidebar: "",
    isHome : false,
    isPost : true,
    lang   : 'zh'
  };
</script>
<title>朴素贝叶斯的那点事儿 | 玉丸子 | Blog</title>
  




  <noscript>
  <style>
  body { margin-top: 2rem; }

  .use-motion .menu-item,
  .use-motion .sidebar,
  .use-motion .post-block,
  .use-motion .pagination,
  .use-motion .comments,
  .use-motion .post-header,
  .use-motion .post-body,
  .use-motion .collection-header {
    visibility: visible;
  }

  .use-motion .header,
  .use-motion .site-brand-container .toggle,
  .use-motion .footer { opacity: initial; }

  .use-motion .site-title,
  .use-motion .site-subtitle,
  .use-motion .custom-logo-image {
    opacity: initial;
    top: initial;
  }

  .use-motion .logo-line {
    transform: scaleX(1);
  }

  .search-pop-overlay, .sidebar-nav { display: none; }
  .sidebar-panel { display: block; }
  </style>
</noscript>

<link rel="alternate" href="/yuwanzi.io/atom.xml" title="玉丸子 | Blog" type="application/atom+xml">
</head>

<body itemscope itemtype="http://schema.org/WebPage" class="use-motion">
  <div class="headband"></div>

  <main class="main">
    <header class="header" itemscope itemtype="http://schema.org/WPHeader">
      <div class="header-inner"><div class="site-brand-container">
  <div class="site-nav-toggle">
    <div class="toggle" aria-label="Navigationsleiste an/ausschalten" role="button">
    </div>
  </div>

  <div class="site-meta">

    <a href="/yuwanzi.io/" class="brand" rel="start">
      <i class="logo-line"></i>
      <h1 class="site-title">玉丸子 | Blog</h1>
      <i class="logo-line"></i>
    </a>
  </div>

  <div class="site-nav-right">
    <div class="toggle popup-trigger">
    </div>
  </div>
</div>







</div>
        
  
  <div class="toggle sidebar-toggle" role="button">
    <span class="toggle-line"></span>
    <span class="toggle-line"></span>
    <span class="toggle-line"></span>
  </div>

  <aside class="sidebar">

    <div class="sidebar-inner sidebar-nav-active sidebar-toc-active">
      <ul class="sidebar-nav">
        <li class="sidebar-nav-toc">
          Inhaltsverzeichnis
        </li>
        <li class="sidebar-nav-overview">
          Übersicht
        </li>
      </ul>

      <div class="sidebar-panel-container">
        <!--noindex-->
        <div class="post-toc-wrap sidebar-panel">
            <div class="post-toc animated"><ol class="nav"><li class="nav-item nav-level-3"><a class="nav-link" href="#%E6%9D%A1%E4%BB%B6%E6%A6%82%E7%8E%87"><span class="nav-number">1.</span> <span class="nav-text">条件概率</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#%E5%85%A8%E6%A6%82%E7%8E%87%E5%85%AC%E5%BC%8F"><span class="nav-number">2.</span> <span class="nav-text">全概率公式</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#%E8%B4%9D%E5%8F%B6%E6%96%AF%E5%AE%9A%E7%90%86"><span class="nav-number">3.</span> <span class="nav-text">贝叶斯定理</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#%E6%9C%B4%E7%B4%A0%E8%B4%9D%E5%8F%B6%E6%96%AF%E7%9A%84%E6%A6%82%E7%8E%87%E6%A8%A1%E5%9E%8B"><span class="nav-number">4.</span> <span class="nav-text">朴素贝叶斯的概率模型</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#%E6%9C%B4%E7%B4%A0%E8%B4%9D%E5%8F%B6%E6%96%AF%E7%9A%84%E7%AE%97%E6%B3%95%E6%A8%A1%E5%9E%8B"><span class="nav-number">5.</span> <span class="nav-text">朴素贝叶斯的算法模型</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#%E6%9C%B4%E7%B4%A0%E8%B4%9D%E5%8F%B6%E6%96%AF%E7%9A%84%E5%AE%9E%E7%8E%B0"><span class="nav-number">6.</span> <span class="nav-text">朴素贝叶斯的实现</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#%E5%8F%82%E8%80%83%E6%96%87%E7%8C%AE"><span class="nav-number">7.</span> <span class="nav-text">参考文献</span></a></li></ol></div>
        </div>
        <!--/noindex-->

        <div class="site-overview-wrap sidebar-panel">
          <div class="site-author site-overview-item animated" itemprop="author" itemscope itemtype="http://schema.org/Person">
  <p class="site-author-name" itemprop="name">玉丸子</p>
  <div class="site-description" itemprop="description">这里是玉丸子的个人博客,与你一起发现更大的世界。</div>
</div>
<div class="site-state-wrap site-overview-item animated">
  <nav class="site-state">
      <div class="site-state-item site-state-posts">
          <a href="/yuwanzi.io/archives">
          <span class="site-state-item-count">68</span>
          <span class="site-state-item-name">Artikel</span>
        </a>
      </div>
      <div class="site-state-item site-state-categories">
            <a href="/yuwanzi.io/categories/">
        <span class="site-state-item-count">39</span>
        <span class="site-state-item-name">Kategorien</span></a>
      </div>
      <div class="site-state-item site-state-tags">
            <a href="/yuwanzi.io/tags/">
        <span class="site-state-item-count">46</span>
        <span class="site-state-item-name">Schlagwörter</span></a>
      </div>
  </nav>
</div>



        </div>
      </div>
    </div>
  </aside>
  <div class="sidebar-dimmer"></div>


    </header>

    
  <div class="back-to-top" role="button">
    <i class="fa fa-arrow-up"></i>
    <span>0%</span>
  </div>

<noscript>
  <div class="noscript-warning">Theme NexT works best with JavaScript enabled</div>
</noscript>


    <div class="main-inner post posts-expand">


  


<div class="post-block">
  
  

  <article itemscope itemtype="http://schema.org/Article" class="post-content" lang="zh">
    <link itemprop="mainEntityOfPage" href="https://suyuhuan.gitee.io/yuwanzi.io/2017/12/20/2017-12-20-naive_bayes/">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="image" content="/yuwanzi.io/images/avatar.gif">
      <meta itemprop="name" content="玉丸子">
      <meta itemprop="description" content="这里是玉丸子的个人博客,与你一起发现更大的世界。">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="玉丸子 | Blog">
    </span>
      <header class="post-header">
        <h1 class="post-title" itemprop="name headline">
          朴素贝叶斯的那点事儿
        </h1>

        <div class="post-meta-container">
          <div class="post-meta">
    <span class="post-meta-item">
      <span class="post-meta-item-icon">
        <i class="far fa-calendar"></i>
      </span>
      <span class="post-meta-item-text">Veröffentlicht am</span>

      <time title="Erstellt: 2017-12-20 18:00:00" itemprop="dateCreated datePublished" datetime="2017-12-20T18:00:00+08:00">2017-12-20</time>
    </span>
      <span class="post-meta-item">
        <span class="post-meta-item-icon">
          <i class="far fa-calendar-check"></i>
        </span>
        <span class="post-meta-item-text">Bearbeitet am</span>
        <time title="Geändert am: 2020-11-07 08:58:17" itemprop="dateModified" datetime="2020-11-07T08:58:17+08:00">2020-11-07</time>
      </span>
    <span class="post-meta-item">
      <span class="post-meta-item-icon">
        <i class="far fa-folder"></i>
      </span>
      <span class="post-meta-item-text">in</span>
        <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
          <a href="/yuwanzi.io/categories/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0/" itemprop="url" rel="index"><span itemprop="name">机器学习</span></a>
        </span>
          . 
        <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
          <a href="/yuwanzi.io/categories/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0/%E7%9B%91%E7%9D%A3%E5%BC%8F%E5%AD%A6%E4%B9%A0/" itemprop="url" rel="index"><span itemprop="name">监督式学习</span></a>
        </span>
    </span>

  
</div>

        </div>
      </header>

    
    
    
    <div class="post-body" itemprop="articleBody">
        <p>在机器学习领域中，朴素贝叶斯是一种基于贝叶斯定理的简单概率分类器（分类又被称为监督式学习，所谓监督式学习即从已知样本数据中的特征信息去推测可能出现的输出以完成分类，反之聚类问题被称为非监督式学习），朴素贝叶斯在处理文本数据时可以得到较好的分类结果，所以它被广泛应用于文本分类/垃圾邮件过滤/自然语言处理等场景。</p>
<p>朴素贝叶斯假设了样本的每个特征之间是互相独立、互不影响的，比方说，如果有一个水果是红色的，形状为圆形，并且直径大约为70毫米，那么它就有可能被认为是苹果（具有最高概率的类将会被认为是最有可能的类，这被称为最大后验概率 Maximum A Posteriori），即使上述的这些特征可能会有依赖关系或有其他特征存在，朴素贝叶斯都会认为这些特征都独立地贡献了这个水果是一个苹果的概率，这种假设关系太过于理想，所以这也是朴素贝叶斯的”Naive”之处。</p>
<p>朴素贝叶斯的原名为Naive Bayes Classifier，朴素本身并不是一个正确的翻译，之所以这样翻译是因为朴素贝叶斯虽然Naive，但不代表它的效率会差，相反它的优点正在于实现简单与只需要少量的训练数据，还有另一个原因是它与贝叶斯网络等算法相比，确实是“朴素”了些。</p>
<p>在继续探讨朴素贝叶斯之前，我们先需要理解贝叶斯定理与它的前置理论条件概率与全概率公式。</p>
<blockquote>
<p>本文作者为<a target="_blank" rel="noopener" href="https://github.com/SylvanasSun">SylvanasSun(sylvanas.sun@gmail.com)</a>，首发于<a target="_blank" rel="noopener" href="https://sylvanassun.github.io/">SylvanasSun’s Blog</a>。<br>原文链接：<a target="_blank" rel="noopener" href="https://sylvanassun.github.io/2017/12/20/2017-12-20-naive_bayes/">https://sylvanassun.github.io/2017/12/20/2017-12-20-naive_bayes/</a><br>（转载请务必保留本段声明，并且保留超链接。）</p>
</blockquote>
<h3 id="条件概率"><a href="#条件概率" class="headerlink" title="条件概率"></a>条件概率</h3><hr>
<p>条件概率（Conditional Probability）是指在事件B发生的情况下，事件A发生的概率，用$P(A|B)$表示，读作在B条件下的A的概率。</p>
<p><img src="http://wx2.sinaimg.cn/large/63503acbly1fmnd8qww3lj20d308fdfw.jpg"></p>
<p>在上方的文氏图中，描述了两个事件A和B，与它们的交集<code>A ∩ B</code>，代入条件概率公式，可推出事件A发生的概率为$P(A|B) = \frac{P({A}\bigcap{B})}{P(B)}$。</p>
<p>对该公式稍作变换可推得${P({A}\bigcap{B})} = {P(A|B)}{P(B)}$与${P({A}\bigcap{B})} = {P(B|A)}{P(A)}$（<code>P(B|A)</code>为在A条件下的B的概率）。</p>
<p>然后根据这个关系可推得${P(A|B)}{P(B)} = {P(B|A)}{P(A)}$。</p>
<p>让我们举个栗子，假设有两个人在扔两个六面的骰子<code>D1</code>与<code>D2</code>，我们来预测<code>D1</code>与<code>D2</code>的向上面的结果的概率。</p>
<p><img src="http://wx1.sinaimg.cn/large/63503acbly1fmne20wdcpj20a8078q2w.jpg"></p>
<p>在<code>Table1</code>中描述了一个含有36个结果的样本空间，标红处为<code>D1</code>的向上面为2的6个结果，概率为$P(D1=2) = \frac{6}{36} = \frac{1}{6}$。</p>
<p><img src="http://wx3.sinaimg.cn/large/63503acbly1fmne218l47j209l0723yg.jpg"></p>
<p><code>Table2</code>描述了<code>D1 + D2 &lt;= 5</code>的概率，一共10个结果，用条件概率公式表示为${P(D1+D2\leq5)} = \frac{10}{36}$。</p>
<p><img src="http://wx2.sinaimg.cn/large/63503acbly1fmne21knj0j20a3071q2w.jpg"></p>
<p><code>Table3</code>描述了满足<code>Table2</code>的条件同时也满足<code>D1 = 2</code>的结果，它选中了<code>Table2</code>中的3个结果，用条件概率公式表示为${P(D1=2 | D1+D2\leq5)} = \frac{3}{10} = 0.3$。</p>
<h3 id="全概率公式"><a href="#全概率公式" class="headerlink" title="全概率公式"></a>全概率公式</h3><hr>
<p>全概率公式是将边缘概率与条件概率关联起来的基本规则，它表示了一个结果的总概率，可以通过几个不同的事件来实现。</p>
<p>全概率公式将对一复杂事件的概率求解问题转化为了在不同情况下发生的简单事件的概率的求和问题，公式为$P(B) = {\sum_{i=1}^n}P(A_i)P(B|A_i)$。</p>
<p>假定一个样本空间S，它是两个事件A与C之和，同时事件B与它们两个都有交集，如下图所示：</p>
<p><img src="http://wx4.sinaimg.cn/large/63503acbly1fmnd8rfet4j20ka0imdgo.jpg"></p>
<p>那么事件B的概率可以表示为$P(B) = P({B}\bigcap{A}) + P({B}\bigcap{C})$</p>
<p>通过条件概率，可以推断出$P({B}\bigcap{A}) = P(B|A)P(A)$，所以$P(B) = P(B|A)P(A) + P(B|C)P(C)$</p>
<p>这就是全概率公式，即事件B的概率等于事件A与事件C的概率分别乘以B对这两个事件的条件概率之和。</p>
<p>同样举个栗子来应用这个公式，假设有两家工厂生产并对外提供电灯泡，工厂X生产的电灯泡在99%的情况下能够工作超过5000小时，工厂Y生产的电灯泡在95%的情况下能够工作超过5000小时。工厂X在市场的占有率为60%，工厂Y为40%，如何推测出购买的灯泡的工作时间超过5000小时的概率是多少呢？</p>
<p>运用全概率公式，可以得出：<br>$$<br>\begin{equation}\begin{split}<br>Pr(A) &amp;=Pr(A | B_x) \cdot Pr(B_x) + Pr(A|B_y) \cdot Pr(B_y)\<br>&amp;= \frac{99}{100} \cdot \frac{6}{10} + \frac{95}{100} \cdot \frac{4}{10}\<br>&amp;= \frac{594 + 380}{1000}\<br>&amp;= \frac{974}{1000}<br>\end{split}\end{equation}<br>$$</p>
<ul>
<li><p>$Pr(B_x) = \frac{6}{10}$：购买到工厂X制造的电灯泡的概率。</p>
</li>
<li><p>$Pr(B_y) = \frac{4}{10}$：购买到工厂Y制造的电灯泡的概率。</p>
</li>
<li><p>$Pr(A|B_x) = \frac{99}{100}$：工厂X制造的电灯泡工作时间超过5000小时的概率。</p>
</li>
<li><p>$Pr(A|B_y) = \frac{95}{100}$：工厂Y制造的电灯泡工作时间超过5000小时的概率。</p>
</li>
</ul>
<p>因此，可以得知购买一个工作时间超过5000小时的电灯泡的概率为97.4%。</p>
<h3 id="贝叶斯定理"><a href="#贝叶斯定理" class="headerlink" title="贝叶斯定理"></a>贝叶斯定理</h3><hr>
<p>贝叶斯定理最早由英国数学家（同时也是神学家和哲学家）Thomas Bayes（1701-1761）提出，有趣的是他生前并没有发表过什么有关数学的学术文章，就连他最著名的成就贝叶斯定理也是由他的朋友Richard Price从他死后的遗物（笔记）中找到并发表的。</p>
<p>Thomas Bayes在晚年对概率学产生了兴趣，所谓的贝叶斯定理只是他生前为了解决一个逆概率问题（为了证明上帝是否存在，似乎哲学家们都很喜欢这个问题啊）所写的一篇文章。在那个时期，人们已经能够计算出正向概率问题，比方说，有一个袋子中有X个白球，Y个黑球，你伸手进去摸到黑球的概率是多少？这就是一个正向概率问题，而逆概率问题正好反过来，我们事先并不知道袋子中球的比例，而是不断伸手去摸好几个球，然后根据它们的颜色来推测黑球与白球的比例。</p>
<p>贝叶斯定理是关于随机事件A和B的条件概率的一则定理。通常，事件A在事件B（发生）的条件下的概率，与事件B在事件A（发生）的条件下的概率是不一样的，但它们两者之间是有确定的关系的，贝叶斯定理陈述了这个关系。</p>
<p>贝叶斯定理的一个主要应用为贝叶斯推理，它是一种建立在主观判断基础之上的推理方法，也就是说，你只需要先预估一个值，然后再去根据实际结果去不断修正，不需要任何客观因素。这种推理方式需要大量的计算，因此一直遭到其他人的诟病，无法得到广泛的应用，直到计算机的高速发展，并且人们发现很多事情都是无法事先进行客观判断的，因此贝叶斯推理才得以东山再起。</p>
<p>说了这么多理论知识（很多数学理论都像是在说绕口令），让我们来看一看公式吧，其实只需要把我们在上面推导出的条件概率公式继续进行推理，就可以得出贝叶斯公式。</p>
<p>$$P(A|B) = \frac{P(B|A)P(A)}{P(B)}$$</p>
<ul>
<li><p>$P(A|B)$：在B条件下的事件A的概率，在贝叶斯定理中，条件概率也被称为后验概率，即在事件B发生之后，我们对事件A概率的重新评估。</p>
</li>
<li><p>$P(B|A)$：在A条件下的事件B的概率，与上一条同理。</p>
</li>
<li><p>$P(A)$与$P(B)$被称为先验概率（也被称为边缘概率），即在事件B发生之前，我们对事件A概率的一个推断（不考虑任何事件B方面的因素），后面同理。</p>
</li>
<li><p>$\frac{P(B|A)}{P(B)}$被称为标准相似度，它是一个调整因子，主要是为了保证预测概率更接近真实概率。</p>
</li>
<li><p>根据这些术语，贝叶斯定理表述为： 后验概率 = 标准相似度 * 先验概率。</p>
</li>
</ul>
<p>让我们以著名的假阳性问题为例，假设某种疾病的发病率为0.001（1000个人中会有一个人得病），现有一种试剂在患者确实得病的情况下，有99%的几率呈现为阳性，而在患者没有得病的情况下，它有5%的几率呈现为阳性（也就是假阳性），如有一位病人的检验成果为阳性，那么他的得病概率是多少呢？</p>
<p>代入贝叶斯定理，假定事件A表示为得病的概率（<code>P(A) = 0.001</code>），这是我们的先验概率，它是在病人在实际注射试剂（缺乏实验的结果）之前预计的发病率，再假定事件B为试剂结果为阳性的概率，我们需要计算的是条件概率<code>P(A|B)</code>，即在事件B条件下的A概率，这就是后验概率，也就是病人在注射试剂之后（得到实验结果）得出的发病率。</p>
<p>由于还有未得病的概率，所以还需要假设事件C为未得病的先验概率（<code>P(C) = 1 - 0.001 = 0.999</code>），那么<code>P(B|C)</code>后验概率表示的是未得病条件下的试剂结果为阳性的概率，之后再代入全概率公式就可得出最终结果。</p>
<p>$$<br>\begin{equation}\begin{split}<br>P(A|B)&amp;=\frac{P(B|A)P(A)}{P(B)}\<br>&amp;= \frac{P(B|A)P(A)}{P(B|A)P(A) + P(B|C)P(C)}\<br>&amp;= \frac{0.99 \times 0.001}{0.99 \times 0.001 + 0.05 \times 0.999}\approx 0.019<br>\end{split}\end{equation}<br>$$</p>
<p>最终结果约等于2%，即使一个病人的试剂结果为阳性，他的患病几率也只有2%而已。</p>
<h3 id="朴素贝叶斯的概率模型"><a href="#朴素贝叶斯的概率模型" class="headerlink" title="朴素贝叶斯的概率模型"></a>朴素贝叶斯的概率模型</h3><hr>
<p>我们设一个待分类项$X = {f_1,f_2,\cdots,f_n}$，其中每个<code>f</code>为<code>X</code>的一个特征属性，然后设一个类别集合$C_1,C_2,\cdots,C_m$。</p>
<p>然后需要计算$P(C_1|X),P(C_2|X),\cdots,P(C_m|X)$，我们可以根据一个训练样本集合（已知分类的待分类项集合），然后统计得到在各类别下各个特征属性的条件概率：</p>
<p>$P(f_1|C_1),P(f_2|C_1),\cdots,P(f_n|C_1),\cdots,P(f_1|C_2),P(f_2|C_2),\cdots,P(f_n|C_2),\cdots,P(f_1|C_m),P(f_2|C_m),\cdots,P(f_n|C_m)$</p>
<p>如果$P(C_k|X) = MAX(P(C_1|X),P(C_2|X),\cdots,P(C_m|X))$，则${X}\in{C_k}$（贝叶斯分类其实就是取概率最大的那一个）。</p>
<p>朴素贝叶斯会假设每个特征都是独立的，根据贝叶斯定理可推得：$P(C_i|X) = \frac{P(X|C_i)P(C_i)}{P(X)}$，由于分母对于所有类别为常数，因此只需要将分子最大化即可，又因为各特征是互相独立的，所以最终推得：</p>
<p><img src="http://wx4.sinaimg.cn/large/63503acbly1fmrz54fusig20ge01hdfm.gif"></p>
<p>根据上述的公式推导，朴素贝叶斯的流程可如下图所示：</p>
<p><img src="http://wx3.sinaimg.cn/large/63503acbly1fmpnscvyl1j20np0sfaeh.jpg"></p>
<p>接下来我们通过一个案例来过一遍上图的流程。</p>
<p>现有一网站想要通过程序自动识别出账号的真实性（将账号分类为真实账号与不真实账号，所谓不真实账号即带有虚假信息或恶意注册的小号）。</p>
<ul>
<li><p> 首先需要确定特征属性和类别，然后获取训练样本。假设一个账号具有三个特征：日志数量/注册天数（<code>F1</code>）、好友数量/注册天数（<code>F2</code>）、是否使用了真实的头像（True为1，False为0）。</p>
</li>
<li><p> 该网站使用曾经人工检测过的10000个账号作为训练样本，那么计算每个类别的概率为$P(C_0) = 8900 \div 10000 = 0.89, P(C_1) = 1100 \div 10000 = 0.11$，<code>C0</code>为真实账号的类别概率也就是89%，<code>C1</code>为虚假账号的类别概率也就是11%。</p>
</li>
<li><p> 之后需要计算每个类别下的各个特征的条件概率，代入朴素贝叶斯分类器，可得$P(F_1|C)P(F_2|C)P(F_3|C)P(C)$，不过有一个问题是，<code>F1</code>与<code>F2</code>是连续变量，不适宜按照某个特定值计算概率。解决方法为将连续值转化为离散值，然后计算区间的概率，比如将<code>F1</code>分解为<code>[0,0.05]、[0.05,0.2]、[0.2,+∞]</code>三个区间，然后计算每个区间的概率即可。</p>
</li>
<li><p>已知某一账号的数据如下：$F_1 = 0.1,F_2 = 0.2,F_3 = 0$，推测该账号是真实账号还是虚假账号。在此例中，<code>F1</code>为0.1，落在第二个区间内，所以在计算的时候，就使用第二个区间的发生概率。根据训练样本可得出结果为：</p>
</li>
</ul>
<p>$$<br>\begin{equation}\begin{split}<br>P(F_1|C_0) = 0.5, P(F_1|C_1) = 0.1\<br>P(F_2|C_0) = 0.7, P(F_2|C_1) = 0.2\<br>P(F_3|C_0) = 0.2, P(F_3|C_1) = 0.9<br>\end{split}\end{equation}<br>$$</p>
<ul>
<li>接下来使用训练后的分类器可得出该账号的真实账号概率与虚假账号概率，然后取最大概率作为它的类别：</li>
</ul>
<p>$$<br>\begin{equation}\begin{split}<br>P(F_1|C_0)P(F_2|C_0)P(F_3|C_0)P(C_0) &amp;= 0.5 \times 0.7 \times 0.2 \times 0.89\<br>&amp;= 0.0623<br>\end{split}\end{equation}<br>$$<br>$$<br>\begin{equation}\begin{split}<br>P(F_1|C_1)P(F_2|C_1)P(F_3|C_1)P(C_1) &amp;= 0.1 \times 0.2 \times 0.9 \times 0.11\<br>&amp;= 0.00198<br>\end{split}\end{equation}<br>$$</p>
<p>   最终结果为该账号是一个真实账号。</p>
<h3 id="朴素贝叶斯的算法模型"><a href="#朴素贝叶斯的算法模型" class="headerlink" title="朴素贝叶斯的算法模型"></a>朴素贝叶斯的算法模型</h3><hr>
<p>在朴素贝叶斯中含有以下三种算法模型：</p>
<ul>
<li><p>Gaussian Naive Bayes：适合在特征变量具有连续性的时候使用，同时它还假设特征遵从于高斯分布（正态分布）。举个栗子，假设我们有一组人体特征的统计资料，该数据中的特征：身高、体重和脚掌长度等都为连续变量，很明显我们不能采用离散变量的方法来计算概率，由于样本太少，也无法分成区间计算，那么要怎么办呢？解决方法是假设特征项都是正态分布，然后通过样本计算出均值与标准差，这样就得到了正态分布的密度函数，有了密度函数，就可以代入值，进而算出某一点的密度函数的值。</p>
</li>
<li><p>MultiNomial Naive Bayes：与Gaussian Naive Bayes相反，多项式模型更适合处理特征是离散变量的情况，该模型会在计算先验概率$P(C_m)$和条件概率$P(F_n|C_m)$时会做一些平滑处理。具体公式为<img src="http://wx1.sinaimg.cn/large/63503acbly1fmrz54ux0tg20480130nd.gif">，其中<code>T</code>为总的样本数，<code>m</code>为总类别数，$T_{cm}$即类别为$C_m$的样本个数，<code>a</code>是一个平滑值。条件概率的公式为<img src="http://wx1.sinaimg.cn/large/63503acbly1fmrz55ag8ng205j0130sh.gif">，<code>n</code>为特征的个数，<code>T_cmfn</code>为类别为<code>C_m</code>特征为<code>F_n</code>的样本个数。当平滑值<code>a = 1</code>与<code>0 &lt; a &lt; 1</code>时，被称作为<code>Laplace</code>平滑，当<code>a = 0</code>时不做平滑。它的思想其实就是对每类别下所有划分的计数加1，这样如果训练样本数量足够大时，就不会对结果产生影响，并且解决了$P(F|C)$的频率为0的现象（某个类别下的某个特征划分没有出现，这会严重影响分类器的质量）。</p>
</li>
<li><p>Bernoulli Naive Bayes：Bernoulli适用于在特征属性为二进制的场景下，它对每个特征的取值是基于布尔值的，一个典型例子就是判断单词有没有在文本中出现。</p>
</li>
</ul>
<h3 id="朴素贝叶斯的实现"><a href="#朴素贝叶斯的实现" class="headerlink" title="朴素贝叶斯的实现"></a>朴素贝叶斯的实现</h3><hr>
<p>了解了足够多的理论，接下来我们要动手使用python来实现一个Gaussian Naive Bayes，目的是解决皮马人（一个印第安人部落）的糖尿病问题，<a target="_blank" rel="noopener" href="https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data">样本数据（请从该超链接中获取）</a>是一个csv格式的文件，每个值都是一个数字，该文件描述了从患者的年龄、怀孕次数和验血结果等方面的即时测量数据。每个记录都有一个类别值（一个布尔值，以0或1表示），该值表述了患者是否在五年内得过糖尿病。这是一个在机器学习文献中被大量研究过的数据集，一个比较好的预测精度应该在70%~76%。样本数据的每列含义如下：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br></pre></td><td class="code"><pre><span class="line">列1：怀孕次数</span><br><span class="line">列2：在口服葡萄糖耐量试验中，血浆葡萄糖的浓度（2小时）</span><br><span class="line">列3：心脏的舒张压（(mm Hg)）</span><br><span class="line">列4：肱三头肌皮肤褶皱厚度（mm）</span><br><span class="line">列5：二小时内的血清胰岛素（mu U&#x2F;ml）</span><br><span class="line">列6：体质指数 （(weight in kg&#x2F;(height in m)^2)）</span><br><span class="line">列7：糖尿病家族作用</span><br><span class="line">列8：年龄</span><br><span class="line">列9：类别布尔值，0为5年没得过糖尿病，1为5年内得过糖尿病</span><br><span class="line">------------------------------------</span><br><span class="line">6,148,72,35,0,33.6,0.627,50,1</span><br><span class="line">1,85,66,29,0,26.6,0.351,31,0</span><br><span class="line">8,183,64,0,0,23.3,0.672,32,1</span><br><span class="line">1,89,66,23,94,28.1,0.167,21,0</span><br><span class="line">0,137,40,35,168,43.1,2.288,33,1</span><br><span class="line">.........</span><br></pre></td></tr></table></figure>
<p>首先要做的是读取这个csv文件，并解析成我们可以直接使用的数据结构。由于样本数据文件中没有任何的空行和标记符号，每行都是对应的一行数据，只需要简单地把每一行封装到一个list中即可（返回结果为一个list，它的每一项元素都是包含一行数据的list），注意该文件中的数据都为数字，需要先做类型转换。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> csv</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">load_csv_file</span>(<span class="params">filename</span>):</span></span><br><span class="line">    <span class="keyword">with</span> <span class="built_in">open</span>(filename) <span class="keyword">as</span> f:</span><br><span class="line">        lines = csv.reader(f)</span><br><span class="line">        data_set = <span class="built_in">list</span>(lines)</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> <span class="built_in">range</span>(<span class="built_in">len</span>(data_set)):</span><br><span class="line">        data_set[i] = [<span class="built_in">float</span>(x) <span class="keyword">for</span> x <span class="keyword">in</span> data_set[i]]</span><br><span class="line">    <span class="keyword">return</span> data_set</span><br></pre></td></tr></table></figure>
<p>获得了样本数据后，为了评估模型的准确性还需要将它切分为训练数据集（朴素贝叶斯需要使用它来进行预测）与测试数据集。数据在切分过程中是随机选取的，但我们会选择一个比率来控制训练数据集与测试数据集的大小，一般为67%：33%，这是一个比较常见的比率。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> random</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">split_data_set</span>(<span class="params">data_set, split_ratio</span>):</span></span><br><span class="line">    train_size = <span class="built_in">int</span>(<span class="built_in">len</span>(data_set) * split_ratio)</span><br><span class="line">    train_set = []</span><br><span class="line">    data_set_copy = <span class="built_in">list</span>(data_set)</span><br><span class="line">    <span class="keyword">while</span> <span class="built_in">len</span>(train_set) &lt; train_size:</span><br><span class="line">        index = random.randrange(<span class="built_in">len</span>(data_set_copy))</span><br><span class="line">        train_set.append(data_set_copy.pop(index))</span><br><span class="line">    <span class="keyword">return</span> [train_set, data_set_copy]</span><br></pre></td></tr></table></figure>
<p>切分了样本数据后，还要对训练数据集进行更细致的处理，由于Gaussian Naive Bayes假设了每个特征都遵循正态分布，所以需要从训练数据集中抽取出摘要，它包含了均值与标准差，摘要的数量由类别和特征属性的组合数决定，例如，如果有3个类别与7个特征属性，那么就需要对每个特征属性和类别计算出均值和标准差，这就是21个摘要。</p>
<p>在计算训练数据集的摘要之前，我们的第一个任务是要将训练数据集中的特征与类别进行分离，也就是说，构造出一个<code>key</code>为类别，值为所属该类别的数据行的散列表。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">separate_by_class</span>(<span class="params">data_set, class_index</span>):</span></span><br><span class="line">    result = &#123;&#125;</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> <span class="built_in">range</span>(<span class="built_in">len</span>(data_set)):</span><br><span class="line">        vector = data_set[i]</span><br><span class="line">        class_val = vector[class_index]</span><br><span class="line">        <span class="keyword">if</span> (class_val <span class="keyword">not</span> <span class="keyword">in</span> result):</span><br><span class="line">            result[class_val] = []</span><br><span class="line">        result[class_val].append(vector)</span><br><span class="line">    <span class="keyword">return</span> result</span><br></pre></td></tr></table></figure>
<p>由于已经知道了类别只有一个，而且在每行数据的最后一个，所以只需要将-1传入到class_index参数即可。然后就是计算训练数据集的摘要（每个类别中的每个特征属性的均值与标准差），均值会被作为正态分布的中间值，而标准差则描述了数据的离散程度，在计算概率时，它会被作为正态分布中每个特征属性的期望分布。</p>
<p>标准差就是方差的平方根，只要先求出方差（每个特征值与平均值的差的平方之和的平均值）就可以得出标准差。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> math</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">mean</span>(<span class="params">numbers</span>):</span></span><br><span class="line">    <span class="keyword">return</span> <span class="built_in">sum</span>(numbers) / <span class="built_in">float</span>(<span class="built_in">len</span>(numbers))</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">stdev</span>(<span class="params">numbers</span>):</span></span><br><span class="line">    avg = mean(numbers)</span><br><span class="line">    variance = <span class="built_in">sum</span>([<span class="built_in">pow</span>(x - avg, <span class="number">2</span>) <span class="keyword">for</span> x <span class="keyword">in</span> numbers]) / <span class="built_in">float</span>(<span class="built_in">len</span>(numbers))</span><br><span class="line">    <span class="keyword">return</span> math.sqrt(variance)    </span><br></pre></td></tr></table></figure>
<p>有了这些辅助函数，计算摘要就很简单了，具体步骤就是先从训练数据集中构造出<code>key</code>为类别的散列表，然后根据类别与每个特征进行计算求出均值与标准差即可。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">summarize</span>(<span class="params">data_set</span>):</span></span><br><span class="line">    <span class="comment"># 使用zip函数将每个元素中的第n个属性封装为一个元组</span></span><br><span class="line">	<span class="comment"># 简单地说，就是把每列（特征）都打包到一个元组中</span></span><br><span class="line">    summaries = [(mean(feature), stdev(feature)) <span class="keyword">for</span> feature <span class="keyword">in</span> <span class="built_in">zip</span>(*data_set)]</span><br><span class="line">    <span class="keyword">del</span> summaries[-<span class="number">1</span>] <span class="comment"># 最后一行是类别与类别的摘要 所以删除</span></span><br><span class="line">    <span class="keyword">return</span> summaries</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">summarize_by_class</span>(<span class="params">data_set</span>):</span></span><br><span class="line">    class_map = separate_by_class(data_set, -<span class="number">1</span>)</span><br><span class="line">    summaries = &#123;&#125;</span><br><span class="line">    <span class="keyword">for</span> class_val, data <span class="keyword">in</span> class_map.items():</span><br><span class="line">        summaries[class_val] = summarize(data)</span><br><span class="line">    <span class="keyword">return</span> summaries</span><br></pre></td></tr></table></figure>
<p>数据的处理阶段已经完成了，下面的任务是要去根据训练数据集来进行预测，该阶段需要计算类概率与每个特征与类别的条件概率，然后选出概率最大的类别作为分类结果。关键在于计算条件概率，需要用到正态分布的密度函数，而它所依赖的参数（特征，均值，标准差）我们已经准备好了。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">calculate_probability</span>(<span class="params">x, mean, stdev</span>):</span></span><br><span class="line">    exponent = math.exp(-(math.<span class="built_in">pow</span>(x - mean, <span class="number">2</span>) / (<span class="number">2</span> * math.<span class="built_in">pow</span>(stdev, <span class="number">2</span>))))</span><br><span class="line">    <span class="keyword">return</span> (<span class="number">1</span> / (math.sqrt(<span class="number">2</span> * math.pi) * stdev)) * exponent</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">calculate_conditional_probabilities</span>(<span class="params">summaries, input_vector</span>):</span></span><br><span class="line">    probabilities = &#123;&#125;</span><br><span class="line">    <span class="keyword">for</span> class_val, class_summaries <span class="keyword">in</span> summaries.items():</span><br><span class="line">        probabilities[class_val] = <span class="number">1</span></span><br><span class="line">        <span class="keyword">for</span> i <span class="keyword">in</span> <span class="built_in">range</span>(<span class="built_in">len</span>(class_summaries)):</span><br><span class="line">            mean, stdev = class_summaries[i]</span><br><span 
class="line">			<span class="comment"># input_vector是test_set的一行数据，x为该行中的某一特征属性</span></span><br><span class="line">            x = input_vector[i]</span><br><span class="line">			<span class="comment"># 将概率相乘</span></span><br><span class="line">            probabilities[class_val] *= calculate_probability(x, mean, stdev)</span><br><span class="line">    <span class="keyword">return</span> probabilities</span><br></pre></td></tr></table></figure>
<p>函数<code>calculate_conditional_probabilities()</code>返回了一个<code>key</code>为类别、值为对应概率的散列表，这个散列表记录了每个类别的条件概率，之后只需要选出其中概率最大的类别即可。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">predict</span>(<span class="params">summaries, input_vector</span>):</span></span><br><span class="line">    probabilities = calculate_conditional_probabilities(summaries, input_vector)</span><br><span class="line">    best_label, best_prob = <span class="literal">None</span>, -<span class="number">1</span></span><br><span class="line">    <span class="keyword">for</span> class_val, probability <span class="keyword">in</span> probabilities.items():</span><br><span class="line">        <span class="keyword">if</span> best_label <span class="keyword">is</span> <span class="literal">None</span> <span class="keyword">or</span> probability &gt; best_prob:</span><br><span class="line">            best_label = class_val</span><br><span class="line">            best_prob = probability</span><br><span class="line">    <span class="keyword">return</span> best_label</span><br></pre></td></tr></table></figure>
<p>最后我们定义一个函数来对测试数据集中的每个数据实例进行预测以预估模型的准确性，该函数返回了一个预测值列表，包含了每个数据实例的预测值。根据这个返回值，就可以对预测结果进行准确性的评估了。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">get_predictions</span>(<span class="params">summaries, test_set</span>):</span></span><br><span class="line">    predictions = []</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> <span class="built_in">range</span>(<span class="built_in">len</span>(test_set)):</span><br><span class="line">        result = predict(summaries, test_set[i])</span><br><span class="line">        predictions.append(result)</span><br><span class="line">    <span class="keyword">return</span> predictions</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">get_accuracy</span>(<span class="params">predictions, test_set</span>):</span></span><br><span class="line">    correct = <span class="number">0</span></span><br><span class="line">    <span class="keyword">for</span> x <span class="keyword">in</span> <span class="built_in">range</span>(<span class="built_in">len</span>(test_set)):</span><br><span class="line">		<span class="comment"># 分类结果与测试数据集一致，调整值自增</span></span><br><span class="line">        <span class="keyword">if</span> test_set[x][-<span class="number">1</span>] == predictions[x]:</span><br><span class="line">            correct += <span class="number">1</span></span><br><span class="line">    <span 
class="keyword">return</span> (correct / <span class="built_in">float</span>(<span class="built_in">len</span>(test_set))) * <span class="number">100.0</span></span><br></pre></td></tr></table></figure>
<p>完整代码如下：</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span 
class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br><span class="line">68</span><br><span class="line">69</span><br><span class="line">70</span><br><span class="line">71</span><br><span class="line">72</span><br><span class="line">73</span><br><span class="line">74</span><br><span class="line">75</span><br><span class="line">76</span><br><span class="line">77</span><br><span class="line">78</span><br><span class="line">79</span><br><span class="line">80</span><br><span class="line">81</span><br><span class="line">82</span><br><span class="line">83</span><br><span class="line">84</span><br><span class="line">85</span><br><span class="line">86</span><br><span class="line">87</span><br><span class="line">88</span><br><span class="line">89</span><br><span class="line">90</span><br><span class="line">91</span><br><span class="line">92</span><br><span class="line">93</span><br><span class="line">94</span><br><span class="line">95</span><br><span class="line">96</span><br><span class="line">97</span><br><span class="line">98</span><br><span class="line">99</span><br><span class="line">100</span><br><span class="line">101</span><br><span class="line">102</span><br><span class="line">103</span><br><span class="line">104</span><br><span class="line">105</span><br><span class="line">106</span><br><span class="line">107</span><br><span class="line">108</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> csv, random, math</span><br><span class="line"></span><br><span class="line"><span class="string">&quot;&quot;&quot;</span></span><br><span class="line"><span class="string">A simple classifier base on the gaussian naive bayes and</span></span><br><span class="line"><span class="string">problem of the pima indians diabetes.</span></span><br><span class="line"><span 
class="string">(https://archive.ics.uci.edu/ml/datasets/Pima+Indians+Diabetes)</span></span><br><span class="line"><span class="string">&quot;&quot;&quot;</span></span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">load_csv_file</span>(<span class="params">filename</span>):</span></span><br><span class="line">    <span class="keyword">with</span> <span class="built_in">open</span>(filename) <span class="keyword">as</span> f:</span><br><span class="line">        lines = csv.reader(f)</span><br><span class="line">        data_set = <span class="built_in">list</span>(lines)</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> <span class="built_in">range</span>(<span class="built_in">len</span>(data_set)):</span><br><span class="line">        data_set[i] = [<span class="built_in">float</span>(x) <span class="keyword">for</span> x <span class="keyword">in</span> data_set[i]]</span><br><span class="line">    <span class="keyword">return</span> data_set</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">split_data_set</span>(<span class="params">data_set, split_ratio</span>):</span></span><br><span class="line">    train_size = <span class="built_in">int</span>(<span class="built_in">len</span>(data_set) * split_ratio)</span><br><span class="line">    train_set = []</span><br><span class="line">    data_set_copy = <span class="built_in">list</span>(data_set)</span><br><span class="line">    <span class="keyword">while</span> <span class="built_in">len</span>(train_set) &lt; train_size:</span><br><span class="line">        index = random.randrange(<span class="built_in">len</span>(data_set_copy))</span><br><span class="line">        train_set.append(data_set_copy.pop(index))</span><br><span class="line">    <span class="keyword">return</span> [train_set, 
data_set_copy]</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">separate_by_class</span>(<span class="params">data_set, class_index</span>):</span></span><br><span class="line">    result = &#123;&#125;</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> <span class="built_in">range</span>(<span class="built_in">len</span>(data_set)):</span><br><span class="line">        vector = data_set[i]</span><br><span class="line">        class_val = vector[class_index]</span><br><span class="line">        <span class="keyword">if</span> (class_val <span class="keyword">not</span> <span class="keyword">in</span> result):</span><br><span class="line">            result[class_val] = []</span><br><span class="line">        result[class_val].append(vector)</span><br><span class="line">    <span class="keyword">return</span> result</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">mean</span>(<span class="params">numbers</span>):</span></span><br><span class="line">    <span class="keyword">return</span> <span class="built_in">sum</span>(numbers) / <span class="built_in">float</span>(<span class="built_in">len</span>(numbers))</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">stdev</span>(<span class="params">numbers</span>):</span></span><br><span class="line">    avg = mean(numbers)</span><br><span class="line">    variance = <span class="built_in">sum</span>([<span class="built_in">pow</span>(x - avg, <span class="number">2</span>) <span class="keyword">for</span> x <span class="keyword">in</span> numbers]) / <span class="built_in">float</span>(<span class="built_in">len</span>(numbers))</span><br><span class="line">    <span class="keyword">return</span> 
math.sqrt(variance)</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">summarize</span>(<span class="params">data_set</span>):</span></span><br><span class="line">    summaries = [(mean(feature), stdev(feature)) <span class="keyword">for</span> feature <span class="keyword">in</span> <span class="built_in">zip</span>(*data_set)]</span><br><span class="line">    <span class="keyword">del</span> summaries[-<span class="number">1</span>]</span><br><span class="line">    <span class="keyword">return</span> summaries</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">summarize_by_class</span>(<span class="params">data_set</span>):</span></span><br><span class="line">    class_map = separate_by_class(data_set, -<span class="number">1</span>)</span><br><span class="line">    summaries = &#123;&#125;</span><br><span class="line">    <span class="keyword">for</span> class_val, data <span class="keyword">in</span> class_map.items():</span><br><span class="line">        summaries[class_val] = summarize(data)</span><br><span class="line">    <span class="keyword">return</span> summaries</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">calculate_probability</span>(<span class="params">x, mean, stdev</span>):</span></span><br><span class="line">    exponent = math.exp(-(math.<span class="built_in">pow</span>(x - mean, <span class="number">2</span>) / (<span class="number">2</span> * math.<span class="built_in">pow</span>(stdev, <span class="number">2</span>))))</span><br><span class="line">    <span class="keyword">return</span> (<span class="number">1</span> / (math.sqrt(<span class="number">2</span> * math.pi) * stdev)) * exponent</span><br><span class="line"></span><br><span class="line"><span class="function"><span 
class="keyword">def</span> <span class="title">calculate_conditional_probabilities</span>(<span class="params">summaries, input_vector</span>):</span></span><br><span class="line">    probabilities = &#123;&#125;</span><br><span class="line">    <span class="keyword">for</span> class_val, class_summaries <span class="keyword">in</span> summaries.items():</span><br><span class="line">        probabilities[class_val] = <span class="number">1</span></span><br><span class="line">        <span class="keyword">for</span> i <span class="keyword">in</span> <span class="built_in">range</span>(<span class="built_in">len</span>(class_summaries)):</span><br><span class="line">            mean, stdev = class_summaries[i]</span><br><span class="line">            x = input_vector[i]</span><br><span class="line">            probabilities[class_val] *= calculate_probability(x, mean, stdev)</span><br><span class="line">    <span class="keyword">return</span> probabilities</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">predict</span>(<span class="params">summaries, input_vector</span>):</span></span><br><span class="line">    probabilities = calculate_conditional_probabilities(summaries, input_vector)</span><br><span class="line">    best_label, best_prob = <span class="literal">None</span>, -<span class="number">1</span></span><br><span class="line">    <span class="keyword">for</span> class_val, probability <span class="keyword">in</span> probabilities.items():</span><br><span class="line">        <span class="keyword">if</span> best_label <span class="keyword">is</span> <span class="literal">None</span> <span class="keyword">or</span> probability &gt; best_prob:</span><br><span class="line">            best_label = class_val</span><br><span class="line">            best_prob = probability</span><br><span class="line">    <span class="keyword">return</span> best_label</span><br><span 
class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">get_predictions</span>(<span class="params">summaries, test_set</span>):</span></span><br><span class="line">    predictions = []</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> <span class="built_in">range</span>(<span class="built_in">len</span>(test_set)):</span><br><span class="line">        result = predict(summaries, test_set[i])</span><br><span class="line">        predictions.append(result)</span><br><span class="line">    <span class="keyword">return</span> predictions</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">get_accuracy</span>(<span class="params">predictions, test_set</span>):</span></span><br><span class="line">    correct = <span class="number">0</span></span><br><span class="line">    <span class="keyword">for</span> x <span class="keyword">in</span> <span class="built_in">range</span>(<span class="built_in">len</span>(test_set)):</span><br><span class="line">        <span class="keyword">if</span> test_set[x][-<span class="number">1</span>] == predictions[x]:</span><br><span class="line">            correct += <span class="number">1</span></span><br><span class="line">    <span class="keyword">return</span> (correct / <span class="built_in">float</span>(<span class="built_in">len</span>(test_set))) * <span class="number">100.0</span></span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">main</span>():</span></span><br><span class="line">    filename = <span class="string">&#x27;pima-indians-diabetes.data.csv&#x27;</span></span><br><span class="line">    split_ratio = <span class="number">0.67</span></span><br><span class="line">    data_set = load_csv_file(filename)</span><br><span class="line">    train_set, 
test_set = split_data_set(data_set, split_ratio)</span><br><span class="line">    print(<span class="string">&#x27;Split %s rows into train set = %s and test set = %s rows&#x27;</span></span><br><span class="line">                %(<span class="built_in">len</span>(data_set), <span class="built_in">len</span>(train_set), <span class="built_in">len</span>(test_set)))</span><br><span class="line">    <span class="comment"># prepare model</span></span><br><span class="line">    summaries = summarize_by_class(train_set)</span><br><span class="line">    <span class="comment"># predict and test</span></span><br><span class="line">    predictions = get_predictions(summaries, test_set)</span><br><span class="line">    accuracy = get_accuracy(predictions, test_set)</span><br><span class="line">    print(<span class="string">&#x27;Accuracy: %s&#x27;</span> % accuracy)</span><br><span class="line"></span><br><span class="line">main()</span><br><span class="line"></span><br></pre></td></tr></table></figure>

<h3 id="参考文献"><a href="#参考文献" class="headerlink" title="参考文献"></a>参考文献</h3><hr>
<ul>
<li><p><a target="_blank" rel="noopener" href="https://en.wikipedia.org/wiki/Bayes%27_theorem">Bayes’ theorem - Wikipedia</a></p>
</li>
<li><p><a target="_blank" rel="noopener" href="https://en.wikipedia.org/wiki/Conditional_probability">Conditional probability - Wikipedia</a></p>
</li>
<li><p><a target="_blank" rel="noopener" href="https://en.wikipedia.org/wiki/Law_of_total_probability">Law of total probability - Wikipedia</a></p>
</li>
<li><p><a target="_blank" rel="noopener" href="https://en.wikipedia.org/wiki/Naive_Bayes_classifier">Naive Bayes classifier - Wikipedia</a></p>
</li>
<li><p><a target="_blank" rel="noopener" href="https://www.analyticsvidhya.com/blog/2017/09/naive-bayes-explained/"> 6 Easy Steps to Learn Naive Bayes Algorithm (with code in Python)</a></p>
</li>
<li><p><a target="_blank" rel="noopener" href="https://dataaspirant.com/2017/02/06/naive-bayes-classifier-machine-learning/">How the Naive Bayes Classifier works in Machine Learning</a></p>
</li>
<li><p><a target="_blank" rel="noopener" href="https://machinelearningmastery.com/naive-bayes-classifier-scratch-python/">Naive Bayes Classifier From Scratch in Python</a></p>
</li>
<li><p><a target="_blank" rel="noopener" href="http://mindhacks.cn/2008/09/21/the-magical-bayesian-method/">数学之美番外篇：平凡而又神奇的贝叶斯方法 – 刘未鹏 | Mind Hacks</a></p>
</li>
<li><p><a target="_blank" rel="noopener" href="http://www.ruanyifeng.com/blog/2013/12/naive_bayes_classifier.html">朴素贝叶斯分类器的应用 - 阮一峰的网络日志</a></p>
</li>
<li><p><a target="_blank" rel="noopener" href="http://www.cnblogs.com/leoo2sk/archive/2010/09/17/1829190.html">算法杂货铺——分类算法之朴素贝叶斯分类(Naive Bayesian classification) - T2噬菌体 - 博客园</a></p>
</li>
</ul>

    </div>

    
    
    

    <footer class="post-footer">
          <div class="post-tags">
              <a href="/yuwanzi.io/tags/2017/" rel="tag"># 2017</a>
              <a href="/yuwanzi.io/tags/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0/" rel="tag"># 机器学习</a>
              <a href="/yuwanzi.io/tags/%E7%9B%91%E7%9D%A3%E5%BC%8F%E5%AD%A6%E4%B9%A0/" rel="tag"># 监督式学习</a>
              <a href="/yuwanzi.io/tags/%E8%B4%9D%E5%8F%B6%E6%96%AF/" rel="tag"># 贝叶斯</a>
          </div>

        

          <div class="post-nav">
            <div class="post-nav-item">
                <a href="/yuwanzi.io/2017/11/30/2017-11-30-netty_introduction/" rel="prev" title="Netty的那点事儿">
                  <i class="fa fa-chevron-left"></i> Netty的那点事儿
                </a>
            </div>
            <div class="post-nav-item">
                <a href="/yuwanzi.io/2017/12/31/2017-12-31-skip_list/" rel="next" title="SkipList的那点事儿">
                  SkipList的那点事儿 <i class="fa fa-chevron-right"></i>
                </a>
            </div>
          </div>
    </footer>
  </article>
</div>







<script>
  // Restore the previously active comment tab when the tabs component registers.
  window.addEventListener('tabs:register', () => {
    let preferred = CONFIG.comments.activeClass;
    if (CONFIG.comments.storage) {
      // A stored choice from a previous visit takes precedence over the configured default.
      preferred = localStorage.getItem('comments_active') || preferred;
    }
    if (!preferred) return;
    const tab = document.querySelector(`a[href="#comment-${preferred}"]`);
    if (tab) tab.click();
  });
  if (CONFIG.comments.storage) {
    // Remember which comment tab the visitor selected last.
    window.addEventListener('tabs:click', event => {
      if (!event.target.matches('.tabs-comment .tab-content .tab-pane')) return;
      localStorage.setItem('comments_active', event.target.classList[1]);
    });
  }
</script>
</div>
  </main>

  <footer class="footer">
    <div class="footer-inner">


<div class="copyright">
  &copy; 
  <span itemprop="copyrightYear">2021</span>
  <span class="with-love">
    <i class="fa fa-heart"></i>
  </span>
  <span class="author" itemprop="copyrightHolder">玉丸子</span>
</div>
  <div class="powered-by">Erstellt mit  <a href="https://hexo.io/" class="theme-link" rel="noopener" target="_blank">Hexo</a> & <a href="https://theme-next.js.org/muse/" class="theme-link" rel="noopener" target="_blank">NexT.Muse</a>
  </div>

    </div>
  </footer>

  
  <script src="//cdn.jsdelivr.net/npm/animejs@3.2.1/lib/anime.min.js"></script>
<script src="/yuwanzi.io/js/utils.js"></script><script src="/yuwanzi.io/js/motion.js"></script><script src="/yuwanzi.io/js/schemes/muse.js"></script><script src="/yuwanzi.io/js/next-boot.js"></script>

  






  





</body>
</html>
