<!DOCTYPE html>



  


<html class="theme-next gemini use-motion" lang="zh-Hans">
<head>
  <meta charset="UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#222">









<meta http-equiv="Cache-Control" content="no-transform" />
<meta http-equiv="Cache-Control" content="no-siteapp" />
















  
  
  <link href="/blog/lib/fancybox/source/jquery.fancybox.css?v=2.1.5" rel="stylesheet" type="text/css" />







<link href="/blog/lib/font-awesome/css/font-awesome.min.css?v=4.6.2" rel="stylesheet" type="text/css" />

<link href="/blog/css/main.css?v=5.1.4" rel="stylesheet" type="text/css" />


  <link rel="apple-touch-icon" sizes="180x180" href="/blog/images/apple-touch-icon-next.png?v=5.1.4">


  <link rel="icon" type="image/png" sizes="32x32" href="/blog/images/favicon-32x32-next.png?v=5.1.4">


  <link rel="icon" type="image/png" sizes="16x16" href="/blog/images/favicon-16x16-next.png?v=5.1.4">


  <link rel="mask-icon" href="/blog/images/logo.svg?v=5.1.4" color="#222">





  <meta name="keywords" content="Hexo, NexT" />





  <link rel="alternate" href="/blog/atom.xml" title="稻草人的编程之路" type="application/atom+xml" />






<meta name="description" content="人生的意义，不在于最终获得了什么，而在于曾经努力追求过什么">
<meta property="og:type" content="website">
<meta property="og:title" content="稻草人的编程之路">
<meta property="og:url" content="https://wangxiaochuang.github.io/index.html">
<meta property="og:site_name" content="稻草人的编程之路">
<meta property="og:description" content="人生的意义，不在于最终获得了什么，而在于曾经努力追求过什么">
<meta property="og:locale" content="zh_CN">
<meta name="twitter:card" content="summary">
<meta name="twitter:title" content="稻草人的编程之路">
<meta name="twitter:description" content="人生的意义，不在于最终获得了什么，而在于曾经努力追求过什么">



<script type="text/javascript" id="hexo.configurations">
  // Reuse the theme namespace if another script created it first.
  var NexT = window.NexT || {};

  // Global configuration object read by the NexT theme scripts.
  var CONFIG = {
    root: '/blog/',
    scheme: 'Gemini',
    version: '5.1.4',
    // Sidebar placement and scroll behaviour.
    sidebar: {
      position: 'left',
      display: 'post',
      offset: 12,
      b2t: false,
      scrollpercent: false,
      onmobile: false
    },
    fancybox: true,
    tabs: true,
    // Entrance animation settings for page sections.
    motion: {
      enable: true,
      async: false,
      transition: {
        post_block: 'fadeIn',
        post_header: 'slideDownIn',
        post_body: 'slideDownIn',
        coll_header: 'slideLeftIn',
        sidebar: 'slideUpIn'
      }
    },
    // Duoshuo comment system defaults.
    duoshuo: {
      userId: '0',
      author: '博主'
    },
    // Algolia search settings (left unconfigured).
    algolia: {
      applicationID: '',
      apiKey: '',
      indexName: '',
      hits: { per_page: 10 },
      labels: {
        input_placeholder: 'Search for Posts',
        hits_empty: "We didn't find any results for the search: ${query}",
        hits_stats: '${hits} results found in ${time} ms'
      }
    }
  };
</script>



  <link rel="canonical" href="https://wangxiaochuang.github.io/"/>





  <title>稻草人的编程之路</title>
  








</head>

<body itemscope itemtype="http://schema.org/WebPage" lang="zh-Hans">

  
  
    
  

  <div class="container sidebar-position-left 
  page-home">
    <div class="headband"></div>

    <header id="header" class="header" itemscope itemtype="http://schema.org/WPHeader">
      <div class="header-inner"><div class="site-brand-wrapper">
  <div class="site-meta ">
    

    <div class="custom-logo-site-title">
      <a href="/blog/"  class="brand" rel="start">
        <span class="logo-line-before"><i></i></span>
        <span class="site-title">稻草人的编程之路</span>
        <span class="logo-line-after"><i></i></span>
      </a>
    </div>
      
        <p class="site-subtitle">众人皆醉我独醒 举世皆浊我独清</p>
      
  </div>

  <div class="site-nav-toggle">
    <button>
      <span class="btn-bar"></span>
      <span class="btn-bar"></span>
      <span class="btn-bar"></span>
    </button>
  </div>
</div>

<nav class="site-nav">
  

  
    <ul id="menu" class="menu">
      
        
        <li class="menu-item menu-item-home">
          <a href="/blog/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-home"></i> <br />
            
            首页
          </a>
        </li>
      
        
        <li class="menu-item menu-item-archives">
          <a href="/blog/archives/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-archive"></i> <br />
            
            归档
          </a>
        </li>
      

      
    </ul>
  

  
</nav>



 </div>
    </header>

    <main id="main" class="main">
      <div class="main-inner">
        <div class="content-wrap">
          <div id="content" class="content">
            
  <section id="posts" class="posts-expand">
    
      

  

  
  
  

  <article class="post post-type-normal" itemscope itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="https://wangxiaochuang.github.io/blog/2018/11/29/2.html">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="name" content="jackstraw">
      <meta itemprop="description" content="">
      <meta itemprop="image" content="/blog/images/avatar.jpg">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="稻草人的编程之路">
    </span>

    
      <header class="post-header">

        
        
          <h1 class="post-title" itemprop="name headline">
                
                <a class="post-title-link" href="/blog/2018/11/29/2.html" itemprop="url">机器学习之DBSCAN算法</a></h1>
        

        <div class="post-meta">
          <span class="post-time">
            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              
              <time title="创建于" itemprop="dateCreated datePublished" datetime="2018-11-30T07:19:25+08:00">
                2018-11-30
              </time>
            

            

            
          </span>

          
            <span class="post-category" >
            
              <span class="post-meta-divider">|</span>
            
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              
                <span class="post-meta-item-text">分类于</span>
              
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/" itemprop="url" rel="index">
                    <span itemprop="name">机器学习</span>
                  </a>
                </span>

                
                
                  ， 
                
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/dbscan/" itemprop="url" rel="index">
                    <span itemprop="name">dbscan</span>
                  </a>
                </span>

                
                
              
            </span>
          

          
            
              <span class="post-comments-count">
                <span class="post-meta-divider">|</span>
                <span class="post-meta-item-icon">
                  <i class="fa fa-comment-o"></i>
                </span>
                <a href="/blog/2018/11/29/2.html#comments" itemprop="discussionUrl">
                  <span class="post-comments-count valine-comment-count" data-xid="/blog/2018/11/29/2.html" itemprop="commentCount"></span>
                </a>
              </span>
            
          

          
          
             <span id="/blog/2018/11/29/2.html" class="leancloud_visitors" data-flag-title="机器学习之DBSCAN算法">
               <span class="post-meta-divider">|</span>
               <span class="post-meta-item-icon">
                 <i class="fa fa-eye"></i>
               </span>
               
                 <span class="post-meta-item-text">阅读次数&#58;</span>
               
                 <span class="leancloud-visitors-count"></span>
             </span>
          

          

          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        
          
            <h1 id="机器学习之DBSCAN算法"><a href="#机器学习之DBSCAN算法" class="headerlink" title="机器学习之DBSCAN算法"></a>机器学习之DBSCAN算法</h1><p>上一篇讲了kmeans，这一篇讲dbscan</p>
<h2 id="监督算法DBSCAN"><a href="#监督算法DBSCAN" class="headerlink" title="无监督算法DBSCAN"></a>无监督算法DBSCAN</h2><p>DBSCAN(Density-Based Spatial Clustering of Applications with Noise)</p>
<p>相对于kmeans，dbscan不需要指定簇的个数</p>
<h3 id="基本概念"><a href="#基本概念" class="headerlink" title="基本概念"></a>基本概念</h3><ol>
<li>核心对象<br> 若某个点的密度达到算法设定的阈值则其为核心点（即r邻域内点的数量不小于minPts）</li>
<li>邻域的距离阈值：设定的半径r</li>
<li>直接密度可达<br> 若某点p在点q的r邻域内，且q是核心点，则称从q到p直接密度可达</li>
<li>密度可达<br> 若有一个点的序列q0、q1、…、qk，对任意相邻的qi-1与qi，从qi-1到qi都是直接密度可达的，则称从q0到qk密度可达，这实际上是直接密度可达的“传播”</li>
<li>密度相连<br> 若从某核心点p出发，点q和点k都是密度可达的，则称点q和点k是密度相连的</li>
<li>边界点<br> 属于某一个类的非核心点，不能发展下线了</li>
<li>噪声点<br> 不属于任何一个簇的点，从任何一个核心点出发都是密度不可达的</li>
</ol>
<h3 id="dbscan处理流程"><a href="#dbscan处理流程" class="headerlink" title="dbscan处理流程"></a>dbscan处理流程</h3><p>通俗地讲，dbscan算法就是找到一个点就开始画圈，在圈内的就是同一个类别，同时再以圈内的其他点作为圆心继续画圈，最后得到的这些点就是一个簇了</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre></td><td class="code"><pre><span class="line">标记所有对象为unvisited</span><br><span class="line">do</span><br><span class="line">随机选择一个unvisited对象p</span><br><span class="line">标记p为visited</span><br><span class="line">if p的r领域内至少有MinPts个对象</span><br><span class="line">    创建一个新簇C，并把p添加到C</span><br><span class="line">    令N为p的r领域中的对象集合</span><br><span class="line">    for N中每个点p</span><br><span class="line">        if p 是unvisited</span><br><span class="line">            标记p 为visited</span><br><span class="line">            if p 的r领域至少有MinPts个对象，把这些对象添加到N</span><br><span class="line">            如果p还不是任何簇的成员，把p 添加到C</span><br><span class="line">    end for</span><br><span class="line">    输出C</span><br><span class="line">else</span><br><span class="line">    标记 p 为噪声点</span><br><span class="line">until 没有标记为unvisited的对象</span><br></pre></td></tr></table></figure>
<h3 id="参数选择"><a href="#参数选择" class="headerlink" title="参数选择"></a>参数选择</h3><p>dbscan需要设定半径r与MinPts</p>
<ol>
<li><p>对于半径r<br> 一般方式是找突变点，选择一个点，计算该点到其他点的距离，从小到大排序，找距离变化最大的边界</p>
</li>
<li><p>MinPts<br> 一般取小一些，然后多次尝试</p>
</li>
</ol>
<h3 id="优缺点"><a href="#优缺点" class="headerlink" title="优缺点"></a>优缺点</h3><p><strong>优点</strong></p>
<ol>
<li>不需要指定簇的个数</li>
<li>可以发现任意形状的簇</li>
<li>擅长找到离群点（检测任务）</li>
<li>两个参数就够了</li>
</ol>
<p><strong>缺点</strong></p>
<ol>
<li>高维度有些困难（可以做降维）</li>
<li>参数难以选择（参数对结果的影响非常大）</li>
<li>Sklearn中效率很慢（数据消减策略）</li>
<li>数据太大可能发生内存溢出</li>
</ol>
<h3 id="可视化展示"><a href="#可视化展示" class="headerlink" title="可视化展示"></a>可视化展示</h3><p><a href="https://www.naftaliharris.com/blog/visualizing-dbscan-clustering/">Visualizing DBSCAN Clustering</a> 是一个国外的网友做的一个可视化网站</p>
<h2 id="使用sklearn进行DBSCAN聚类"><a href="#使用sklearn进行DBSCAN聚类" class="headerlink" title="使用sklearn进行DBSCAN聚类"></a>使用sklearn进行DBSCAN聚类</h2><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br></pre></td><td class="code"><pre><span class="line"><span class="meta">&gt;&gt;&gt; </span><span class="keyword">from</span> sklearn.cluster <span class="keyword">import</span> DBSCAN</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span><span class="keyword">import</span> pandas <span class="keyword">as</span> pd</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>beer = pd.read_csv(<span class="string">'data.txt'</span>, sep=<span class="string">' '</span>)</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>X = beer[[<span class="string">"calories"</span>, <span class="string">"sodium"</span>, <span class="string">"alcohol"</span>, <span class="string">"cost"</span>]]</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>db = DBSCAN(eps=<span class="number">10</span>, min_samples=<span 
class="number">2</span>).fit(X)</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>labels = db.labels_</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>beer[<span class="string">'cluster_db'</span>] = labels</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>beer.sort_values(<span class="string">'cluster_db'</span>)</span><br><span class="line">                    name  calories  sodium  alcohol  cost  cluster_db</span><br><span class="line"><span class="number">9</span>        Budweiser_Light       <span class="number">113</span>       <span class="number">8</span>      <span class="number">3.7</span>  <span class="number">0.40</span>          <span class="number">-1</span></span><br><span class="line"><span class="number">3</span>            Kronenbourg       <span class="number">170</span>       <span class="number">7</span>      <span class="number">5.2</span>  <span class="number">0.73</span>          <span class="number">-1</span></span><br><span class="line"><span class="number">6</span>             Augsberger       <span class="number">175</span>      <span class="number">24</span>      <span class="number">5.5</span>  <span class="number">0.40</span>          <span class="number">-1</span></span><br><span class="line"><span class="number">17</span>   Heilemans_Old_Style       <span class="number">144</span>      <span class="number">24</span>      <span class="number">4.9</span>  <span class="number">0.43</span>           <span class="number">0</span></span><br><span class="line"><span class="number">16</span>                 Hamms       <span class="number">139</span>      <span class="number">19</span>      <span class="number">4.4</span>  <span class="number">0.43</span>           <span class="number">0</span></span><br><span class="line"><span class="number">14</span>                 Kirin       <span class="number">149</span>       <span class="number">6</span>      <span class="number">5.0</span>  
<span class="number">0.79</span>           <span class="number">0</span></span><br><span class="line"><span class="number">13</span>                 Becks       <span class="number">150</span>      <span class="number">19</span>      <span class="number">4.7</span>  <span class="number">0.76</span>           <span class="number">0</span></span><br><span class="line"><span class="number">12</span>        Michelob_Light       <span class="number">135</span>      <span class="number">11</span>      <span class="number">4.2</span>  <span class="number">0.50</span>           <span class="number">0</span></span><br><span class="line"><span class="number">10</span>                 Coors       <span class="number">140</span>      <span class="number">18</span>      <span class="number">4.6</span>  <span class="number">0.44</span>           <span class="number">0</span></span><br><span class="line"><span class="number">0</span>              Budweiser       <span class="number">144</span>      <span class="number">15</span>      <span class="number">4.7</span>  <span class="number">0.43</span>           <span class="number">0</span></span><br><span class="line"><span class="number">7</span>   Srohs_Bohemian_Style       <span class="number">149</span>      <span class="number">27</span>      <span class="number">4.7</span>  <span class="number">0.42</span>           <span class="number">0</span></span><br><span class="line"><span class="number">5</span>          Old_Milwaukee       <span class="number">145</span>      <span class="number">23</span>      <span class="number">4.6</span>  <span class="number">0.28</span>           <span class="number">0</span></span><br><span class="line"><span class="number">4</span>               Heineken       <span class="number">152</span>      <span class="number">11</span>      <span class="number">5.0</span>  <span class="number">0.77</span>           <span class="number">0</span></span><br><span class="line"><span 
class="number">2</span>              Lowenbrau       <span class="number">157</span>      <span class="number">15</span>      <span class="number">0.9</span>  <span class="number">0.48</span>           <span class="number">0</span></span><br><span class="line"><span class="number">1</span>                Schlitz       <span class="number">151</span>      <span class="number">19</span>      <span class="number">4.9</span>  <span class="number">0.43</span>           <span class="number">0</span></span><br><span class="line"><span class="number">8</span>            Miller_Lite        <span class="number">99</span>      <span class="number">10</span>      <span class="number">4.3</span>  <span class="number">0.43</span>           <span class="number">1</span></span><br><span class="line"><span class="number">11</span>           Coors_Light       <span class="number">102</span>      <span class="number">15</span>      <span class="number">4.1</span>  <span class="number">0.46</span>           <span class="number">1</span></span><br><span class="line"><span class="number">19</span>         Schlitz_Light        <span class="number">97</span>       <span class="number">7</span>      <span class="number">4.2</span>  <span class="number">0.47</span>           <span class="number">1</span></span><br><span class="line"><span class="number">15</span>     Pabst_Extra_Light        <span class="number">68</span>      <span class="number">15</span>      <span class="number">2.3</span>  <span class="number">0.38</span>           <span class="number">2</span></span><br><span class="line"><span class="number">18</span>   Olympia_Goled_Light        <span class="number">72</span>       <span class="number">6</span>      <span class="number">2.9</span>  <span class="number">0.46</span>           <span class="number">2</span></span><br></pre></td></tr></table></figure>
          
        
      
    </div>
    
    
    

    

    

    

    <footer class="post-footer">
      

      

      

      
      
        <div class="post-eof"></div>
      
    </footer>
  </div>
  
  
  
  </article>


    
      

  

  
  
  

  <article class="post post-type-normal" itemscope itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="https://wangxiaochuang.github.io/blog/2018/11/29/1.html">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="name" content="jackstraw">
      <meta itemprop="description" content="">
      <meta itemprop="image" content="/blog/images/avatar.jpg">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="稻草人的编程之路">
    </span>

    
      <header class="post-header">

        
        
          <h1 class="post-title" itemprop="name headline">
                
                <a class="post-title-link" href="/blog/2018/11/29/1.html" itemprop="url">机器学习之kmeans算法</a></h1>
        

        <div class="post-meta">
          <span class="post-time">
            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              
              <time title="创建于" itemprop="dateCreated datePublished" datetime="2018-11-30T06:40:49+08:00">
                2018-11-30
              </time>
            

            

            
          </span>

          
            <span class="post-category" >
            
              <span class="post-meta-divider">|</span>
            
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              
                <span class="post-meta-item-text">分类于</span>
              
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/" itemprop="url" rel="index">
                    <span itemprop="name">机器学习</span>
                  </a>
                </span>

                
                
                  ， 
                
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/kmeans/" itemprop="url" rel="index">
                    <span itemprop="name">kmeans</span>
                  </a>
                </span>

                
                
              
            </span>
          

          
            
              <span class="post-comments-count">
                <span class="post-meta-divider">|</span>
                <span class="post-meta-item-icon">
                  <i class="fa fa-comment-o"></i>
                </span>
                <a href="/blog/2018/11/29/1.html#comments" itemprop="discussionUrl">
                  <span class="post-comments-count valine-comment-count" data-xid="/blog/2018/11/29/1.html" itemprop="commentCount"></span>
                </a>
              </span>
            
          

          
          
             <span id="/blog/2018/11/29/1.html" class="leancloud_visitors" data-flag-title="机器学习之kmeans算法">
               <span class="post-meta-divider">|</span>
               <span class="post-meta-item-icon">
                 <i class="fa fa-eye"></i>
               </span>
               
                 <span class="post-meta-item-text">阅读次数&#58;</span>
               
                 <span class="leancloud-visitors-count"></span>
             </span>
          

          

          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        
          
            <h1 id="机器学习之kmeans算法"><a href="#机器学习之kmeans算法" class="headerlink" title="机器学习之kmeans算法"></a>机器学习之kmeans算法</h1><p>今天我们开始介绍一个无监督机器学习算法。那么什么是有监督？什么又是无监督呢？</p>
<p>有监督：有标签值说明每个样本是属于什么结果的算法</p>
<p>无监督：没有标签说明每个样本所属的算法</p>
<h2 id="监督算法-kmeans"><a href="#监督算法-kmeans" class="headerlink" title="无监督算法 kmeans"></a>无监督算法 kmeans</h2><p>今天的算法是一个聚类算法，用于将相似的东西分到一组。对于这类算法，其难点是分好类后我们并不知道这个模型的好坏，不好评估</p>
<p>无监督的聚类算法比较多，这一篇文章讲 <code>kmeans</code></p>
<h3 id="基本概念"><a href="#基本概念" class="headerlink" title="基本概念"></a>基本概念</h3><ol>
<li>k值<br> 要得到的簇的个数即为k值</li>
<li>质心<br> 均值，即向量各维取平均即可</li>
<li>距离的度量<br> 常用欧几里得距离和余弦相似度（需要先标准化）</li>
<li>优化目标 $min\sum_{i=1}^{K} \sum_{x \in C_i} dist(C_i, x)^2$<br> i=1到K，表示我们要优化的每个簇。我们的目标是让每个簇的每个点到中心的距离越小越好</li>
</ol>
<h3 id="kmeans处理流程"><a href="#kmeans处理流程" class="headerlink" title="kmeans处理流程"></a>kmeans处理流程</h3><figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">1. 假设k为2，我们就初始化随机选择两个点（初始质心）</span><br><span class="line">2. 遍历所有样本点，计算每个点到这两个质心的距离，并且将距离小的归到对应的类别</span><br><span class="line">3. 遍历完成后肯定就分成了两个类别了，这个时候我们要开始迭代了</span><br><span class="line">4. 分别计算这两个类别的质心（第一次是随机选择的）</span><br><span class="line">5. 重新遍历每个点进行重分类</span><br><span class="line">6. 多次迭代后，质心应该就是一个稳定值了，聚类就完成</span><br></pre></td></tr></table></figure>
<h3 id="优缺点"><a href="#优缺点" class="headerlink" title="优缺点"></a>优缺点</h3><p><strong>优点</strong></p>
<p>简单、快速、适合常规数据集</p>
<p><strong>缺点</strong></p>
<ol>
<li>K值难以确定</li>
<li>复杂度与样本数呈线性关系</li>
<li>另外一个问题就是很难发现任意形状的簇（比如两个环绕的簇，这样kmeans就发现不了）</li>
<li>受<em>初始值</em>的影响非常大</li>
</ol>
<h3 id="可视化展示"><a href="#可视化展示" class="headerlink" title="可视化展示"></a>可视化展示</h3><p><a href="https://www.naftaliharris.com/blog/visualizing-k-means-clustering/">Visualizing K-Means Clustering</a> 是一个国外的网友做的一个可视化网站</p>
<p>尝试一下不同类型的数据集下，kmeans的效果</p>
<h2 id="使用sklearn进行kmeans聚类"><a href="#使用sklearn进行kmeans聚类" class="headerlink" title="使用sklearn进行kmeans聚类"></a>使用sklearn进行kmeans聚类</h2><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span 
class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br><span class="line">68</span><br><span class="line">69</span><br><span class="line">70</span><br></pre></td><td class="code"><pre><span class="line"><span class="meta">&gt;&gt;&gt; </span><span class="keyword">import</span> pandas <span class="keyword">as</span> pd</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>beer = pd.read_csv(<span class="string">'data.txt'</span>, sep=<span class="string">' '</span>)</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>X = beer[[<span class="string">"calories"</span>, <span class="string">"sodium"</span>, <span class="string">"alcohol"</span>, <span class="string">"cost"</span>]]</span><br><span class="line"></span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span><span class="keyword">from</span> sklearn.cluster <span class="keyword">import</span> KMeans</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>km = KMeans(n_clusters=<span class="number">3</span>).fit(X)</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>km2 = KMeans(n_clusters=<span class="number">2</span>).fit(X)</span><br><span class="line"><span class="comment"># 查看每个样本分类后的类别，从0开始计数</span></span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>km.labels_</span><br><span class="line">array([<span class="number">1</span>, <span class="number">1</span>, <span class="number">1</span>, <span class="number">1</span>, <span class="number">1</span>, <span class="number">1</span>, <span class="number">1</span>, <span class="number">1</span>, <span class="number">0</span>, <span class="number">0</span>, <span class="number">1</span>, <span 
class="number">0</span>, <span class="number">1</span>, <span class="number">1</span>, <span class="number">1</span>, <span class="number">2</span>, <span class="number">1</span>, <span class="number">1</span>, <span class="number">2</span>, <span class="number">0</span>],</span><br><span class="line">      dtype=int32)</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>km2.labels_</span><br><span class="line">array([<span class="number">0</span>, <span class="number">0</span>, <span class="number">0</span>, <span class="number">0</span>, <span class="number">0</span>, <span class="number">0</span>, <span class="number">0</span>, <span class="number">0</span>, <span class="number">1</span>, <span class="number">1</span>, <span class="number">0</span>, <span class="number">1</span>, <span class="number">0</span>, <span class="number">0</span>, <span class="number">0</span>, <span class="number">1</span>, <span class="number">0</span>, <span class="number">0</span>, <span class="number">1</span>, <span class="number">1</span>],</span><br><span class="line">      dtype=int32)</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>beer[<span class="string">'cluster'</span>] = km.labels_</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>beer[<span class="string">'cluster2'</span>] = km2.labels_</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>beer.sort_values(<span class="string">'cluster'</span>)</span><br><span class="line">                    name  calories  sodium  alcohol  cost  cluster  cluster2</span><br><span class="line"><span class="number">9</span>        Budweiser_Light       <span class="number">113</span>       <span class="number">8</span>      <span class="number">3.7</span>  <span class="number">0.40</span>        <span class="number">0</span>         <span class="number">1</span></span><br><span class="line"><span class="number">11</span>           Coors_Light       <span 
class="number">102</span>      <span class="number">15</span>      <span class="number">4.1</span>  <span class="number">0.46</span>        <span class="number">0</span>         <span class="number">1</span></span><br><span class="line"><span class="number">8</span>            Miller_Lite        <span class="number">99</span>      <span class="number">10</span>      <span class="number">4.3</span>  <span class="number">0.43</span>        <span class="number">0</span>         <span class="number">1</span></span><br><span class="line"><span class="number">19</span>         Schlitz_Light        <span class="number">97</span>       <span class="number">7</span>      <span class="number">4.2</span>  <span class="number">0.47</span>        <span class="number">0</span>         <span class="number">1</span></span><br><span class="line"><span class="number">4</span>               Heineken       <span class="number">152</span>      <span class="number">11</span>      <span class="number">5.0</span>  <span class="number">0.77</span>        <span class="number">1</span>         <span class="number">0</span></span><br><span class="line"><span class="number">5</span>          Old_Milwaukee       <span class="number">145</span>      <span class="number">23</span>      <span class="number">4.6</span>  <span class="number">0.28</span>        <span class="number">1</span>         <span class="number">0</span></span><br><span class="line"><span class="number">6</span>             Augsberger       <span class="number">175</span>      <span class="number">24</span>      <span class="number">5.5</span>  <span class="number">0.40</span>        <span class="number">1</span>         <span class="number">0</span></span><br><span class="line"><span class="number">7</span>   Srohs_Bohemian_Style       <span class="number">149</span>      <span class="number">27</span>      <span class="number">4.7</span>  <span class="number">0.42</span>        <span class="number">1</span>         <span 
class="number">0</span></span><br><span class="line"><span class="number">2</span>              Lowenbrau       <span class="number">157</span>      <span class="number">15</span>      <span class="number">0.9</span>  <span class="number">0.48</span>        <span class="number">1</span>         <span class="number">0</span></span><br><span class="line"><span class="number">10</span>                 Coors       <span class="number">140</span>      <span class="number">18</span>      <span class="number">4.6</span>  <span class="number">0.44</span>        <span class="number">1</span>         <span class="number">0</span></span><br><span class="line"><span class="number">1</span>                Schlitz       <span class="number">151</span>      <span class="number">19</span>      <span class="number">4.9</span>  <span class="number">0.43</span>        <span class="number">1</span>         <span class="number">0</span></span><br><span class="line"><span class="number">12</span>        Michelob_Light       <span class="number">135</span>      <span class="number">11</span>      <span class="number">4.2</span>  <span class="number">0.50</span>        <span class="number">1</span>         <span class="number">0</span></span><br><span class="line"><span class="number">13</span>                 Becks       <span class="number">150</span>      <span class="number">19</span>      <span class="number">4.7</span>  <span class="number">0.76</span>        <span class="number">1</span>         <span class="number">0</span></span><br><span class="line"><span class="number">14</span>                 Kirin       <span class="number">149</span>       <span class="number">6</span>      <span class="number">5.0</span>  <span class="number">0.79</span>        <span class="number">1</span>         <span class="number">0</span></span><br><span class="line"><span class="number">16</span>                 Hamms       <span class="number">139</span>      <span class="number">19</span>      
<span class="number">4.4</span>  <span class="number">0.43</span>        <span class="number">1</span>         <span class="number">0</span></span><br><span class="line"><span class="number">17</span>   Heilemans_Old_Style       <span class="number">144</span>      <span class="number">24</span>      <span class="number">4.9</span>  <span class="number">0.43</span>        <span class="number">1</span>         <span class="number">0</span></span><br><span class="line"><span class="number">3</span>            Kronenbourg       <span class="number">170</span>       <span class="number">7</span>      <span class="number">5.2</span>  <span class="number">0.73</span>        <span class="number">1</span>         <span class="number">0</span></span><br><span class="line"><span class="number">0</span>              Budweiser       <span class="number">144</span>      <span class="number">15</span>      <span class="number">4.7</span>  <span class="number">0.43</span>        <span class="number">1</span>         <span class="number">0</span></span><br><span class="line"><span class="number">18</span>   Olympia_Goled_Light        <span class="number">72</span>       <span class="number">6</span>      <span class="number">2.9</span>  <span class="number">0.46</span>        <span class="number">2</span>         <span class="number">1</span></span><br><span class="line"><span class="number">15</span>     Pabst_Extra_Light        <span class="number">68</span>      <span class="number">15</span>      <span class="number">2.3</span>  <span class="number">0.38</span>        <span class="number">2</span>         <span class="number">1</span></span><br><span class="line"></span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>cluster_centers = km.cluster_centers_</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>cluster_centers_2 = km2.cluster_centers_</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>beer.groupby(<span 
class="string">"cluster"</span>).mean()</span><br><span class="line">         calories  sodium   alcohol      cost  cluster2</span><br><span class="line">cluster</span><br><span class="line"><span class="number">0</span>          <span class="number">102.75</span>    <span class="number">10.0</span>  <span class="number">4.075000</span>  <span class="number">0.440000</span>         <span class="number">1</span></span><br><span class="line"><span class="number">1</span>          <span class="number">150.00</span>    <span class="number">17.0</span>  <span class="number">4.521429</span>  <span class="number">0.520714</span>         <span class="number">0</span></span><br><span class="line"><span class="number">2</span>           <span class="number">70.00</span>    <span class="number">10.5</span>  <span class="number">2.600000</span>  <span class="number">0.420000</span>         <span class="number">1</span></span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>beer.groupby(<span class="string">"cluster2"</span>).mean()</span><br><span class="line">            calories     sodium   alcohol      cost   cluster</span><br><span class="line">cluster2</span><br><span class="line"><span class="number">0</span>         <span class="number">150.000000</span>  <span class="number">17.000000</span>  <span class="number">4.521429</span>  <span class="number">0.520714</span>  <span class="number">1.000000</span></span><br><span class="line"><span class="number">1</span>          <span class="number">91.833333</span>  <span class="number">10.166667</span>  <span class="number">3.583333</span>  <span class="number">0.433333</span>  <span class="number">0.666667</span></span><br><span class="line"></span><br><span class="line"><span class="comment"># 可视化展示</span></span><br><span class="line"><span class="comment"># 获取中心点，就是均值</span></span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>centers = beer.groupby(<span 
class="string">"cluster"</span>).mean().reset_index()</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span><span class="keyword">import</span> matplotlib.pyplot <span class="keyword">as</span> plt</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>plt.rcParams[<span class="string">'font.size'</span>] = <span class="number">14</span></span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span><span class="keyword">import</span> numpy <span class="keyword">as</span> np</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>colors = np.array([<span class="string">'red'</span>, <span class="string">'green'</span>, <span class="string">'blue'</span>, <span class="string">'yellow'</span>])</span><br><span class="line"><span class="comment"># 不同类别的点画不同的颜色</span></span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>plt.scatter(beer[<span class="string">"calories"</span>], beer[<span class="string">"alcohol"</span>], c=colors[beer[<span class="string">"cluster"</span>]])</span><br><span class="line">&lt;matplotlib.collections.PathCollection object at <span class="number">0x1a1a580f98</span>&gt;</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>plt.scatter(centers.calories, centers.alcohol, linewidths=<span class="number">3</span>, marker=<span class="string">'+'</span>, s=<span class="number">300</span>, c=<span class="string">'black'</span>)</span><br><span class="line">&lt;matplotlib.collections.PathCollection object at <span class="number">0x1a1a9f43c8</span>&gt;</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>plt.xlabel(<span class="string">"Calories"</span>)</span><br><span class="line">Text(<span class="number">0.5</span>,<span class="number">0</span>,<span class="string">'Calories'</span>)</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>plt.ylabel(<span class="string">"Alcohol"</span>)</span><br><span class="line">Text(<span 
class="number">0</span>,<span class="number">0.5</span>,<span class="string">'Alcohol'</span>)</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>plt.show()</span><br></pre></td></tr></table></figure>
<img src="/blog/2018/11/29/1/1.png" title="分类结果">
<p>我们可以看看所有两两组合的结果，使用matrix<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="meta">&gt;&gt;&gt; </span><span class="keyword">from</span> pandas.plotting <span class="keyword">import</span> scatter_matrix</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>scatter_matrix(beer[[<span class="string">"calories"</span>, <span class="string">"sodium"</span>, <span class="string">"alcohol"</span>, <span class="string">"cost"</span>]], s=<span class="number">100</span>, alpha=<span class="number">1</span>, c=colors[beer[<span class="string">"cluster</span></span><br><span class="line"><span class="string">"</span>]], figsize=(<span class="number">10</span>, <span class="number">8</span>))</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>plt.suptitle(<span class="string">"With 3 centroids initialized"</span>)</span><br><span class="line">Text(<span class="number">0.5</span>,<span class="number">0.98</span>,<span class="string">'With 3 centroids initialized'</span>)</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>plt.show()</span><br></pre></td></tr></table></figure></p>
<img src="/blog/2018/11/29/1/2.png" title="matrix">
<p>我们对数据进行归一化后看结果<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br></pre></td><td class="code"><pre><span class="line"><span class="meta">&gt;&gt;&gt; </span><span class="keyword">from</span> sklearn.preprocessing <span class="keyword">import</span> StandardScaler</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>scaler = StandardScaler()</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>X_scaled = scaler.fit_transform(X)</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>X_scaled</span><br><span class="line">array([[ <span class="number">0.38791334</span>,  <span class="number">0.00779468</span>,  <span class="number">0.43380786</span>, <span class="number">-0.45682969</span>],       </span><br><span class="line">       [ <span class="number">0.6250656</span> ,  <span class="number">0.63136906</span>,  <span class="number">0.62241997</span>, <span class="number">-0.45682969</span>],</span><br><span class="line">       [ <span class="number">0.82833896</span>,  <span class="number">0.00779468</span>, <span class="number">-3.14982226</span>, <span class="number">-0.10269815</span>],</span><br><span class="line">      
 [ <span class="number">1.26876459</span>, <span class="number">-1.23935408</span>,  <span class="number">0.90533814</span>,  <span class="number">1.66795955</span>],</span><br><span class="line">       [ <span class="number">0.65894449</span>, <span class="number">-0.6157797</span> ,  <span class="number">0.71672602</span>,  <span class="number">1.95126478</span>],</span><br><span class="line">       [ <span class="number">0.42179223</span>,  <span class="number">1.25494344</span>,  <span class="number">0.3395018</span> , <span class="number">-1.5192243</span> ],</span><br><span class="line">       [ <span class="number">1.43815906</span>,  <span class="number">1.41083704</span>,  <span class="number">1.1882563</span> , <span class="number">-0.66930861</span>],</span><br><span class="line">       [ <span class="number">0.55730781</span>,  <span class="number">1.87851782</span>,  <span class="number">0.43380786</span>, <span class="number">-0.52765599</span>],</span><br><span class="line">       [<span class="number">-1.1366369</span> , <span class="number">-0.7716733</span> ,  <span class="number">0.05658363</span>, <span class="number">-0.45682969</span>],</span><br><span class="line">       [<span class="number">-0.66233238</span>, <span class="number">-1.08346049</span>, <span class="number">-0.5092527</span> , <span class="number">-0.66930861</span>],</span><br><span class="line">       [ <span class="number">0.25239776</span>,  <span class="number">0.47547547</span>,  <span class="number">0.3395018</span> , <span class="number">-0.38600338</span>],</span><br><span class="line">       [<span class="number">-1.03500022</span>,  <span class="number">0.00779468</span>, <span class="number">-0.13202848</span>, <span class="number">-0.24435076</span>],</span><br><span class="line">       [ <span class="number">0.08300329</span>, <span class="number">-0.6157797</span> , <span class="number">-0.03772242</span>,  <span 
class="number">0.03895447</span>],</span><br><span class="line">       [ <span class="number">0.59118671</span>,  <span class="number">0.63136906</span>,  <span class="number">0.43380786</span>,  <span class="number">1.88043848</span>],</span><br><span class="line">       [ <span class="number">0.55730781</span>, <span class="number">-1.39524768</span>,  <span class="number">0.71672602</span>,  <span class="number">2.0929174</span> ],</span><br><span class="line">       [<span class="number">-2.18688263</span>,  <span class="number">0.00779468</span>, <span class="number">-1.82953748</span>, <span class="number">-0.81096123</span>],</span><br><span class="line">       [ <span class="number">0.21851887</span>,  <span class="number">0.63136906</span>,  <span class="number">0.15088969</span>, <span class="number">-0.45682969</span>],</span><br><span class="line">       [ <span class="number">0.38791334</span>,  <span class="number">1.41083704</span>,  <span class="number">0.62241997</span>, <span class="number">-0.45682969</span>],</span><br><span class="line">       [<span class="number">-2.05136705</span>, <span class="number">-1.39524768</span>, <span class="number">-1.26370115</span>, <span class="number">-0.24435076</span>],</span><br><span class="line">       [<span class="number">-1.20439469</span>, <span class="number">-1.23935408</span>, <span class="number">-0.03772242</span>, <span class="number">-0.17352445</span>]])</span><br><span class="line"><span class="comment"># 后面的操作跟以上的一样，请自行操作一下吧</span></span><br></pre></td></tr></table></figure></p>
<p><strong>关于归一化的效果</strong></p>
<p>一般情况下都会将数据进行归一化以消除数值大小对重要性的影响，但是也未必归一化后的结果就比原始数据好，因为可能某些数据的重要程度就是很小，归一化后反而将重要性提高了</p>
<h2 id="模型效果评估"><a href="#模型效果评估" class="headerlink" title="模型效果评估"></a>模型效果评估</h2><p>聚类评估：轮廓系数（Silhouette Coefficient）</p>
<script type="math/tex; mode=display">
s(i) = \frac{b(i) - a(i)}{\max(a(i), b(i))}</script><script type="math/tex; mode=display">\left.
\begin{array}{ll}
1-\frac{a(i)}{b(i)}, & a(i) < b(i)\\
0, & a(i) = b(i)\\
\frac{b(i)}{a(i)}-1, & a(i) > b(i)\\
\end{array}
\right\}=s(i)</script><ul>
<li>计算样本i到同簇其他样本的平均距离ai。ai越小说明样本i越应该被聚类到该簇，将ai称为样本i的簇内不相似度</li>
<li>计算样本i到其他簇Cj的所有样本的平均距离bij，称为样本i与簇Cj的不相似度。定义样本i的簇间不相似度：$b_{i} = min(b_{i1}, b_{i2}, …, b_{ik})$</li>
<li>si接近1，说明样本i聚类合理</li>
<li>si接近-1，说明样本i更应该分类到另外的簇</li>
<li>若si接近0，说明样本i在两个簇的边界上</li>
</ul>
<p>以上面的例子算轮廓系数</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="meta">&gt;&gt;&gt; </span><span class="keyword">from</span> sklearn <span class="keyword">import</span> metrics</span><br><span class="line"><span class="comment"># 传入样本值与分类结果</span></span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>score_scaled = metrics.silhouette_score(X, beer.cluster)</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>score = metrics.silhouette_score(X, beer.cluster)</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>score</span><br><span class="line"><span class="number">0.6731775046455796</span></span><br></pre></td></tr></table></figure>
<p>我们可以根据score值来选取合适的簇数</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br></pre></td><td class="code"><pre><span class="line"><span class="meta">&gt;&gt;&gt; </span>scores = []</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span><span class="keyword">for</span> k <span class="keyword">in</span> range(<span class="number">2</span>, <span class="number">20</span>):</span><br><span class="line"><span class="meta">... </span>    labels = KMeans(n_clusters=k).fit(X).labels_</span><br><span class="line"><span class="meta">... </span>    score = metrics.silhouette_score(X, labels)</span><br><span class="line"><span class="meta">... 
</span>    scores.append(score)...</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>scores</span><br><span class="line">[<span class="number">0.6917656034079486</span>, <span class="number">0.6731775046455796</span>, <span class="number">0.5857040721127795</span>, <span class="number">0.422548733517202</span>, <span class="number">0.4559182167013377</span>, <span class="number">0.4377611669796312</span></span><br><span class="line"><span class="number">4</span>, <span class="number">0.38946337473125997</span>, <span class="number">0.39746405172426014</span>, <span class="number">0.3915697409245163</span>, <span class="number">0.3413109618039333</span>, <span class="number">0.3459775237127248</span>, <span class="number">0.31221439248</span></span><br><span class="line"><span class="number">428434</span>, <span class="number">0.30707782144770296</span>, <span class="number">0.31834561839139497</span>, <span class="number">0.2849514001174898</span>, <span class="number">0.23498077333071996</span>, <span class="number">0.1588091017496281</span>, <span class="number">0.08423</span></span><br><span class="line"><span class="number">051380151177</span>]</span><br><span class="line"><span class="comment"># 我们画图更直观看看结果</span></span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>plt.plot(list(range(<span class="number">2</span>, <span class="number">20</span>)), scores)</span><br><span class="line">[&lt;matplotlib.lines.Line2D object at <span class="number">0x1a1f153be0</span>&gt;]</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>plt.xlabel(<span class="string">"Number of Clusters Initialized"</span>)</span><br><span class="line">Text(<span class="number">0.5</span>,<span class="number">0</span>,<span class="string">'Number of Clusters Initialized'</span>)</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>plt.ylabel(<span class="string">"Sibouette Score"</span>)</span><br><span class="line">Text(<span 
class="number">0</span>,<span class="number">0.5</span>,<span class="string">'Sibouette Score'</span>)</span><br><span class="line"><span class="meta">&gt;&gt;&gt; </span>plt.show()</span><br></pre></td></tr></table></figure>
<img src="/blog/2018/11/29/1/3.png">
          
        
      
    </div>
    
    
    

    

    

    

    <footer class="post-footer">
      

      

      

      
      
        <div class="post-eof"></div>
      
    </footer>
  </div>
  
  
  
  </article>


    
      

  

  
  
  

  <article class="post post-type-normal" itemscope itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="https://wangxiaochuang.github.io/blog/2018/11/18/1.html">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="name" content="jackstraw">
      <meta itemprop="description" content="">
      <meta itemprop="image" content="/blog/images/avatar.jpg">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="稻草人的编程之路">
    </span>

    
      <header class="post-header">

        
        
          <h1 class="post-title" itemprop="name headline">
                
                <a class="post-title-link" href="/blog/2018/11/18/1.html" itemprop="url">计算机的负数表示详解</a></h1>
        

        <div class="post-meta">
          <span class="post-time">
            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              
              <time title="创建于" itemprop="dateCreated datePublished" datetime="2018-11-18T11:17:44+08:00">
                2018-11-18
              </time>
            

            

            
          </span>

          
            <span class="post-category" >
            
              <span class="post-meta-divider">|</span>
            
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              
                <span class="post-meta-item-text">分类于</span>
              
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/基础/" itemprop="url" rel="index">
                    <span itemprop="name">基础</span>
                  </a>
                </span>

                
                
              
            </span>
          

          
            
              <span class="post-comments-count">
                <span class="post-meta-divider">|</span>
                <span class="post-meta-item-icon">
                  <i class="fa fa-comment-o"></i>
                </span>
                <a href="/blog/2018/11/18/1.html#comments" itemprop="discussionUrl">
                  <span class="post-comments-count valine-comment-count" data-xid="/blog/2018/11/18/1.html" itemprop="commentCount"></span>
                </a>
              </span>
            
          

          
          
             <span id="/blog/2018/11/18/1.html" class="leancloud_visitors" data-flag-title="计算机的负数表示详解">
               <span class="post-meta-divider">|</span>
               <span class="post-meta-item-icon">
                 <i class="fa fa-eye"></i>
               </span>
               
                 <span class="post-meta-item-text">阅读次数&#58;</span>
               
                 <span class="leancloud-visitors-count"></span>
             </span>
          

          

          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        
          
            <h1 id="计算机负数表示"><a href="#计算机负数表示" class="headerlink" title="计算机负数表示"></a>计算机负数表示</h1><p>计算机专业的同学应该都知道，计算机里负数的存储是“按位取反加一”，为了简单起见，这里我们以一个字节表示的整数为例</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">+1的表示：</span><br><span class="line">0000 0001</span><br><span class="line">-1的表示：</span><br><span class="line">1111 1111</span><br></pre></td></tr></table></figure>
<p>可是你们知道为什么要这样做么？</p>
<h2 id="模运算"><a href="#模运算" class="headerlink" title="模运算"></a>模运算</h2><p>一个重要的理论：在一个模运算中，一个数与它除以“模”后的余数等价。这里我们给出三个例子：</p>
<ol>
<li><p>时钟是一种模12的系统</p>
 <figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line">假定现在钟表时针指向10点，要将它拨向6点，则有两种拨法：</span><br><span class="line">1. 倒拨4格：10 - 4 = 6</span><br><span class="line">2. 顺拨8格：10 + 8 = 18 等价 6  (mod 12)</span><br><span class="line">可以得出结论，在模12的系统中：</span><br><span class="line">    10 - 4 等价 10 + 8         (mod 12)</span><br><span class="line">    - 4 等价 8</span><br><span class="line">则，我们称8是-4对模12的补码，同样的有</span><br><span class="line">    -3 等价 9</span><br><span class="line">    -5 等价 7</span><br></pre></td></tr></table></figure>
<p> 这里得出两个结论：</p>
 <figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">1. 一个负数的补码等于模减该负数的绝对值</span><br><span class="line">2. 对于一个确定的模，某数减去小于模的另一数，总可以用该数加上另一数负数的补码来代替</span><br></pre></td></tr></table></figure>
<p> 正是上面的两个结论，计算机里的减法都是用加法来做的</p>
</li>
<li><p>“4位十进制数”模运算系统<br> 假定算盘只有4挡，且只能做加法，则在算盘上计算7000 - 2000等于多少？</p>
 <figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">7000 - 2000 = 7000 + (10000 -  2000) = 7000 + 8000 = 15000(mod 10000) = 5000</span><br></pre></td></tr></table></figure>
</li>
<li><p>8位二进制加法器模运算系统（已经很像现在的计算机了）</p>
 <figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">计算 &quot;0111 1111 - 0100 0000 = ?&quot;</span><br><span class="line">  0111 1111 - 0100 0000 =   0111 1111 + (2^8 -  0100 0000)</span><br><span class="line">= 0111 1111 + 1100 0000 = 1 0011 1111 (mod 2^8) = 0011 1111</span><br><span class="line"></span><br><span class="line">注意这里的转换</span><br><span class="line">2^8 - 0100 0000 = 1011 1111 + 1 (按位取反加1)</span><br></pre></td></tr></table></figure>
<p> 对于计算机来说减法不好做，但是取反加1的动作还是很好做的</p>
</li>
</ol>
<h2 id="对于一个signed-char，-128如何得来"><a href="#对于一个signed-char，-128如何得来" class="headerlink" title="对于一个signed char，-128如何得来"></a>对于一个signed char，-128如何得来</h2><figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">我们用上面学到的内容来计算一下</span><br><span class="line"></span><br><span class="line">-128 = -127 - 1</span><br><span class="line">-128 = 1000 0001 + 1111 1111</span><br><span class="line">-128 = 1 1000 0000 (mod 2^8) = 1000 0000</span><br><span class="line">注意这里表示 -0，也就是-128</span><br></pre></td></tr></table></figure>
<h2 id="对于一个signed-char，再看一下-127-1-的结果"><a href="#对于一个signed-char，再看一下-127-1-的结果" class="headerlink" title="对于一个signed char，再看一下 127 + 1 的结果"></a>对于一个signed char，再看一下 127 + 1 的结果</h2><figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">127 + 1 = 0111 1111 + 1</span><br><span class="line">127 + 1 = 1000 0000 = -128</span><br></pre></td></tr></table></figure>
          
        
      
    </div>
    
    
    

    

    

    

    <footer class="post-footer">
      

      

      

      
      
        <div class="post-eof"></div>
      
    </footer>
  </div>
  
  
  
  </article>


    
      

  

  
  
  

  <article class="post post-type-normal" itemscope itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="https://wangxiaochuang.github.io/blog/2018/11/04/2.html">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="name" content="jackstraw">
      <meta itemprop="description" content="">
      <meta itemprop="image" content="/blog/images/avatar.jpg">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="稻草人的编程之路">
    </span>

    
      <header class="post-header">

        
        
          <h1 class="post-title" itemprop="name headline">
                
                <a class="post-title-link" href="/blog/2018/11/04/2.html" itemprop="url">机器学习之贝叶斯算法</a></h1>
        

        <div class="post-meta">
          <span class="post-time">
            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              
              <time title="创建于" itemprop="dateCreated datePublished" datetime="2018-11-04T20:25:24+08:00">
                2018-11-04
              </time>
            

            

            
          </span>

          
            <span class="post-category" >
            
              <span class="post-meta-divider">|</span>
            
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              
                <span class="post-meta-item-text">分类于</span>
              
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/" itemprop="url" rel="index">
                    <span itemprop="name">机器学习</span>
                  </a>
                </span>

                
                
                  ， 
                
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/贝叶斯/" itemprop="url" rel="index">
                    <span itemprop="name">贝叶斯</span>
                  </a>
                </span>

                
                
              
            </span>
          

          
            
              <span class="post-comments-count">
                <span class="post-meta-divider">|</span>
                <span class="post-meta-item-icon">
                  <i class="fa fa-comment-o"></i>
                </span>
                <a href="/blog/2018/11/04/2.html#comments" itemprop="discussionUrl">
                  <span class="post-comments-count valine-comment-count" data-xid="/blog/2018/11/04/2.html" itemprop="commentCount"></span>
                </a>
              </span>
            
          

          
          
             <span id="/blog/2018/11/04/2.html" class="leancloud_visitors" data-flag-title="机器学习之贝叶斯算法">
               <span class="post-meta-divider">|</span>
               <span class="post-meta-item-icon">
                 <i class="fa fa-eye"></i>
               </span>
               
                 <span class="post-meta-item-text">阅读次数&#58;</span>
               
                 <span class="leancloud-visitors-count"></span>
             </span>
          

          

          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        
          
            <h1 id="贝叶斯算法"><a href="#贝叶斯算法" class="headerlink" title="贝叶斯算法"></a>贝叶斯算法</h1><p>这篇博客介绍机器学习中非常常见的贝叶斯算法</p>
<h2 id="前言"><a href="#前言" class="headerlink" title="前言"></a>前言</h2><p>贝叶斯算法要解决的是什么问题？</p>
<p><strong>正向概率:</strong> 假设袋子里面有N个白球，M个黑球，你伸手进去摸一把，摸出黑球的概率是多大？</p>
<p><strong>逆向概率:</strong> 如果我们事先并不知道袋子里黑白球的比例，而是闭着眼睛摸出一个（或者好几个）球，观察这些取出来的球的颜色之后，那么我们可以就此对袋子里的黑白球的比例作出什么样的推测？</p>
<p>有没有疑惑，我们事先就知道了白球、黑球的概率，为什么还要反着来呢？</p>
<p>答案是：现实世界本身就是不确定的，人类的观察能力是有局限性的，对于某些问题，我们可能根本就不可能知道黑球、白球的比例</p>
<h2 id="公式推导"><a href="#公式推导" class="headerlink" title="公式推导"></a>公式推导</h2><p>这里还是拿一个跟生活比较贴近的例子。</p>
<blockquote>
<p>一个学校男生女生的比例分别是60%与40%。男生总是穿长裤，女生则一半穿长裤一半穿裙子<br>正向概率：随机选取一个学生，他（她）穿长裤的概率和穿裙子的概率是多大？<br>逆向概率：迎面走来一个穿长裤的学生，无法确定性别的情况下，你能推断出他（她）是女生的概率是多大么？</p>
</blockquote>
<ol>
<li>假设学校里面人的总数是U个</li>
<li><p>穿长裤的（男生）：$U \times P(Boy) \times P(Pants|Boy)$</p>
<ul>
<li>P(Boy)是男生的概率 = 60%</li>
<li>P(Pants|Boy) 是条件概率，即在Boy这个条件下穿长裤的概率是多大，这里是100%</li>
</ul>
</li>
<li><p>穿长裤的（女生）：</p>
<script type="math/tex; mode=display">
 U \times P(Girl) \times P(Pants|Girl)</script></li>
<li><p>我们可以得到穿长裤的总数：</p>
<script type="math/tex; mode=display">
 SUM = U \times P(Boy) \times P(Pants|Boy) + U \times P(Girl) \times P(Pants|Girl)</script></li>
<li><p>我们的求解目标是穿长裤的女生的概率：</p>
<script type="math/tex; mode=display">
 \begin{equation}\begin{split}
 P(Girl|Pants) &= \frac {U \times P(Girl) \times P(Pants|Girl)} {SUM} \\
 &= \frac {P(Pants, Girl)} {P(Pants)} \\
 \end{split}\end{equation}</script><p> 分子就是穿裤子的女孩子的概率，分母就是穿裤子的概率</p>
</li>
<li><p>贝叶斯公式</p>
<script type="math/tex; mode=display">
 P(A|B) = \frac {P(B|A)P(A)} {P(B)}</script><p> 当在B条件下不好求A，但是反过来在条件A下B的概率的情况下，就可以用贝叶斯算法</p>
</li>
</ol>
<h2 id="模型比较理论"><a href="#模型比较理论" class="headerlink" title="模型比较理论"></a>模型比较理论</h2><ol>
<li><p>最大似然：最符合观测数据的（即 P(D|h) 最大的）最有优势</p>
<p> 我们之前讨论的机器学习算法都是基于最大似然估计来做的</p>
</li>
<li><p>奥卡姆剃刀：P(h)较大的模型有较大的优势</p>
<p> 如果平面上有N个点，近似构成一条直线，但绝不精确的位于一条直线。这时候我们可以用直线来拟合（1阶）、也可以用曲线（n阶）去拟合所有的点，但通常我们会用低阶的多项式去拟合，因为越是高阶的多项式越是不常见</p>
</li>
</ol>
<h2 id="应用实例"><a href="#应用实例" class="headerlink" title="应用实例"></a>应用实例</h2><h3 id="拼写纠正实例"><a href="#拼写纠正实例" class="headerlink" title="拼写纠正实例"></a>拼写纠正实例</h3><p>当我们看到用户输入了一个不在字典中的单词，我们需要去猜测：“这个家伙到底真正想要输入的单词是什么呢？”</p>
<p>P(我们猜测他想输入的单词|他实际输入的单词)，比如用户输入了tha，我们需要猜测他想输入the呢还是than</p>
<ol>
<li>用户实际输入的单词记为D</li>
<li>猜测1：P(h1|D)，猜测2：P(h2|D)，猜测3：P(h3|D)；统一为：P(h|D)</li>
<li>$P(h|D) = \frac {P(h) * P(D|h)}{P(D)}$</li>
<li>p(h)为先验概率，我们会根据一个语料库计算出这些单词的出现概率</li>
<li>P(D|h)表示正确单词输错为了D的概率</li>
<li>这里的P(D)表示输入单词D的概率，这个一般都会被约分掉，我们是比较的概率大小，并不需要得出实际的值</li>
<li>P(h|D) 正比于 P(h) * P(D|h)，对于给定的观测数据，一个猜测是好是坏，取决于“这个猜测本身独立的可能性大小（先验概率，Prior）”和“这个猜测生成我们观测到的数据的可能性大小”</li>
<li>这个的先验概率可以使用语料库计算单词h出现的概率，这里的 P(D|h)根据衡量的指标不同而不同，我们可以按键盘上的概率计算也可以按单词的距离计算等等</li>
<li>比如用户输入tlp，那到底是top还是tip？这个时候，当最大似然不能做出决定性的判断时，先验概率就可以插手进来给出指示—“既然你无法决定，那么我告诉你，一般来说top出现的程度要高许多，所以更可能他想打的是top”</li>
</ol>
<h3 id="实现一个简单的拼写检查器"><a href="#实现一个简单的拼写检查器" class="headerlink" title="实现一个简单的拼写检查器"></a>实现一个简单的拼写检查器</h3><ol>
<li><p>我们的求解的目标：$argmaxc P(c|w) \to argmaxc \frac{P(w|c) \times P(c)}{P(w)}$</p>
<ul>
<li>P(c)，文章中一个正确拼写词c的概率，也就是说，在英语文章中，c出现的概率有多大</li>
<li>P(w|c)，在用户想键入c的情况下敲成了w的概率，因为这个是代表用户会以多大的概率把c敲错成w</li>
<li>argmaxc，用来枚举所有可能的c并且选取概率最大的</li>
</ul>
</li>
<li><p>把语料中的单词全部抽取出来，转成小写，并且去除单词中间的特殊符号</p>
 <div><div class="fold_hider"><div class="close hider_title">  </div></div><div class="fold">
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> re</span><br><span class="line"><span class="keyword">import</span> collections</span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">words</span><span class="params">(text)</span>:</span> <span class="keyword">return</span> re.findall(<span class="string">'[a-z]+'</span>, text.lower())</span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">train</span><span class="params">(features)</span>:</span></span><br><span class="line">    <span class="comment"># 默认单词出现一次，如果为0不好计算</span></span><br><span class="line">    model = collections.defaultdict(<span class="keyword">lambda</span>: <span class="number">1</span>)</span><br><span class="line">    <span class="keyword">for</span> f <span class="keyword">in</span> features:</span><br><span class="line">        model[f] += <span class="number">1</span></span><br><span class="line">    <span class="keyword">return</span> model</span><br><span class="line">NWORDS = train(words(open(<span class="string">'big.txt'</span>).read()))</span><br></pre></td></tr></table></figure>

</div></div></li>
<li><p>返回所有单词编辑距离为1的单词集合</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line">alphabet = <span class="string">'abcdefghijklmnopqrstuvwxyz'</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">edits1</span><span class="params">(word)</span>:</span></span><br><span class="line">    n = len(word)</span><br><span class="line">    <span class="keyword">return</span> set(</span><br><span class="line">        [word[<span class="number">0</span>:i] + word[i+<span class="number">1</span>:] <span class="keyword">for</span> i <span class="keyword">in</span> range(n)] +</span><br><span class="line">        [word[<span class="number">0</span>:i] + word[i+<span class="number">1</span>] + word[i] +  word[i+<span class="number">2</span>:] <span class="keyword">for</span> i <span class="keyword">in</span> range(n<span class="number">-1</span>)] +</span><br><span class="line">        [word[<span class="number">0</span>:i] + c + word[i+<span class="number">1</span>:] <span class="keyword">for</span> i <span class="keyword">in</span> range(n) <span class="keyword">for</span> c <span class="keyword">in</span> alphabet] +</span><br><span class="line">        [word[<span class="number">0</span>:i] + c + word[i:] <span class="keyword">for</span> i <span class="keyword">in</span> range(n+<span class="number">1</span>) <span class="keyword">for</span> c <span class="keyword">in</span> alphabet])</span><br></pre></td></tr></table></figure>
</li>
<li><p>返回所有单词编辑距离为2的单词集合</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">edits2</span><span class="params">(word)</span>:</span></span><br><span class="line">    <span class="keyword">return</span> set(e2 <span class="keyword">for</span> e1 <span class="keyword">in</span> edits1(word) <span class="keyword">for</span> e2 <span class="keyword">in</span> edits1(e1))</span><br></pre></td></tr></table></figure>
</li>
<li><p>为了简单起见，我们认为编辑距离为1的正确单词比编辑距离为2的正确单词的概率要高，编辑距离为0的单词比编辑距离为1的单词的概率高</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">known</span><span class="params">(words)</span>:</span> <span class="keyword">return</span> set(w <span class="keyword">for</span> w <span class="keyword">in</span> words <span class="keyword">if</span> w <span class="keyword">in</span> NWORDS)</span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">correct</span><span class="params">(word)</span>:</span></span><br><span class="line">    candidates = known([word]) <span class="keyword">or</span> known(edits1(word)) <span class="keyword">or</span> known(edits2(word)) <span class="keyword">or</span> [word]</span><br><span class="line">    <span class="keyword">return</span> max(candidates, key=<span class="keyword">lambda</span> w: NWORDS[w])</span><br></pre></td></tr></table></figure>
</li>
<li><p>使用correct试一下效果吧</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">correct(<span class="string">"tlp"</span>)</span><br></pre></td></tr></table></figure>
</li>
</ol>
<h3 id="垃圾邮件过滤实例"><a href="#垃圾邮件过滤实例" class="headerlink" title="垃圾邮件过滤实例"></a>垃圾邮件过滤实例</h3><p>给定一封邮件，判定它是否属于垃圾邮件</p>
<p>D来表示这封邮件，注意D由N个单词组成。我们用h+来表示垃圾邮件，h-表示正常邮件</p>
<script type="math/tex; mode=display">
\begin{equation}\begin{split}
P(h+|D) &= \frac {P(h+) \times P(D|h+)} {P(D)} \\
P(h-|D) &= \frac {P(h-) \times P(D|h-)} {P(D)} \\
\end{split}\end{equation}</script><p>先验概率：P(h+)和P(h-)这两个先验概率都是很容易求出来的。只需要计算一个邮件库里面垃圾邮件和正常邮件的比例就行了</p>
<p>D里面含有N个单词d1，d2，d3，dn，P(D|h+)=P(d1,d2,…,dn|h+)</p>
<p>P(d1,d2,…,dn|h+)的意思是说在垃圾邮件中出现跟我们当前这封邮件一模一样的一封邮件的概率是多大，这个概率就太小了吧，一般来说是不可能的，这种情况我们认为只要这封邮件邮件与垃圾邮件大致相同就认为是垃圾邮件吧，所以就可以进行如下的扩展</p>
<p>P(d1,d2,…,dn|h+)扩展为：P(d1|h+) <em> P(d2|d1,h+) </em> P(d3|d2,d1,h+) * … ，怎么理解呢？我们依次求概率的乘积，垃圾邮件中出现d1的概率乘以垃圾邮件中包含d1单词的情况下出现d2的概率再乘以垃圾邮件中包含d1、d2单词的情况下出现d3的概率 。。。</p>
<p>现在假设di与di-1是完全条件无关的（朴素贝叶斯假设特征之间是独立且互不影响的），因此就可以简化为 P(d1|h+) <em> P(d2|h+) </em> P(d3|h+) * …</p>
<p>对于P(d1|h+) <em> P(d2|h+) </em> P(d3|h+) * … 只要统计di这个单词在垃圾邮件中出现的频率即可</p>
<p>关于这个假设的问题，两个单词之间肯定是有关系的吧，那是不是假设就错了，结果就有问题呢？其实也不是，我们的假设的目的是为了简化运算，如果这个假设对我们的影响不那么大，我们的这个假设就是有意义的</p>
<h3 id="中文新闻分类实例"><a href="#中文新闻分类实例" class="headerlink" title="中文新闻分类实例"></a>中文新闻分类实例</h3><p>这里我们会给一个新闻数据集，一条新闻包含了类别、主题、URL、内容四项内容</p>
<h4 id="停用词"><a href="#停用词" class="headerlink" title="停用词"></a>停用词</h4><p>对于语言内容的数据的分类，我们需要提取出关键词才能大概知道文本的内容。而对于这种新闻类的数据中会包含很多对数据分析没有任何作用的词，这类词就是停用词，比如：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line">！  一下</span><br><span class="line">“   一个</span><br><span class="line">&amp;   一些</span><br><span class="line">￥  一切</span><br><span class="line">（  一天</span><br><span class="line">）  一定</span><br><span class="line">‘   一方面</span><br><span class="line">。。。 。。。</span><br></pre></td></tr></table></figure>
<p>对于停用词表，网络上有一大堆，可以自行搜索后直接使用。</p>
<p>当我们在进行中文分类的时候，就将内容中的停用词都给去掉，只留下精简有意义的内容</p>
<h4 id="TF-IDF：关键词提取"><a href="#TF-IDF：关键词提取" class="headerlink" title="TF-IDF：关键词提取"></a>TF-IDF：关键词提取</h4><p>举个例子来说明这是个什么东西。比如我们要对《中国的蜜蜂养殖》进行词频统计，这个就称为TF（Term Frequency）</p>
<script type="math/tex; mode=display">
词频(TF) = \frac{某个词在文章中的出现次数}{该文档的总单词数量}</script><p>当我们去掉其中的停用词后，现在出现了三个词（中国、蜜蜂、养殖），他们出现的次数都是一样多的，这样能说明这三个词的重要性都是一样的么？</p>
<p>“中国”是个很常见的词，这篇叫《中国的蜜蜂养殖》、另一篇可能叫《中国的美食》等等；然后蜜蜂和养殖可能就没有那么常见了，我们要找的是不那么常见的词在当前这篇文章中出现又比较频繁的词，这就是逆文档频率（IDF）</p>
<script type="math/tex; mode=display">
逆文档频率（IDF）=log(\frac{语料库的文档总数}{包含该词的文档数+1})</script><p>在计算某个词是否为关键词使用的公式：</p>
<script type="math/tex; mode=display">
TF-IDF = 词频（TF） \times 逆文档频率</script><p>例如：《中国的蜜蜂养殖》假定该文长度为1000个词，“中国“、“蜜蜂”、“养殖”各出现20次，则这三个词的“词频”（TF）都为0.02；搜索Google关键词“的”的网页共有250亿张，假定这就是中文网页总数。包含“中国”的网页共有62.3亿张，包含“蜜蜂”的网页为0.484亿张，包含“养殖”的网页为0.973亿张</p>
<div class="table-container">
<table>
<thead>
<tr>
<th></th>
<th>包含该词的文档数（亿）</th>
<th>IDF</th>
<th>TF-IDF</th>
</tr>
</thead>
<tbody>
<tr>
<td>中国</td>
<td>62.3</td>
<td>0.603</td>
<td>0.0121</td>
</tr>
<tr>
<td>蜜蜂</td>
<td>0.484</td>
<td>2.713</td>
<td>0.0543</td>
</tr>
<tr>
<td>养殖</td>
<td>0.973</td>
<td>2.410</td>
<td>0.0482</td>
</tr>
</tbody>
</table>
</div>
<p>TF-IDF 越大，关键词的重要程度越高 </p>
<h4 id="LDA-主题模型"><a href="#LDA-主题模型" class="headerlink" title="LDA 主题模型"></a>LDA 主题模型</h4><p>拿新闻的例子。现在有一堆新闻数据，我们要将其分成N个主题，比如军事、文化、娱乐等。具体细节后续补充</p>
<p>一个比较好的用于LDA模型分析的库 <code>gensim</code></p>
<h4 id="相似度"><a href="#相似度" class="headerlink" title="相似度"></a>相似度</h4><p>现在有两句话，如果让你去计算他们的相似度，你会如何去计算呢？对于计算机来说，就是个字符串，什么也做不了，我们需要转换成计算机能够识别的样子，这里看个例子：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br></pre></td><td class="code"><pre><span class="line">句子A：我喜欢看电视，不喜欢看电影</span><br><span class="line">句子B：我不喜欢看电视，也不喜欢看电影</span><br><span class="line"></span><br><span class="line">分词：</span><br><span class="line">句子A：我/喜欢/看/电视，不/喜欢/看/电影</span><br><span class="line">句子B：我/不/喜欢/看/电视，也/不/喜欢/看/电影</span><br><span class="line"></span><br><span class="line">语料库：[我, 喜欢, 看, 电视, 电影, 不, 也]</span><br><span class="line"></span><br><span class="line">词频：</span><br><span class="line">句子A：我 1, 喜欢 2, 看 2, 电视 1, 电影 1, 不 1, 也 0</span><br><span class="line">句子B：我 1, 喜欢 2, 看 2, 电视 1, 电影 1, 不 2, 也 1</span><br><span class="line"></span><br><span class="line">词频向量：</span><br><span class="line">句子A：[1, 2, 2, 1, 1, 1, 0]</span><br><span class="line">句子B：[1, 2, 2, 1, 1, 2, 1]</span><br><span class="line"></span><br><span class="line">相似度计算：</span><br><span class="line">有了词频向量后就有很多种方式来计算相似度了，比较常见的是余弦相似度计算</span><br></pre></td></tr></table></figure>
<p>余弦相似度</p>
<script type="math/tex; mode=display">
cos\theta = \frac{\sum_{i=1}^n (A_i \times B_i)}{\sqrt{\sum_{i=1}^n (A_i)^2} \times \sqrt{\sum_{i=1}^n{B_i}^2}} = \frac{A \cdot B}{|A| \times |B|}</script><p>进行相似度计算的时候，数据的预处理是很重要的，需要花大量的时间去做数据预处理</p>
<h4 id="使用python完成新闻分类"><a href="#使用python完成新闻分类" class="headerlink" title="使用python完成新闻分类"></a>使用python完成新闻分类</h4><p>数据来源: <a href="http://www.sogou.com/labs/resource/ca.php">搜狗实验室</a></p>
<ol>
<li><p>搜狗实验室下载的数据为xml格式，需要自行转换为pandas能读的格式，假设已经处理好</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> pandas <span class="keyword">as</span> pd</span><br><span class="line">df_news = pd.read_table(<span class="string">'./data/val.txt'</span>, names=[<span class="string">'category'</span>, <span class="string">'theme'</span>, <span class="string">'URL'</span>, <span class="string">'content'</span>],encoding=<span class="string">'utf-8'</span>)</span><br><span class="line">df_news = df_news.dropna()</span><br></pre></td></tr></table></figure>
</li>
<li><p>分词：使用结巴分词器</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment"># pip install jieba</span></span><br><span class="line"><span class="keyword">import</span> jieba</span><br><span class="line">content = df_news.content.values.tolist()</span><br><span class="line"><span class="comment"># print(content[1000])</span></span><br><span class="line"></span><br><span class="line"><span class="comment"># 将内容分词后存为list of list格式</span></span><br><span class="line">content_S = []</span><br><span class="line"><span class="keyword">for</span> line <span class="keyword">in</span> content:</span><br><span class="line">    current_segment = jieba.lcut(line)</span><br><span class="line">    <span class="keyword">if</span> len(current_segment) &gt; <span class="number">1</span> <span class="keyword">and</span> current_segment != <span class="string">'\r\n'</span>:</span><br><span class="line">        content_S.append(current_segment)</span><br><span class="line"></span><br><span class="line">df_content = pd.DataFrame(&#123;<span class="string">'content_S'</span>: content_S&#125;)</span><br><span class="line"><span class="comment"># print(df_content[1000])</span></span><br></pre></td></tr></table></figure>
</li>
<li><p>清洗：使用停用词表过滤内容</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br></pre></td><td class="code"><pre><span class="line">stopwords = pd.read_csv(<span class="string">"stopwords.txt"</span>, index_col=<span class="keyword">False</span>, sep=<span class="string">"\t"</span>, quoting=<span class="number">3</span>, names=[<span class="string">'stopword'</span>], encoding=<span class="string">'utf-8'</span>)</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">drop_stopwords</span><span class="params">(contents, stopwords)</span>:</span></span><br><span class="line">    contents_clean = []</span><br><span class="line">    all_words = []</span><br><span class="line">    <span class="keyword">for</span> line <span class="keyword">in</span> contents:</span><br><span class="line">        line_clean = []</span><br><span class="line">        <span class="keyword">for</span> word <span class="keyword">in</span> line:</span><br><span class="line">            <span class="keyword">if</span> word <span class="keyword">in</span> stopwords:</span><br><span class="line">                <span class="keyword">continue</span></span><br><span class="line">            <span class="comment"># 将过滤后的word放入line_clean中</span></span><br><span class="line">            
line_clean.append(word)</span><br><span class="line">            <span class="comment"># 将所有单词都放到all_words列表，后面会使用词云展示</span></span><br><span class="line">            all_words.append(str(word))</span><br><span class="line">        contents_clean.append(line_clean)</span><br><span class="line">    <span class="keyword">return</span> contents_clean, all_words</span><br><span class="line">contents = df_content.content_S.values.tolist()</span><br><span class="line">stopwords = stopwords.stopword.values.tolist()</span><br><span class="line">contents_clean, all_words = drop_stopwords(contents, stopwords)</span><br><span class="line"><span class="comment"># print(contents_clean[1000])</span></span><br></pre></td></tr></table></figure>
</li>
<li><p>将所有的单词的列表进行统计计数，画出词云图（与分类没有关系，可选）</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br></pre></td><td class="code"><pre><span class="line">df_all_words = pd.DataFrame(&#123;<span class="string">'all_words'</span>: all_words&#125;)</span><br><span class="line"><span class="comment"># print(df_all_words.head())</span></span><br><span class="line"><span class="keyword">import</span> numpy <span class="keyword">as</span> np</span><br><span class="line">words_count = df_all_words.groupby(by=[<span class="string">'all_words'</span>])[<span class="string">'all_words'</span>].agg(&#123;<span class="string">"count"</span>: np.size&#125;)</span><br><span class="line">words_count = words_count.reset_index().sort_values(by=[<span class="string">"count"</span>],ascending=<span class="keyword">False</span>)</span><br><span class="line"><span class="comment"># print(words_count.head())</span></span><br><span class="line"></span><br><span class="line"><span class="comment"># pip install wordcloud</span></span><br><span class="line"><span class="comment"># https://github.com/amueller/word_cloud</span></span><br><span class="line"><span class="keyword">from</span> wordcloud <span class="keyword">import</span> WordCloud</span><br><span class="line"><span class="keyword">import</span> matplotlib.pyplot <span class="keyword">as</span> plt</span><br><span class="line">%matplotlib inline</span><br><span 
class="line"><span class="keyword">import</span> matplotlib</span><br><span class="line">matplotlib.rcParams[<span class="string">'figure.figsize'</span>] = (<span class="number">10.0</span>, <span class="number">5.0</span>)</span><br><span class="line"></span><br><span class="line">wordcloud = WordCloud(font_path=<span class="string">"./data/simhei.ttf"</span>, background_color=<span class="string">"white"</span>, max_font_size=<span class="number">80</span>)</span><br><span class="line">word_frequence = &#123;x[<span class="number">0</span>]:x[<span class="number">1</span>] <span class="keyword">for</span> x <span class="keyword">in</span> words_count.head(<span class="number">100</span>).values&#125;</span><br><span class="line">wordcloud = wordcloud.fit_words(word_frequence)</span><br><span class="line">plt.imshow(wordcloud)</span><br></pre></td></tr></table></figure>
</li>
<li><p>使用 TF-IDF 提取关键词</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> jieba.analyse</span><br><span class="line">index = <span class="number">1000</span></span><br><span class="line">print(df_news[<span class="string">'content'</span>][index])</span><br><span class="line">content_S_str = <span class="string">""</span>.join(content_S[index])</span><br><span class="line"><span class="comment"># topK: 返回前K个词</span></span><br><span class="line">print(<span class="string">" "</span>.join(jieba.analyse.extract_tags(content_S_str, topK=<span class="number">5</span>, withWeight=<span class="keyword">False</span>)))</span><br></pre></td></tr></table></figure>
</li>
<li><p>使用gensim进行LDA主题模型分析</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> gensim</span><br><span class="line"><span class="keyword">from</span> gensim <span class="keyword">import</span> corpora, models, similarities</span><br><span class="line"></span><br><span class="line"><span class="comment"># 制作为字典映射表</span></span><br><span class="line">dictionary = corpora.Dictionary(contents_clean)</span><br><span class="line"><span class="comment"># 每一个新闻都做词袋模型</span></span><br><span class="line">corpus = [dictionary.doc2bow(sentence) <span class="keyword">for</span> sentence <span class="keyword">in</span> contents_clean]</span><br><span class="line"></span><br><span class="line"><span class="comment">#  num_topics 主题数</span></span><br><span class="line">lda = models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=<span class="number">20</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 打印第一个主题中topK的关键词</span></span><br><span class="line">print(lda.print_topic(<span class="number">1</span>, topn=<span class="number">5</span>))</span><br><span class="line"></span><br><span class="line"><span class="comment"># 打印每个topic的关键词及权重</span></span><br><span class="line"><span class="keyword">for</span> topic <span class="keyword">in</span> lda.print_topics(num_topics=<span class="number">20</span>, num_words=<span 
class="number">5</span>):</span><br><span class="line">    print(topic[<span class="number">1</span>])</span><br></pre></td></tr></table></figure>
</li>
<li><p>使用贝叶斯完成新闻的分类</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br></pre></td><td class="code"><pre><span class="line">df_train = pd.DataFrame(&#123;<span class="string">'contents_clean'</span>: contents_clean, <span class="string">'label'</span>: df_news[<span class="string">'category'</span>]&#125;)</span><br><span class="line"><span 
class="comment"># print(df_train.tail())</span></span><br><span class="line">df_train.label.unique()</span><br><span class="line"></span><br><span class="line"><span class="comment"># label替换为数字</span></span><br><span class="line">label_mapping = &#123;<span class="string">'汽车'</span>: <span class="number">1</span>, <span class="string">'财经'</span>: <span class="number">2</span>, <span class="string">'科技'</span>: <span class="number">3</span>, <span class="string">'健康'</span>: <span class="number">4</span>, <span class="string">'体育'</span>: <span class="number">5</span>, <span class="string">'教育'</span>: <span class="number">6</span>, <span class="string">'文化'</span>: <span class="number">7</span>, <span class="string">'军事'</span>: <span class="number">8</span>, <span class="string">'娱乐'</span>: <span class="number">9</span>, <span class="string">'时尚'</span>: <span class="number">0</span>&#125;</span><br><span class="line">df_train[<span class="string">'label'</span>] = df_train[<span class="string">'label'</span>].map(label_mapping)</span><br><span class="line">print(df_train.head())</span><br><span class="line"></span><br><span class="line"><span class="comment"># 将数据拆分为训练集与测试集</span></span><br><span class="line"><span class="keyword">from</span> sklearn.model_selection <span class="keyword">import</span> train_test_split</span><br><span class="line">x_train, x_test, y_train, y_test = train_test_split(df_train[<span class="string">'contents_clean'</span>].values, df_train[<span class="string">'label'</span>].values, random_state=<span class="number">1</span>)</span><br><span class="line"><span class="comment"># 将数据转换为sklearn.naive_bayes 要求的格式: 列表的每项为一个字符串，是单词的集合，以空格分割</span></span><br><span class="line">words = []</span><br><span class="line"><span class="keyword">for</span> line_index <span class="keyword">in</span> range(len(x_train)):</span><br><span class="line">    <span class="keyword">try</span>:</span><br><span class="line">        words.append(<span 
class="string">' '</span>.join(x_train[line_index]))</span><br><span class="line">    <span class="keyword">except</span>:</span><br><span class="line">        print(line_index, word_index)</span><br><span class="line">words[<span class="number">0</span>]</span><br><span class="line"></span><br><span class="line"><span class="comment"># 使用sklearn构造向量，这个构造原理见下一节</span></span><br><span class="line"><span class="keyword">from</span> sklearn.feature_extraction.text <span class="keyword">import</span> CountVectorizer</span><br><span class="line">vec = CountVectorizer(analyzer=<span class="string">'word'</span>, max_features=<span class="number">4000</span>, lowercase=<span class="keyword">False</span>)</span><br><span class="line">vec.fit(words)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 上面的words就是构造好的向量值，在这里进行bayes的分类</span></span><br><span class="line"><span class="keyword">from</span> sklearn.naive_bayes <span class="keyword">import</span> MultinomialNB</span><br><span class="line">classifier = MultinomialNB()</span><br><span class="line">classifier.fit(vec.transform(words), y_train)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 对测试集执行同样的预处理</span></span><br><span class="line">test_words = []</span><br><span class="line"><span class="keyword">for</span> line_index <span class="keyword">in</span> range(len(x_test)):</span><br><span class="line">    <span class="keyword">try</span>:</span><br><span class="line">        test_words.append(<span class="string">' '</span>.join(x_test[line_index]))</span><br><span class="line">    <span class="keyword">except</span>:</span><br><span class="line">        print(line_index, word_index)</span><br><span class="line">test_words[<span class="number">0</span>]</span><br><span class="line"></span><br><span class="line"><span class="comment"># 看看使用贝叶斯分类的效果，这里是精度值</span></span><br><span class="line">classifier.score(vec.transform(test_words), 
y_test)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="comment"># 上面是基于词频进行分类的，我们还可以根据TF-IDF进行分类，与基于词频的类似</span></span><br><span class="line"><span class="keyword">from</span> sklearn.feature_extraction.text <span class="keyword">import</span> TfidfVectorizer</span><br><span class="line"><span class="keyword">from</span> sklearn.naive_bayes <span class="keyword">import</span> MultinomialNB</span><br><span class="line">vectorizer = TfidfVectorizer(analyzer=<span class="string">'word'</span>, max_features=<span class="number">4000</span>, lowercase=<span class="keyword">False</span>)</span><br><span class="line">vectorizer.fit(words)</span><br><span class="line">classifier = MultinomialNB()</span><br><span class="line">classifier.fit(vectorizer.transform(words), y_train)</span><br><span class="line">classifier.score(vectorizer.transform(test_words), y_test)</span><br></pre></td></tr></table></figure>
</li>
<li><p>sklearn构造向量</p>
 <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> sklearn.feature_extraction.text <span class="keyword">import</span> CountVectorizer</span><br><span class="line">texts = [<span class="string">"dog cat fish"</span>, <span class="string">"dog cat cat"</span>, <span class="string">"fish bird"</span>, <span class="string">"bird"</span>]</span><br><span class="line">cv = CountVectorizer()</span><br><span class="line">cv_fit = cv.fit_transform(texts)</span><br><span class="line"></span><br><span class="line">print(cv.get_feature_names())</span><br><span class="line">print(cv_fit.toarray())</span><br><span class="line">print(cv_fit.toarray().sum(axis=<span class="number">0</span>))</span><br></pre></td></tr></table></figure>
<p> texts有4个元素，每个元素都是以空格分割的字符串，所以放在一起去重后就是可使用的单词集合，这个有个单词”dog cat fish bird”, 那么构造出来的向量就有4列，每个元素表示对应单词出现的次数</p>
</li>
</ol>

          
        
      
    </div>
    
    
    

    

    

    

    <footer class="post-footer">
      

      

      

      
      
        <div class="post-eof"></div>
      
    </footer>
  </div>
  
  
  
  </article>


    
      

  

  
  
  

  <article class="post post-type-normal" itemscope itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="https://wangxiaochuang.github.io/blog/2018/11/04/1.html">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="name" content="jackstraw">
      <meta itemprop="description" content="">
      <meta itemprop="image" content="/blog/images/avatar.jpg">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="稻草人的编程之路">
    </span>

    
      <header class="post-header">

        
        
          <h1 class="post-title" itemprop="name headline">
                
                <a class="post-title-link" href="/blog/2018/11/04/1.html" itemprop="url">机器学习之集成算法</a></h1>
        

        <div class="post-meta">
          <span class="post-time">
            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              
              <time title="创建于" itemprop="dateCreated datePublished" datetime="2018-11-04T08:12:50+08:00">
                2018-11-04
              </time>
            

            

            
          </span>

          
            <span class="post-category" >
            
              <span class="post-meta-divider">|</span>
            
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              
                <span class="post-meta-item-text">分类于</span>
              
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/" itemprop="url" rel="index">
                    <span itemprop="name">机器学习</span>
                  </a>
                </span>

                
                
                  ， 
                
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/集成算法/" itemprop="url" rel="index">
                    <span itemprop="name">集成算法</span>
                  </a>
                </span>

                
                
              
            </span>
          

          
            
              <span class="post-comments-count">
                <span class="post-meta-divider">|</span>
                <span class="post-meta-item-icon">
                  <i class="fa fa-comment-o"></i>
                </span>
                <a href="/blog/2018/11/04/1.html#comments" itemprop="discussionUrl">
                  <span class="post-comments-count valine-comment-count" data-xid="/blog/2018/11/04/1.html" itemprop="commentCount"></span>
                </a>
              </span>
            
          

          
          
             <span id="/blog/2018/11/04/1.html" class="leancloud_visitors" data-flag-title="机器学习之集成算法">
               <span class="post-meta-divider">|</span>
               <span class="post-meta-item-icon">
                 <i class="fa fa-eye"></i>
               </span>
               
                 <span class="post-meta-item-text">阅读次数&#58;</span>
               
                 <span class="leancloud-visitors-count"></span>
             </span>
          

          

          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        
          
            <h1 id="集成算法"><a href="#集成算法" class="headerlink" title="集成算法"></a>集成算法</h1><h2 id="前言"><a href="#前言" class="headerlink" title="前言"></a>前言</h2><p>本篇博客介绍集成算法。准确的说集成算法并非机器学习中的一种算法，它是将多种基础算法组合在一起的一种方式。</p>
<p>那么集成算法的目的是什么呢？用某种机器学习算法去完成我们的任务不行么？答案是：当然可以啦。可是机器学习的最终目标是结果越准确越好，管他黑猫白猫，能抓老鼠的就是好猫。其实集成算法对我们的机器学习任务有着无与伦比的重要性，就因为使用了集成算法后，往往效果是非常不错的，这就是目的。</p>
<p>总结来说，集成算法通常有三种比较常见的算法，分别是：</p>
<ul>
<li>Bagging</li>
<li>Boosting</li>
<li>Stacking</li>
</ul>
<p>下面一一介绍</p>
<h2 id="Bagging"><a href="#Bagging" class="headerlink" title="Bagging"></a>Bagging</h2><p>Bagging的方式就是并行的训练多个分类器，然后取平均。最典型的代表就是随机森林算法（为决策树模型服务的）</p>
<script type="math/tex; mode=display">
f(x) = \frac{1}{M} \sum_{m=1}^{M} f_{m}(x)</script><p>随机：数据采样的随机与特征选择的随机</p>
<p>森林：很多决策树并行的放在一起</p>
<p>随机森林的优势：</p>
<ol>
<li>它能够处理很高维度（feature）的数据，并且不用做特征选择</li>
<li>训练完成后，它能够给出哪些feature比较重要</li>
<li>容易做成并行化方法，速度比较快</li>
<li><p>可以进行可视化展示，便于分析</p>
<p> 能够知道各个特征的重要程度，通过图表能够表示出来。</p>
<p> 举个例子来说，比如有四个特征A、B、C、D，现在我们想看B特征的重要程度。</p>
<p> 我们就将B特征的数据值随机化，变成一个垃圾值，然后看预测结果。</p>
<p> 如果比垃圾化前差很多，则说明B特征很重要；如果跟垃圾化前差不多，则说明B不重要；就是通过这种方式来看特征的重要程度</p>
</li>
</ol>
<p>除了决策树可以当做Bagging模型的基础模型，其他算法也可以，比如KNN，但是因为KNN很难去随机让泛化能力变强，所以一般不会将KNN作为基础模型。树模型是公认为比较不错的基础模型</p>
<p>随机森林里的树选择多少个合适呢？一般情况下20-100个就差不多，太多也不会有特别明显的提升。</p>
<h2 id="Boosting"><a href="#Boosting" class="headerlink" title="Boosting"></a>Boosting</h2><p>Boosting的方式就是串行的逐渐加强训练，从弱学习器开始，通过加权来进行训练，举个例子：</p>
<p>现在要预测银行给我贷多少钱，要贷1000块。第一棵树预测出来是950，还差50让给第二棵树来训练；假设第二棵树预测出来30，那还差20；再交给第三棵树来训练，一直到效果比较不错为止</p>
<script type="math/tex; mode=display">
F_m(x) = F_{m-1}(x) + \arg\min_h \sum_{i=1}^{n} L(y_i, F_{m-1}(x_i) + h(x_i))</script><p>Boosting算法中典型的代表是：AdaBoost、Xgboost</p>
<h2 id="Stacking"><a href="#Stacking" class="headerlink" title="Stacking"></a>Stacking</h2><p>Stacking称为堆叠模型，比较暴力，将各种算法堆在一起得出一个结果，比如将4个不同的算法组合在一起，算出来的值求平均值就是一个非常简单的堆叠算法。</p>
<p>该算法可以堆叠各种各样的分类器（KNN、SVM、RF等等）</p>
<p>分阶段，第一阶段计算出各自的结果，第二阶段再用第一阶段的值</p>
<p>堆叠算法确实能够带来结果的提升，但是速度是一个问题，其经常都是竞赛与论文的神器</p>

          
        
      
    </div>
    
    
    

    

    

    

    <footer class="post-footer">
      

      

      

      
      
        <div class="post-eof"></div>
      
    </footer>
  </div>
  
  
  
  </article>


    
      

  

  
  
  

  <article class="post post-type-normal" itemscope itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="https://wangxiaochuang.github.io/blog/2018/10/20/1.html">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="name" content="jackstraw">
      <meta itemprop="description" content="">
      <meta itemprop="image" content="/blog/images/avatar.jpg">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="稻草人的编程之路">
    </span>

    
      <header class="post-header">

        
        
          <h1 class="post-title" itemprop="name headline">
                
                <a class="post-title-link" href="/blog/2018/10/20/1.html" itemprop="url">机器学习之决策树算法</a></h1>
        

        <div class="post-meta">
          <span class="post-time">
            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              
              <time title="创建于" itemprop="dateCreated datePublished" datetime="2018-10-20T09:43:45+08:00">
                2018-10-20
              </time>
            

            

            
          </span>

          
            <span class="post-category" >
            
              <span class="post-meta-divider">|</span>
            
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              
                <span class="post-meta-item-text">分类于</span>
              
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/" itemprop="url" rel="index">
                    <span itemprop="name">机器学习</span>
                  </a>
                </span>

                
                
                  ， 
                
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/决策树/" itemprop="url" rel="index">
                    <span itemprop="name">决策树</span>
                  </a>
                </span>

                
                
              
            </span>
          

          
            
              <span class="post-comments-count">
                <span class="post-meta-divider">|</span>
                <span class="post-meta-item-icon">
                  <i class="fa fa-comment-o"></i>
                </span>
                <a href="/blog/2018/10/20/1.html#comments" itemprop="discussionUrl">
                  <span class="post-comments-count valine-comment-count" data-xid="/blog/2018/10/20/1.html" itemprop="commentCount"></span>
                </a>
              </span>
            
          

          
          
             <span id="/blog/2018/10/20/1.html" class="leancloud_visitors" data-flag-title="机器学习之决策树算法">
               <span class="post-meta-divider">|</span>
               <span class="post-meta-item-icon">
                 <i class="fa fa-eye"></i>
               </span>
               
                 <span class="post-meta-item-text">阅读次数&#58;</span>
               
                 <span class="leancloud-visitors-count"></span>
             </span>
          

          

          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        
          
            <h1 id="决策树"><a href="#决策树" class="headerlink" title="决策树"></a>决策树</h1><p>这篇博客我们介绍决策树。决策树也是机器学习中非常厉害的一个算法</p>
<p>算法并没有好坏之分，只有哪个算法更适合哪个场景。只要是能解决问题的，且效果好的，就是一种很厉害的算法。决策树既可以做分类也可以做回归。</p>
<p>那么决策树是一个什么样的算法呢？</p>
<blockquote>
<ol>
<li>首先顾名思义，决策树是一个树模型，一个倒挂的树，与linux的文件树类似</li>
<li>从根节点一步步走到叶子节点（这就是一个决策的过程）</li>
<li>所有的数据最终都会落到叶子节点，既可以做分类也可以做回归</li>
<li>由根节点（第一个选择点）、非叶子节点与分支（中间过程）、叶子节点（最终决策结果）</li>
</ol>
</blockquote>
<p>这些节点都表达了什么含义呢？</p>
<blockquote>
<ol>
<li>节点越多表示数据划分得越细</li>
<li>所有的叶子节点都表示是一个决策后的结果</li>
<li>理论上有几个特征，我们就有几个分支</li>
</ol>
</blockquote>
<h2 id="决策树训练与测试"><a href="#决策树训练与测试" class="headerlink" title="决策树训练与测试"></a>决策树训练与测试</h2><p>训练阶段：从给定的训练集构造一棵树（从根节点开始选择特征，需要考虑怎么选择好的特征）</p>
<p>测试阶段：这个阶段非常容易，将测试数据放到决策树中，从上到下走一遍就好了</p>
<p>主要的工作：找到具有决定性作用的特征，根据决定性作用的程度去构造倒挂树，决定性作用最大的作为根节点，后续类推。怎么判断决定性作用的程度呢，根据熵值。</p>
<h3 id="衡量标准-熵"><a href="#衡量标准-熵" class="headerlink" title="衡量标准-熵"></a>衡量标准-熵</h3><p>熵是物理或者化学中常用的一个词，表示随机变量不确定性的一种度量（表示了物体内部的混乱程度）</p>
<p>通俗解释一下：比如到一个杂货市场去买东西，买到一只铅笔的可能性是很低的，因为东西种类太多，每个种类取到的概率都很小，就说这个熵值很高。去苹果专卖店去买苹果设备的熵值就很低，因为买到的一定是苹果设备</p>
<p>公式：$H(X) = - \sum_{i=1}^{n} p_i \log p_i \text{（i表示：第i个类别）}$ </p>
<img src="/blog/2018/10/20/1/func.png" alt="熵值计算公式示意图" title="熵值计算公式示意图">
<p>假设现在有两个集合A和B：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">A = &#123;1,1,1,1,1,1,1,1,2,2&#125;</span><br><span class="line">B = &#123;1,2,3,4,5,6,7,8,9,1&#125;</span><br></pre></td></tr></table></figure>
<p>A集合就两个类别，整体的熵值是小于B集合的。那么想象一下，我们的决策树模型的构造过程中，按某个特征划分的时候我们是更想要得到哪种集合的数据呢？必然是A集合吧（为什么？说明分类很有效果啊，如果什么类型的数据都有，还有必要分这个类么）。</p>
<p>比如我们将一个样本集合里的index值也拿来作为一个特征去划分数据，那么这样的熵值肯定很高，用这样的特征去划分数据是没有意义的</p>
<p>不确定性越大，得到的熵值越大，当概率为0或者1的时候，logp就为0，就没有不确定性，熵值最小</p>
<h3 id="从一个天气情况与打球的实例来看决策树的过程"><a href="#从一个天气情况与打球的实例来看决策树的过程" class="headerlink" title="从一个天气情况与打球的实例来看决策树的过程"></a>从一个天气情况与打球的实例来看决策树的过程</h3><ol>
<li>数据如下</li>
</ol>
<div class="table-container">
<table>
<thead>
<tr>
<th>outlook</th>
<th>temperature</th>
<th>humidity</th>
<th>windy</th>
<th>play</th>
</tr>
</thead>
<tbody>
<tr>
<td>sunny</td>
<td>hot</td>
<td>high</td>
<td>FALSE</td>
<td>no</td>
</tr>
<tr>
<td>sunny</td>
<td>hot</td>
<td>high</td>
<td>TRUE</td>
<td>no</td>
</tr>
<tr>
<td>overcast</td>
<td>hot</td>
<td>high</td>
<td>FALSE</td>
<td>yes</td>
</tr>
<tr>
<td>rainy</td>
<td>mild</td>
<td>high</td>
<td>FALSE</td>
<td>yes</td>
</tr>
<tr>
<td>rainy</td>
<td>cool</td>
<td>normal</td>
<td>FALSE</td>
<td>yes</td>
</tr>
<tr>
<td>rainy</td>
<td>cool</td>
<td>normal</td>
<td>TRUE</td>
<td>no</td>
</tr>
<tr>
<td>overcast</td>
<td>cool</td>
<td>normal</td>
<td>TRUE</td>
<td>yes</td>
</tr>
<tr>
<td>sunny</td>
<td>mild</td>
<td>high</td>
<td>FALSE</td>
<td>no</td>
</tr>
<tr>
<td>sunny</td>
<td>cool</td>
<td>normal</td>
<td>FALSE</td>
<td>yes</td>
</tr>
<tr>
<td>rainy</td>
<td>mild</td>
<td>normal</td>
<td>FALSE</td>
<td>yes</td>
</tr>
<tr>
<td>sunny</td>
<td>mild</td>
<td>normal</td>
<td>TRUE</td>
<td>yes</td>
</tr>
<tr>
<td>overcast</td>
<td>mild</td>
<td>high</td>
<td>TRUE</td>
<td>yes</td>
</tr>
<tr>
<td>overcast</td>
<td>hot</td>
<td>normal</td>
<td>FALSE</td>
<td>yes</td>
</tr>
<tr>
<td>rainy</td>
<td>mild</td>
<td>high</td>
<td>TRUE</td>
<td>no</td>
</tr>
</tbody>
</table>
</div>
<ol>
<li>根节点用什么指标来划分呢</li>
</ol>
<p>第一步我们先算一下初始的熵值是多少</p>
<p>在历史数据中（14天）有9天打球、5天不打球，因此可以计算出熵值：</p>
<script type="math/tex; mode=display">
- \frac{9}{14} \log_2{\frac{9}{14}} - \frac{5}{14} \log_2{\frac{5}{14}} = 0.940</script><p>这里有如下的几种划分方式，我们依次计算一下划分后的熵值</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br></pre></td><td class="code"><pre><span class="line">1. 基于天气的划分</span><br><span class="line">    outlook:                        5/14 * 0.971 + 4/14 * 0 + 5/14 * 9.971 = 0.693</span><br><span class="line">        sunny（5天）</span><br><span class="line">            yes yes no no no        -1 * 2/5 * log2(2/5) - 3/5 * log2(3/5) = 0.971</span><br><span class="line">        overcast（4天）    </span><br><span class="line">            yes yes yes yes         -1 * 1 * log2(1) = 0</span><br><span class="line">        rainy（5天）</span><br><span class="line">            yes yes yes no no       -1 * 3/5 * log2(3/5) - 2/5 * log2(2/5) = 0.971</span><br><span class="line">2. 
基于温度的划分</span><br><span class="line">    temperature:                    0</span><br><span class="line">        hot</span><br><span class="line">            yes yes no no </span><br><span class="line">        mild</span><br><span class="line">            yes yes yes yes no no</span><br><span class="line">        cool</span><br><span class="line">            yes yes yes no</span><br><span class="line">3. 基于湿度的划分</span><br><span class="line">    humidity</span><br><span class="line">        high</span><br><span class="line">            yes yes yes no no no no</span><br><span class="line">        normal</span><br><span class="line">            yes yes yes yes yes yes no</span><br><span class="line">4. 基于有风的划分</span><br><span class="line">    windy</span><br><span class="line">        false</span><br><span class="line">            yes yes yes yes yes yes no no</span><br><span class="line">        true</span><br><span class="line">            yes yes yes no no no</span><br></pre></td></tr></table></figure>
<h3 id="信息增益"><a href="#信息增益" class="headerlink" title="信息增益"></a>信息增益</h3><p>信息增益：表示特征X使得类Y的不确定性减少的程度（希望使用特征X分类后是同类的在一起）</p>
<p>从上面的例子我们可以得出，当我们基于天气进行划分时，信息增益为：0.940 - 0.693 = 0.247</p>
<p>同理可以得出其他分类的信息增益：gain(temperature)=0.029  gain(humidity)=0.152  gain(windy)=0.048</p>
<h3 id="关于计算熵值的算法"><a href="#关于计算熵值的算法" class="headerlink" title="关于计算熵值的算法"></a>关于计算熵值的算法</h3><p>考虑一下上面计算熵值的方法，我们根据每个特征计算了熵值，假设现在有一个特征是id值，依次增大的一个数，我们要以id作为特征，那么可以算出来熵值为0，增益是最大的，但是这个划分毫无意义</p>
<p>这种算法叫ID3算法，这个算法是有一定问题的，所以呢还是有很多其他评估熵值的算法</p>
<ol>
<li>ID3</li>
<li>C4.5：信息增益率（解决ID3问题，考虑自身熵）</li>
<li>使用GINI系数来做衡量标准</li>
</ol>
<p>GINI系数：$Gini(p) = \sum_{k=1}^K p_k(1-p_k) = 1 - \sum_{k=1}^K p_k^2$</p>
<h3 id="关于连续值特征的计算"><a href="#关于连续值特征的计算" class="headerlink" title="关于连续值特征的计算"></a>关于连续值特征的计算</h3><p>上面的例子都是一些离散值，而很多时候我们的数据都是连续值，那应该怎么处理呢？将连续值进行离散化</p>
<p>将连续数据进行排序，然后进行二分，可以得到离散数据，比如一堆数据从1到100的连续值，我们可以拿50当做分界点，就变成两个离散值</p>
<h2 id="决策树剪枝策略"><a href="#决策树剪枝策略" class="headerlink" title="决策树剪枝策略"></a>决策树剪枝策略</h2><p>为什么要剪枝？决策树的过拟合风险非常大，理论上可以完全分得开数据，如果树足够庞大，每个叶子节点都是一个数据，但这样的树是没有意义的</p>
<h3 id="减枝策略"><a href="#减枝策略" class="headerlink" title="减枝策略"></a>剪枝策略</h3><ol>
<li><p>预剪枝</p>
<p> 边建立决策树，边进行剪枝的操作（更实用）</p>
<p> 比如我们可以限制特征的数量（控制树的深度）、限制叶子节点个数、叶子节点样本数、信息增益量</p>
</li>
<li><p>后剪枝</p>
<p> 建立完决策树后进行剪枝操作，通过一定的衡量标准</p>
</li>
</ol>
<script type="math/tex; mode=display">
C_{\alpha}(T)=C(T) + \alpha |T_{leaf}|</script><p>叶子节点越多，损失越大</p>
<h2 id="实践"><a href="#实践" class="headerlink" title="实践"></a>实践</h2><p>我们使用两种方式去实践，一个使用sklearn库，一个直接使用python</p>
<h3 id="使用sklean实践决策树"><a href="#使用sklean实践决策树" class="headerlink" title="使用sklean实践决策树"></a>使用sklearn实践决策树</h3><p>sklearn 参数详解</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre></td><td class="code"><pre><span class="line">1. criterion gini or entropy    指定评判标准</span><br><span class="line">2. splitter best or random 前者是在所有特征中找最好的切分点 后缀是在部分特征中（数据量大的时候）</span><br><span class="line">3. max_features None（所有），log2，sqrt，N特征小于50的时候一般使用所有的</span><br><span class="line">4. max_depth 数据少或者特征少的时候可以不管这个值，如果模型样本量多，特征也多的情况下，可以尝试限制一下</span><br><span class="line">5. min_samples_split 如果某节点的样本数少于min_samples_split，则不会继续再尝试选择最优特征来进行划分，如果样本量不大，不需要管这个值，如果样本量数量级非常大，则推荐增大这个值</span><br><span class="line">6. min_samples_leaf 这个值限制了叶子节点最少的样本数，如果某叶子节点数目小于样本数，则会和兄弟节点一起被剪枝，如果样本量不大，不需要管这个值，大些如10W可以尝试下5</span><br><span class="line">7. min_weight_fraction_leaf 这个值限制了叶子节点所有样本权重和的最小值，如果小于这个值，则会和兄弟节点一起被剪枝默认是0，就是不考虑权重问题。一般来说，如果我们有较多样本有缺失值，或者分类树样本的分布类别偏差很大，就会引入样本权重。这时我们就要注意这个值了。</span><br><span class="line">8. max_leaf_nodes 通过限制最大叶子节点数，可以防止过拟合，默认是&quot;None&quot;，即不限制最大的叶子节点数。如果加了限制，算法会建立在最大叶子节点数内最优的决策树。如果特征不多，可以考虑这个值，但是如果特征分成多的话，可以加以限制具体的值可以通过检查验证得到</span><br><span class="line">9. class_weight 指定样本各类别的权重，主要为了防止训练集某些类别的样本过多导致训练的决策树过于偏向这些类别，这里可以自己指定各个样本的权重如果使用&quot;balanced&quot;，则算法会自己计算权重，样本量少的类别所对应的样本权重会高</span><br><span class="line">10. min_impurity_split 这个值限制了决策树的增长，如果某节点的不纯度（基尼系数、信息增益、均方差，绝对差）小于这个阈值则该节点不再生成子节点。即为叶子节点</span><br><span class="line">11. n_estimators 要建立数的个数</span><br></pre></td></tr></table></figure>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment"># brew install graphviz</span></span><br><span class="line">%matplotlib inline</span><br><span class="line"><span class="keyword">import</span> matplotlib.pyplot <span class="keyword">as</span> plt</span><br><span class="line"><span class="keyword">import</span> pandas <span class="keyword">as</span> pd</span><br><span class="line"><span class="keyword">from</span> sklearn.datasets.california_housing <span class="keyword">import</span> fetch_california_housing</span><br><span class="line">housing = fetch_california_housing()    <span class="comment"># 使用sklean内置房屋数据集</span></span><br><span class="line">housing.data.shape  <span class="comment"># 数据部分在data里，可以看下数据的大小规格</span></span><br><span class="line"></span><br><span class="line"><span class="keyword">from</span> 
sklearn <span class="keyword">import</span> tree</span><br><span class="line">dtr = tree.DecisionTreeRegressor(max_depth=<span class="number">2</span>)</span><br><span class="line"><span class="comment"># 分别传入x与y的数据</span></span><br><span class="line">dtr.fit(housing.data[:, [<span class="number">6</span>,<span class="number">7</span>]], housing.target)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 构造可视化数据</span></span><br><span class="line">dot_data = tree.export_graphviz(</span><br><span class="line">    dtr,</span><br><span class="line">    out_file = <span class="keyword">None</span>,</span><br><span class="line">    feature_names = housing.feature_names[<span class="number">6</span>:<span class="number">8</span>],</span><br><span class="line">    filled = <span class="keyword">True</span>,</span><br><span class="line">    impurity = <span class="keyword">False</span>,</span><br><span class="line">    rounded = <span class="keyword">True</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 构造图形</span></span><br><span class="line"><span class="comment"># php install pydotplus</span></span><br><span class="line"><span class="keyword">import</span> pydotplus</span><br><span class="line">graph = pydotplus.graph_from_dot_data(dot_data)</span><br><span class="line">graph.get_nodes()[<span class="number">7</span>].set_fillcolor(<span class="string">"#FFF2DD"</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 展示</span></span><br><span class="line"><span class="keyword">from</span> IPython.display <span class="keyword">import</span> Image</span><br><span class="line">Image(graph.create_png())</span><br><span class="line"></span><br><span class="line"><span class="comment"># 保存</span></span><br><span class="line">graph.write_png(<span class="string">"dtr_white_background.png"</span>)</span><br></pre></td></tr></table></figure>
<p>使用sklearn评估这些参数选择的效果<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> sklearn.model_selection <span class="keyword">import</span> train_test_split</span><br><span class="line">data_train, data_test, target_train, target_test = train_test_split(housing.data, housing.target, test_size=<span class="number">0.1</span>, random_state=<span class="number">0</span>)</span><br><span class="line">dtr = tree.DecisionTreeRegressor(random_state=<span class="number">0</span>)</span><br><span class="line">dtr.fit(data_train, target_train)</span><br><span class="line">dtr.score(data_test, target_test)</span><br></pre></td></tr></table></figure></p>
<p>决策树模型中有非常多的参数，我们往往需要尝试各种不同的参数，然后看看效果如何，就是拿各种参数组合来泡一下；非常庆幸的是sklean帮我们想到了这一点，它提供了grid_search工具帮我们做这一点<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> sklearn.grid_search <span class="keyword">import</span> GridSearchCV</span><br><span class="line"><span class="keyword">from</span> sklearn.ensemble <span class="keyword">import</span> RandomForestRegressor</span><br><span class="line">tree_param_grid = &#123;<span class="string">'min_samples_split'</span>:list((<span class="number">3</span>,<span class="number">6</span>,<span class="number">9</span>)), <span class="string">'n_estimators'</span>:list((<span class="number">10</span>,<span class="number">50</span>,<span class="number">100</span>))&#125;</span><br><span class="line"><span class="comment"># 第一个参数指定算法实例，第二个参数指定参数字典，第三个参数表示训练集的拆分个数，用于交叉验证</span></span><br><span class="line">grid = GridSearchCV(RandomForestRegressor(),param_grid=tree_param_grid, cv=<span class="number">5</span>)</span><br><span class="line">grid.fit(data_train, target_train)</span><br><span class="line">grid.grid_scores_, grid.best_params_, grid.best_score_</span><br></pre></td></tr></table></figure></p>
<h3 id="自实现一个决策树算法"><a href="#自实现一个决策树算法" class="headerlink" title="自实现一个决策树算法"></a>自实现一个决策树算法</h3><p>sklearn帮我们做了很多工作，使用起来也是非常方便，但是这一小节我们还是自己实现一个决策树算法，以便更深入的了解</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment"># @todo</span></span><br></pre></td></tr></table></figure>
          
        
      
    </div>
    
    
    

    

    

    

    <footer class="post-footer">
      

      

      

      
      
        <div class="post-eof"></div>
      
    </footer>
  </div>
  
  
  
  </article>


    
      

  

  
  
  

  <article class="post post-type-normal" itemscope itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="https://wangxiaochuang.github.io/blog/2018/10/18/1.html">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="name" content="jackstraw">
      <meta itemprop="description" content="">
      <meta itemprop="image" content="/blog/images/avatar.jpg">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="稻草人的编程之路">
    </span>

    
      <header class="post-header">

        
        
          <h1 class="post-title" itemprop="name headline">
                
                <a class="post-title-link" href="/blog/2018/10/18/1.html" itemprop="url">机器学习之其他细节</a></h1>
        

        <div class="post-meta">
          <span class="post-time">
            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              
              <time title="创建于" itemprop="dateCreated datePublished" datetime="2018-10-18T22:44:13+08:00">
                2018-10-18
              </time>
            

            

            
          </span>

          
            <span class="post-category" >
            
              <span class="post-meta-divider">|</span>
            
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              
                <span class="post-meta-item-text">分类于</span>
              
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/" itemprop="url" rel="index">
                    <span itemprop="name">机器学习</span>
                  </a>
                </span>

                
                
                  ， 
                
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/其他细节/" itemprop="url" rel="index">
                    <span itemprop="name">其他细节</span>
                  </a>
                </span>

                
                
              
            </span>
          

          
            
              <span class="post-comments-count">
                <span class="post-meta-divider">|</span>
                <span class="post-meta-item-icon">
                  <i class="fa fa-comment-o"></i>
                </span>
                <a href="/blog/2018/10/18/1.html#comments" itemprop="discussionUrl">
                  <span class="post-comments-count valine-comment-count" data-xid="/blog/2018/10/18/1.html" itemprop="commentCount"></span>
                </a>
              </span>
            
          

          
          
             <span id="/blog/2018/10/18/1.html" class="leancloud_visitors" data-flag-title="机器学习之其他细节">
               <span class="post-meta-divider">|</span>
               <span class="post-meta-item-icon">
                 <i class="fa fa-eye"></i>
               </span>
               
                 <span class="post-meta-item-text">阅读次数&#58;</span>
               
                 <span class="leancloud-visitors-count"></span>
             </span>
          

          

          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        
          
            <h1 id="机器学习之其他细节"><a href="#机器学习之其他细节" class="headerlink" title="机器学习之其他细节"></a>机器学习之其他细节</h1><p>这一篇文章将以一个信用卡异常检测为例，介绍机器学习中一些其他非常重要的点，如样本不均、交叉验证、正则化惩罚、混淆矩阵等问题</p>
<h2 id="熟悉数据"><a href="#熟悉数据" class="headerlink" title="熟悉数据"></a>熟悉数据</h2><p><a href="https://pan.baidu.com/s/1UXoZkgjBF7Ye7a2-pEjz3A">信用卡数据</a> 密码:uhst</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> pandas <span class="keyword">as</span> pd</span><br><span class="line"><span class="keyword">import</span> matplotlib.pyplot <span class="keyword">as</span> plt</span><br><span class="line"><span class="keyword">import</span> numpy <span class="keyword">as</span> np</span><br><span class="line">%matplotlib inline</span><br><span class="line"></span><br><span class="line">data = pd.read_csv(<span class="string">"creditcard.csv"</span>)</span><br><span class="line">print(data.head())</span><br><span class="line">print(data.info())</span><br></pre></td></tr></table></figure>
<p>总共有31个指标，284807条样本，其中v1到v28的指标都是标准化后的数据</p>
<ol>
<li>Time指标对信用卡检测没用，直接去掉</li>
<li>Amount值没有做过处理，需要单独标准化</li>
<li>Class指标表示当前样本是否是一个异常样本</li>
<li>我们的任务是通过逻辑回归建立一个分类的任务，检测信用卡是否异常</li>
<li>看一下样本分布情况（异常样本与正常样本分布情况）</li>
</ol>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">count_classes = pd.value_counts(data[<span class="string">'Class'</span>], sort=<span class="keyword">True</span>).sort_index()</span><br><span class="line">count_classes.plot(kind=<span class="string">'bar'</span>)</span><br><span class="line">plt.title(<span class="string">"Fraud class histogram"</span>)</span><br><span class="line">plt.xlabel(<span class="string">"class"</span>)</span><br><span class="line">plt.ylabel(<span class="string">"Frequency"</span>)</span><br></pre></td></tr></table></figure>
<p>可以看到绝大部分的样本都是正样本，极少数是负样本，在实际生活中，这也是非常常见的一种情况，毕竟坏人还是极少数的嘛</p>
<ol>
<li>将Amount数据进行标准化（让所有的指标具有相同的分布范围，消除因不同类型数据的分布差异让计算机有“偏见”）</li>
</ol>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> sklearn.preprocessing <span class="keyword">import</span> StandardScaler</span><br><span class="line"><span class="comment"># 注意这里的reshape(-1, 1)，-1，1表示重组后的行自动计算，列设置为1，比如：[3,2].reshape(-1, 3)，后会变成 [2, 3]</span></span><br><span class="line">data[<span class="string">'normAmount'</span>] = StandardScaler().fit_transform(data[<span class="string">'Amount'</span>].values.reshape(<span class="number">-1</span>, <span class="number">1</span>))</span><br><span class="line"><span class="comment"># normAmount为标准化后的Amount列，Time与Amount指标就可以删除了</span></span><br><span class="line">data = data.drop([<span class="string">'Time'</span>, <span class="string">'Amount'</span>], axis=<span class="number">1</span>)</span><br><span class="line">data.head()</span><br></pre></td></tr></table></figure>
<h2 id="问题"><a href="#问题" class="headerlink" title="问题"></a>问题</h2><h3 id="样本不均问题"><a href="#样本不均问题" class="headerlink" title="样本不均问题"></a>样本不均问题</h3><p>我们已经熟悉了数据，看到了样本极度不均，正样本（Class为0）：284315；负样本（Class为1）：492</p>
<p>对于这种情况一般有两种方式：下采样与过采样</p>
<p>下采样：让两个样本同样少</p>
<p>过采样：让两个样本同样多，将少的样本生成更多</p>
<p>这里我们先使用下采样的方式取数据：</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br></pre></td><td class="code"><pre><span class="line">X = data.iloc[:, data.columns != <span class="string">'Class'</span>]</span><br><span class="line">y = data.iloc[:, data.columns == <span class="string">'Class'</span>]</span><br><span class="line"></span><br><span class="line">number_records_fraud = len(data[data.Class == <span class="number">1</span>])</span><br><span class="line">fraud_indices = data[data.Class == <span class="number">1</span>].index</span><br><span class="line">normal_indices = data[data.Class == <span class="number">0</span>].index</span><br><span class="line"></span><br><span class="line">random_normal_indices = np.random.choice(normal_indices, number_records_fraud, replace=<span class="keyword">False</span>)</span><br><span class="line">under_sample_indices = np.concatenate([fraud_indices, random_normal_indices])</span><br><span class="line">under_sample_data = data.iloc[under_sample_indices, :]</span><br><span class="line">X_undersample = under_sample_data.iloc[:, under_sample_data.columns != <span class="string">'Class'</span>]</span><br><span class="line">y_undersample = under_sample_data.iloc[:, under_sample_data.columns == <span class="string">'Class'</span>]</span><br><span class="line">print(<span class="string">"Percentage of normal transactions: "</span>, len(under_sample_data[under_sample_data.Class == <span 
class="number">0</span>]) / len(under_sample_data))</span><br><span class="line">print(<span class="string">"Percentage of fraud transactions: "</span>, len(under_sample_data[under_sample_data.Class == <span class="number">1</span>]) / len(under_sample_data))</span><br><span class="line">print(<span class="string">"Total number of transactions in resampled data: "</span>, len(under_sample_data))</span><br><span class="line"><span class="comment"># Percentage of normal transactions:  0.5</span></span><br><span class="line"><span class="comment"># Percentage of fraud transactions:  0.5</span></span><br><span class="line"><span class="comment"># Total number of transactions in resampled data:  984</span></span><br></pre></td></tr></table></figure>
<p>我们从正样本中随机选择了和负样本同样数量的样本，并组合在一起为一个下采样动作</p>
<h3 id="交叉验证"><a href="#交叉验证" class="headerlink" title="交叉验证"></a>交叉验证</h3><p>当我们在做一个机器学习任务的时候都是将原始数据切分为训练集与测试集，比如训练集（80%），测试集（20%），其中的20%测试的测试集非常宝贵，用于最后的模型评估，我们这里要说的交叉验证是不涉及测试集的</p>
<p>另一方面，当我们在进行模型训练的时候，我们需要一步一步的选择更好的参数去拟合我们的数据，这需要一个评判标准，这个时候我们会再次将我们的训练数据切分为几个部分（为了更好说明，假如就三个部分），我们会分别做三次训练，最后取一个参数的均值。这三次分别为：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">1. a + b &lt;==&gt; c</span><br><span class="line">2. a + c &lt;==&gt; b</span><br><span class="line">3. b + c &lt;==&gt; a</span><br></pre></td></tr></table></figure>
<p>这里的a b c 分别各做了一次验证集，这么一个过程称为交叉验证，下面我们使用sklearn工具包帮我们完成切分</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> sklearn.cross_validation <span class="keyword">import</span> train_test_split</span><br><span class="line"><span class="comment"># 既然都说了使用下采样了，其余数据就应该扔掉了，怎么还要拿来用呢？</span></span><br><span class="line"><span class="comment"># 主要原因是下采样的数据比较少，分布规则可能不惧代表性，因此会拿原始数据集来进行测试</span></span><br><span class="line">X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=<span class="number">0.3</span>, random_state=<span class="number">0</span>)</span><br><span class="line">print(<span class="string">"Number transactions train dataset: "</span>, len(X_train))</span><br><span class="line">print(<span class="string">"Number transactions test dataset: "</span>, len(X_test))</span><br><span class="line">print(<span class="string">"Total number of transactions: "</span>, len(X_train) + len(X_test))</span><br><span class="line">X_train_undersample, X_test_undersample, y_train_undersample, y_test_undersample = train_test_split(X_undersample, y_undersample, test_size=<span class="number">0.3</span>, random_state=<span class="number">0</span>)</span><br><span class="line">print(<span class="string">""</span>)</span><br><span class="line">print(<span class="string">"Number transactions train dataset: "</span>, len(X_train_undersample))</span><br><span class="line">print(<span class="string">"Number transacyions test dataset: "</span>, len(X_test_undersample))</span><br><span class="line">print(<span class="string">"Total number of transactions: "</span>, 
len(X_train_undersample) + len(X_test_undersample))</span><br></pre></td></tr></table></figure>
<h3 id="模型评估方法"><a href="#模型评估方法" class="headerlink" title="模型评估方法"></a>模型评估方法</h3><p>这里我们讨论一下，我们的模型评估标准，怎么样的结果算是比较满意的结果呢？</p>
<p>这里我们举一个例子：</p>
<blockquote>
<p>假设有1000个病人（990个正样本，10个负样本）的样本信息，我们要建立模型预测病人是否得了癌症，我们使用精度来判断模型好坏。<br>假设我们的模型把全部样本都预测为正样本（没有得癌症），那么这个时候我们的模型的精度是多少呢？990/1000=99%<br>但是这个时候，我们的模型有用吗？一点用都没有嘛，因此这种评估方法是非常有问题的</p>
</blockquote>
<p>特别是针对正负样本严重不均的情况下，这种精度的评估方法非常烂，这个时候一般使用一个叫做<code>recall(召回率)</code>的评估方法，怎么理解呢？</p>
<blockquote>
<p>还是以上面检测癌症的例子，我们现在不检测正样本的精度，而是检测负样本的精度，即求10个癌症病人中，我们检测出来了几个癌症病人。</p>
</blockquote>
<p>这里给出Recall的公式：<code>Recall = TP / (TP + FN)</code>，这里有的解释一下了</p>
<h4 id="关于Recall的计算的几个概念"><a href="#关于Recall的计算的几个概念" class="headerlink" title="关于Recall的计算的几个概念"></a>关于Recall的计算的几个概念</h4><blockquote>
<p>假如某个班级有男生80人，女生20人，共计100人，目标是找出所有女生<br>现在某人挑选出50个人，其中20个人是女生，另外还错误的把30个男生也当做女生选了出来</p>
</blockquote>
<div class="table-container">
<table>
<thead>
<tr>
<th style="text-align:center"></th>
<th>相关（Relevant），正类</th>
<th>无关（NonRelevant），负类</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align:center">被检索到<br>（Retrieved）</td>
<td>true positives（TP 正类判定为正类，例子中就是正确的判定为女生）</td>
<td>false positives（FP 负类判定为正类，例子中就是将男生判断为女生）</td>
</tr>
<tr>
<td style="text-align:center">未被检索到<br>（Not Retrieved）</td>
<td>false negatives（FN 正类判定为负类，“去真”，例子中就是，将女生判定为男生）</td>
<td>true negatives（TN 负类判定为负类，例子中就是把男生判定为男生）</td>
</tr>
</tbody>
</table>
</div>
<p>通过上面这张表，我们可以非常容易得到Recall值：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">TP = 20</span><br><span class="line">FP = 30</span><br><span class="line">FN = 0</span><br><span class="line">TN = 50</span><br><span class="line">Recall = TP / (TP + FN) = 20 / (20 + 0) = 1</span><br></pre></td></tr></table></figure>
<p>对于实现来说，sklearn已经帮我们做好了</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line"></span><br><span class="line"></span><br></pre></td></tr></table></figure>
<h3 id="正则化惩罚"><a href="#正则化惩罚" class="headerlink" title="正则化惩罚"></a>正则化惩罚</h3><p>假设现在对于一个任务，我们有两个模型，A和B。</p>
<p>A模型的各个参数值变化浮动比较大，B模型的各个参数值变化浮动比较小，但是模型在训练集上的效果是一样的，这个时候我们需要有一种方法将B模型选择出来（为什么要选择参数浮动小的模型：模型更稳定，除了拟合测试数据效果不错外，还要能完美拟合真实数据）</p>
<p>对于这种情况，在机器学习里面，常用两种名为正则化惩罚的方式（L1惩罚项与L2惩罚项）。</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br></pre></td><td class="code"><pre><span class="line"># 权重参数的绝对值</span><br><span class="line">L1: loss + |w|</span><br><span class="line"></span><br><span class="line"># 权重参数的平方</span><br><span class="line">L2: loss + 1/2 x w^2</span><br><span class="line"></span><br><span class="line"># 还有一个惩罚力度 alpha，表示惩罚力度</span><br><span class="line"></span><br><span class="line"># 如果使用sklean，在使用逻辑回归工具的时候，会让输入惩罚力度与惩罚方法</span><br><span class="line"></span><br><span class="line">from sklearn.linear_model import LogisticRegression</span><br><span class="line">lr = LogisticRegression(C = 0.01, penalty = &apos;l1&apos;)</span><br></pre></td></tr></table></figure>
<p>这里我们结合信用卡欺诈的例子，以recall值为判断标准，比对一下不同的惩罚力度对结果的影响</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">printing_Kfold_scores</span><span class="params">(x_train_data, y_train_data)</span>:</span></span><br><span class="line">    <span class="comment"># 交叉验证的数据切分，这里按y_train_data的长度将训练集切分为5分，进行交叉验证</span></span><br><span class="line">    fold = KFold(len(y_train_data), <span class="number">5</span>, shuffle=<span class="keyword">False</span>)</span><br><span class="line">    c_param_range = [<span 
class="number">0.01</span>, <span class="number">0.1</span>, <span class="number">1</span>, <span class="number">10</span>, <span class="number">100</span>]</span><br><span class="line">    results_table = pd.DataFrame(index=range(len(c_param_range), <span class="number">2</span>), columns=[<span class="string">'C_parameter'</span>, <span class="string">'Mean recall score'</span>])</span><br><span class="line">    results_table[<span class="string">'C_parameter'</span>] = c_param_range</span><br><span class="line">    </span><br><span class="line">    j = <span class="number">0</span></span><br><span class="line">    <span class="keyword">for</span> c_param <span class="keyword">in</span> c_param_range:</span><br><span class="line">        print(<span class="string">'-----------------------------'</span>)</span><br><span class="line">        print(<span class="string">'C parameter: '</span>, c_param)</span><br><span class="line">        print(<span class="string">'-----------------------------'</span>)</span><br><span class="line">        print(<span class="string">''</span>)</span><br><span class="line">        recall_accs = []</span><br><span class="line">        <span class="keyword">for</span> iteration, indices <span class="keyword">in</span> enumerate(fold, start=<span class="number">1</span>):</span><br><span class="line">            <span class="comment"># 遍历fold，indices[0]为交叉验证的训练集，indices[1]为交叉验证的测试集</span></span><br><span class="line">            lr = LogisticRegression(C = c_param, penalty = <span class="string">'l1'</span>)</span><br><span class="line">            lr.fit(x_train_data.iloc[indices[<span class="number">0</span>], :], y_train_data.iloc[indices[<span class="number">0</span>], :].values.ravel())</span><br><span class="line">            <span class="comment"># 拿交叉验证的测试集去预测结果</span></span><br><span class="line">            y_pred_undersample = lr.predict(x_train_data.iloc[indices[<span class="number">1</span>], :].values)</span><br><span 
class="line">            <span class="comment"># 当次交叉验证的recall值</span></span><br><span class="line">            recall_acc = recall_score(y_train_data.iloc[indices[<span class="number">1</span>],:].values, y_pred_undersample)</span><br><span class="line">            recall_accs.append(recall_acc)</span><br><span class="line">            print(<span class="string">'Iteration '</span>, iteration, <span class="string">': recall score = '</span>, recall_acc)</span><br><span class="line">        </span><br><span class="line">        results_table.loc[j, <span class="string">'Mean recall score'</span>] = np.mean(recall_accs)</span><br><span class="line">        j += <span class="number">1</span></span><br><span class="line">        print(<span class="string">''</span>)</span><br><span class="line">        print(<span class="string">'Mean recall score '</span>, np.mean(recall_accs))</span><br><span class="line">        print(<span class="string">''</span>)</span><br><span class="line">        </span><br><span class="line">    <span class="comment"># 这里需要将这个字段的值类型转为float64，在程序运行过程莫名其妙就转成了object</span></span><br><span class="line">    results_table[<span class="string">'Mean recall score'</span>] = results_table[<span class="string">'Mean recall score'</span>].astype(<span class="string">'float64'</span>)</span><br><span class="line">    </span><br><span class="line">    <span class="comment"># 求效果最好的惩罚力度值</span></span><br><span class="line">    best_c = results_table.loc[results_table[<span class="string">'Mean recall score'</span>].idxmax()][<span class="string">'C_parameter'</span>]</span><br><span class="line">    print(<span class="string">'******************************************************'</span>)</span><br><span class="line">    print(<span class="string">'Best model to choose from cross validation is with C parameter = '</span>, best_c)</span><br><span class="line">    print(<span 
class="string">'******************************************************'</span>)</span><br><span class="line">    <span class="keyword">return</span> best_c</span><br><span class="line"></span><br><span class="line">printing_Kfold_scores(X_train_undersample, y_train_undersample)</span><br><span class="line"><span class="comment"># 达到结果，比较好的惩罚力度是0.01</span></span><br><span class="line"><span class="comment"># 通过结果也能看到在交叉验证的过程中某些结果的差异还是蛮大的，可能差10个百分点，所以使用交叉验证求平均的方式还是非常有用的</span></span><br></pre></td></tr></table></figure>
<p>为了更好的看到下采样的优势，我们对比一下，直接用原始数据计算recall值：</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">printing_Kfold_scores(X_train, y_train)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 两种方式的结果可以看到，直接使用原始数据进行计算，recall值都在0.6左右，而使用下采样能够达到0.9</span></span><br></pre></td></tr></table></figure>
<h3 id="混淆矩阵"><a href="#混淆矩阵" class="headerlink" title="混淆矩阵"></a>混淆矩阵</h3><p>混淆矩阵就是展示与Recall相关的几个指标的图形，这里我们拿上一段计算出来的best_c，来看一下我们的混淆矩阵长什么样</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">plot_confusion_matrix</span><span class="params">(cm, classes, title=<span class="string">'Confusion matrix'</span>, cmap=plt.cm.Blues)</span>:</span></span><br><span class="line">    plt.imshow(cm, interpolation=<span class="string">'nearest'</span>, cmap=cmap)</span><br><span class="line">    plt.title(title)</span><br><span class="line">    plt.colorbar()</span><br><span class="line">    tick_marks = np.arange(len(classes))</span><br><span class="line">    plt.xticks(tick_marks, classes, rotation=<span class="number">0</span>)</span><br><span class="line">    plt.yticks(tick_marks, classes)</span><br><span class="line">    thresh = cm.max() / <span class="number">2</span></span><br><span class="line">    <span class="keyword">for</span> i, j <span class="keyword">in</span> itertools.product(range(cm.shape[<span class="number">0</span>]), range(cm.shape[<span class="number">1</span>])):</span><br><span class="line">        plt.text(j, i, cm[i, j], 
horizontalalignment=<span class="string">"center"</span>, color=<span class="string">"white"</span> <span class="keyword">if</span> cm[i, j] &gt; thresh <span class="keyword">else</span> <span class="string">"black"</span>)</span><br><span class="line">    plt.tight_layout()</span><br><span class="line">    plt.ylabel(<span class="string">'True label'</span>)</span><br><span class="line">    plt.xlabel(<span class="string">'Predicted label'</span>)</span><br><span class="line"></span><br><span class="line"><span class="keyword">import</span> itertools</span><br><span class="line">lr = LogisticRegression(C=best_c, penalty=<span class="string">'l1'</span>)</span><br><span class="line">lr.fit(X_train_undersample, y_train_undersample.values.ravel())</span><br><span class="line">y_pred_undersample = lr.predict(X_test_undersample.values)</span><br><span class="line">cnf_matrix = confusion_matrix(y_test_undersample, y_pred_undersample)</span><br><span class="line">np.set_printoptions(precision=<span class="number">2</span>)</span><br><span class="line"></span><br><span class="line">print(<span class="string">"Recall metrix in the testing dataset: "</span>, cnf_matrix[<span class="number">1</span>,<span class="number">1</span>]/(cnf_matrix[<span class="number">1</span>,<span class="number">0</span>]+cnf_matrix[<span class="number">1</span>,<span class="number">1</span>]))</span><br><span class="line">class_names = [<span class="number">0</span>,<span class="number">1</span>]</span><br><span class="line">plt.figure()</span><br><span class="line">plot_confusion_matrix(cnf_matrix, classes=class_names, title=<span class="string">'Confusion matrix'</span>)</span><br><span class="line">plt.show()</span><br></pre></td></tr></table></figure>
<p>得到如下结果：</p>
<img src="/blog/2018/10/18/1/matrix.png" title="在下采样测试样本下混淆矩阵">
<p>其中y轴表示样本真实的情况，x轴表示模型预测的情况</p>
<p>通过混淆矩阵我们非常容易计算出recall值，同时精度值也非常容易求出来</p>
<p>这个矩阵我们是在下采样数据集中测试的，但是为了更好的评估模型的好坏，我们需要在所有数据的测试集上去测试，我们看下结果吧（只需要改一下预测的数据即可）：</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> itertools</span><br><span class="line">lr = LogisticRegression(C=best_c, penalty=<span class="string">'l1'</span>)</span><br><span class="line">lr.fit(X_train_undersample, y_train_undersample.values.ravel())</span><br><span class="line">y_pred = lr.predict(X_test.values)</span><br><span class="line">cnf_matrix = confusion_matrix(y_test, y_pred)</span><br><span class="line">np.set_printoptions(precision=<span class="number">2</span>)</span><br><span class="line"></span><br><span class="line">print(<span class="string">"Recall metrix in the testing dataset: "</span>, cnf_matrix[<span class="number">1</span>,<span class="number">1</span>]/(cnf_matrix[<span class="number">1</span>,<span class="number">0</span>]+cnf_matrix[<span class="number">1</span>,<span class="number">1</span>]))</span><br><span class="line">class_names = [<span class="number">0</span>,<span class="number">1</span>]</span><br><span class="line">plt.figure()</span><br><span class="line">plot_confusion_matrix(cnf_matrix, classes=class_names, title=<span class="string">'Confusion matrix'</span>)</span><br><span class="line">plt.show()</span><br></pre></td></tr></table></figure>
<img src="/blog/2018/10/18/1/matrix-new.png" title="在原始测试样本下的混淆矩阵">
<p>你能看出有什么区别么？</p>
<p>对于计算Recall（Recall = TP / (TP + FN)）值来说，变化不大，但是我们的误杀率特别高，为了检测这135个异常样本，结果多误杀了8662个样本</p>
<p>通过对比我们能够看到，对于下采样获取数据集来说，虽然我们能够满足Recall值的要求，但是误杀就太多太多了</p>
<h3 id="不同阈值对recall值的影响"><a href="#不同阈值对recall值的影响" class="headerlink" title="不同阈值对recall值的影响"></a>不同阈值对recall值的影响</h3><p>还记得之前介绍的，逻辑回归是将预测的结果值转换为概率么？默认情况下我们将概率大于0.5的认为是异常信用卡，那有没有考虑这个0.5可以变化一下呢？这里我们还是对比一下不同的阈值对recall值的影响</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre></td><td class="code"><pre><span class="line">lr = LogisticRegression(C=<span class="number">0.01</span>, penalty=<span class="string">'l1'</span>)</span><br><span class="line">lr.fit(X_train_undersample, y_train_undersample.values.ravel())</span><br><span class="line"><span class="comment"># 预测出来的是概率值</span></span><br><span class="line">y_pred_undersample_proba = lr.predict_proba(X_test_undersample.values)</span><br><span class="line">thresholds = [<span class="number">0.1</span>,<span class="number">0.2</span>,<span class="number">0.3</span>,<span class="number">0.4</span>,<span class="number">0.5</span>,<span class="number">0.6</span>,<span class="number">0.7</span>,<span class="number">0.8</span>,<span class="number">0.9</span>]</span><br><span class="line">plt.figure(figsize=(<span class="number">10</span>,<span class="number">10</span>))</span><br><span class="line">j = <span class="number">1</span></span><br><span class="line"><span class="keyword">for</span> i <span class="keyword">in</span> thresholds:</span><br><span class="line">    y_test_predictions_hight_recall = y_pred_undersample_proba[:,<span class="number">1</span>] &gt; i</span><br><span class="line">    plt.subplot(<span class="number">3</span>,<span class="number">3</span>,j)</span><br><span class="line">    j += <span class="number">1</span></span><br><span class="line">    cnf_matrix = 
confusion_matrix(y_test_undersample, y_test_predictions_hight_recall)</span><br><span class="line">    np.set_printoptions(precision=<span class="number">2</span>)</span><br><span class="line">    print(<span class="string">"Recall metric in the testing dataset: "</span>, cnf_matrix[<span class="number">1</span>,<span class="number">1</span>]/(cnf_matrix[<span class="number">1</span>,<span class="number">0</span>] + cnf_matrix[<span class="number">1</span>,<span class="number">1</span>]))</span><br><span class="line">    </span><br><span class="line">    class_names = [<span class="number">0</span>,<span class="number">1</span>]</span><br><span class="line">    plot_confusion_matrix(cnf_matrix, classes=class_names, title=<span class="string">'Threshold &gt;= %s'</span> % i)</span><br></pre></td></tr></table></figure>
<p>可以得到结果：</p>
<img src="/blog/2018/10/18/1/matrix-threshold.png" title="不同阈值下的的混淆矩阵">
<p>可以得出结论：</p>
<ol>
<li>随着阈值的上升，recall值逐渐减少，但是误杀也在减少</li>
<li>阈值过小或过大，精度会比较小</li>
</ol>
<p>在实际工作中，我们可能是有一些指标的，比如误杀率不能超过多少，精度要大于多少等，我们就可以根据这些指标来选择合适的参数</p>
<h3 id="SMOTE样本生成策略"><a href="#SMOTE样本生成策略" class="headerlink" title="SMOTE样本生成策略"></a>SMOTE样本生成策略</h3><p>前面我们都是基于下采样来处理样本不平衡问题，这里我们讨论过采样方法，这就不得不提到SMOTE样本生成策略了。</p>
<p>过采样的意思就是将少数类样本进行采样，扩展其样本数量。</p>
<p>SMOTE算法解释：</p>
<blockquote>
<ol>
<li>以少类样本为基础，遍历每一个少类样本，假设有n个少类样本</li>
<li>分别找到离这n个少类样本，欧式距离最近的m个样本</li>
<li>再依次遍历这m个样本，在两个点中间随机选择k个点作为新增的点</li>
</ol>
</blockquote>
<p>可以参考这篇文章：<a href="https://www.jianshu.com/p/ecbc924860af">SMOTE</a></p>
<p>在python中有一个叫做 <code>imblearn</code>的库，通过 <code>pip install imblearn</code> 安装即可</p>
<p>这里我们从头开始做一遍信用卡欺诈检测任务，使用过采样方式：</p>
<ol>
<li><p>导入一些必要的库</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> pandas <span class="keyword">as</span> pd</span><br><span class="line"><span class="keyword">import</span> numpy <span class="keyword">as</span> np</span><br><span class="line"><span class="keyword">import</span> matplotlib.pyplot <span class="keyword">as</span> plt</span><br><span class="line"><span class="keyword">from</span> imblearn.over_sampling <span class="keyword">import</span> SMOTE</span><br><span class="line"><span class="keyword">from</span> sklearn.ensemble <span class="keyword">import</span> RandomForestClassifier</span><br><span class="line"><span class="keyword">from</span> sklearn.linear_model <span class="keyword">import</span> LogisticRegression</span><br><span class="line"><span class="keyword">from</span> sklearn.metrics <span class="keyword">import</span> confusion_matrix</span><br><span class="line"><span class="keyword">from</span> sklearn.model_selection <span class="keyword">import</span> train_test_split</span><br></pre></td></tr></table></figure>
</li>
<li><p>拆分数据，并进行过采样补齐</p>
</li>
</ol>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br></pre></td><td class="code"><pre><span class="line">credit_cards = pd.read_csv(<span class="string">'./creditcard.csv'</span>)</span><br><span class="line">columns = credit_cards.columns</span><br><span class="line">features_columns = columns.delete(len(columns)<span class="number">-1</span>)</span><br><span class="line">features = credit_cards[features_columns]</span><br><span class="line">labels = credit_cards[<span class="string">'Class'</span>]</span><br><span class="line"></span><br><span class="line">features_train, features_test, labels_train, labels_test = train_test_split(features,</span><br><span class="line">                                                                           labels,</span><br><span class="line">                                                                           test_size=<span class="number">0.2</span>,</span><br><span class="line">                                                                           random_state=<span class="number">0</span>)</span><br><span class="line">oversampler = SMOTE(random_state=<span class="number">0</span>)</span><br><span class="line"><span class="comment"># 特别注意：这里采样的的训练集数据，测试集数据时不能动的</span></span><br><span class="line">os_features, os_labels = oversampler.fit_sample(features_train, labels_train)</span><br><span class="line">print(<span class="string">"the number of class = 1, after oversample: %d"</span> % len(os_labels[os_labels==<span 
class="number">1</span>]))</span><br></pre></td></tr></table></figure>
<ol>
<li>我们再看针对过采样的方式，不同的正则化惩罚力度对recall的影响</li>
</ol>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">os_features = pd.DataFrame(os_features)</span><br><span class="line">os_labels = pd.DataFrame(os_labels)</span><br><span class="line">best_c = printing_Kfold_scores(os_features, os_labels)</span><br></pre></td></tr></table></figure>
<p>通过结果可以看到recall值比下采样的稍小一些，但别急，看其他指标</p>
<ol>
<li>我们再看看混淆矩阵的结果如何</li>
</ol>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> itertools</span><br><span class="line">lr = LogisticRegression(C=best_c, penalty=<span class="string">'l1'</span>)</span><br><span class="line">lr.fit(os_features, os_labels.values.ravel())</span><br><span class="line">y_pred = lr.predict(features_test.values)</span><br><span class="line"></span><br><span class="line">cnf_matrix = confusion_matrix(labels_test, y_pred)</span><br><span class="line">np.set_printoptions(precision=<span class="number">2</span>)</span><br><span class="line"></span><br><span class="line">print(<span class="string">"Recall metric in the testing dataset: "</span>, cnf_matrix[<span class="number">1</span>,<span class="number">1</span>]/(cnf_matrix[<span class="number">1</span>,<span class="number">0</span>]+cnf_matrix[<span class="number">1</span>,<span class="number">1</span>]))</span><br><span class="line">class_names = [<span class="number">0</span>, <span class="number">1</span>]</span><br><span class="line">plt.figure()</span><br><span class="line">plot_confusion_matrix(cnf_matrix, classes=class_names, title=<span class="string">'Confusion matrix'</span>)</span><br></pre></td></tr></table></figure>
<p>看看结果：</p>
<img src="/blog/2018/10/18/1/matrix-os.png" alt="过采样下的混淆矩阵" title="过采样下的混淆矩阵">
<p>我们需要和上面的下采样的结果进行对比，可以看出recall低了一些，但是误杀率减少了很多，下采样误杀8000多个，现在只有500多个</p>
<h2 id="总结"><a href="#总结" class="headerlink" title="总结"></a>总结</h2><p>本文通过实验测试了针对模型的不同参数，可能对结果产生非常大的影响。我们需要根据实际情况选择合适的参数</p>
<p>这里是以逻辑回归为例，但是很多点都是机器学习中通用的思路</p>

          
        
      
    </div>
    
    
    

    

    

    

    <footer class="post-footer">
      

      

      

      
      
        <div class="post-eof"></div>
      
    </footer>
  </div>
  
  
  
  </article>


    
      

  

  
  
  

  <article class="post post-type-normal" itemscope itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="https://wangxiaochuang.github.io/blog/2018/10/17/1.html">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="name" content="jackstraw">
      <meta itemprop="description" content="">
      <meta itemprop="image" content="/blog/images/avatar.jpg">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="稻草人的编程之路">
    </span>

    
      <header class="post-header">

        
        
          <h1 class="post-title" itemprop="name headline">
                
                <a class="post-title-link" href="/blog/2018/10/17/1.html" itemprop="url">机器学习之逻辑回归（代码实现）</a></h1>
        

        <div class="post-meta">
          <span class="post-time">
            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              
              <time title="创建于" itemprop="dateCreated datePublished" datetime="2018-10-17T08:18:05+08:00">
                2018-10-17
              </time>
            

            

            
          </span>

          
            <span class="post-category" >
            
              <span class="post-meta-divider">|</span>
            
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              
                <span class="post-meta-item-text">分类于</span>
              
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/" itemprop="url" rel="index">
                    <span itemprop="name">机器学习</span>
                  </a>
                </span>

                
                
                  ， 
                
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/逻辑回归（代码实现）/" itemprop="url" rel="index">
                    <span itemprop="name">逻辑回归（代码实现）</span>
                  </a>
                </span>

                
                
              
            </span>
          

          
            
              <span class="post-comments-count">
                <span class="post-meta-divider">|</span>
                <span class="post-meta-item-icon">
                  <i class="fa fa-comment-o"></i>
                </span>
                <a href="/blog/2018/10/17/1.html#comments" itemprop="discussionUrl">
                  <span class="post-comments-count valine-comment-count" data-xid="/blog/2018/10/17/1.html" itemprop="commentCount"></span>
                </a>
              </span>
            
          

          
          
             <span id="/blog/2018/10/17/1.html" class="leancloud_visitors" data-flag-title="机器学习之逻辑回归（代码实现）">
               <span class="post-meta-divider">|</span>
               <span class="post-meta-item-icon">
                 <i class="fa fa-eye"></i>
               </span>
               
                 <span class="post-meta-item-text">阅读次数&#58;</span>
               
                 <span class="leancloud-visitors-count"></span>
             </span>
          

          

          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        
          
            <h1 id="逻辑回归"><a href="#逻辑回归" class="headerlink" title="逻辑回归"></a>逻辑回归</h1><p>上一篇博文我们推导了逻辑回归的公式，这一篇我们用代码实现一个逻辑回归算法，且给一个出国留学的数据集进行测试</p>
<h2 id="熟悉数据"><a href="#熟悉数据" class="headerlink" title="熟悉数据"></a>熟悉数据</h2><ol>
<li>数据集</li>
</ol>
<p><a href="https://pan.baidu.com/s/1euBBz3n3Qou1EHSQFsrxsg">考试成绩与录取情况</a>  密码:1uvq</p>
<ol>
<li>数据读取</li>
</ol>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> pandas <span class="keyword">as</span> pd</span><br><span class="line"><span class="keyword">import</span> numpy <span class="keyword">as</span> np</span><br><span class="line"><span class="keyword">import</span> matplotlib.pylab <span class="keyword">as</span> plt</span><br><span class="line"></span><br><span class="line">pdData = pd.read_csv(<span class="string">"LogiReg_data.txt"</span>, header=<span class="keyword">None</span>, names=[<span class="string">'Exam 1'</span>, <span class="string">'Exam 2'</span>, <span class="string">'Admitted'</span>])</span><br><span class="line">pdData.head()   <span class="comment"># 看下数据结果</span></span><br><span class="line">pdData.shape</span><br></pre></td></tr></table></figure>
<p>每个记录包含三个数据，成绩1、成绩2与是否录取</p>
<ol>
<li>我们根据是否录取作为结果来看下数据大致分布</li>
</ol>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line">positive = pdData[pdData[<span class="string">'Admitted'</span>] == <span class="number">1</span>]</span><br><span class="line">negative = pdData[pdData[<span class="string">'Admitted'</span>] == <span class="number">0</span>]</span><br><span class="line"></span><br><span class="line">fig, ax = plt.subplots(figsize=(<span class="number">10</span>,<span class="number">5</span>))</span><br><span class="line">ax.scatter(positive[<span class="string">'Exam 1'</span>], positive[<span class="string">'Exam 2'</span>], s=<span class="number">30</span>, c=<span class="string">'b'</span>, marker=<span class="string">'o'</span>, label=<span class="string">'Admitted'</span>)</span><br><span class="line">ax.scatter(negative[<span class="string">'Exam 1'</span>], negative[<span class="string">'Exam 2'</span>], s=<span class="number">30</span>, c=<span class="string">'r'</span>, marker=<span class="string">'x'</span>, label=<span class="string">'Not Admitted'</span>)</span><br><span class="line">ax.legend()</span><br><span class="line">ax.set_xlabel(<span class="string">'Exam 1 Score'</span>)</span><br><span class="line">ax.set_ylabel(<span class="string">'Exam 2 Score'</span>)</span><br></pre></td></tr></table></figure>
<h2 id="编写代码"><a href="#编写代码" class="headerlink" title="编写代码"></a>编写代码</h2><p>总结一下，要完成的模块</p>
<ul>
<li>sigmoid：映射到概率的函数</li>
<li>model：返回预测结果值</li>
<li>cost：根据参数计算损失</li>
<li>gradient：计算每个参数的梯度方向</li>
<li>descent：进行参数更新</li>
<li>accuracy：计算精度</li>
</ul>
<ol>
<li>sigmoid</li>
</ol>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">sigmoid</span><span class="params">(z)</span>:</span></span><br><span class="line">    <span class="keyword">return</span> <span class="number">1</span> / (<span class="number">1</span> + np.exp(-z))</span><br><span class="line"></span><br><span class="line"><span class="comment"># 大致看下sigmoid函数的图像</span></span><br><span class="line">nums = np.arange(<span class="number">-10</span>, <span class="number">10</span>, step=<span class="number">1</span>)</span><br><span class="line">fig, ax = plt.subplots(figsize=(<span class="number">12</span>, <span class="number">4</span>))</span><br><span class="line">ax.plot(nums, sigmoid(nums), <span class="string">'r'</span>)</span><br></pre></td></tr></table></figure>
<ol>
<li>model</li>
</ol>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">model</span><span class="params">(X, theta)</span>:</span></span><br><span class="line">    <span class="keyword">return</span> sigmoid(np.dot(X, theta.T))</span><br></pre></td></tr></table></figure>
<p>看一下我们需要什么样的数据？</p>
<script type="math/tex; mode=display">
(\theta_0 \quad \theta_1 \quad \theta_2) \quad \times \quad \begin{bmatrix}
1 \\ 
x_1 \\ 
x_2
\end{bmatrix} = \theta_0 + \theta_1 x_1 + \theta_2 x_2</script><script type="math/tex; mode=display">
(1 \quad x_1 \quad x_2) \quad \times \quad \begin{bmatrix}
\theta_0 \\ 
\theta_1 \\ 
\theta_2
\end{bmatrix} = \theta_0 + \theta_1 x_1 + \theta_2 x_2</script><p>我们将样本数据构造成符合上面公式的形式</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line">pdData.insert(<span class="number">0</span>, <span class="string">'Ones'</span>, <span class="number">1</span>)</span><br><span class="line">orig_data = pdData.values</span><br><span class="line">cols = orig_data.shape[<span class="number">1</span>]</span><br><span class="line">X = orig_data[:, <span class="number">0</span>:cols<span class="number">-1</span>]</span><br><span class="line">y = orig_data[:, cols<span class="number">-1</span>:cols]</span><br><span class="line">theta = np.zeros([<span class="number">1</span>, <span class="number">3</span>])</span><br><span class="line"><span class="comment">#自行打印各个数据，检查一下是否构造正常</span></span><br></pre></td></tr></table></figure>
<ol>
<li>cost</li>
</ol>
<p>将对数似然函数去负号</p>
<script type="math/tex; mode=display">
D(h_{\theta}(x), y) = -ylog(h_{\theta}(x)) - (1 - y)log(1 - h_{\theta}(x))</script><p>求平均损失</p>
<script type="math/tex; mode=display">
J(\theta) = \frac{1}{n} \sum_{i=1}^n D(h_{\theta}(x_i), y_i)</script><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">cost</span><span class="params">(X, y, theta)</span>:</span></span><br><span class="line">    left = np.multiply(-y, np.log(model(X, theta)))</span><br><span class="line">    right = np.multiply(<span class="number">1</span> - y, np.log(<span class="number">1</span> - model(X, theta)))</span><br><span class="line">    <span class="keyword">return</span> np.sum(left - right) / (len(X))</span><br><span class="line"></span><br><span class="line"><span class="comment"># 计算一下</span></span><br><span class="line">cost(X, y, theta)</span><br></pre></td></tr></table></figure>
<p>损失值能够求出来了，就可以朝着让损失值越小的方向调优了</p>
<ol>
<li>计算梯度（gradient）</li>
</ol>
<p>计算梯度的目的是为了找到参数更新的方向</p>
<p>计算梯度公式：</p>
<script type="math/tex; mode=display">\frac{\partial J}{\partial \theta_j} = - \frac{1}{m} \sum_{i=1}^m (y_i - h_{\theta}(x_i))x_{ij}</script><p>这里将负号放到括号里面去</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">gradient</span><span class="params">(X, y, theta)</span>:</span></span><br><span class="line">    grad = np.zeros(theta.shape)</span><br><span class="line">    error = (model(X, theta) - y).ravel()</span><br><span class="line">    <span class="keyword">for</span> j <span class="keyword">in</span> range(len(theta.ravel())):</span><br><span class="line">        term = np.multiply(error, X[:,j])</span><br><span class="line">        grad[<span class="number">0</span>, j] = np.sum(term) / len(X)</span><br><span class="line">    <span class="keyword">return</span> grad</span><br></pre></td></tr></table></figure>
<ol>
<li>descent（进行梯度下降计算）</li>
</ol>
<p>我们的模型会进行非常多的迭代运算，那具体什么时候算结束呢</p>
<p>停止迭代策略</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line">STOP_ITER = <span class="number">0</span>   <span class="comment"># 按迭代次数</span></span><br><span class="line">STOP_COST = <span class="number">1</span>   <span class="comment"># 按迭代前后两次的损失值变化大小</span></span><br><span class="line">STOP_GRAD = <span class="number">2</span>   <span class="comment"># 按迭代前后两次的梯度的变化大小</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">stopCriterion</span><span class="params">(type, value, threshold)</span>:</span></span><br><span class="line">    <span class="keyword">if</span> type == STOP_ITER:    <span class="keyword">return</span> value &gt; threshold</span><br><span class="line">    <span class="keyword">elif</span> type == STOP_COST:  <span class="keyword">return</span> abs(value[<span class="number">-1</span>] - value[<span class="number">-2</span>]) &lt; threshold</span><br><span class="line">    <span class="keyword">elif</span> type == STOP_GRAD:  <span class="keyword">return</span> np.linalg.norm(value) &lt; threshold</span><br></pre></td></tr></table></figure>
<p>我们的每次迭代都要打乱我们的数据</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">shuffleData</span><span class="params">(data)</span>:</span></span><br><span class="line">    np.random.shuffle(data)</span><br><span class="line">    cols = data.shape[<span class="number">1</span>]</span><br><span class="line">    X = data[:, <span class="number">0</span>:cols<span class="number">-1</span>]</span><br><span class="line">    y = data[:, cols<span class="number">-1</span>:]</span><br><span class="line">    <span class="keyword">return</span> X, y</span><br></pre></td></tr></table></figure>
<p>开始迭代</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> time</span><br><span class="line"><span class="comment"># batchSize 有三个值，1：随机梯度下降，1-n：小批量梯度下降，n：全批量梯度下降</span></span><br><span class="line"><span class="comment"># alpha为学习率</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">descent</span><span class="params">(data, theta, batchSize, stopType, thresh, alpha)</span>:</span></span><br><span class="line">    init_time = time.time()</span><br><span class="line">    i = <span class="number">0</span>    <span class="comment"># 迭代次数</span></span><br><span class="line">    k = <span class="number">0</span>    <span class="comment"># batch</span></span><br><span class="line">    X, y = shuffleData(data)</span><br><span class="line">    grad = np.zeros(theta.shape)  <span class="comment"># 计算的梯度</span></span><br><span class="line">    costs = [cost(X, y, theta)]   <span class="comment"># 损失值</span></span><br><span class="line">    
</span><br><span class="line">    <span class="keyword">while</span> <span class="keyword">True</span>:</span><br><span class="line">        grad = gradient(X[k:k+batchSize], y[k:k+batchSize], theta)    <span class="comment"># 计算梯度</span></span><br><span class="line">        k += batchSize    <span class="comment"># 取batch数量个数据</span></span><br><span class="line">        <span class="comment"># 判断是否超过了最大样本数</span></span><br><span class="line">        <span class="keyword">if</span> k &gt;= n:</span><br><span class="line">            k = <span class="number">0</span></span><br><span class="line">            X, y = shuffleData(data)  <span class="comment"># 重新洗牌</span></span><br><span class="line">        theta = theta - alpha * grad</span><br><span class="line">        costs.append(cost(X, y, theta)) <span class="comment"># 计算新的损失</span></span><br><span class="line">        i += <span class="number">1</span></span><br><span class="line">        </span><br><span class="line">        <span class="keyword">if</span> stopType == STOP_ITER:    value = i</span><br><span class="line">        <span class="keyword">elif</span> stopType == STOP_COST:  value = costs</span><br><span class="line">        <span class="keyword">elif</span> stopType == STOP_GRAD:    value = grad</span><br><span class="line">        <span class="keyword">if</span> stopCriterion(stopType, value, thresh): <span class="keyword">break</span></span><br><span class="line">        </span><br><span class="line">    <span class="keyword">return</span> theta, i<span class="number">-1</span>, costs, grad, time.time() - init_time</span><br></pre></td></tr></table></figure>
<ol>
<li>计算并画图看看结果吧</li>
</ol>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">runExpe</span><span class="params">(data, theta, batchSize, stopType, thresh, alpha)</span>:</span></span><br><span class="line">    theta, iter, costs, grad, dur = descent(data, theta, batchSize, stopType, thresh, alpha)</span><br><span class="line">    fig, ax = plt.subplots(figsize=(<span class="number">12</span>,<span class="number">4</span>))</span><br><span class="line">    ax.plot(np.arange(len(costs)), costs, <span class="string">'r'</span>)</span><br><span class="line">    ax.set_xlabel(<span class="string">'Iterations'</span>)</span><br><span class="line">    ax.set_ylabel(<span class="string">'Cost'</span>)</span><br><span class="line">    <span class="keyword">return</span> theta</span><br></pre></td></tr></table></figure>
<ol>
<li>对比不同的迭代停止策略</li>
</ol>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment"># 按迭代次数</span></span><br><span class="line">n = <span class="number">100</span> <span class="comment"># 我们的样本值就100，这里先使用全量迭代</span></span><br><span class="line">runExpe(orig_data, theta, n, STOP_ITER, thresh=<span class="number">5000</span>, alpha=<span class="number">0.000001</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 按cost值变化大小</span></span><br><span class="line">runExpe(orig_data, theta, n, STOP_COST, thresh=<span class="number">0.000001</span>, alpha=<span class="number">0.001</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 按梯度值的变化大小</span></span><br><span class="line">runExpe(orig_data, theta, n, STOP_GRAD, thresh=<span class="number">0.05</span>, alpha=<span class="number">0.001</span>)</span><br></pre></td></tr></table></figure>
<ol>
<li>对比不同的样本策略对结果的影响</li>
</ol>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment"># 随机梯度下降</span></span><br><span class="line">runExpe(orig_data, theta, <span class="number">1</span>, STOP_ITER, thresh=<span class="number">0.05</span>, alpha=<span class="number">0.001</span>)</span><br><span class="line">runExpe(orig_data, theta, <span class="number">1</span>, STOP_ITER, thresh=<span class="number">15000</span>, alpha=<span class="number">0.000001</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 小批量梯度下降</span></span><br><span class="line">runExpe(orig_data, theta, <span class="number">16</span>, STOP_ITER, thresh=<span class="number">15000</span>, alpha=<span class="number">0.001</span>)</span><br><span class="line"><span class="comment"># 可以看到结果也不好，这里我们对数据进行预处理一下，再看结果</span></span><br><span class="line"><span class="keyword">from</span> sklearn <span class="keyword">import</span> preprocessing <span class="keyword">as</span> pp</span><br><span class="line">scaled_data = orig_data.copy()</span><br><span class="line">scaled_data[:, <span class="number">1</span>:<span class="number">3</span>] = pp.scale(orig_data[:, <span class="number">1</span>:<span class="number">3</span>])</span><br><span class="line">theta = runExpe(scaled_data, theta, <span class="number">16</span>, STOP_ITER, thresh=<span class="number">15000</span>, alpha=<span class="number">0.001</span>)</span><br><span class="line"><span class="comment"># 可以明显看到差别</span></span><br><span class="line"><span 
class="comment"># 对数据进行预处理后的效果非常好</span></span><br></pre></td></tr></table></figure>
<ol>
<li>计算精度，看下模型效果如何</li>
</ol>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">predict</span><span class="params">(X, theta)</span>:</span></span><br><span class="line">    <span class="keyword">return</span> [<span class="number">1</span> <span class="keyword">if</span> x &gt;= <span class="number">0.5</span> <span class="keyword">else</span> <span class="number">0</span> <span class="keyword">for</span> x <span class="keyword">in</span> model(X, theta)]</span><br><span class="line"></span><br><span class="line">scaled_X = scaled_data[:, :<span class="number">3</span>]</span><br><span class="line">y = scaled_data[:, <span class="number">3</span>]</span><br><span class="line">predictions = predict(scaled_X, theta)</span><br><span class="line">correct = [<span class="number">1</span> <span class="keyword">if</span> ((a == <span class="number">1</span> <span class="keyword">and</span> b == <span class="number">1</span>) <span class="keyword">or</span> (a == <span class="number">0</span> <span class="keyword">and</span> b == <span class="number">0</span>)) <span class="keyword">else</span> <span class="number">0</span> <span class="keyword">for</span> (a, b) <span class="keyword">in</span> zip(predictions, y)]</span><br><span class="line">accuracy = (sum(map(int, correct))) % len(correct)</span><br><span class="line"><span class="keyword">print</span> (<span class="string">'accuracy = &#123;0&#125;%'</span>.format(accuracy))</span><br></pre></td></tr></table></figure>
          
        
      
    </div>
    
    
    

    

    

    

    <footer class="post-footer">
      

      

      

      
      
        <div class="post-eof"></div>
      
    </footer>
  </div>
  
  
  
  </article>


    
      

  

  
  
  

  <article class="post post-type-normal" itemscope itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="https://wangxiaochuang.github.io/blog/2018/10/16/1.html">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="name" content="jackstraw">
      <meta itemprop="description" content="">
      <meta itemprop="image" content="/blog/images/avatar.jpg">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="稻草人的编程之路">
    </span>

    
      <header class="post-header">

        
        
          <h1 class="post-title" itemprop="name headline">
                
                <a class="post-title-link" href="/blog/2018/10/16/1.html" itemprop="url">机器学习之逻辑回归</a></h1>
        

        <div class="post-meta">
          <span class="post-time">
            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              
              <time title="创建于" itemprop="dateCreated datePublished" datetime="2018-10-16T22:20:17+08:00">
                2018-10-16
              </time>
            

            

            
          </span>

          
            <span class="post-category" >
            
              <span class="post-meta-divider">|</span>
            
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              
                <span class="post-meta-item-text">分类于</span>
              
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/" itemprop="url" rel="index">
                    <span itemprop="name">机器学习</span>
                  </a>
                </span>

                
                
                  ， 
                
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/逻辑回归/" itemprop="url" rel="index">
                    <span itemprop="name">逻辑回归</span>
                  </a>
                </span>

                
                
              
            </span>
          

          
            
              <span class="post-comments-count">
                <span class="post-meta-divider">|</span>
                <span class="post-meta-item-icon">
                  <i class="fa fa-comment-o"></i>
                </span>
                <a href="/blog/2018/10/16/1.html#comments" itemprop="discussionUrl">
                  <span class="post-comments-count valine-comment-count" data-xid="/blog/2018/10/16/1.html" itemprop="commentCount"></span>
                </a>
              </span>
            
          

          
          
             <span id="/blog/2018/10/16/1.html" class="leancloud_visitors" data-flag-title="机器学习之逻辑回归">
               <span class="post-meta-divider">|</span>
               <span class="post-meta-item-icon">
                 <i class="fa fa-eye"></i>
               </span>
               
                 <span class="post-meta-item-text">阅读次数&#58;</span>
               
                 <span class="leancloud-visitors-count"></span>
             </span>
          

          

          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        
          
            <h1 id="逻辑回归"><a href="#逻辑回归" class="headerlink" title="逻辑回归"></a>逻辑回归</h1><p>逻辑回归是机器学习中非常牛逼的一个算法，为什么这么说呢？因为它简单且大多数情况下，效果很好。</p>
<p>一般情况下，当公司要做一个大型的学习系统前并没有特别好的想法，这个时候就会用先用逻辑回归去试一下，看看效果。</p>
<p>机器学习算法的选择：先用逻辑回归再用复杂的，能简单还是用简单的</p>
<p><strong>分类还是回归？</strong></p>
<p>虽然它的名字叫回归，但其实它做的是分类的任务</p>
<h2 id="sigmoid函数"><a href="#sigmoid函数" class="headerlink" title="sigmoid函数"></a>sigmoid函数</h2><p>公式：$g(z) = \frac{1}{1 + e^{-z}}$</p>
<p>画一下这个函数的图形，可以看到自变量取值为任意实数，值域[0,1]</p>
<p>解释：将任意的输入映射到了[0,1]区间，我们在线性回归中可以得到一个预测值，再将这个值映射到sigmoid函数中，这样就完成了由值到概率的转换，也就是分类任务</p>
<h2 id="公式推导"><a href="#公式推导" class="headerlink" title="公式推导"></a>公式推导</h2><p>我们利用sigmoid将输入转化为概率值，得到预测函数：$h_{\theta}(x) = g(\theta^T x) = \frac{1}{1 + e^{-\theta^T x}}$</p>
<p>可以看到，我们的输入值其实就是线性回归中的输入</p>
<p>我们建立一个二分类任务，假设有两个类别，分别为1和0：</p>
<script type="math/tex; mode=display">\begin{cases}
P(y=1|x;\theta) = h_{\theta}(x) \\
P(y=0|x;\theta) = 1 - h_{\theta}(x)
\end{cases}</script><p>整合一下：$P(y|x;\theta) = (h_{\theta}(x))^y(1 - h_{\theta}(x))^{1-y}$</p>
<p>跟线性回归类似，我们也是求似然函数：$L(\theta) = \prod_{i=1}^m P(y_i|x_i;\theta) = \prod_{i=1}^m (h_{\theta}(x_i))^{y_i}(1 - h_{\theta}(x_i))^{1 - y_i}$</p>
<p>同样，为了减小复杂度，求对数似然：$l(\theta) = log L(\theta) = \sum_{i=1}^m (y_i log h_{\theta}(x_i) + (1 - y_i)log(1 - h_{\theta}(x_i)))$</p>
<p>由于似然函数表达的是什么样的参数使得预测值与结果值越相似，自然是概率越大越好</p>
<p>因此现在求的是一个最大值，所以是一个梯度上升的问题</p>
<p>我们常常需要将梯度上升的问题转换为梯度下降的问题，因此引入 $J(\theta) = - \frac{1}{m} l(\theta)$ </p>
<h3 id="求导过程"><a href="#求导过程" class="headerlink" title="求导过程"></a>求导过程</h3><script type="math/tex; mode=display">
\begin{equation}\begin{split}
l(\theta) &= log L(\theta) = \sum_{i=1}^m (y_i log h_{\theta}(x_i) + (1 - y_i)log(1 - h_{\theta}(x_i))) \\
\frac{\partial}{\partial_{\theta_j}} J(\theta) &= -\frac{1}{m} \sum_{i=1}^m \left( y_i \frac{1}{h_{\theta}(x_i)} \frac{\partial}{\partial_{\theta_j}} h_{\theta}(x_i) - (1 - y_i) \frac{1}{1 - h_{\theta}(x_i)} \frac{\partial}{\partial_{\theta_j}} h_{\theta}(x_i) \right) \\
&= -\frac{1}{m} \sum_{i=1}^m \left( y_i \frac{1}{g(\theta^T x_i)} - (1 - y_i) \frac{1}{1 - g(\theta^T x_i)} \right) \frac{\partial}{\partial_{\theta_j}} g(\theta^T x_i) \\
&= -\frac{1}{m} \sum_{i=1}^m \left( y_i \frac{1}{g(\theta^T x_i)} - (1 - y_i) \frac{1}{1 - g(\theta^T x_i)} \right) g(\theta^T x_i) (1 - g(\theta^T x_i)) \frac{\partial}{\partial_{\theta_j}} \theta^T x_i \\
&= -\frac{1}{m} \sum_{i=1}^m \left( y_i (1 - g(\theta^T x_i)) - (1 - y_i) g(\theta^T x_i) \right) x_i^j \\
&= -\frac{1}{m} \sum_{i=1}^m \left( y_i - g(\theta^T x_i) \right) x_i^j \\
&= \frac{1}{m} \sum_{i=1}^m \left( h_{\theta}(x_i) - y_i \right) x_i^j
\end{split}\end{equation}</script><h3 id="参数更新"><a href="#参数更新" class="headerlink" title="参数更新"></a>参数更新</h3><p>我们费了好大的劲求出来了 $\theta$ 偏导，这时可以用了，我们要进行参数更新：$\theta_j := \theta_j - \alpha \frac{1}{m} \sum_{i=1}^m \left( h_{\theta}(x_i) - y_i \right) x_i^j$</p>

          
        
      
    </div>
    
    
    

    

    

    

    <footer class="post-footer">
      

      

      

      
      
        <div class="post-eof"></div>
      
    </footer>
  </div>
  
  
  
  </article>


    
      

  

  
  
  

  <article class="post post-type-normal" itemscope itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="https://wangxiaochuang.github.io/blog/2018/10/15/1.html">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="name" content="jackstraw">
      <meta itemprop="description" content="">
      <meta itemprop="image" content="/blog/images/avatar.jpg">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="稻草人的编程之路">
    </span>

    
      <header class="post-header">

        
        
          <h1 class="post-title" itemprop="name headline">
                
                <a class="post-title-link" href="/blog/2018/10/15/1.html" itemprop="url">机器学习之线性回归</a></h1>
        

        <div class="post-meta">
          <span class="post-time">
            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              
              <time title="创建于" itemprop="dateCreated datePublished" datetime="2018-10-15T22:05:08+08:00">
                2018-10-15
              </time>
            

            

            
          </span>

          
            <span class="post-category" >
            
              <span class="post-meta-divider">|</span>
            
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              
                <span class="post-meta-item-text">分类于</span>
              
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/" itemprop="url" rel="index">
                    <span itemprop="name">机器学习</span>
                  </a>
                </span>

                
                
                  ， 
                
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/blog/categories/机器学习/线性回归/" itemprop="url" rel="index">
                    <span itemprop="name">线性回归</span>
                  </a>
                </span>

                
                
              
            </span>
          

          
            
              <span class="post-comments-count">
                <span class="post-meta-divider">|</span>
                <span class="post-meta-item-icon">
                  <i class="fa fa-comment-o"></i>
                </span>
                <a href="/blog/2018/10/15/1.html#comments" itemprop="discussionUrl">
                  <span class="post-comments-count valine-comment-count" data-xid="/blog/2018/10/15/1.html" itemprop="commentCount"></span>
                </a>
              </span>
            
          

          
          
             <span id="/blog/2018/10/15/1.html" class="leancloud_visitors" data-flag-title="机器学习之线性回归">
               <span class="post-meta-divider">|</span>
               <span class="post-meta-item-icon">
                 <i class="fa fa-eye"></i>
               </span>
               
                 <span class="post-meta-item-text">阅读次数&#58;</span>
               
                 <span class="leancloud-visitors-count"></span>
             </span>
          

          

          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        
          
            <h1 id="线性回归"><a href="#线性回归" class="headerlink" title="线性回归"></a>线性回归</h1><p>线性回归是机器学习中另一个非常简单的算法，这里我们以一个银行贷款的例子来进行说明，我们会完整的推算一遍。</p>
<h2 id="例子"><a href="#例子" class="headerlink" title="例子"></a>例子</h2><div class="table-container">
<table>
<thead>
<tr>
<th>工资</th>
<th>年龄</th>
<th>额度</th>
</tr>
</thead>
<tbody>
<tr>
<td>4000</td>
<td>25</td>
<td>20000</td>
</tr>
<tr>
<td>8000</td>
<td>30</td>
<td>70000</td>
</tr>
<tr>
<td>5000</td>
<td>28</td>
<td>35000</td>
</tr>
<tr>
<td>7500</td>
<td>33</td>
<td>50000</td>
</tr>
<tr>
<td>12000</td>
<td>40</td>
<td>85000</td>
</tr>
</tbody>
</table>
</div>
<p>解释说明一下这个例子，这个例子是银行贷款的样本，总共有两个特征，工资和年龄，我们的目标是给定一个工资和年龄，银行会给我贷多少钱</p>
<h2 id="公式推导"><a href="#公式推导" class="headerlink" title="公式推导"></a>公式推导</h2><p>假设 $\theta_1$ 是年龄的参数，$\theta_2$ 是工资的参数</p>
<p>我们能得到一个方程：$h_{\theta}(x) = \theta_0 + \theta_1 x_1 + \theta_2 x_2$ ，其中的 $\theta_0$ 为偏置项</p>
<p>整合一下：$h_{\theta}(x) = \sum_{i=0}^{n} \theta_i x_i = \theta^T x$</p>
<p>由于我们的数据都是一个个独立的样本，预测值与真实值间必然存在一定的误差，我们加入误差项</p>
<p>对于每个样本：$y^{(i)} = \theta^T x^{(i)} + \epsilon^{(i)}$</p>
<h3 id="关于误差"><a href="#关于误差" class="headerlink" title="关于误差"></a>关于误差</h3><p>误差 $\epsilon^{(i)}$ 是独立并且具有相同的分布，并且服从均值为0方差为 $\sigma^2$ 的高斯分布</p>
<p>独立：张三与李四一起来贷款，他两没有关系</p>
<p>同分布：他两来的都是同一家银行</p>
<p>高斯分布：银行可能会多给，也可能会少给，但是绝大多数情况下，这个浮动不会太大，极小情况下浮动会比较大，但是符合正常情况，理解高斯分布<br>这里要注意：对误差的独立且同分布都是一个假设，并没有完完全全的数学去证明确实如此，但是对于机器学习来说，只要最后模型可用就行</p>
<p>由于误差是服从高斯分布的：$p \left( \epsilon^{(i)} \right) = \frac{1}{\sqrt{2\pi}\sigma}e^{\left( - \frac{\left(\epsilon^{(i)}\right)^2}{2{\sigma}^2} \right)}$</p>
<p>将<code>预测值与误差</code>的公式带入到误差高斯分布公式：$p(y^{(i)}|x^{(i)};\theta) = \frac{1}{\sqrt{2\pi}\sigma}e^{\left( - \frac{\left(y^{(i)} - \theta^T x^{(i)}\right)^2}{2{\sigma}^2} \right)}$</p>
<h3 id="似然函数"><a href="#似然函数" class="headerlink" title="似然函数"></a>似然函数</h3><p>似然函数是根据样本值去估计参数值的一个函数，比如我们要去赌场赌博，我们不知道去了赢还是输，所以我们就去赌场门口堵别人，出来一个就统计一下是否挣钱，最后得到10个样本中有9个都挣钱了，我们认为挣钱的概率是 90%</p>
<p>再直白解释一下就是： <code>什么样的参数跟我们的数据组合后恰好就是真实值</code></p>
<p>这里给出似然函数公式：$L(\theta) = \prod_{i=1}^{m} p\left( y^{(i)}|x^{(i)};\theta \right) = \prod_{i=1}^{m} \frac{1}{\sqrt{2\pi}\sigma}e^{\left( - \frac{\left(y^{(i)} - \theta^T x^{(i)}\right)^2}{2{\sigma}^2} \right)}$</p>
<p>还有一个概念叫最大似然函数或极大似然估计，都是表示什么样的参数使得估计值成为真实值的概率越大</p>
<p>为了解决乘法的复杂度，因此有了对数似然函数：$log L(\theta) = log \prod_{i=1}^{m} \frac{1}{\sqrt{2\pi}\sigma}e^{\left( - \frac{\left(y^{(i)} - \theta^T x^{(i)}\right)^2}{2{\sigma}^2} \right)}$</p>
<p>将对数似然函数展开化简：</p>
<p>\begin{equation}\begin{split}<br>log L(\theta) &amp;= \sum_{i=1}^{m} \log \frac{1}{\sqrt{2\pi}\sigma} e^{\left( - \frac{\left(y^{(i)} - \theta^T x^{(i)}\right)^2}{2{\sigma}^2} \right)} \\<br>&amp;= m \log \frac{1}{\sqrt{2\pi}\sigma} - \frac{1}{\sigma^2} \times \frac{1}{2} \sum_{i=1}^{m} \left( y^{(i)} - \theta^T x^{(i)} \right)^2 \\<br>\end{split}\end{equation}</p>
<p>目标：让似然函数越大越好，分析后可以得出，减号后面的部分为零时式子取得最大值</p>
<script type="math/tex; mode=display">
J(\theta) = \frac{1}{2} \sum_{i=1}^{m} (y^{(i)} - \theta^T x^{(i)})^2 \text{（最小二乘法）}</script><p>这里我们求一下参数值 $\theta$</p>
<p>\begin{equation}\begin{split}<br>\text{目标函数：} &amp; \\<br>J(\theta) &amp;= \frac{1}{2} \sum_{i=1}^{m} (y^{(i)} - \theta^T x^{(i)})^2 \\<br>&amp;= \frac{1}{2} (X\theta - y)^T (X\theta - y) \\<br>\text{求偏导：} &amp; \\<br>\nabla_{\theta} J(\theta) &amp;= \nabla_{\theta} \left( \frac{1}{2} (X\theta - y)^T (X\theta - y) \right) \\<br>&amp;= \nabla_{\theta} \left( \frac{1}{2} (\theta^T X^T - y^T)(X\theta - y) \right) \\<br>&amp;= \nabla_{\theta} \left( \frac{1}{2} (\theta^T X^T X \theta - \theta^T X^T y - y^T X \theta + y^T y) \right) \\<br>&amp;= \frac{1}{2} \left( 2X^T X \theta - X^T y - (y^T X)^T \right) \\<br>\text{由偏导等于0得出：} &amp; \\<br>\theta &amp;= (X^T X)^{-1} X^T y<br>\end{split}\end{equation}</p>
<p>一般情况下，$\theta$ 是求不出来的，线性回归是数学上的一个巧合，大部分求 $\theta$ 的方式是优化的方式</p>
<h2 id="评估方法"><a href="#评估方法" class="headerlink" title="评估方法"></a>评估方法</h2><p>一个模型出来了，我们需要有一个评估方法去衡量这个模型的好坏，对于线性回归，最常用的评估项为：</p>
<script type="math/tex; mode=display">
R^2 = 1 - \frac{\sum_{i=1}^{m} (\hat{y_i} - y_i)^2}{\sum_{i=1}^{m} (y_i - \bar{y})^2}</script><p>$R^2$ 的取值越接近于1，我们认为模型拟合的越好</p>
<h2 id="梯度下降"><a href="#梯度下降" class="headerlink" title="梯度下降"></a>梯度下降</h2><p>上面的介绍中我们说了，大部分的机器学习算法都是计算不出来参数值的，这里我们介绍用优化的思想来解决问题</p>
<p>对于梯度下降，我们需要有自己的目标函数：$J(\theta_0, \theta_1) = \frac{1}{2m} \sum_{i=1}^{m} \left( h_{\theta}(x^{(i)}) - y^{(i)} \right)^2$，未知参数值为 $\theta$，我们要去找到合适的 $\theta$ 使得拟合值与真实值越接近，对于这个式子就是求解极小值</p>
<p>大概会分为如下几个步骤：</p>
<ol>
<li>找到当前最合适的方向（求导）</li>
<li>走那么一小步，走快了会“跌倒”，走慢了可能会花非常多的时间（参数值的更新值）</li>
<li>按照方向与步伐去更新我们的参数</li>
</ol>
<h3 id="梯度下降的方法"><a href="#梯度下降的方法" class="headerlink" title="梯度下降的方法"></a>梯度下降的方法</h3><ol>
<li><p>批量梯度下降：$\frac{\partial{J(\theta)}}{\partial{\theta_j}} = - \frac{1}{m} \sum_{i=1}^{m} (y^i - h_{\theta}(x^i)) x_j^i \quad \theta_j = \theta_j + \frac{1}{m} \sum_{i=1}^{m} (y^i - h_{\theta}(x^i)) x_j^i $</p>
<p> 容易得到最优解，但是由于每次考虑所有的样本，所以速度非常慢</p>
</li>
<li><p>随机梯度下降：$\theta_j = \theta_j + (y^i - h_{\theta}(x^i)) x_j^i$</p>
<p> 每次找一个样本，迭代速度快，但是不一定每次都朝收敛的方向</p>
</li>
<li><p>小批量梯度下降法：$\theta_j := \theta_j - \alpha \frac{1}{10} \sum_{k=i}^{i+9} (h_{\theta}(x^{(k)}) - y^{(k)})x_j^{(k)}$</p>
<p> 每次更新选择一小部分数据来算，是最实用的，批量样本一般拿16 64 128为批量数</p>
</li>
</ol>
<h3 id="学习率"><a href="#学习率" class="headerlink" title="学习率"></a>学习率</h3><p>学习率也叫步长，不同的学习率对结果会产生巨大的影响，一般小一些</p>
<p>一般思路是以小的学习率和大的迭代次数来计算</p>
<p>学习率也不是一个固定的数值，可以前期选大一点的数，后期选小点的数</p>
<p>一般先拿0.01 64开始</p>

          
        
      
    </div>
    
    
    

    

    

    

    <footer class="post-footer">
      

      

      

      
      
        <div class="post-eof"></div>
      
    </footer>
  </div>
  
  
  
  </article>


    
  </section>

  
  <nav class="pagination">
    <span class="page-number current">1</span><a class="page-number" href="/blog/page/2/">2</a><a class="extend next" rel="next" href="/blog/page/2/"><i class="fa fa-angle-right"></i></a>
  </nav>



          </div>
          


          

        </div>
        
          
  
  <div class="sidebar-toggle">
    <div class="sidebar-toggle-line-wrap">
      <span class="sidebar-toggle-line sidebar-toggle-line-first"></span>
      <span class="sidebar-toggle-line sidebar-toggle-line-middle"></span>
      <span class="sidebar-toggle-line sidebar-toggle-line-last"></span>
    </div>
  </div>

  <aside id="sidebar" class="sidebar">
    
    <div class="sidebar-inner">

      

      

      <section class="site-overview-wrap sidebar-panel sidebar-panel-active">
        <div class="site-overview">
          <div class="site-author motion-element" itemprop="author" itemscope itemtype="http://schema.org/Person">
            
              <img class="site-author-image" itemprop="image"
                src="/blog/images/avatar.jpg"
                alt="jackstraw" />
            
              <p class="site-author-name" itemprop="name">jackstraw</p>
              <p class="site-description motion-element" itemprop="description">人生的意义，不在于最终获得了什么，而在于曾经努力追求过什么</p>
          </div>

          <nav class="site-state motion-element">

            
              <div class="site-state-item site-state-posts">
              
                <a href="/blog/archives/">
              
                  <span class="site-state-item-count">19</span>
                  <span class="site-state-item-name">日志</span>
                </a>
              </div>
            

            
              
              
              <div class="site-state-item site-state-categories">
                
                  <span class="site-state-item-count">22</span>
                  <span class="site-state-item-name">分类</span>
                
              </div>
            

            
              
              
              <div class="site-state-item site-state-tags">
                
                  <span class="site-state-item-count">22</span>
                  <span class="site-state-item-name">标签</span>
                
              </div>
            

          </nav>

          
            <div class="feed-link motion-element">
              <a href="/blog/atom.xml" rel="alternate">
                <i class="fa fa-rss"></i>
                RSS
              </a>
            </div>
          

          

          
          

          
          

          

        </div>
      </section>

      

      

    </div>
  </aside>


        
      </div>
    </main>

    <footer id="footer" class="footer">
      <div class="footer-inner">
        <div class="copyright">&copy; <span itemprop="copyrightYear">2019</span>
  <span class="with-love">
    <i class="fa fa-user"></i>
  </span>
  <span class="author" itemprop="copyrightHolder">jackstraw</span>

  
</div>









        
<div class="busuanzi-count">
  <script async src="https://busuanzi.ibruce.info/busuanzi/2.3/busuanzi.pure.mini.js"></script>

  
    <span class="site-uv">
      <i class="fa fa-user"></i>访问人数
      <span class="busuanzi-value" id="busuanzi_value_site_uv"></span>
      人
    </span>
  

  
    <span class="site-pv">
      <i class="fa fa-eye"></i>总访问量
      <span class="busuanzi-value" id="busuanzi_value_site_pv"></span>
      次
    </span>
  
</div>








        
      </div>
    </footer>

    
      <div class="back-to-top">
        <i class="fa fa-arrow-up"></i>
        
      </div>
    

    

  </div>

  

<script type="text/javascript">
  // Discard non-native Promise shims so a polyfill loader can install a
  // proper implementation later; native Promises pass the toString check.
  var hasNativePromise =
      Object.prototype.toString.call(window.Promise) === '[object Function]';
  if (!hasNativePromise) {
    window.Promise = null;
  }
</script>









  












  
  
    <script type="text/javascript" src="/blog/lib/jquery/index.js?v=2.1.3"></script>
  

  
  
    <script type="text/javascript" src="/blog/lib/fastclick/lib/fastclick.min.js?v=1.0.6"></script>
  

  
  
    <script type="text/javascript" src="/blog/lib/jquery_lazyload/jquery.lazyload.js?v=1.9.7"></script>
  

  
  
    <script type="text/javascript" src="/blog/lib/velocity/velocity.min.js?v=1.2.1"></script>
  

  
  
    <script type="text/javascript" src="/blog/lib/velocity/velocity.ui.min.js?v=1.2.1"></script>
  

  
  
    <script type="text/javascript" src="/blog/lib/fancybox/source/jquery.fancybox.pack.js?v=2.1.5"></script>
  


  


  <script type="text/javascript" src="/blog/js/src/utils.js?v=5.1.4"></script>

  <script type="text/javascript" src="/blog/js/src/motion.js?v=5.1.4"></script>



  
  


  <script type="text/javascript" src="/blog/js/src/affix.js?v=5.1.4"></script>

  <script type="text/javascript" src="/blog/js/src/schemes/pisces.js?v=5.1.4"></script>



  

  


  <script type="text/javascript" src="/blog/js/src/bootstrap.js?v=5.1.4"></script>



  


  




	





  





  










  <script src="//cdn1.lncld.net/static/js/3.0.4/av-min.js"></script>
  <script src="//unpkg.com/valine/dist/Valine.min.js"></script>
  
  <script type="text/javascript">
    // Whitelist of commenter-info fields Valine supports.
    var GUEST = ['nick','mail','link'];
    var guest = 'nick,mail';
    // Keep only the configured fields that Valine actually recognizes.
    guest = guest.split(',').filter(item=>{
      return GUEST.indexOf(item)>-1;
    });
    new Valine({
        el: '#comments' ,
        verify: true,
        notify: false,
        appId: 'lQM75w94ggNR0TjX61NLerrg-gzGzoHsz',
        appKey: 'I2wtQ2rd9KtoJmcEOiYG9zqT',
        placeholder: '如需帮助，请留下邮箱',
        avatar:'mm',
        guest_info:guest,
        pageSize:'10' || 10,
    });
    // Remove the info nodes Valine injects under "#comments .info".
    // BUG FIX: childNodes is a *live* NodeList — removing nodes while
    // iterating it directly skips every other child. Snapshot it into a
    // plain array first so every node is removed.
    var infoEle = document.querySelector('#comments .info');
    if (infoEle && infoEle.childNodes && infoEle.childNodes.length > 0){
      Array.prototype.slice.call(infoEle.childNodes).forEach(function(item) {
        item.parentNode.removeChild(item);
      });
    }
  </script>



  





  

  
  <script src="https://cdn1.lncld.net/static/js/av-core-mini-0.6.4.js"></script>
  <script>AV.initialize("lQM75w94ggNR0TjX61NLerrg-gzGzoHsz", "I2wtQ2rd9KtoJmcEOiYG9zqT");</script>
  <script>
    // Fetch the stored view counts for every post listed on the page and
    // render them into the matching ".leancloud-visitors-count" spans.
    // Counter: an AV.Object subclass whose records hold {url, title, time}.
    function showTime(Counter) {
      var query = new AV.Query(Counter);
      var entries = [];
      var $visitors = $(".leancloud_visitors");

      // Each visitor span carries its post URL in the id attribute.
      $visitors.each(function () {
        entries.push( $(this).attr("id").trim() );
      });

      // One batched query for all URLs visible on this page.
      query.containedIn('url', entries);
      query.find()
        .done(function (results) {
          var COUNT_CONTAINER_REF = '.leancloud-visitors-count';

          // No counter records exist yet: show 0 for every post.
          if (results.length === 0) {
            $visitors.find(COUNT_CONTAINER_REF).text(0);
            return;
          }

          // Fill in the stored count ("time") for each URL with a record.
          for (var i = 0; i < results.length; i++) {
            var item = results[i];
            var url = item.get('url');
            var time = item.get('time');
            var element = document.getElementById(url);

            $(element).find(COUNT_CONTAINER_REF).text(time);
          }
          // URLs that had no record were left untouched above; default
          // their spans to 0 so nothing renders empty.
          for(var i = 0; i < entries.length; i++) {
            var url = entries[i];
            var element = document.getElementById(url);
            var countSpan = $(element).find(COUNT_CONTAINER_REF);
            if( countSpan.text() == '') {
              countSpan.text(0);
            }
          }
        })
        .fail(function (object, error) {
          console.log("Error: " + error.code + " " + error.message);
        });
    }

    // Increment (or create) the view counter for the single post on this
    // page. Reads the post URL and title from the lone ".leancloud_visitors"
    // span, then updates the rendered count once the save succeeds.
    // Counter: an AV.Object subclass whose records hold {url, title, time}.
    function addCount(Counter) {
      var $visitors = $(".leancloud_visitors");
      var url = $visitors.attr('id').trim();
      var title = $visitors.attr('data-flag-title').trim();
      var query = new AV.Query(Counter);

      query.equalTo("url", url);
      query.find({
        success: function(results) {
          if (results.length > 0) {
            // Existing record: atomically bump its "time" field.
            // fetchWhenSave(true) makes save() return the updated value.
            var counter = results[0];
            counter.fetchWhenSave(true);
            counter.increment("time");
            counter.save(null, {
              success: function(counter) {
                var $element = $(document.getElementById(url));
                $element.find('.leancloud-visitors-count').text(counter.get('time'));
              },
              error: function(counter, error) {
                console.log('Failed to save Visitor num, with error message: ' + error.message);
              }
            });
          } else {
            // First ever visit: create the record with a count of 1 and a
            // public read/write ACL so later visitors can increment it.
            var newcounter = new Counter();
            /* Set ACL */
            var acl = new AV.ACL();
            acl.setPublicReadAccess(true);
            acl.setPublicWriteAccess(true);
            newcounter.setACL(acl);
            /* End Set ACL */
            newcounter.set("title", title);
            newcounter.set("url", url);
            newcounter.set("time", 1);
            newcounter.save(null, {
              success: function(newcounter) {
                var $element = $(document.getElementById(url));
                $element.find('.leancloud-visitors-count').text(newcounter.get('time'));
              },
              error: function(newcounter, error) {
                console.log('Failed to create');
              }
            });
          }
        },
        error: function(error) {
          console.log('Error:' + error.code + " " + error.message);
        }
      });
    }

    // On DOM ready, dispatch by page type: exactly one counter span means a
    // single-post page (bump its count); multiple post links mean an index
    // page (display counts only).
    $(function() {
      var Counter = AV.Object.extend("Counter");
      var visitorSpanCount = $('.leancloud_visitors').length;
      if (visitorSpanCount == 1) {
        addCount(Counter);
        return;
      }
      if ($('.post-title-link').length > 1) {
        showTime(Counter);
      }
    });
  </script>



  

  

  
  

  
  
    <script type="text/x-mathjax-config">
      // Configure MathJax's tex2jax preprocessor before the library loads
      // (this script has type "text/x-mathjax-config", read by MathJax).
      MathJax.Hub.Config({
        tex2jax: {
          // Allow single-dollar inline math in addition to \( ... \).
          inlineMath: [ ['$','$'], ["\\(","\\)"]  ],
          // Let authors write a literal dollar sign as \$.
          processEscapes: true,
          // Never typeset inside these elements (code samples, raw text).
          skipTags: ['script', 'noscript', 'style', 'textarea', 'pre', 'code']
        }
      });
    </script>

    <script type="text/x-mathjax-config">
      // After typesetting, tag the parent of every rendered formula with the
      // "has-jax" class so the theme's CSS can style math containers.
      MathJax.Hub.Queue(function() {
        var jaxElements = MathJax.Hub.getAllJax();
        for (var idx = 0; idx < jaxElements.length; idx += 1) {
          jaxElements[idx].SourceElement().parentNode.className += ' has-jax';
        }
      });
    </script>
    <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
  


  

  

</body>
</html>
