<!DOCTYPE html>












  




<html class="theme-next gemini use-motion" lang="zh-CN">
<head>
  <!-- hexo-inject:begin --><!-- hexo-inject:end --><meta charset="UTF-8"/>
<meta name="google-site-verification" content="o9IkI77-fxkhBZW-n0ww9JALMCqdDbeTgdcXO_Bw4Zc" />
<meta name="baidu-site-verification" content="3frqY9KiVO" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<meta name="theme-color" content="#222">



  
  
  <link rel="stylesheet" href="/lib/needsharebutton/needsharebutton.css">










<meta http-equiv="Cache-Control" content="no-transform" />
<meta http-equiv="Cache-Control" content="no-siteapp" />



















  
  
  
  

  
    
    
  

  
    
      
    

    
  

  

  
    
      
    

    
  

  
    
      
    

    
  

  
    
    
    <link href="https://fonts.googleapis.com/css?family=Monda:300,300italic,400,400italic,700,700italic|Roboto Slab:300,300italic,400,400italic,700,700italic|Lobster Two:300,300italic,400,400italic,700,700italic|PT Mono:300,300italic,400,400italic,700,700italic&subset=latin,latin-ext" rel="stylesheet" type="text/css">
  






<link href="/lib/font-awesome/css/font-awesome.min.css?v=4.6.2" rel="stylesheet" type="text/css" />

<link href="/css/main.css?v=6.4.1" rel="stylesheet" type="text/css" />


  <link rel="apple-touch-icon" sizes="180x180" href="/images/logo.png?v=6.4.1">


  <link rel="icon" type="image/png" sizes="32x32" href="/images/logo.png?v=6.4.1">


  <link rel="icon" type="image/png" sizes="16x16" href="/images/logo.png?v=6.4.1">


  <link rel="mask-icon" href="/images/logo.svg?v=6.4.1" color="#222">









<script type="text/javascript" id="hexo.configurations">
  // Reuse the global NexT namespace if an earlier script already created it.
  var NexT = window.NexT || {};

  // Theme-wide configuration object consumed by NexT's client-side scripts.
  var CONFIG = {};
  CONFIG.root = '/';
  CONFIG.scheme = 'Gemini';
  CONFIG.version = '6.4.1';
  // Sidebar placement and toggle behaviour.
  CONFIG.sidebar = {"position":"left","display":"post","offset":12,"b2t":false,"scrollpercent":false,"onmobile":false};
  CONFIG.fancybox = false;
  CONFIG.fastclick = false;
  CONFIG.lazyload = false;
  CONFIG.tabs = true;
  // Page-load animation settings (transition names used by the motion scripts).
  CONFIG.motion = {"enable":true,"async":false,"transition":{"post_block":"fadeIn","post_header":"slideDownIn","post_body":"slideDownIn","coll_header":"slideLeftIn","sidebar":"slideUpIn"}};
  // Algolia search credentials are empty (feature effectively disabled);
  // UI labels are kept so the search popup renders correctly if enabled.
  CONFIG.algolia = {
    applicationID: '',
    apiKey: '',
    indexName: '',
    hits: {"per_page":10},
    labels: {"input_placeholder":"Search for Posts","hits_empty":"We didn't find any results for the search: ${query}","hits_stats":"${hits} results found in ${time} ms"}
  };
</script>


  




  <meta name="description" content="摘要：决策树算法是一种基本的分类与回归方法，是最经常使用的算法之一。决策树模型呈树形结构，在分类问题中，表示基于特征对实例进行分类的过程。它可以认为是基于规则的集合。本文首先介绍决策树定义、工作原理、算法流程、优缺点等，然后结合案例进行分析。（本文原创，转载必须注明出处.）">
<meta name="keywords" content="Python,机器学习算法,sklean,决策树,文本聚类">
<meta property="og:type" content="article">
<meta property="og:title" content="一步步教你轻松学决策树模型算法">
<meta property="og:url" content="https://bainingchao.github.io/2018/09/19/一步步教你轻松学决策树算法/index.html">
<meta property="og:site_name" content="白宁超的官网">
<meta property="og:description" content="摘要：决策树算法是一种基本的分类与回归方法，是最经常使用的算法之一。决策树模型呈树形结构，在分类问题中，表示基于特征对实例进行分类的过程。它可以认为是基于规则的集合。本文首先介绍决策树定义、工作原理、算法流程、优缺点等，然后结合案例进行分析。（本文原创，转载必须注明出处.）">
<meta property="og:locale" content="zh-CN">
<meta property="og:image" content="https://i.imgur.com/8D2Latt.png">
<meta property="og:image" content="https://i.imgur.com/IHF6343.png">
<meta property="og:image" content="https://latex.codecogs.com/gif.latex?H=-\sum_{i=1}^{n}p(x_{i})log_{2}p(x_{i})">
<meta property="og:image" content="https://i.imgur.com/ycAXsNF.png">
<meta property="og:image" content="https://i.imgur.com/xyMYqWt.png">
<meta property="og:image" content="https://i.imgur.com/A283sAm.png">
<meta property="og:updated_time" content="2018-10-19T07:40:24.242Z">
<meta name="twitter:card" content="summary">
<meta name="twitter:title" content="一步步教你轻松学决策树模型算法">
<meta name="twitter:description" content="摘要：决策树算法是一种基本的分类与回归方法，是最经常使用的算法之一。决策树模型呈树形结构，在分类问题中，表示基于特征对实例进行分类的过程。它可以认为是基于规则的集合。本文首先介绍决策树定义、工作原理、算法流程、优缺点等，然后结合案例进行分析。（本文原创，转载必须注明出处.）">
<meta name="twitter:image" content="https://i.imgur.com/8D2Latt.png">



  <link rel="alternate" href="/atom.xml" title="白宁超的官网" type="application/atom+xml" />




  <link rel="canonical" href="https://bainingchao.github.io/2018/09/19/一步步教你轻松学决策树算法/"/>



<script type="text/javascript" id="page.configurations">
  // Per-page overrides merged onto the global CONFIG object by the theme.
  // An empty string means this page sets no explicit sidebar preference.
  CONFIG.page = { sidebar: "" };
</script>

  <title>一步步教你轻松学决策树模型算法 | 白宁超的官网</title>
  









  <noscript>
  <style type="text/css">
    /* With JavaScript disabled the motion scripts never run, so reveal the
       elements they would otherwise animate into view. */
    .use-motion .motion-element,
    .use-motion .brand,
    .use-motion .menu-item,
    .sidebar-inner,
    .use-motion .post-block,
    .use-motion .pagination,
    .use-motion .comments,
    .use-motion .post-header,
    .use-motion .post-body,
    .use-motion .collection-title { opacity: initial; }

    .use-motion .logo,
    .use-motion .site-title,
    .use-motion .site-subtitle {
      opacity: initial;
      top: initial;
    }

    /* Fixed: the original used preprocessor-style nesting
       (.use-motion { .logo-line-before i { ... } }), which plain CSS in a
       <style> tag does not support in the browsers this theme targets, so
       these rules were silently dropped. Flattened selectors apply them. */
    .use-motion .logo-line-before i { left: initial; }
    .use-motion .logo-line-after i { right: initial; }
  </style>
</noscript><!-- hexo-inject:begin --><!-- hexo-inject:end -->

</head>

<body itemscope itemtype="http://schema.org/WebPage" lang="zh-CN">

  
  
    
  

  <!-- hexo-inject:begin --><!-- hexo-inject:end --><div class="container sidebar-position-left page-post-detail">
    <div class="headband"></div>

	<!-- <a href="https://github.com/bainingchao"><img style="position: absolute; top: 0; right: 0; border: 0;" src="https://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png" alt="Fork me on GitHub"></a> -->
	
    <header id="header" class="header" itemscope itemtype="http://schema.org/WPHeader">
      <div class="header-inner"><div class="site-brand-wrapper">
  <div class="site-meta ">
    

    <div class="custom-logo-site-title">
      <a href="/" class="brand" rel="start">
        <span class="logo-line-before"><i></i></span>
        <span class="site-title">白宁超的官网</span>
        <span class="logo-line-after"><i></i></span>
      </a>
    </div>
    
      
        <h1 class="site-subtitle" itemprop="description">专注人工智能领域研究</h1>
      
    
  </div>

  <div class="site-nav-toggle">
    <button aria-label="切换导航栏">
      <span class="btn-bar"></span>
      <span class="btn-bar"></span>
      <span class="btn-bar"></span>
    </button>
  </div>
</div>



<nav class="site-nav">
  
    <ul id="menu" class="menu">
      
        
        
        
          
          <li class="menu-item menu-item-首页">
    <a href="/" rel="section">
      <i class="menu-item-icon fa fa-fw fa-home"></i> <br />首页</a>
  </li>
        
        
        
          
          <li class="menu-item menu-item-标签">
    <a href="/tags/" rel="section">
      <i class="menu-item-icon fa fa-fw fa-tags"></i> <br />标签</a>
  </li>
        
        
        
          
          <li class="menu-item menu-item-分类">
    <a href="/categories/" rel="section">
      <i class="menu-item-icon fa fa-fw fa-th"></i> <br />分类</a>
  </li>
        
        
        
          
          <li class="menu-item menu-item-归档">
    <a href="/archives/" rel="section">
      <i class="menu-item-icon fa fa-fw fa-archive"></i> <br />归档</a>
  </li>
        
        
        
          
          <li class="menu-item menu-item-视频">
    <a href="/videos/" rel="section">
      <i class="menu-item-icon fa fa-fw fa-sitemap"></i> <br />视频</a>
  </li>
        
        
        
          
          <li class="menu-item menu-item-书籍">
    <a href="/books/" rel="section">
      <i class="menu-item-icon fa fa-fw fa-th"></i> <br />书籍</a>
  </li>
        
        
        
          
          <li class="menu-item menu-item-链接">
    <a href="/links/" rel="section">
      <i class="menu-item-icon fa fa-fw fa-question-circle"></i> <br />链接</a>
  </li>
        
        
        
          
          <li class="menu-item menu-item-关于">
    <a href="/about/" rel="section">
      <i class="menu-item-icon fa fa-fw fa-user"></i> <br />关于</a>
  </li>

      
      
        <li class="menu-item menu-item-search">
          
            <a href="javascript:;" class="popup-trigger">
          
            
              <i class="menu-item-icon fa fa-search fa-fw"></i> <br />搜索</a>
        </li>
      
    </ul>
  

  

  
    <div class="site-search">
      
  <div class="popup search-popup local-search-popup">
  <div class="local-search-header clearfix">
    <span class="search-icon">
      <i class="fa fa-search"></i>
    </span>
    <span class="popup-btn-close">
      <i class="fa fa-times-circle"></i>
    </span>
    <div class="local-search-input-wrapper">
      <input autocomplete="off"
             placeholder="搜索..." spellcheck="false"
             type="text" id="local-search-input">
    </div>
  </div>
  <div id="local-search-result"></div>
</div>



    </div>
  
</nav>



  



</div>
    </header>

    


    <main id="main" class="main">
      <div class="main-inner">
        <div class="content-wrap">
          
            

          
          <div id="content" class="content">
            

  <div id="posts" class="posts-expand">
    

  

  
  
  

  

  <article class="post post-type-normal" itemscope itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="https://bainingchao.github.io/2018/09/19/一步步教你轻松学决策树算法/">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="name" content="白宁超">
      <meta itemprop="description" content="本站主要研究深度学习、机器学习、自然语言处理等前沿技术。ML&amp;NLP交流群：436303759">
      <meta itemprop="image" content="/images/header.png">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="白宁超的官网">
    </span>

    
      <header class="post-header">

        
        
          <h2 class="post-title" itemprop="name headline">一步步教你轻松学决策树模型算法
              
            
          </h2>
        

        <div class="post-meta">
          <span class="post-time">

            
            
            

            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              

              
                
              

              <time title="创建时间：2018-09-19 13:19:08" itemprop="dateCreated datePublished" datetime="2018-09-19T13:19:08+08:00">2018-09-19</time>
            

            
              

              
                
                <span class="post-meta-divider">|</span>
                

                <span class="post-meta-item-icon">
                  <i class="fa fa-calendar-check-o"></i>
                </span>
                
                  <span class="post-meta-item-text">更新于</span>
                
                <time title="修改时间：2018-10-19 15:40:24" itemprop="dateModified" datetime="2018-10-19T15:40:24+08:00">2018-10-19</time>
              
            
          </span>

          
            <span class="post-category" >
            
              <span class="post-meta-divider">|</span>
            
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              
                <span class="post-meta-item-text">分类于</span>
              
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing"><a href="/categories/机器学习/" itemprop="url" rel="index"><span itemprop="name">机器学习</span></a></span>

                
                
                  ，
                
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing"><a href="/categories/机器学习/决策树/" itemprop="url" rel="index"><span itemprop="name">决策树</span></a></span>

                
                
              
            </span>
          

          
            
          

          
          

          
            <span class="post-meta-divider">|</span>
            <span class="post-meta-item-icon"
            >
            <i class="fa fa-eye"></i>
             阅读次数： 
            <span class="busuanzi-value" id="busuanzi_value_page_pv" ></span>
            </span>
          
		  

          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        <blockquote>
<p>摘要：决策树算法是一种基本的分类与回归方法，是最经常使用的算法之一。决策树模型呈树形结构，在分类问题中，表示基于特征对实例进行分类的过程。它可以认为是基于规则的集合。本文首先介绍决策树定义、工作原理、算法流程、优缺点等，然后结合案例进行分析。（本文原创，转载必须注明出处.）</p>
</blockquote>
<a id="more"></a>
<h1 id="理论介绍"><a href="#理论介绍" class="headerlink" title="理论介绍"></a>理论介绍</h1><h2 id="什么是决策树"><a href="#什么是决策树" class="headerlink" title="什么是决策树"></a>什么是决策树</h2><ul>
<li><p>维基百科：决策树（Decision Tree）是一个预测模型；他代表的是对象属性与对象值之间的一种映射关系。树中每个节点表示某个对象，而每个分叉路径则代表某个可能的属性值，而每个叶节点则对应从根节点到该叶节点所经历的路径所表示的对象的值。数据挖掘中决策树是一种经常要用到的技术，可以用于分析数据，同样也可以用来作预测。从数据产生决策树的机器学习技术叫做决策树学习,通俗说就是决策树。</p>
</li>
<li><p>分类决策树模型是一种描述对实例进行分类的树形结构。决策树由结点（node）和有向边（directed edge）组成。结点有两种类型：内部结点（internal node）和叶结点（leaf node）。内部结点表示一个特征或属性(features)，叶结点表示一个类(labels)。</p>
</li>
</ul>
<p>用决策树对需要测试的实例进行分类：从根节点开始，对实例的某一特征进行测试，根据测试结果，将实例分配到其子结点；这时，每一个子结点对应着该特征的一个取值。如此递归地对实例进行测试并分配，直至达到叶结点。最后将实例分配到叶结点的类中。</p>
<h2 id="什么是信息熵和信息增益"><a href="#什么是信息熵和信息增益" class="headerlink" title="什么是信息熵和信息增益"></a>什么是信息熵和信息增益</h2><ul>
<li><p>熵（entropy）： 熵指的是体系的混乱的程度，在不同的学科中也有引申出的更为具体的定义，是各领域十分重要的参量。</p>
</li>
<li><p>信息论（information theory）中的熵（香农熵）： 是一种信息的度量方式，表示信息的混乱程度，也就是说：信息越有序，信息熵越低。例如：火柴有序地放在火柴盒里时，熵值很低；相反，火柴散乱地堆放时，熵值很高。</p>
</li>
<li><p>信息增益（information gain）： 在划分数据集前后信息发生的变化称为信息增益，信息增益越大，确定性越强。</p>
</li>
</ul>
<h2 id="决策树工作原理"><a href="#决策树工作原理" class="headerlink" title="决策树工作原理"></a>决策树工作原理</h2><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br></pre></td><td class="code"><pre><span class="line"><span class="string">'''</span></span><br><span class="line"><span class="string">决策树工作原理：基于迭代的思想。</span></span><br><span class="line"><span class="string">'''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">createBranch</span><span class="params">()</span>:</span></span><br><span class="line">    检测数据集中的所有数据的分类标签是否相同:</span><br><span class="line">        If so <span class="keyword">return</span> 类标签</span><br><span class="line">        Else:</span><br><span class="line">            寻找划分数据集的最好特征（划分之后信息熵最小，也就是信息增益最大的特征）</span><br><span class="line">            划分数据集</span><br><span class="line">            创建分支节点</span><br><span class="line">                <span class="keyword">for</span> 每个划分的子集</span><br><span class="line">                    调用函数 createBranch （创建分支的函数）并增加返回结果到分支节点中</span><br><span class="line">            <span class="keyword">return</span> 分支节点</span><br></pre></td></tr></table></figure>
<h2 id="决策树算法流程"><a href="#决策树算法流程" class="headerlink" title="决策树算法流程"></a>决策树算法流程</h2><pre><code>收集数据：可以使用任何方法。
准备数据：树构造算法 (这里使用的是ID3算法，只适用于标称型数据，这就是为什么数值型数据必须离散化。 还有其他的树构造算法，比如CART)
分析数据：可以使用任何方法，构造树完成之后，我们应该检查图形是否符合预期。
训练算法：构造树的数据结构。
测试算法：使用训练好的树计算错误率。
使用算法：此步骤可以适用于任何监督学习任务，而使用决策树可以更好地理解数据的内在含义。
</code></pre><h2 id="决策树优缺点"><a href="#决策树优缺点" class="headerlink" title="决策树优缺点"></a>决策树优缺点</h2><p>相对于其他数据挖掘算法，决策树在以下几个方面拥有优势：</p>
<pre><code>1 决策树易于理解和实现.人们在通过解释后都有能力去理解决策树所表达的意义。
2 对于决策树，数据的准备往往是简单或者是不必要的.其他的技术往往要求先把数据一般化，比如去掉多余的或者空白的属性。
3 能够同时处理数据型和常规型属性。其他的技术往往要求数据属性的单一。
4 是一个白盒模型。如果给定一个观察的模型，那么根据所产生的决策树很容易推出相应的逻辑表达式。
5 易于通过静态测试来对模型进行评测。表示有可能测量该模型的可信度。
6 在相对短的时间内能够对大型数据源做出可行且效果良好的结果。
7 计算复杂度不高，输出结果易于理解，数据有缺失也能跑，可以处理不相关特征。
</code></pre><p>缺点：</p>
<pre><code>1 容易过拟合。
2 对于那些各类别样本数量不一致的数据，在决策树当中信息增益的结果偏向于那些具有更多数值的特征。
</code></pre><p>适用数据类型：数值型和标称型。</p>
<pre><code>1 数值型：数值型目标变量则可以从无限的数值集合中取值，如0.100，42.001等 (数值型目标变量主要用于回归分析)
2 标称型：标称型目标变量的结果只在有限目标集中取值，如真与假(标称型目标变量主要用于分类)
</code></pre><hr>
<h1 id="案例描述：加深决策树理解"><a href="#案例描述：加深决策树理解" class="headerlink" title="案例描述：加深决策树理解"></a>案例描述：加深决策树理解</h1><h2 id="案例描述"><a href="#案例描述" class="headerlink" title="案例描述"></a>案例描述</h2><p>小王是一家著名高尔夫俱乐部的经理。但是他被雇员数量问题搞得心情十分不好。某些天好像所有人都来玩高尔夫，以至于所有员工都忙的团团转还是应付不过来，而有些天不知道什么原因却一个人也不来，俱乐部为雇员数量浪费了不少资金。小王的目的是通过下周天气预报寻找什么时候人们会打高尔夫，以适时调整雇员数量。因此首先他必须了解人们决定是否打球的原因。</p>
<h2 id="数据采集"><a href="#数据采集" class="headerlink" title="数据采集"></a>数据采集</h2><p>在2周时间内我们得到以下记录：</p>
<p>天气状况有晴，云和雨；气温用华氏温度表示；相对湿度用百分比；还有有无风。当然还有顾客是不是在这些日子光顾俱乐部。最终他得到了14行5列的数据表格。</p>
<p><img src="https://i.imgur.com/8D2Latt.png" alt=""></p>
<h2 id="构建决策树"><a href="#构建决策树" class="headerlink" title="构建决策树"></a>构建决策树</h2><p>决策树模型就被建起来用于解决问题。 </p>
<p><img src="https://i.imgur.com/IHF6343.png" alt=""></p>
<h2 id="结果分析"><a href="#结果分析" class="headerlink" title="结果分析"></a>结果分析</h2><p>决策树是一个有向无环图。根结点代表所有数据。分类树算法可以通过变量outlook，找出最好地解释非独立变量play（打高尔夫的人）的方法。变量outlook的范畴被划分为以下三个组：晴天，多云天和雨天。</p>
<p>我们得出第一个结论：如果天气是多云，人们总是选择玩高尔夫，而只有少数很着迷的甚至在雨天也会玩。</p>
<p>接下来我们把晴天组的分为两部分，我们发现顾客不喜欢湿度高于70%的天气。最终我们还发现，如果雨天还有风的话，就不会有人打了。</p>
<p>这就通过分类树给出了一个解决方案。小王（老板）在晴天，潮湿的天气或者刮风的雨天解雇了大部分员工，因为这种天气不会有人打高尔夫。而其他的天气会有很多人打高尔夫，因此可以雇用一些临时员工来工作。</p>
<h1 id="决策树算法实现与分析"><a href="#决策树算法实现与分析" class="headerlink" title="决策树算法实现与分析"></a>决策树算法实现与分析</h1><h2 id="案例-判定鱼类和非鱼类"><a href="#案例-判定鱼类和非鱼类" class="headerlink" title="案例: 判定鱼类和非鱼类"></a>案例: 判定鱼类和非鱼类</h2><blockquote>
<p>案例需求描述</p>
</blockquote>
<p>我们采集海洋生物数据信息，选择其中5条如下表所示，从诸多特征中选择2个最主要特征，以及判定是否属于鱼类（此处我们选择二分类法即只考虑鱼类和非鱼类）。根据这些信息如何创建一个决策树进行分类并可视化展示？</p>
<blockquote>
<p>收集数据</p>
</blockquote>
<p>部分数据采集信息</p>
<div class="table-container">
<table>
<thead>
<tr>
<th style="text-align:center">序号</th>
<th style="text-align:center">不浮出水面是否可以生存</th>
<th style="text-align:center">是否有脚蹼</th>
<th style="text-align:center">属于鱼类</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align:center">1</td>
<td style="text-align:center">是</td>
<td style="text-align:center">是</td>
<td style="text-align:center">是</td>
</tr>
<tr>
<td style="text-align:center">2</td>
<td style="text-align:center">是</td>
<td style="text-align:center">是</td>
<td style="text-align:center">是</td>
</tr>
<tr>
<td style="text-align:center">3</td>
<td style="text-align:center">是</td>
<td style="text-align:center">否</td>
<td style="text-align:center">否</td>
</tr>
<tr>
<td style="text-align:center">4</td>
<td style="text-align:center">否</td>
<td style="text-align:center">是</td>
<td style="text-align:center">否</td>
</tr>
<tr>
<td style="text-align:center">5</td>
<td style="text-align:center">否</td>
<td style="text-align:center">是</td>
<td style="text-align:center">否</td>
</tr>
</tbody>
</table>
</div>
<p>我们将自然语言数据转化为计算机输入数据，代码实现如下：<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="string">'''创建数据集，返回数据集和标签'''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">createDataSet</span><span class="params">()</span>:</span></span><br><span class="line">    dataSet = [[<span class="number">1</span>, <span class="number">1</span>, <span class="string">'yes'</span>],</span><br><span class="line">               [<span class="number">1</span>, <span class="number">1</span>, <span class="string">'yes'</span>],</span><br><span class="line">               [<span class="number">1</span>, <span class="number">0</span>, <span class="string">'no'</span>],</span><br><span class="line">               [<span class="number">0</span>, <span class="number">1</span>, <span class="string">'no'</span>],</span><br><span class="line">               [<span class="number">0</span>, <span class="number">1</span>, <span class="string">'no'</span>]]</span><br><span class="line">    labels = [<span class="string">'no surfacing'</span>, <span class="string">'flippers'</span>]</span><br><span class="line">    <span class="keyword">return</span> dataSet, labels</span><br></pre></td></tr></table></figure></p>
<p>运行查看数据集的特征向量和分类标签：    </p>
<pre><code># 1 打印数据集和标签
dataset,label=createDataSet()
print(dataset)
print(label)
</code></pre><p>运行结果：</p>
<pre><code>[[1, 1, &#39;yes&#39;], [1, 1, &#39;yes&#39;], [1, 0, &#39;no&#39;], [0, 1, &#39;no&#39;], [0, 1, &#39;no&#39;]]
[&#39;no surfacing&#39;, &#39;flippers&#39;]
</code></pre><blockquote>
<p>准备数据</p>
</blockquote>
<p>由于我们输入的数据已经是数据预处理后的数据，这一步不需要进行。</p>
<h2 id="分析数据"><a href="#分析数据" class="headerlink" title="分析数据"></a>分析数据</h2><p>我们得到数据之后，到底是按照第一个特征即(不浮出水面是否可以生存)还是第二个特征即（是否有脚蹼）进行数据划分呢？这里面就需要找到一种量化的方法判断特征的选择。在介绍具体数据划分方法之前，我们首先明白划分数据集的最大原则是：<strong>将无序的数据变得更加有序</strong></p>
<pre><code>1948 年，香农引入信息熵，将其定义为离散随机事件的出现概率。一个系统越有序，信息熵就越低；反之，一个系统越混乱，信息熵就越高。所以说，信息熵可以被认为是系统有序化程度的一个度量。
</code></pre><p>这里就要用的信息熵的概念，熵越高表示混合数据越多，度量数据集无序程度。我们看下信息熵的数学描述（具体请自行查找熵相关知识）：</p>
<p><a href="https://www.codecogs.com/eqnedit.php?latex=H=-\sum_{i=1}^{n}p(x_{i})log_{2}p(x_{i})" target="_blank" rel="noopener"><img src="https://latex.codecogs.com/gif.latex?H=-\sum_{i=1}^{n}p(x_{i})log_{2}p(x_{i})" alt="H=-\sum_{i=1}^{n}p(x_{i})log_{2}p(x_{i})" title="H=-\sum_{i=1}^{n}p(x_{i})log_{2}p(x_{i})"></a></p>
<blockquote>
<p>计算数据集的香农熵(信息期望值)</p>
</blockquote>
<p>根据公式比较容易理解的实现方法1如下：<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br></pre></td><td class="code"><pre><span class="line"><span class="string">'''计算数据集的香农熵(信息期望值):熵越高表示混合数据越多，度量数据集无序程度'''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">calcShannonEnt</span><span class="params">(dataSet)</span>:</span></span><br><span class="line">    numEntries = len(dataSet) <span class="comment"># 计算数据集中实例总数</span></span><br><span class="line">    labelCounts = &#123;&#125; <span class="comment"># 创建字典，计算分类标签label出现的次数</span></span><br><span class="line">    <span class="keyword">for</span> featVec <span class="keyword">in</span> dataSet:</span><br><span class="line">        currentLabel = featVec[<span class="number">-1</span>] <span class="comment"># 记录当前实例的标签</span></span><br><span class="line">        <span class="keyword">if</span> currentLabel <span class="keyword">not</span> <span class="keyword">in</span> labelCounts.keys():<span class="comment"># 为所有可能的分类创建字典</span></span><br><span class="line">            labelCounts[currentLabel] = <span class="number">0</span></span><br><span class="line">        labelCounts[currentLabel] += <span class="number">1</span></span><br><span class="line">        <span class="comment"># print(featVec, labelCounts) # 打印特征向量和字典的键值对</span></span><br><span class="line"></span><br><span class="line">    
<span class="comment"># 对于label标签的占比，求出label标签的香农熵</span></span><br><span class="line">    shannonEnt = <span class="number">0.0</span></span><br><span class="line">    <span class="keyword">for</span> key <span class="keyword">in</span> labelCounts:</span><br><span class="line">        prob = float(labelCounts[key])/numEntries <span class="comment"># 计算类别出现的概率。</span></span><br><span class="line">        shannonEnt -= prob * log(prob, <span class="number">2</span>) <span class="comment"># 计算香农熵，以 2 为底求对数</span></span><br><span class="line">    print(Decimal(shannonEnt).quantize(Decimal(<span class="string">'0.00000'</span>)))</span><br><span class="line">    <span class="keyword">return</span> shannonEnt</span><br></pre></td></tr></table></figure></p>
<p>更高级的实现方法2如下：<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="string">'''计算数据集的香农熵(信息期望值):熵越高表示混合数据越多，度量数据集无序程度'''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">calcShannonEnt</span><span class="params">(dataSet)</span>:</span></span><br><span class="line">    <span class="comment"># 需要对 list 中的大量计数时,可以直接使用Counter,不用新建字典来计数</span></span><br><span class="line">    label_count = Counter(data[<span class="number">-1</span>] <span class="keyword">for</span> data <span class="keyword">in</span> dataSet) <span class="comment"># 统计标签出现的次数</span></span><br><span class="line">    probs = [p[<span class="number">1</span>] / len(dataSet) <span class="keyword">for</span> p <span class="keyword">in</span> label_count.items()] <span class="comment"># 计算概率</span></span><br><span class="line">    shannonEnt = sum([-p * log(p, <span class="number">2</span>) <span class="keyword">for</span> p <span class="keyword">in</span> probs]) <span class="comment"># 计算香农熵</span></span><br><span class="line">    print(Decimal(shannonEnt).quantize(Decimal(<span class="string">'0.00000'</span>)))</span><br><span class="line">    <span class="keyword">return</span> shannonEnt</span><br></pre></td></tr></table></figure></p>
<p>调用运行如下：</p>
<pre><code># 2 计算数据集的熵
calcShannonEnt(dataset)
</code></pre><blockquote>
<p>按照给定的特征划分数据集</p>
</blockquote>
<p>我们根据信息熵度量出来的特征，进行数据集划分方法1如下：<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br></pre></td><td class="code"><pre><span class="line"><span class="string">'''划分数据集:按照特征划分'''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">splitDataSet</span><span class="params">(dataSet, index, value)</span>:</span></span><br><span class="line">    retDataSet = []</span><br><span class="line">    <span class="keyword">for</span> featVec <span class="keyword">in</span> dataSet:</span><br><span class="line">        <span class="keyword">if</span> featVec[index] == value:<span class="comment"># 判断index列的值是否为value</span></span><br><span class="line">            reducedFeatVec = featVec[:index] <span class="comment"># [:index]表示取前index个特征</span></span><br><span class="line">            reducedFeatVec.extend(featVec[index+<span class="number">1</span>:]) <span class="comment"># 取接下来的数据</span></span><br><span class="line">            retDataSet.append(reducedFeatVec)</span><br><span class="line">    print(retDataSet)</span><br><span class="line">    <span class="keyword">return</span> retDataSet</span><br><span class="line">```   </span><br><span class="line">我们根据信息熵度量出来的特征，进行数据集划分方法<span class="number">2</span>如下：</span><br><span class="line">```python   </span><br><span class="line"><span class="string">'''划分数据集:按照特征划分'''</span></span><br><span 
class="line"><span class="function"><span class="keyword">def</span> <span class="title">splitDataSet</span><span class="params">(dataSet, index, value)</span>:</span></span><br><span class="line">    retDataSet = [data <span class="keyword">for</span> data <span class="keyword">in</span> dataSet <span class="keyword">for</span> i, v <span class="keyword">in</span> enumerate(data) <span class="keyword">if</span> i == index <span class="keyword">and</span> v == value]</span><br><span class="line">    print(retDataSet)</span><br><span class="line">    <span class="keyword">return</span> retDataSet</span><br></pre></td></tr></table></figure></p>
<p>指定特征的数据集划分方法调用</p>
<pre><code>#3 划分数据集
splitDataSet(dataset,0,1)
</code></pre><p>运行结果如下：</p>
<pre><code>[[1, 1, &#39;yes&#39;], [1, 1, &#39;yes&#39;], [1, 0, &#39;no&#39;]]
</code></pre><blockquote>
<p>选择最好的数据集划分方式</p>
</blockquote>
<p>选择最好的数据集划分方式：特征选择，划分数据集、计算最好的划分数据集特征，方法1如下：<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br></pre></td><td class="code"><pre><span class="line"><span class="string">'''</span></span><br><span class="line"><span class="string">注意：一是数据集列表元素具备相同数据长度，二是最后一列是标签列</span></span><br><span class="line"><span class="string">'''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">chooseBestFeatureToSplit</span><span class="params">(dataSet)</span>:</span></span><br><span class="line">    numFeatures = len(dataSet[<span class="number">0</span>]) - <span class="number">1</span> <span class="comment"># 特征总个数, 最后一列是标签</span></span><br><span class="line">    baseEntropy = calcShannonEnt(dataSet) <span class="comment"># 计算数据集的信息熵</span></span><br><span class="line">    bestInfoGain, bestFeature = <span class="number">0.0</span>, <span class="number">-1</span> <span class="comment"># 最优的信息增益值, 和最优的Featurn编号</span></span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> range(numFeatures):</span><br><span class="line">        featList = [example[i] <span class="keyword">for</span> example <span class="keyword">in</span> dataSet] <span 
class="comment"># 获取各实例第i+1个特征</span></span><br><span class="line">        uniqueVals = set(featList) <span class="comment"># 获取去重后的集合</span></span><br><span class="line">        newEntropy = <span class="number">0.0</span>  <span class="comment"># 创建一个新的信息熵</span></span><br><span class="line">        <span class="keyword">for</span> value <span class="keyword">in</span> uniqueVals:</span><br><span class="line">            subDataSet = splitDataSet(dataSet, i, value)</span><br><span class="line">            prob = len(subDataSet)/float(len(dataSet))</span><br><span class="line">            newEntropy += prob * calcShannonEnt(subDataSet)</span><br><span class="line">        <span class="comment"># 比较所有特征中的信息增益，返回最好特征划分的索引值。</span></span><br><span class="line">        infoGain = baseEntropy - newEntropy</span><br><span class="line">        print(<span class="string">'infoGain='</span>, infoGain, <span class="string">'bestFeature='</span>, i, baseEntropy, newEntropy)</span><br><span class="line">        <span class="keyword">if</span> (infoGain &gt; bestInfoGain):</span><br><span class="line">            bestInfoGain = infoGain</span><br><span class="line">            bestFeature = i</span><br><span class="line">    <span class="comment"># print(bestFeature)</span></span><br><span class="line">    <span class="keyword">return</span> bestFeature</span><br></pre></td></tr></table></figure></p>
<p>选择最好的数据集划分方式：特征选择，划分数据集、计算最好的划分数据集特征，方法2如下：<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br></pre></td><td class="code"><pre><span class="line"><span class="string">'''</span></span><br><span class="line"><span class="string">注意：一是数据集列表元素具备相同数据长度，二是最后一列是标签列</span></span><br><span class="line"><span class="string">'''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">chooseBestFeatureToSplit</span><span class="params">(dataSet)</span>:</span></span><br><span class="line">    base_entropy = calcShannonEnt(dataSet) <span class="comment"># 计算初始香农熵</span></span><br><span class="line">    best_info_gain = <span class="number">0</span></span><br><span class="line">    best_feature = <span class="number">-1</span></span><br><span class="line">    <span class="comment"># 遍历每一个特征</span></span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> range(len(dataSet[<span class="number">0</span>]) - <span class="number">1</span>):</span><br><span class="line">        <span class="comment"># 对当前特征进行统计</span></span><br><span class="line">        feature_count = Counter([data[i] <span class="keyword">for</span> data <span class="keyword">in</span> dataSet])</span><br><span class="line">        <span 
class="comment"># 计算分割后的香农熵</span></span><br><span class="line">        new_entropy = sum(feature[<span class="number">1</span>] / float(len(dataSet)) * calcShannonEnt(splitDataSet(dataSet, i, feature[<span class="number">0</span>])) <span class="keyword">for</span> feature <span class="keyword">in</span> feature_count.items())</span><br><span class="line">        <span class="comment"># 更新值</span></span><br><span class="line">        info_gain = base_entropy - new_entropy</span><br><span class="line">        <span class="comment"># print('No. &#123;0&#125; feature info gain is &#123;1:.3f&#125;'.format(i, info_gain))</span></span><br><span class="line">        <span class="keyword">if</span> info_gain &gt; best_info_gain:</span><br><span class="line">            best_info_gain = info_gain</span><br><span class="line">            best_feature = i</span><br><span class="line">    <span class="comment"># print(best_feature)</span></span><br><span class="line">    <span class="keyword">return</span> best_feature</span><br></pre></td></tr></table></figure></p>
<p>选择最好的数据集划分方法调用</p>
<pre><code># 4 选择最好的数据集划分方式
chooseBestFeatureToSplit(dataset)
</code></pre><p>运行结果如下：</p>
<pre><code>infoGain= 0.4199730940219749 bestFeature= 0 0.9709505944546686 0.5509775004326937
infoGain= 0.17095059445466854 bestFeature= 1 0.9709505944546686 0.8
选择：0
</code></pre><h2 id="训练算法：构造树的数据结构"><a href="#训练算法：构造树的数据结构" class="headerlink" title="训练算法：构造树的数据结构"></a>训练算法：构造树的数据结构</h2><p>创建树的函数代码如下：<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br></pre></td><td class="code"><pre><span class="line"><span class="string">'''创建决策树'''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">createTree</span><span class="params">(dataSet, labels)</span>:</span></span><br><span class="line">    classList = [example[<span class="number">-1</span>] <span class="keyword">for</span> example <span class="keyword">in</span> dataSet]</span><br><span class="line">    <span class="comment"># 如果数据集的最后一列的第一个值出现的次数=整个集合的数量，也就说只有一个类别，就只直接返回结果就行</span></span><br><span class="line">    <span class="comment"># 第一个停止条件：所有的类标签完全相同，则直接返回该类标签。</span></span><br><span class="line">    <span class="comment"># count() 函数是统计括号中的值在list中出现的次数</span></span><br><span class="line">    
<span class="keyword">if</span> classList.count(classList[<span class="number">0</span>]) == len(classList):</span><br><span class="line">        <span class="keyword">return</span> classList[<span class="number">0</span>]</span><br><span class="line">    <span class="comment"># 如果数据集只有1列，那么最初出现label次数最多的一类，作为结果</span></span><br><span class="line">    <span class="comment"># 第二个停止条件：使用完了所有特征，仍然不能将数据集划分成仅包含唯一类别的分组。</span></span><br><span class="line">    <span class="keyword">if</span> len(dataSet[<span class="number">0</span>]) == <span class="number">1</span>:</span><br><span class="line">        <span class="keyword">return</span> majorityCnt(classList)</span><br><span class="line"></span><br><span class="line">    <span class="comment"># 选择最优的列，得到最优列对应的label含义</span></span><br><span class="line">    bestFeat = chooseBestFeatureToSplit(dataSet)</span><br><span class="line">    <span class="comment"># 获取label的名称</span></span><br><span class="line">    bestFeatLabel = labels[bestFeat]</span><br><span class="line">    <span class="comment"># 初始化myTree</span></span><br><span class="line">    myTree = &#123;bestFeatLabel: &#123;&#125;&#125;</span><br><span class="line">    <span class="comment"># 所以这行代码导致函数外的同名变量被删除了元素，造成例句无法执行，提示'no surfacing' is not in list</span></span><br><span class="line">    <span class="comment"># del(labels[bestFeat])</span></span><br><span class="line">    <span class="comment"># 取出最优列，然后它的branch做分类</span></span><br><span class="line">    featValues = [example[bestFeat] <span class="keyword">for</span> example <span class="keyword">in</span> dataSet]</span><br><span class="line">    uniqueVals = set(featValues)</span><br><span class="line">    <span class="keyword">for</span> value <span class="keyword">in</span> uniqueVals:</span><br><span class="line">        <span class="comment"># 求出剩余的标签label</span></span><br><span class="line">        subLabels = labels[:]</span><br><span class="line">        <span class="comment"># 
遍历当前选择特征包含的所有属性值，在每个数据集划分上递归调用函数createTree()</span></span><br><span class="line">        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)</span><br><span class="line">        <span class="comment"># print('myTree', value, myTree)</span></span><br><span class="line">    print(myTree)</span><br><span class="line">    <span class="keyword">return</span> myTree</span><br></pre></td></tr></table></figure></p>
<p>其中多数表决方法决定叶子节点的分类实现如下：<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br></pre></td><td class="code"><pre><span class="line"><span class="string">'''多数表决方法决定叶子节点的分类：选择出现次数最多的一个结果'''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">majorityCnt</span><span class="params">(classList)</span>:</span></span><br><span class="line">    <span class="comment"># -----------多数表决实现的方式一--------------</span></span><br><span class="line">    <span class="comment"># classCount = &#123;&#125;   # 标签字典，用于统计类别频率</span></span><br><span class="line">    <span class="comment"># for vote in classList: # classList标签的列表集合</span></span><br><span class="line">    <span class="comment">#     if vote not in classCount.keys():</span></span><br><span class="line">    <span class="comment">#         classCount[vote] = 0</span></span><br><span class="line">    <span class="comment">#     classCount[vote] += 1</span></span><br><span class="line">    <span class="comment"># # 取出结果（yes/no），即出现次数最多的结果</span></span><br><span class="line">    <span class="comment"># sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)</span></span><br><span class="line">    <span class="comment"># print('sortedClassCount:', sortedClassCount)</span></span><br><span class="line">    <span class="comment"># return 
sortedClassCount[0][0]</span></span><br><span class="line"></span><br><span class="line"></span><br><span class="line">    <span class="comment"># -----------多数表决实现的方式二-----------------</span></span><br><span class="line">    major_label = Counter(classList).most_common(<span class="number">1</span>)[<span class="number">0</span>]</span><br><span class="line">    print(<span class="string">'sortedClassCount:'</span>, major_label[<span class="number">0</span>])</span><br><span class="line">    <span class="keyword">return</span> major_label[<span class="number">0</span>]</span><br></pre></td></tr></table></figure></p>
<p>调用方法：</p>
<pre><code># 6创建决策树
createTree(dataset, label)
</code></pre><p>运行结果：</p>
<pre><code>{&#39;no surfacing&#39;: {0: &#39;no&#39;, 1: {&#39;flippers&#39;: {0: &#39;no&#39;, 1: &#39;yes&#39;}}}}
</code></pre><p>结果分析：<br>此时，每次生成决策树数据都需要大量的计算，并且耗时，最好是每次直接调用生成结果。这里就需要使用Python模块pickle序列化对象，其存储决策树读取决策树代码实现如下：<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre></td><td class="code"><pre><span class="line"><span class="string">'''使用pickle模块存储决策树'''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">storeTree</span><span class="params">(inputTree, filename)</span>:</span></span><br><span class="line">    <span class="keyword">import</span> pickle</span><br><span class="line">    <span class="comment"># -------------- 第一种方法 --------------</span></span><br><span class="line">    fw = open(filename, <span class="string">'wb'</span>)</span><br><span class="line">    pickle.dump(inputTree, fw)</span><br><span class="line">    fw.close()</span><br><span class="line"></span><br><span class="line">    <span class="comment"># -------------- 第二种方法 --------------</span></span><br><span class="line">    <span class="keyword">with</span> open(filename, <span class="string">'wb'</span>) <span class="keyword">as</span> fw:</span><br><span class="line">        pickle.dump(inputTree, fw)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">grabTree</span><span class="params">(filename)</span>:</span></span><br><span class="line">    <span 
class="keyword">import</span> pickle</span><br><span class="line">    fr = open(filename,<span class="string">'rb'</span>)</span><br><span class="line">    <span class="keyword">return</span> pickle.load(fr)</span><br></pre></td></tr></table></figure></p>
<h2 id="测试算法：使用决策树执行分类"><a href="#测试算法：使用决策树执行分类" class="headerlink" title="测试算法：使用决策树执行分类"></a>测试算法：使用决策树执行分类</h2><p>用决策树进行鱼类属于分类实现如下：<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br></pre></td><td class="code"><pre><span class="line"><span class="string">'''用决策树分类函数'''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">classify</span><span class="params">(inputTree, featLabels, testVec)</span>:</span></span><br><span class="line">    firstStr = list(inputTree.keys())[<span class="number">0</span>] <span class="comment"># 获取tree的根节点对于的key值</span></span><br><span class="line">    secondDict = inputTree[firstStr]  <span class="comment"># 通过key得到根节点对应的value</span></span><br><span class="line">    <span class="comment"># 判断根节点名称获取根节点在label中的先后顺序，这样就知道输入的testVec怎么开始对照树来做分类</span></span><br><span class="line">    featIndex = featLabels.index(firstStr)</span><br><span class="line">    <span class="keyword">for</span> key <span class="keyword">in</span> secondDict.keys():</span><br><span class="line">        <span class="keyword">if</span> testVec[featIndex] == key:</span><br><span class="line">            <span class="keyword">if</span> type(secondDict[key]).__name__ == <span class="string">'dict'</span>:</span><br><span class="line">                classLabel = classify(secondDict[key], featLabels, testVec)</span><br><span class="line">            <span class="keyword">else</span>:</span><br><span class="line">                classLabel = 
secondDict[key]</span><br><span class="line">    print(classLabel)</span><br><span class="line">    <span class="keyword">return</span> classLabel</span><br></pre></td></tr></table></figure></p>
<p>调用方法：</p>
<pre><code># 7 用决策树分类函数
myTree = treePlotter.retrieveTree(0)
# print(myTree)
classify(myTree,label,[1,0])
</code></pre><p>运行结果：</p>
<pre><code>分类结果：no surfacing
</code></pre><h2 id="决策树分类器实现"><a href="#决策树分类器实现" class="headerlink" title="决策树分类器实现"></a>决策树分类器实现</h2><p>使用算法此步骤可以适用于任何监督学习任务，而使用决策树可以更好地理解数据的内在含义。<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br></pre></td><td class="code"><pre><span class="line"><span class="string">'''决策树判断是否是鱼'''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">fishTest</span><span class="params">()</span>:</span></span><br><span class="line">    <span class="comment"># 1.创建数据和结果标签</span></span><br><span class="line">    myDat, labels = createDataSet()</span><br><span class="line"></span><br><span class="line">    <span class="comment"># 计算label分类标签的香农熵</span></span><br><span class="line">    calcShannonEnt(myDat)</span><br><span class="line"></span><br><span class="line">    <span class="comment"># 求第0列 为 1/0的列的数据集【排除第0列】</span></span><br><span class="line">    print(<span class="string">'1---'</span>, splitDataSet(myDat, <span class="number">0</span>, <span class="number">1</span>))</span><br><span class="line">    print(<span class="string">'0---'</span>, splitDataSet(myDat, <span class="number">0</span>, <span class="number">0</span>))</span><br><span class="line"></span><br><span 
class="line">    <span class="comment"># 计算最好的信息增益的列</span></span><br><span class="line">    print(chooseBestFeatureToSplit(myDat))</span><br><span class="line"></span><br><span class="line">    <span class="keyword">import</span> copy</span><br><span class="line">    myTree = createTree(myDat, copy.deepcopy(labels))</span><br><span class="line">    print(myTree)</span><br><span class="line">    <span class="comment"># [1, 1]表示要取的分支上的节点位置，对应的结果值</span></span><br><span class="line">    print(classify(myTree, labels, [<span class="number">1</span>, <span class="number">1</span>]))</span><br><span class="line"></span><br><span class="line">    <span class="comment"># 画图可视化展现</span></span><br><span class="line">    treePlotter.createPlot(myTree)</span><br></pre></td></tr></table></figure></p>
<p>调用决策树分类方法：</p>
<pre><code># 9 决策树判断是否是鱼
fishTest()
</code></pre><p>运行结果如下：</p>
<pre><code>1--- [[1, 1, &#39;yes&#39;], [1, 1, &#39;yes&#39;], [1, 0, &#39;no&#39;]]
0--- [[0, 1, &#39;no&#39;], [0, 1, &#39;no&#39;]]
{&#39;no surfacing&#39;: {0: &#39;no&#39;, 1: {&#39;flippers&#39;: {0: &#39;no&#39;, 1: &#39;yes&#39;}}}}
yes
</code></pre><blockquote>
<p>可视化结果</p>
</blockquote>
<p><img src="https://i.imgur.com/ycAXsNF.png" alt=""></p>
<h1 id="决策树实际应用-预测隐形眼镜的测试代码"><a href="#决策树实际应用-预测隐形眼镜的测试代码" class="headerlink" title="决策树实际应用:预测隐形眼镜的测试代码"></a>决策树实际应用:预测隐形眼镜的测试代码</h1><h2 id="项目概述"><a href="#项目概述" class="headerlink" title="项目概述"></a>项目概述</h2><p>隐形眼镜类型包括硬材质、软材质以及不适合佩戴隐形眼镜。我们需要使用决策树预测患者需要佩戴的隐形眼镜类型。</p>
<h2 id="开发流程"><a href="#开发流程" class="headerlink" title="开发流程"></a>开发流程</h2><pre><code>收集数据: 提供的文本文件。
解析数据: 解析 tab 键分隔的数据行
分析数据: 快速检查数据，确保正确地解析数据内容，使用 createPlot() 函数绘制最终的树形图。
训练算法: 使用 createTree() 函数。
测试算法: 编写测试函数验证决策树可以正确分类给定的数据实例。
使用算法: 存储树的数据结构，以便下次使用时无需重新构造树。
收集数据：提供的文本文件
</code></pre><h2 id="数据读取"><a href="#数据读取" class="headerlink" title="数据读取"></a>数据读取</h2><blockquote>
<p>文本文件数据格式如下：</p>
</blockquote>
<pre><code>young    myope    no    reduced    no lenses
young    myope    no    normal    soft
young    myope    yes    reduced    no lenses
young    myope    yes    normal    hard
young    hyper    no    reduced    no lenses
young    hyper    no    normal    soft
young    hyper    yes    reduced    no lenses
young    hyper    yes    normal    hard
pre    myope    no    reduced    no lenses
pre    myope    no    normal    soft
pre    myope    yes    reduced    no lenses
pre    myope    yes    normal    hard
pre    hyper    no    reduced    no lenses
pre    hyper    no    normal    soft
pre    hyper    yes    reduced    no lenses
pre    hyper    yes    normal    no lenses
presbyopic    myope    no    reduced    no lenses
presbyopic    myope    no    normal    no lenses
presbyopic    myope    yes    reduced    no lenses
presbyopic    myope    yes    normal    hard
presbyopic    hyper    no    reduced    no lenses
presbyopic    hyper    no    normal    soft
presbyopic    hyper    yes    reduced    no lenses
presbyopic    hyper    yes    normal    no lenses
</code></pre><blockquote>
<p>代码实现: 编写测试函数验证决策树可以正确分类给定的数据实例。<br><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br></pre></td><td class="code"><pre><span class="line"><span class="string">'''预测隐形眼镜的测试代码'''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">ContactLensesTest</span><span class="params">()</span>:</span></span><br><span class="line">    <span class="comment"># 加载隐形眼镜相关的 文本文件 数据</span></span><br><span class="line">    fr = open(<span class="string">'lenses.txt'</span>)</span><br><span class="line">    <span class="comment"># 解析数据，获得 features 数据</span></span><br><span class="line">    lenses = [inst.strip().split(<span class="string">'    '</span>) <span class="keyword">for</span> inst <span class="keyword">in</span> fr.readlines()]</span><br><span class="line">    <span class="comment"># 得到数据的对应的 Labels</span></span><br><span class="line">    lensesLabels = [<span class="string">'age'</span>, <span class="string">'prescript'</span>, <span class="string">'astigmatic'</span>, <span class="string">'tearRate'</span>]</span><br><span class="line">    <span class="comment"># 使用上面的创建决策树的代码，构造预测隐形眼镜的决策树</span></span><br><span class="line">    lensesTree = createTree(lenses, lensesLabels)</span><br><span class="line">    print(lensesTree)</span><br><span class="line">    <span class="comment"># 画图可视化展现</span></span><br><span class="line">    treePlotter.createPlot(lensesTree)</span><br></pre></td></tr></table></figure></p>
</blockquote>
<h2 id="运行结果"><a href="#运行结果" class="headerlink" title="运行结果"></a>运行结果</h2><blockquote>
<p>调用方法</p>
</blockquote>
<pre><code># 10 预测隐形眼镜类型
ContactLensesTest()
</code></pre><blockquote>
<p>运行结果</p>
</blockquote>
<pre><code>{&#39;tearRate&#39;: {&#39;reduced&#39;: &#39;no lenses&#39;, &#39;normal&#39;: {&#39;astigmatic&#39;: {&#39;no&#39;: {&#39;age&#39;: {&#39;young&#39;: &#39;soft&#39;, &#39;pre&#39;: &#39;soft&#39;, &#39;presbyopic&#39;: {&#39;prescript&#39;: {&#39;myope&#39;: &#39;no lenses&#39;, &#39;hyper&#39;: &#39;soft&#39;}}}}, &#39;yes&#39;: {&#39;prescript&#39;: {&#39;myope&#39;: &#39;hard&#39;, &#39;hyper&#39;: {&#39;age&#39;: {&#39;young&#39;: &#39;hard&#39;, &#39;pre&#39;: &#39;no lenses&#39;, &#39;presbyopic&#39;: &#39;no lenses&#39;}}}}}}}}
</code></pre><blockquote>
<p>决策树可视化</p>
</blockquote>
<p><img src="https://i.imgur.com/xyMYqWt.png" alt=""></p>
<h1 id="完整代码下载"><a href="#完整代码下载" class="headerlink" title="完整代码下载"></a>完整代码下载</h1><blockquote>
<p>源码请进QQ群文件下载：</p>
</blockquote>
<p><img src="https://i.imgur.com/A283sAm.png" alt=""></p>
<h1 id="作者声明"><a href="#作者声明" class="headerlink" title="作者声明"></a>作者声明</h1><blockquote>
<p>本文版权归作者所有，旨在技术交流使用。未经作者同意禁止转载，转载后需在文章页面明显位置给出原文连接，否则相关责任自行承担。</p>
</blockquote>

      
    </div>

    

    
    
    

    
      <div>
        <div id="wechat_subscriber" style="display: block; padding: 10px 0; margin: 20px auto; width: 100%; text-align: center">
    <img id="wechat_subscriber_qcode" src="/uploads/wechat.png" alt="白宁超 wechat" style="width: 200px; max-width: 100%;"/>
    <div>扫一扫关注微信公众号，机器学习和自然语言处理，订阅号datathinks！</div>
</div>

      </div>
    

    
      <div>
        <div style="padding: 10px 0; margin: 20px auto; width: 90%; text-align: center;">
  <div></div>
  <button type="button" id="rewardButton" onclick="var qr = document.getElementById('QR'); if (qr.style.display === 'none') {qr.style.display='block';} else {qr.style.display='none'}">
    <span>打赏</span>
  </button>
  <div id="QR" style="display: none;">

    
      <div id="wechat" style="display: inline-block">
        <img id="wechat_qr" src="/images/wechatpay.jpg" alt="白宁超 微信支付"/>
        <p>微信支付</p>
      </div>
    

    
      <div id="alipay" style="display: inline-block">
        <img id="alipay_qr" src="/images/alipay.jpg" alt="白宁超 支付宝"/>
        <p>支付宝</p>
      </div>
    

    

  </div>
</div>

      </div>
    

    

    <footer class="post-footer">
      
        <div class="post-tags">
          
            <a href="/tags/Python/" rel="tag"><i class="fa fa-tag"></i> Python</a>
          
            <a href="/tags/机器学习算法/" rel="tag"><i class="fa fa-tag"></i> 机器学习算法</a>
          
            <a href="/tags/sklean/" rel="tag"><i class="fa fa-tag"></i> sklean</a>
          
            <a href="/tags/决策树/" rel="tag"><i class="fa fa-tag"></i> 决策树</a>
          
            <a href="/tags/文本聚类/" rel="tag"><i class="fa fa-tag"></i> 文本聚类</a>
          
        </div>
      

      
      
        <div class="post-widgets">
        

        

        
          
          <div class="social_share">
            
               <div>
                 
  <div class="bdsharebuttonbox">
    <a href="#" class="bds_tsina" data-cmd="tsina" title="分享到新浪微博"></a>
    <a href="#" class="bds_douban" data-cmd="douban" title="分享到豆瓣网"></a>
    <a href="#" class="bds_sqq" data-cmd="sqq" title="分享到QQ好友"></a>
    <a href="#" class="bds_qzone" data-cmd="qzone" title="分享到QQ空间"></a>
    <a href="#" class="bds_weixin" data-cmd="weixin" title="分享到微信"></a>
    <a href="#" class="bds_tieba" data-cmd="tieba" title="分享到百度贴吧"></a>
    <a href="#" class="bds_twi" data-cmd="twi" title="分享到Twitter"></a>
    <a href="#" class="bds_fbook" data-cmd="fbook" title="分享到Facebook"></a>
    <a href="#" class="bds_more" data-cmd="more"></a>
    <a class="bds_count" data-cmd="count"></a>
  </div>
  <script>
    // Configuration object for the Baidu Share (百度分享) widget; it is read
    // by share.js, which is injected by the loader script further below.
    window._bd_share_config = {
      "common": {
        "bdText": "",        // share text; empty — presumably falls back to page title (vendor default, TODO confirm)
        "bdMini": "2",
        "bdMiniList": false, // no custom list for the "more" mini panel
        "bdPic": ""          // share image URL; empty → none specified
      },
      "share": {
        "bdSize": "16",      // share-button icon size in px
        "bdStyle": "0"
      },
      "image": {
        // Targets offered when sharing an image on the page.
        "viewList": ["tsina", "douban", "sqq", "qzone", "weixin", "twi", "fbook"],
        "viewText": "分享到：",
        "viewSize": "16"
      }
    }
  </script>

<script>
  // Baidu Share loader (vendor-minified snippet, kept verbatim):
  // appends a <script> element loading share.js to <head> (falling back to
  // <body> if no <head> is found). `~(-new Date()/36e5)` yields an integer
  // that changes every hour (36e5 ms), acting as a cache-busting query value.
  with(document)0[(getElementsByTagName('head')[0]||body).appendChild(createElement('script')).src='/static/api/js/share.js?cdnversion='+~(-new Date()/36e5)];
</script>

               </div>
            
            
               <div id="needsharebutton-postbottom">
                 <span class="btn">
                    <i class="fa fa-share-alt" aria-hidden="true"></i>
                 </span>
               </div>
            
          </div>
        
        </div>
      
      

      
        <div class="post-nav">
          <div class="post-nav-next post-nav-item">
            
              <a href="/2018/09/19/一步步教你轻松学KNN模型算法/" rel="next" title="一步步教你轻松学KNN模型算法">
                <i class="fa fa-chevron-left"></i> 一步步教你轻松学KNN模型算法
              </a>
            
          </div>

          <span class="post-nav-divider"></span>

          <div class="post-nav-prev post-nav-item">
            
              <a href="/2018/09/19/数据处理：量身打造自定义文件格式转换/" rel="prev" title="如何自定义文件格式转换">
                如何自定义文件格式转换 <i class="fa fa-chevron-right"></i>
              </a>
            
          </div>
        </div>
      

      
      
    </footer>
  </div>
  
  
  
  </article>


  </div>


          </div>
          

  
    <div class="comments" id="comments">
      <div id="lv-container" data-id="city" data-uid="MTAyMC8zOTc5NC8xNjMyMQ=="></div>
    </div>

  
 





        </div>
        
          
  
  <div class="sidebar-toggle">
    <div class="sidebar-toggle-line-wrap">
      <span class="sidebar-toggle-line sidebar-toggle-line-first"></span>
      <span class="sidebar-toggle-line sidebar-toggle-line-middle"></span>
      <span class="sidebar-toggle-line sidebar-toggle-line-last"></span>
    </div>
  </div>

  <aside id="sidebar" class="sidebar">
    
    <div class="sidebar-inner">

      

      
        <ul class="sidebar-nav motion-element">
          <li class="sidebar-nav-toc sidebar-nav-active" data-target="post-toc-wrap">
            文章目录
          </li>
          <li class="sidebar-nav-overview" data-target="site-overview-wrap">
            站点概览
          </li>
        </ul>
      

      <section class="site-overview-wrap sidebar-panel">
        <div class="site-overview">
          <div class="site-author motion-element" itemprop="author" itemscope itemtype="http://schema.org/Person">
            
              <img class="site-author-image" itemprop="image"
                src="/images/header.png"
                alt="白宁超" />
            
              <p class="site-author-name" itemprop="name">白宁超</p>
              <p class="site-description motion-element" itemprop="description">本站主要研究深度学习、机器学习、自然语言处理等前沿技术。ML&NLP交流群：436303759 <span><a target="_blank" href="http://shang.qq.com/wpa/qunwpa?idkey=ef3bbb679b06ac59b136c57ba9e7935ff9d3b10faeabde6e4efcafe523bbbf4d"><img border="0" src="http://pub.idqqimg.com/wpa/images/group.png" alt="自然语言处理和机器学习技术QQ交流：436303759 " title="自然语言处理和机器学习技术交流"></a></span></p>
          </div>

          
            <nav class="site-state motion-element">
              
                <div class="site-state-item site-state-posts">
                
                  <a href="/archives">
                
                    <span class="site-state-item-count">65</span>
                    <span class="site-state-item-name">日志</span>
                  </a>
                </div>
              

              
                
                
                <div class="site-state-item site-state-categories">
                  <a href="/categories/index.html">
                    
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                    <span class="site-state-item-count">29</span>
                    <span class="site-state-item-name">分类</span>
                  </a>
                </div>
              

              
                
                
                <div class="site-state-item site-state-tags">
                  <a href="/tags/index.html">
                    
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                      
                    
                    <span class="site-state-item-count">119</span>
                    <span class="site-state-item-name">标签</span>
                  </a>
                </div>
              
            </nav>
          

          
            <div class="feed-link motion-element">
              <a href="/atom.xml" rel="alternate">
                <i class="fa fa-rss"></i>
                RSS
              </a>
            </div>
          

          
            <div class="links-of-author motion-element">
              
                <span class="links-of-author-item">
                  <a href="https://github.com/bainingchao" target="_blank" title="GitHub" rel="external nofollow"><i class="fa fa-fw fa-github"></i>GitHub</a>
                  
                </span>
              
                <span class="links-of-author-item">
                  <a href="https://www.google.com.hk/" target="_blank" title="Google" rel="external nofollow"><i class="fa fa-fw fa-google"></i>Google</a>
                  
                </span>
              
                <span class="links-of-author-item">
                  <a href="https://www.baidu.com/" target="_blank" title="百度" rel="external nofollow"><i class="fa fa-fw fa-globe"></i>百度</a>
                  
                </span>
              
                <span class="links-of-author-item">
                  <a href="https://weibo.com/p/1005056002073632?is_all=1" target="_blank" title="微博" rel="external nofollow"><i class="fa fa-fw fa-weibo"></i>微博</a>
                  
                </span>
              
                <span class="links-of-author-item">
                  <a href="http://www.cnblogs.com/baiboy/" target="_blank" title="博客园" rel="external nofollow"><i class="fa fa-fw fa-globe"></i>博客园</a>
                  
                </span>
              
                <span class="links-of-author-item">
                  <a href="https://mp.weixin.qq.com/s/s97I4gtEJIt5rMivWMkPkQ" target="_blank" title="微信公众号" rel="external nofollow"><i class="fa fa-fw fa-weixin"></i>微信公众号</a>
                  
                </span>
              
            </div>
          

          
          

          
          

          
            
          
          

        </div>
      </section>

      
      <!--noindex-->
        <section class="post-toc-wrap motion-element sidebar-panel sidebar-panel-active">
          <div class="post-toc">

            
              
            

            
              <div class="post-toc-content"><ol class="nav"><li class="nav-item nav-level-1"><a class="nav-link" href="#理论介绍"><span class="nav-number">1.</span> <span class="nav-text">理论介绍</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#什么是决策树"><span class="nav-number">1.1.</span> <span class="nav-text">什么是决策树</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#什么是信息熵和信息增益"><span class="nav-number">1.2.</span> <span class="nav-text">什么是信息熵和信息增益</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#决策树工作原理"><span class="nav-number">1.3.</span> <span class="nav-text">决策树工作原理</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#决策树算法流程"><span class="nav-number">1.4.</span> <span class="nav-text">决策树算法流程</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#决策树优缺点"><span class="nav-number">1.5.</span> <span class="nav-text">决策树优缺点</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#案例描述：加深决策树理解"><span class="nav-number">2.</span> <span class="nav-text">案例描述：加深决策树理解</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#案例描述"><span class="nav-number">2.1.</span> <span class="nav-text">案例描述</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#数据采集"><span class="nav-number">2.2.</span> <span class="nav-text">数据采集</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#构建决策树"><span class="nav-number">2.3.</span> <span class="nav-text">构建决策树</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#结果分析"><span class="nav-number">2.4.</span> <span class="nav-text">结果分析</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#决策树算法实现与分析"><span class="nav-number">3.</span> <span class="nav-text">决策树算法实现与分析</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" 
href="#案例-判定鱼类和非鱼类"><span class="nav-number">3.1.</span> <span class="nav-text">案例: 判定鱼类和非鱼类</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#分析数据"><span class="nav-number">3.2.</span> <span class="nav-text">分析数据</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#训练算法：构造树的数据结构"><span class="nav-number">3.3.</span> <span class="nav-text">训练算法：构造树的数据结构</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#测试算法：使用决策树执行分类"><span class="nav-number">3.4.</span> <span class="nav-text">测试算法：使用决策树执行分类</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#决策树分类器实现"><span class="nav-number">3.5.</span> <span class="nav-text">决策树分类器实现</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#决策树实际应用-预测隐形眼镜的测试代码"><span class="nav-number">4.</span> <span class="nav-text">决策树实际应用:预测隐形眼镜的测试代码</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#项目概述"><span class="nav-number">4.1.</span> <span class="nav-text">项目概述</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#开发流程"><span class="nav-number">4.2.</span> <span class="nav-text">开发流程</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#数据读取"><span class="nav-number">4.3.</span> <span class="nav-text">数据读取</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#运行结果"><span class="nav-number">4.4.</span> <span class="nav-text">运行结果</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#完整代码下载"><span class="nav-number">5.</span> <span class="nav-text">完整代码下载</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#作者声明"><span class="nav-number">6.</span> <span class="nav-text">作者声明</span></a></li></ol></div>
            

          </div>
        </section>
      <!--/noindex-->
      

      

    </div>
  </aside>


        
      </div>
    </main>

    <footer id="footer" class="footer">
      <div class="footer-inner">
        <script async src="https://dn-lbstatics.qbox.me/busuanzi/2.3/busuanzi.pure.mini.js">
</script>

<div class="copyright">&copy; <span itemprop="copyrightYear">2019</span>
  <span class="with-love" id="animate">
    <i class="fa fa-user"></i>
  </span>
  <span class="author" itemprop="copyrightHolder">白宁超</span>

  

  
</div>




  



  <!--<div class="powered-by">由 <a class="theme-link" target="_blank" rel="external nofollow" href="https://hexo.io">Hexo</a> 强力驱动 v3.7.1</div> -->



   <!--<span class="post-meta-divider">|</span>-->



   <!--<div class="theme-info">主题 – <a class="theme-link" target="_blank" rel="external nofollow" href="https://theme-next.org">NexT.Gemini</a> v6.4.1</div>-->




        <script async src="https://busuanzi.ibruce.info/busuanzi/2.3/busuanzi.pure.mini.js"></script>



<div class="busuanzi-count">
  
    <span class="site-uv" title="总访客量">
      <i class="fa fa-user"></i>
      <span class="busuanzi-value" id="busuanzi_value_site_uv"></span>
    </span>
  

  
    <span class="site-pv" title="总访问量">
      <i class="fa fa-eye"></i>
      <span class="busuanzi-value" id="busuanzi_value_site_pv"></span>
    </span>
  
</div>









        
      </div>
    </footer>

    
      <div class="back-to-top">
        <i class="fa fa-arrow-up"></i>
        
      </div>
    

    
	
    

    
  </div>

  

<script type="text/javascript">
  // Discard non-native Promise implementations (partial polyfills) so the
  // theme's bundled libraries fall back to their own shims instead.
  (function (tag) {
    if (tag !== '[object Function]') { window.Promise = null; }
  })(Object.prototype.toString.call(window.Promise));
</script>


























  
  
    <script type="text/javascript" src="/lib/jquery/index.js?v=2.1.3"></script>
  

  
  
    <script type="text/javascript" src="/lib/velocity/velocity.min.js?v=1.2.1"></script>
  

  
  
    <script type="text/javascript" src="/lib/velocity/velocity.ui.min.js?v=1.2.1"></script>
  


  


  <script type="text/javascript" src="/js/src/utils.js?v=6.4.1"></script>

  <script type="text/javascript" src="/js/src/motion.js?v=6.4.1"></script>



  
  


  <script type="text/javascript" src="/js/src/affix.js?v=6.4.1"></script>

  <script type="text/javascript" src="/js/src/schemes/pisces.js?v=6.4.1"></script>



  
  <script type="text/javascript" src="/js/src/scrollspy.js?v=6.4.1"></script>
<script type="text/javascript" src="/js/src/post-details.js?v=6.4.1"></script>



  


  <script type="text/javascript" src="/js/src/bootstrap.js?v=6.4.1"></script>



  



  
    <script type="text/javascript">
      // Configure and lazily inject the LiveRe comment widget for this post.
      window.livereOptions = {
        refer: '2018/09/19/一步步教你轻松学决策树算法/'
      };
      (function (doc, tag) {
        // Widget already loaded — nothing to do.
        if (typeof LivereTower === 'function') { return; }
        var first = doc.getElementsByTagName(tag)[0];
        var loader = doc.createElement(tag);
        loader.src = 'https://cdn-city.livere.com/js/embed.dist.js';
        loader.async = true;
        first.parentNode.insertBefore(loader, first);
      })(document, 'script');
    </script>
  










  

  <script type="text/javascript">
    // Local search popup (NexT theme): lazily fetches the search database
    // (search.xml or .json), indexes entries client-side on each keystroke,
    // and renders highlighted result snippets into the popup.
    // Popup Window;
    var isfetched = false;
    var isXml = true;
    // Search DB path;
    var search_path = "search.xml";
    if (search_path.length === 0) {
      search_path = "search.xml";
    } else if (/json$/i.test(search_path)) {
      isXml = false;
    }
    var path = "/" + search_path;
    // monitor main search box;

    // Hide the popup, clear input and results, restore page scrolling.
    var onPopupClose = function (e) {
      $('.popup').hide();
      $('#local-search-input').val('');
      $('.search-result-list').remove();
      $('#no-result').remove();
      $(".local-search-pop-overlay").remove();
      $('body').css('overflow', '');
    }

    // Show the popup overlay and focus the search input; disables mobile
    // autocapitalize/autocorrect on the input.
    function proceedsearch() {
      $("body")
        .append('<div class="search-popup-overlay local-search-pop-overlay"></div>')
        .css('overflow', 'hidden');
      $('.search-popup-overlay').click(onPopupClose);
      $('.popup').toggle();
      var $localSearchInput = $('#local-search-input');
      $localSearchInput.attr("autocapitalize", "none");
      $localSearchInput.attr("autocorrect", "off");
      $localSearchInput.focus();
    }

    // search function;
    // Fetch the search database from `path`, then wire live searching on the
    // input element `search_id`, writing result HTML into `content_id`.
    var searchFunc = function(path, search_id, content_id) {
      'use strict';

      // start loading animation
      $("body")
        .append('<div class="search-popup-overlay local-search-pop-overlay">' +
          '<div id="search-loading-icon">' +
          '<i class="fa fa-spinner fa-pulse fa-5x fa-fw"></i>' +
          '</div>' +
          '</div>')
        .css('overflow', 'hidden');
      $("#search-loading-icon").css('margin', '20% auto 0 auto').css('text-align', 'center');

      $.ajax({
        url: path,
        dataType: isXml ? "xml" : "json",
        async: true,
        success: function(res) {
          // get the contents from search data
          isfetched = true;
          $('.popup').detach().appendTo('.header-inner');
          // Normalize XML <entry> nodes into {title, content, url} records;
          // a JSON response is assumed to already have that shape.
          var datas = isXml ? $("entry", res).map(function() {
            return {
              title: $("title", this).text(),
              content: $("content",this).text(),
              url: $("url" , this).text()
            };
          }).get() : res;
          var input = document.getElementById(search_id);
          var resultContent = document.getElementById(content_id);
          // Re-runs the whole search on every input event.
          var inputEventFunction = function() {
            var searchText = input.value.trim().toLowerCase();
            var keywords = searchText.split(/[\s\-]+/);
            if (keywords.length > 1) {
              keywords.push(searchText);
            }
            var resultItems = [];
            if (searchText.length > 0) {
              // perform local searching
              datas.forEach(function(data) {
                var isMatch = false;
                var hitCount = 0;
                var searchTextCount = 0;
                var title = data.title.trim();
                var titleInLowerCase = title.toLowerCase();
                // Strip HTML tags before matching against the content body.
                var content = data.content.trim().replace(/<[^>]+>/g,"");

                var contentInLowerCase = content.toLowerCase();
                var articleUrl = decodeURIComponent(data.url);
                var indexOfTitle = [];
                var indexOfContent = [];
                // only match articles with not empty titles
                if(title != '') {
                  keywords.forEach(function(keyword) {
                    // Collect every {position, word} occurrence of `word`
                    // in `text` (non-overlapping, left to right).
                    function getIndexByWord(word, text, caseSensitive) {
                      var wordLen = word.length;
                      if (wordLen === 0) {
                        return [];
                      }
                      var startPosition = 0, position = [], index = [];
                      if (!caseSensitive) {
                        text = text.toLowerCase();
                        word = word.toLowerCase();
                      }
                      while ((position = text.indexOf(word, startPosition)) > -1) {
                        index.push({position: position, word: word});
                        startPosition = position + wordLen;
                      }
                      return index;
                    }

                    indexOfTitle = indexOfTitle.concat(getIndexByWord(keyword, titleInLowerCase, false));
                    indexOfContent = indexOfContent.concat(getIndexByWord(keyword, contentInLowerCase, false));
                  });
                  if (indexOfTitle.length > 0 || indexOfContent.length > 0) {
                    isMatch = true;
                    hitCount = indexOfTitle.length + indexOfContent.length;
                  }
                }

                // show search results

                if (isMatch) {
                  // sort index by position of keyword
                  // (descending position so hits can be consumed from the
                  // tail of the array via index.pop() below)

                  [indexOfTitle, indexOfContent].forEach(function (index) {
                    index.sort(function (itemLeft, itemRight) {
                      if (itemRight.position !== itemLeft.position) {
                        return itemRight.position - itemLeft.position;
                      } else {
                        return itemLeft.word.length - itemRight.word.length;
                      }
                    });
                  });

                  // merge hits into slices

                  // Consume hits from `index` (destructively, from the end)
                  // that fit inside [start, end) of `text`, and return one
                  // slice record for rendering; overlapping hits are dropped.
                  function mergeIntoSlice(text, start, end, index) {
                    var item = index[index.length - 1];
                    var position = item.position;
                    var word = item.word;
                    var hits = [];
                    var searchTextCountInSlice = 0;
                    while (position + word.length <= end && index.length != 0) {
                      if (word === searchText) {
                        searchTextCountInSlice++;
                      }
                      hits.push({position: position, length: word.length});
                      var wordEnd = position + word.length;

                      // move to next position of hit

                      index.pop();
                      while (index.length != 0) {
                        item = index[index.length - 1];
                        position = item.position;
                        word = item.word;
                        if (wordEnd > position) {
                          index.pop();
                        } else {
                          break;
                        }
                      }
                    }
                    // Accumulate into the per-article counter captured from
                    // the enclosing forEach closure.
                    searchTextCount += searchTextCountInSlice;
                    return {
                      hits: hits,
                      start: start,
                      end: end,
                      searchTextCount: searchTextCountInSlice
                    };
                  }

                  var slicesOfTitle = [];
                  if (indexOfTitle.length != 0) {
                    slicesOfTitle.push(mergeIntoSlice(title, 0, title.length, indexOfTitle));
                  }

                  var slicesOfContent = [];
                  while (indexOfContent.length != 0) {
                    var item = indexOfContent[indexOfContent.length - 1];
                    var position = item.position;
                    var word = item.word;
                    // cut out 100 characters
                    var start = position - 20;
                    var end = position + 80;
                    if(start < 0){
                      start = 0;
                    }
                    if (end < position + word.length) {
                      end = position + word.length;
                    }
                    if(end > content.length){
                      end = content.length;
                    }
                    slicesOfContent.push(mergeIntoSlice(content, start, end, indexOfContent));
                  }

                  // sort slices in content by search text's count and hits' count

                  slicesOfContent.sort(function (sliceLeft, sliceRight) {
                    if (sliceLeft.searchTextCount !== sliceRight.searchTextCount) {
                      return sliceRight.searchTextCount - sliceLeft.searchTextCount;
                    } else if (sliceLeft.hits.length !== sliceRight.hits.length) {
                      return sliceRight.hits.length - sliceLeft.hits.length;
                    } else {
                      return sliceLeft.start - sliceRight.start;
                    }
                  });

                  // select top N slices in content

                  var upperBound = parseInt('1');
                  if (upperBound >= 0) {
                    slicesOfContent = slicesOfContent.slice(0, upperBound);
                  }

                  // highlight title and content

                  // Wrap each hit inside the slice in <b class="search-keyword">.
                  function highlightKeyword(text, slice) {
                    var result = '';
                    var prevEnd = slice.start;
                    slice.hits.forEach(function (hit) {
                      result += text.substring(prevEnd, hit.position);
                      var end = hit.position + hit.length;
                      result += '<b class="search-keyword">' + text.substring(hit.position, end) + '</b>';
                      prevEnd = end;
                    });
                    result += text.substring(prevEnd, slice.end);
                    return result;
                  }

                  var resultItem = '';

                  if (slicesOfTitle.length != 0) {
                    resultItem += "<li><a href='" + articleUrl + "' class='search-result-title'>" + highlightKeyword(title, slicesOfTitle[0]) + "</a>";
                  } else {
                    resultItem += "<li><a href='" + articleUrl + "' class='search-result-title'>" + title + "</a>";
                  }

                  slicesOfContent.forEach(function (slice) {
                    resultItem += "<a href='" + articleUrl + "'>" +
                      "<p class=\"search-result\">" + highlightKeyword(content, slice) +
                      "...</p>" + "</a>";
                  });

                  resultItem += "</li>";
                  resultItems.push({
                    item: resultItem,
                    searchTextCount: searchTextCount,
                    hitCount: hitCount,
                    id: resultItems.length
                  });
                }
              })
            };
            // Render phase: empty query -> search icon; no hits -> frown
            // icon; otherwise sort by relevance and inject the result list.
            if (keywords.length === 1 && keywords[0] === "") {
              resultContent.innerHTML = '<div id="no-result"><i class="fa fa-search fa-5x" /></div>'
            } else if (resultItems.length === 0) {
              resultContent.innerHTML = '<div id="no-result"><i class="fa fa-frown-o fa-5x" /></div>'
            } else {
              resultItems.sort(function (resultLeft, resultRight) {
                if (resultLeft.searchTextCount !== resultRight.searchTextCount) {
                  return resultRight.searchTextCount - resultLeft.searchTextCount;
                } else if (resultLeft.hitCount !== resultRight.hitCount) {
                  return resultRight.hitCount - resultLeft.hitCount;
                } else {
                  return resultRight.id - resultLeft.id;
                }
              });
              var searchResultList = '<ul class=\"search-result-list\">';
              resultItems.forEach(function (result) {
                searchResultList += result.item;
              })
              searchResultList += "</ul>";
              resultContent.innerHTML = searchResultList;
            }
          }

          // 'auto' trigger mode (baked in by the template): search while
          // typing; the else branch would search on icon click / Enter.
          if ('auto' === 'auto') {
            input.addEventListener('input', inputEventFunction);
          } else {
            $('.search-icon').click(inputEventFunction);
            input.addEventListener('keypress', function (event) {
              if (event.keyCode === 13) {
                inputEventFunction();
              }
            });
          }

          // remove loading animation
          $(".local-search-pop-overlay").remove();
          $('body').css('overflow', '');

          proceedsearch();
        }
      });
    }

    // handle and trigger popup window;
    // First open fetches the index once; later opens reuse the cached data.
    $('.popup-trigger').click(function(e) {
      e.stopPropagation();
      if (isfetched === false) {
        searchFunc(path, 'local-search-input', 'local-search-result');
      } else {
        proceedsearch();
      };
    });

    $('.popup-btn-close').click(onPopupClose);
    $('.popup').click(function(e){
      e.stopPropagation();
    });
    // Dismiss the popup on the Escape key.
    $(document).on('keyup', function (event) {
      var shouldDismissSearchPopup = event.which === 27 &&
        $('.search-popup').is(':visible');
      if (shouldDismissSearchPopup) {
        onPopupClose();
      }
    });
  </script>





  

  

  
<script>
// Baidu link-submit beacon: choose the protocol-matching push.js and
// inject it before the first script element on the page.
(function () {
    var pushScript = document.createElement('script');
    var scheme = window.location.protocol.split(':')[0];
    if (scheme === 'https') {
        pushScript.src = 'https://zz.bdstatic.com/linksubmit/push.js';
    } else {
        pushScript.src = 'http://push.zhanzhang.baidu.com/push.js';
    }
    var anchor = document.getElementsByTagName("script")[0];
    anchor.parentNode.insertBefore(pushScript, anchor);
})();
</script>


  
  

  
  

  
    
      <script type="text/x-mathjax-config">
    // MathJax typesetting options: inline math via $...$ or \( ... \),
    // escaped dollars honored, code-like tags skipped, AMS auto-numbering.
    MathJax.Hub.Config({
      tex2jax: {
        inlineMath: [ ['$','$'], ["\\(","\\)"]  ],
        processEscapes: true,
        skipTags: ['script', 'noscript', 'style', 'textarea', 'pre', 'code']
      },
      TeX: {equationNumbers: { autoNumber: "AMS" }}
    });
</script>

<script type="text/x-mathjax-config">
    // After typesetting, tag each rendered formula's parent element with
    // the 'has-jax' class so CSS can style math-bearing text.
    MathJax.Hub.Queue(function() {
      var all = MathJax.Hub.getAllJax(), i;
        for (i=0; i < all.length; i += 1) {
          all[i].SourceElement().parentNode.className += ' has-jax';
        }
    });
</script>
<script type="text/javascript" src="https://cdn.jsdelivr.net/npm/mathjax@2.7.1/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>

    
  


  
  
  
  <script src="/lib/needsharebutton/needsharebutton.js"></script>

  <script>
    
      // Post-bottom share buttons (needShareButton plugin) configuration.
      pbOptions = {
        iconStyle: "box",
        boxForm: "horizontal",
        position: "bottomCenter",
        networks: "Weibo,Wechat,Douban,QQZone,Linkedin,Facebook"
      };
      new needShareButton('#needsharebutton-postbottom', pbOptions);
    
    
  </script>

  

  

  

  

  

  

  <!-- 页面点击小红心 -->
	<script type="text/javascript" src="/js/src/love.js"></script><!-- hexo-inject:begin --><!-- Begin: Injected MathJax -->
<script type="text/x-mathjax-config">
  // hexo-inject duplicate of the page's MathJax config (same tex2jax / AMS
  // numbering settings, serialized as JSON).
  MathJax.Hub.Config({"tex2jax":{"inlineMath":[["$","$"],["\\(","\\)"]],"skipTags":["script","noscript","style","textarea","pre","code"],"processEscapes":true},"TeX":{"equationNumbers":{"autoNumber":"AMS"}}});
</script>

<script type="text/x-mathjax-config">
  // Mark each typeset formula's parent element with 'has-jax' for CSS hooks.
  MathJax.Hub.Queue(function() {
    var all = MathJax.Hub.getAllJax(), i;
    for(i=0; i < all.length; i += 1) {
      all[i].SourceElement().parentNode.className += ' has-jax';
    }
  });
</script>

<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js">
</script>
<!-- End: Injected MathJax -->
<!-- hexo-inject:end -->
</body>
</html>
