<!DOCTYPE html>



  


<html class="theme-next gemini use-motion" lang="zh-Hans">
<head><meta name="generator" content="Hexo 3.9.0">
  <meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#222">



  
  
    
    
  <script src="/dxl/lib/pace/pace.min.js?v=1.0.2"></script>
  <link href="/dxl/lib/pace/pace-theme-minimal.min.css?v=1.0.2" rel="stylesheet">







<meta http-equiv="Cache-Control" content="no-transform">
<meta http-equiv="Cache-Control" content="no-siteapp">
















  
  
  <link href="/dxl/lib/fancybox/source/jquery.fancybox.css?v=2.1.5" rel="stylesheet">







<link href="/dxl/lib/font-awesome/css/font-awesome.min.css?v=4.6.2" rel="stylesheet">

<link href="/dxl/css/main.css?v=5.1.4" rel="stylesheet">


  <link rel="apple-touch-icon" sizes="180x180" href="/dxl/images/apple-touch-icon-next.png?v=5.1.4">


  <link rel="icon" type="image/png" sizes="32x32" href="/dxl/images/favicon-32x32-next.png?v=5.1.4">


  <link rel="icon" type="image/png" sizes="16x16" href="/dxl/images/favicon-16x16-next.png?v=5.1.4">


  <link rel="mask-icon" href="/dxl/images/logo.svg?v=5.1.4" color="#222">





  <meta name="keywords" content="爬虫">










<meta name="description" content="一、数据分析1.1数据解析的作用​    用于获取页面中局部的页面源码数据 1.2实现数据解析的方法1234正则bs4（独有）xpath（最为通用）pyquery（自学）  1.3数据解析的通用原理121、标签定位2、将标签中间存储的文本数据或者其属性值进行捕获  二、正则解析123456789101112131415161718192021222324252627282930import osi">
<meta name="keywords" content="爬虫">
<meta property="og:type" content="article">
<meta property="og:title" content="爬虫数据解析">
<meta property="og:url" content="http://yoursite.com/dxl/2019/09/20/【爬虫03】03数据解析/index.html">
<meta property="og:site_name" content="我的快乐时光">
<meta property="og:description" content="一、数据分析1.1数据解析的作用​    用于获取页面中局部的页面源码数据 1.2实现数据解析的方法1234正则bs4（独有）xpath（最为通用）pyquery（自学）  1.3数据解析的通用原理121、标签定位2、将标签中间存储的文本数据或者其属性值进行捕获  二、正则解析123456789101112131415161718192021222324252627282930import osi">
<meta property="og:locale" content="zh-Hans">
<meta property="og:updated_time" content="2019-09-20T12:27:56.506Z">
<meta name="twitter:card" content="summary">
<meta name="twitter:title" content="爬虫数据解析">
<meta name="twitter:description" content="一、数据分析1.1数据解析的作用​    用于获取页面中局部的页面源码数据 1.2实现数据解析的方法1234正则bs4（独有）xpath（最为通用）pyquery（自学）  1.3数据解析的通用原理121、标签定位2、将标签中间存储的文本数据或者其属性值进行捕获  二、正则解析123456789101112131415161718192021222324252627282930import osi">



<script type="text/javascript" id="hexo.configurations">
  // NexT theme runtime configuration, injected by Hexo at build time and
  // read by the theme's client-side scripts (sidebar, motion, fancybox, search).
  var NexT = window.NexT || {};
  var CONFIG = {
    root: '/dxl/',        // site root path — the site is deployed under a subdirectory
    scheme: 'Gemini',     // active NexT scheme
    version: '5.1.4',     // NexT theme version (matches the ?v= cache-busting query strings)
    // Sidebar placement and behavior: shown on post pages, left side,
    // with back-to-top button and scroll-percentage indicator; hidden on mobile.
    sidebar: {"position":"left","display":"post","offset":12,"b2t":true,"scrollpercent":true,"onmobile":false},
    fancybox: true,       // enable fancybox image lightbox
    tabs: true,           // enable the tabs tag plugin
    // Entry/transition animations for page regions (animate.css effect names).
    motion: {"enable":true,"async":false,"transition":{"post_block":"fadeIn","post_header":"slideDownIn","post_body":"slideDownIn","coll_header":"slideLeftIn","sidebar":"slideUpIn"}},
    // Duoshuo comment-system identity (legacy; userId '0' = not configured).
    duoshuo: {
      userId: '0',
      author: '博主'
    },
    // Algolia search settings — credentials are empty, so search is effectively disabled.
    algolia: {
      applicationID: '',
      apiKey: '',
      indexName: '',
      hits: {"per_page":10},
      labels: {"input_placeholder":"Search for Posts","hits_empty":"We didn't find any results for the search: ${query}","hits_stats":"${hits} results found in ${time} ms"}
    }
  };
</script>



  <link rel="canonical" href="http://yoursite.com/dxl/2019/09/20/【爬虫03】03数据解析/">





  <title>爬虫数据解析 | 我的快乐时光</title>
  








</head>

<body itemscope itemtype="http://schema.org/WebPage" lang="zh-Hans">

  
  
    
  

  <div class="container sidebar-position-left page-post-detail">
    <div class="headband"></div>

    <header id="header" class="header" itemscope itemtype="http://schema.org/WPHeader">
      <div class="header-inner"><div class="site-brand-wrapper">
  <div class="site-meta ">
    

    <div class="custom-logo-site-title">
      <a href="/dxl/" class="brand" rel="start">
        <span class="logo-line-before"><i></i></span>
        <span class="site-title">我的快乐时光</span>
        <span class="logo-line-after"><i></i></span>
      </a>
    </div>
      
        <p class="site-subtitle"></p>
      
  </div>

  <div class="site-nav-toggle">
    <button>
      <span class="btn-bar"></span>
      <span class="btn-bar"></span>
      <span class="btn-bar"></span>
    </button>
  </div>
</div>

<nav class="site-nav">
  

  
    <ul id="menu" class="menu">
      
        
        <li class="menu-item menu-item-home">
          <a href="/dxl/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-home"></i> <br>
            
            首页
          </a>
        </li>
      
        
        <li class="menu-item menu-item-categories">
          <a href="/dxl/categories/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-th"></i> <br>
            
            分类
          </a>
        </li>
      

      
    </ul>
  

  
</nav>



 </div>
    </header>

    <main id="main" class="main">
      <div class="main-inner">
        <div class="content-wrap">
          <div id="content" class="content">
            

  <div id="posts" class="posts-expand">
    

  

  
  
  

  <article class="post post-type-normal" itemscope itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="http://yoursite.com/dxl/2019/09/20/【爬虫03】03数据解析/">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="name" content="">
      <meta itemprop="description" content="">
      <meta itemprop="image" content="/dxl/images/avatar.png">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="我的快乐时光">
    </span>

    
      <header class="post-header">

        
        
          <h1 class="post-title" itemprop="name headline">爬虫数据解析</h1>
        

        <div class="post-meta">
          <span class="post-time">
            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              
              <time title="创建于" itemprop="dateCreated datePublished" datetime="2019-09-20T20:44:46+08:00">
                2019-09-20
              </time>
            

            

            
          </span>

          
            <span class="post-category">
            
              <span class="post-meta-divider">|</span>
            
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              
                <span class="post-meta-item-text">分类于</span>
              
              
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/dxl/categories/爬虫/" itemprop="url" rel="index">
                    <span itemprop="name">爬虫</span>
                  </a>
                </span>

                
                
              
            </span>
          

          
            
              <span class="post-comments-count">
                <span class="post-meta-divider">|</span>
                <span class="post-meta-item-icon">
                  <i class="fa fa-comment-o"></i>
                </span>
                <a href="/dxl/2019/09/20/【爬虫03】03数据解析/#comments" itemprop="discussionUrl">
                  <span class="post-comments-count valine-comment-count" data-xid="/dxl/2019/09/20/【爬虫03】03数据解析/" itemprop="commentCount"></span>
                </a>
              </span>
            
          

          
          

          
            <span class="post-meta-divider">|</span>
            <span class="page-pv">本文总阅读量
            <span class="busuanzi-value" id="busuanzi_value_page_pv"></span>次
            </span>
          

          
            <div class="post-wordcount">
              
                
                <span class="post-meta-item-icon">
                  <i class="fa fa-file-word-o"></i>
                </span>
                
                  <span class="post-meta-item-text">字数统计&#58;</span>
                
                <span title="字数统计">
                  
                </span>
              

              
                <span class="post-meta-divider">|</span>
              

              
                <span class="post-meta-item-icon">
                  <i class="fa fa-clock-o"></i>
                </span>
                
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                
                <span title="阅读时长">
                  
                </span>
              
            </div>
          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        <h2 id="一、数据分析"><a href="#一、数据分析" class="headerlink" title="一、数据分析"></a>一、数据分析</h2><h3 id="1-1数据解析的作用"><a href="#1-1数据解析的作用" class="headerlink" title="1.1数据解析的作用"></a>1.1数据解析的作用</h3><p>​    用于获取页面中局部的页面源码数据</p>
<h3 id="1-2实现数据解析的方法"><a href="#1-2实现数据解析的方法" class="headerlink" title="1.2实现数据解析的方法"></a>1.2实现数据解析的方法</h3><figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">正则</span><br><span class="line">bs4（独有）</span><br><span class="line">xpath（最为通用）</span><br><span class="line">pyquery（自学）</span><br></pre></td></tr></table></figure>

<h4 id="1-3数据解析的通用原理"><a href="#1-3数据解析的通用原理" class="headerlink" title="1.3数据解析的通用原理"></a>1.3数据解析的通用原理</h4><figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">1、标签定位</span><br><span class="line">2、将标签中间存储的文本数据或者其属性值进行捕获</span><br></pre></td></tr></table></figure>

<h2 id="二、正则解析"><a href="#二、正则解析" class="headerlink" title="二、正则解析"></a>二、正则解析</h2><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> os</span><br><span class="line"><span class="keyword">import</span> re</span><br><span class="line"><span class="keyword">import</span> requests</span><br><span class="line"></span><br><span class="line">url =<span class="string">"https://www.qiushibaike.com/imgrank/page/"</span></span><br><span class="line">headers =&#123;</span><br><span class="line">    <span class="string">"User-Agent"</span>: <span class="string">"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36"</span></span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line"><span class="comment">#创建文件夹</span></span><br><span class="line">base_name = <span class="string">"./image"</span></span><br><span class="line"><span 
class="keyword">if</span> <span class="keyword">not</span> os.path.exists(base_name):</span><br><span class="line">    os.mkdir(base_name)</span><br><span class="line"><span class="comment">#下载指定页码中的图片数据</span></span><br><span class="line"><span class="keyword">for</span> i <span class="keyword">in</span> range(<span class="number">1</span>,<span class="number">3</span>):</span><br><span class="line">    url_page = url+<span class="string">"i"</span></span><br><span class="line">    res =requests.get(url=url,headers=headers).text</span><br><span class="line">    <span class="comment">#解析response中的图片链接</span></span><br><span class="line">    ex = <span class="string">'&lt;div class="thumb"&gt;.*?&lt;img src="(.*?)" alt=".*?&lt;/div&gt;'</span></span><br><span class="line">    pic_list  = re.findall(ex,res,re.S) <span class="comment">#r.S不匹配换行</span></span><br><span class="line">    <span class="comment"># 循环下载该页码下所有的图片数据</span></span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> pic_list:</span><br><span class="line">        pic_url = <span class="string">'https:'</span>+ i</span><br><span class="line">        res_pic = requests.get(url=pic_url,headers=headers)</span><br><span class="line">        pic_content = res_pic.content</span><br><span class="line">        pic_name = i.split(<span class="string">'/'</span>)[<span class="number">-1</span>]</span><br><span class="line">        pic_path=os.path.join(base_name,pic_name)</span><br><span class="line">        <span class="keyword">with</span> open(pic_path,<span class="string">'wb'</span>) <span class="keyword">as</span> f:</span><br><span class="line">            f.write(pic_content)</span><br><span class="line">        print(pic_name)</span><br></pre></td></tr></table></figure>

<h2 id="三、bs4解析"><a href="#三、bs4解析" class="headerlink" title="三、bs4解析"></a>三、bs4解析</h2><h3 id="3-1环境安装"><a href="#3-1环境安装" class="headerlink" title="3.1环境安装"></a>3.1环境安装</h3><p>1、模块的安装</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">1、pip3 install bs4</span><br><span class="line">2、pip3 install lxml</span><br></pre></td></tr></table></figure>

<p>2、模块的导入</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">from bs4 import BeautifulSoup</span><br></pre></td></tr></table></figure>

<p>3、使用</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="number">1</span>、本地文件</span><br><span class="line">fp = open(<span class="string">"text.html"</span>,<span class="string">'r'</span>,encoding=<span class="string">'utf-8'</span>)</span><br><span class="line">soup = BeautifulSoup(fp,<span class="string">'lxml'</span>)</span><br><span class="line"><span class="number">2</span>、网络文件</span><br><span class="line">page_text = requests.get(url= url,headers =headers).text</span><br><span class="line">soup = BeautifulSoup(page_text,<span class="string">'lxml'</span></span><br></pre></td></tr></table></figure>

<p>4、bs4解析原理</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">实例化一个BeautifulSoup的对象，且将即将被解析的页面源码加载到该对象中</span><br><span class="line">使用该对象中的属性或者方法进行标签定位和数据提取</span><br></pre></td></tr></table></figure>

<p>5、BeautifulSoup对象的实例化方式</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">BeautifulSoup(fp,&apos;lxml&apos;):将本地存储的html文档加载到该对象中</span><br><span class="line">BeautifulSoup(page_text,&apos;lxml&apos;):将互联网上获取的html源码加载到该对象中</span><br></pre></td></tr></table></figure>

<h3 id="3-2模块方法的使用"><a href="#3-2模块方法的使用" class="headerlink" title="3.2模块方法的使用"></a>3.2模块方法的使用</h3><p>1、标签定位</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">s = soup.a # a标签的第一个</span><br></pre></td></tr></table></figure>

<p>2、获取属性</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">s1 =soup.a.attrs</span><br><span class="line">#以下两种方法等效</span><br><span class="line">s2 =soup.a.attrs[&apos;href&apos;]</span><br><span class="line">s3 =soup.a[&apos;href&apos;]</span><br></pre></td></tr></table></figure>

<p>3、获取内容</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">print(soup.p.string) # 百里守约</span><br><span class="line">print(soup.a.string)</span><br><span class="line">print(soup.a.text)</span><br><span class="line">print(soup.a.get_text)</span><br></pre></td></tr></table></figure>

<p>4、find 找到第一个符合要求的</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">print(soup.find(&quot;a&quot;))</span><br><span class="line">print(soup.find(&quot;a&quot;,title =&quot;qin&quot;))</span><br></pre></td></tr></table></figure>

<p>5、find_all #找到所有符合要求的标签</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">print(soup.find_all([&apos;a&apos;,&apos;p&apos;])) #所有a标签和p标签</span><br><span class="line">print(soup.find_all(&quot;a&quot;,limit=2)) #限制2个</span><br></pre></td></tr></table></figure>

<p>6、select常见的选择器：标签选择器(a)、类选择器(.)、id选择器(#)、层级选择器</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">print(soup.select(<span class="string">'#mi'</span>)) <span class="comment">#[&lt;i id="mi"&gt;度蜜月&lt;/i&gt;]</span></span><br><span class="line">print(soup.select(<span class="string">'.du'</span>)) <span class="comment">#所有类含有du的标签</span></span><br><span class="line">print(soup.select(<span class="string">'p'</span>)) <span class="comment">#所有p 标签</span></span><br><span class="line">print(soup.select(<span class="string">'.tang &gt;ul &gt;li&gt;a'</span>)[<span class="number">0</span>]) <span class="comment">#所有p 标签</span></span><br></pre></td></tr></table></figure>

<p>7、总结</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line">1、soup.tagName:返回的就是页面中第一次出现的tagName标签（返回的是一个单数）</span><br><span class="line">2、soup.find(&apos;tagName&apos;,attrName=&apos;value&apos;)返回的是单数</span><br><span class="line">3、soup.find_all(&apos;tagName&apos;):定位所有的tagName的标签</span><br><span class="line">	- soup.find_all(&apos;tagName&apos;,attrName=&apos;value&apos;)：属性定位</span><br><span class="line">    - 注意：返回值是列表</span><br><span class="line">4、string获取的是标签中直系的文本内容，text获取的是标签中所有的文本内容</span><br><span class="line">5、&gt;表示下一级标题 “ ” 表示多级</span><br></pre></td></tr></table></figure>

<p>3.3案例</p>
<p>爬取三国演义小说所有内容</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> requests</span><br><span class="line"><span class="keyword">from</span> bs4 <span class="keyword">import</span> BeautifulSoup</span><br><span class="line">headers =&#123;</span><br><span class="line">    <span class="string">"User-Agent"</span>: <span class="string">"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36"</span></span><br><span class="line">&#125;</span><br><span class="line">url = <span class="string">"http://www.shicimingju.com/book/sanguoyanyi.html"</span></span><br><span class="line">page_text = requests.get(url= url,headers =headers).text</span><br><span class="line">soup = BeautifulSoup(page_text,<span class="string">'lxml'</span>)</span><br><span class="line"></span><br><span class="line">ex = <span class="string">".book-mulu a"</span></span><br><span class="line">page_list = soup.select(ex)</span><br><span class="line"><span class="comment"># print(page_list)</span></span><br><span class="line"><span class="keyword">with</span> open(<span 
class="string">"三国"</span>,<span class="string">'w'</span>,encoding=<span class="string">'utf-8'</span>) <span class="keyword">as</span> f:</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> page_list:</span><br><span class="line">        <span class="string">"""</span></span><br><span class="line"><span class="string">        http://www.shicimingju.com/book/sanguoyanyi/3.html</span></span><br><span class="line"><span class="string">        """</span></span><br><span class="line">        detail_url = <span class="string">"http://www.shicimingju.com"</span>+i[<span class="string">"href"</span>]</span><br><span class="line">        content = requests.get(url=detail_url,headers=headers).text</span><br><span class="line">        detail_soup = BeautifulSoup(content,<span class="string">'lxml'</span>)</span><br><span class="line">        <span class="comment">#不建议使用select 因为是列表没有办法直接使用text 取值						</span></span><br><span class="line">        detail_content = detail_soup.find(<span class="string">"div"</span>,class_ = <span class="string">"chapter_content"</span>).text</span><br><span class="line">        <span class="comment"># print(detail_content)</span></span><br><span class="line">        book_content = i.text+<span class="string">"\n"</span>+detail_content</span><br><span class="line">        f.write(book_content)</span><br></pre></td></tr></table></figure>

<h2 id="四、-Xpath解析"><a href="#四、-Xpath解析" class="headerlink" title="四、 Xpath解析"></a>四、 Xpath解析</h2><h3 id="4-1介绍"><a href="#4-1介绍" class="headerlink" title="4.1介绍"></a>4.1介绍</h3><p>1、环境安装</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">pip3 install lxml</span><br><span class="line">from lxml import etree #旧版</span><br><span class="line"># from lxml.html.clean import tree #新版</span><br></pre></td></tr></table></figure>

<p>2、解析原理</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">- 实例化一个etree类型的对象，且将即将被解析的页面源码数据加载到该对象中</span><br><span class="line">- 调用该对象中的xpath方法结合着不同的xpath表达式进行标签定位和数据提取</span><br></pre></td></tr></table></figure>

<p>3、实例化对象</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">- etree.parse(fileName)</span><br><span class="line">- etree.HTML(page_text)</span><br></pre></td></tr></table></figure>

<h3 id="4-2模块的使用"><a href="#4-2模块的使用" class="headerlink" title="4.2模块的使用"></a>4.2模块的使用</h3><p>1、基于标签定位的xpath表达式</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">print(tree.xpath(&apos;/html/head/meta&apos;))</span><br><span class="line">print(tree.xpath(&apos;//meta&apos;))</span><br></pre></td></tr></table></figure>

<p>2、索引定位</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">print(tree.xpath(&apos;//div[@class=&quot;tang&quot;]/ul/li[1]&apos;))</span><br></pre></td></tr></table></figure>

<p>3、取文本</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">print(tree.xpath(&apos;//div[@class=&quot;tang&quot;]/ul/li[5]//text()&apos;))</span><br><span class="line">print(tree.xpath(&apos;//div[@class=&quot;tang&quot;]/ul/li[5]/a/text()&apos;))</span><br></pre></td></tr></table></figure>

<p>4、取属性</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">print(tree.xpath(&apos;//div[@class=&quot;tang&quot;]/ul/li[5]/a/@href&apos;))</span><br></pre></td></tr></table></figure>

<p>5、逻辑运算</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">#找到href属性值为空且class属性值为du的a标签</span><br><span class="line">print(tree.xpath(&apos;//a[@class=&quot;du&quot; and @href=&quot; &quot;]&apos;))</span><br></pre></td></tr></table></figure>

<p>6、模糊匹配</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">print(tree.xpath(&apos;//div[contains(@class,&quot;ng&quot; )]&apos;))</span><br><span class="line">print(tree.xpath(&apos;//div[starts-with(@class, &quot;ta&quot;)]&apos;))</span><br></pre></td></tr></table></figure>

<p>7、总结</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line">1、- 取文本</span><br><span class="line">    - /text()：获取的是标签下直系的文本数据</span><br><span class="line">    - //text()：获取的是标签下所有的文本数据</span><br><span class="line">2、索引定位</span><br><span class="line">	索引值是从1开始:xpath里面</span><br><span class="line">	索引值是从0开始:tree.xpath(&apos;//div[@class=&quot;tang&quot;]/ul/li[5]/a/&apos;)[0]</span><br><span class="line">	</span><br><span class="line">3、在xpath表达式中非最左侧的/和//的区别？</span><br><span class="line">/表示一个层级</span><br><span class="line">//表示多个层级</span><br></pre></td></tr></table></figure>

<h3 id="4-4爬取boss中的岗位信息（岗位名称，薪资，公司名称，岗位描述）"><a href="#4-4爬取boss中的岗位信息（岗位名称，薪资，公司名称，岗位描述）" class="headerlink" title="4.4爬取boss中的岗位信息（岗位名称，薪资，公司名称，岗位描述）"></a>4.4爬取boss中的岗位信息（岗位名称，薪资，公司名称，岗位描述）</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> requests</span><br><span class="line"><span class="keyword">from</span> lxml <span class="keyword">import</span>  etree</span><br><span class="line">headers =&#123;</span><br><span class="line">    <span class="string">"User-Agent"</span>: <span class="string">"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36"</span></span><br><span class="line">&#125;</span><br><span class="line">url = <span 
class="string">'https://www.zhipin.com/c101010100/?query=python爬虫&amp;page=%d'</span></span><br><span class="line"><span class="keyword">with</span> open(<span class="string">"boss"</span>,<span class="string">"w"</span>,encoding=<span class="string">"utf-8"</span>) <span class="keyword">as</span> f:</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> range(<span class="number">1</span>,<span class="number">3</span>):</span><br><span class="line">        new_url = url + str(i)</span><br><span class="line">        page = requests.get(url=new_url,headers=headers).text</span><br><span class="line">        tree = etree.HTML(page)</span><br><span class="line">        ex =<span class="string">'//*[@id="main"]/div/div[3]/ul/li'</span></span><br><span class="line">        detail_list =tree.xpath(ex)</span><br><span class="line">        <span class="keyword">for</span> i <span class="keyword">in</span> detail_list:</span><br><span class="line">            detail_salery = i.xpath(<span class="string">'./div/div/h3/a/span/text()'</span>)</span><br><span class="line">            detail_name = i.xpath(<span class="string">'./div/div[1]/h3/a/div[1]/text()'</span>)</span><br><span class="line">            detail_url = i.xpath(<span class="string">'./div/div/h3/a/@href'</span>)</span><br><span class="line">            print(detail_url)</span><br><span class="line"></span><br><span class="line">            detail_company = i.xpath(<span class="string">'./div/div[2]/div/h3/a/text()'</span>)</span><br><span class="line">            <span class="string">"""</span></span><br><span class="line"><span class="string">            https://www.zhipin.com/job_detail/3b00c2ab3d21fe621HFz0926F1U~.html?ka=search_list_1</span></span><br><span class="line"><span class="string">            """</span></span><br><span class="line">            detail_new_url = <span class="string">"https://www.zhipin.com"</span>+ detail_url[<span 
class="number">0</span>]</span><br><span class="line">            detail_page = requests.get(url=detail_new_url, headers=headers).text</span><br><span class="line"></span><br><span class="line">            detail_tree = etree.HTML(detail_page)</span><br><span class="line">            <span class="string">"""</span></span><br><span class="line"><span class="string">            //*[@id="main"]/div[3]/div/div[2]/div[2]/div[1]/div</span></span><br><span class="line"><span class="string">            //*[@id="main"]/div[3]/div/div[2]/div[2]/div[1]/div</span></span><br><span class="line"><span class="string">            //*[@id="main"]/div[3]/div/div[2]/div[2]/div[1]/div</span></span><br><span class="line"><span class="string">            """</span></span><br><span class="line"></span><br><span class="line">            print(detail_tree)</span><br><span class="line">            decrcible_company = detail_tree.xpath(<span class="string">'//*[@id="main"]/div[3]/div/div[2]/div[2]/div[1]/div/text()'</span>)</span><br><span class="line">            info = detail_name[<span class="number">0</span>]+<span class="string">"\n"</span> +detail_salery[<span class="number">0</span>] +<span class="string">"\n"</span> +detail_company[<span class="number">0</span>]+<span class="string">"\n"</span> +<span class="string">''</span>.join([i.strip() <span class="keyword">for</span> i <span class="keyword">in</span> decrcible_company])+<span class="string">"\n"</span></span><br><span class="line">            f.write(info)</span><br></pre></td></tr></table></figure>

<h3 id="4-5爬取城市信息"><a href="#4-5爬取城市信息" class="headerlink" title="4.5爬取城市信息"></a>4.5爬取城市信息</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> lxml <span class="keyword">import</span> etree</span><br><span class="line"><span class="keyword">import</span> requests</span><br><span class="line">url = <span class="string">'https://www.aqistudy.cn/historydata/'</span></span><br><span class="line">headers =&#123;</span><br><span class="line">    <span class="string">"User-Agent"</span>: <span class="string">"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36"</span></span><br><span class="line">&#125;</span><br><span class="line">page_text = requests.get(url=url,headers = headers).text</span><br><span class="line">tree = etree.HTML(page_text)</span><br><span class="line">city = tree.xpath(<span class="string">"/html/body/div[3]/div/div[1]/div[2]/div[2]/ul/div[2]/li/a/text()|/html/body/div[3]/div/div[1]/div[1]/div[2]/ul/li//text()"</span>)</span><br><span class="line">print(city)</span><br></pre></td></tr></table></figure>

<h3 id="4-6总结"><a href="#4-6总结" class="headerlink" title="4.6总结"></a>4.6总结</h3><h2 id="五、selenium"><a href="#五、selenium" class="headerlink" title="五、selenium"></a>五、selenium</h2><h3 id="5-1介绍"><a href="#5-1介绍" class="headerlink" title="5.1介绍"></a>5.1介绍</h3><p>1、简介</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">selenium最初是一个自动化测试工具,而爬虫中使用它主要是为了解决requests无法直接执行JavaScript代码的问题 selenium本质是通过驱动浏览器，完全模拟浏览器的操作，比如跳转、输入、点击、下拉等，来拿到网页渲染之后的结果，可支持多种浏览器</span><br></pre></td></tr></table></figure>

<p>2、环境安装</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">下载安装selenium：pip install selenium</span><br><span class="line">下载浏览器驱动程序：</span><br><span class="line">http://chromedriver.storage.googleapis.com/index.html</span><br><span class="line">查看驱动和浏览器版本的映射关系：</span><br><span class="line">http://blog.csdn.net/huilan_same/article/details/51896672</span><br><span class="line">高版本下载的时候有对应关系，64位操作系统可以下载32位的。谷歌浏览器最好安装在c盘，不然可能会报错</span><br></pre></td></tr></table></figure>

<h3 id="5-2简单使用"><a href="#5-2简单使用" class="headerlink" title="5.2简单使用"></a>5.2简单使用</h3><p>1、浏览器的创建</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line">	Selenium支持非常多的浏览器，如Chrome、Firefox、Edge等，还有Android、BlackBerry等手机端的浏览器。另外，也支持无界面浏览器PhantomJS。</span><br><span class="line"></span><br><span class="line">from selenium import webdriver </span><br><span class="line">browser = webdriver.Chrome()</span><br><span class="line">browser = webdriver.Firefox()</span><br><span class="line">browser = webdriver.Edge()</span><br><span class="line">browser = webdriver.PhantomJS()</span><br><span class="line">browser = webdriver.Safari()</span><br></pre></td></tr></table></figure>

<p>2、元素定位</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br></pre></td><td class="code"><pre><span class="line">webdriver 提供了一系列的元素定位方法，常用的有以下几种：</span><br><span class="line">find_element_by_id()</span><br><span class="line">find_element_by_name()</span><br><span class="line">find_element_by_class_name()</span><br><span class="line">find_element_by_tag_name()</span><br><span class="line">find_element_by_link_text()</span><br><span class="line">find_element_by_partial_link_text()</span><br><span class="line">find_element_by_xpath()</span><br><span class="line">find_element_by_css_selector()</span><br><span class="line">注意：</span><br><span class="line"><span class="number">1</span>、find_element_by_xxx找的是第一个符合条件的标签，find_elements_by_xxx找的是所有符合条件的标签。</span><br><span class="line"><span class="number">2</span>、根据ID、CSS选择器和XPath获取，它们返回的结果完全一致。</span><br><span class="line"><span class="number">3</span>、另外，Selenium还提供了通用方法find_element()，它需要传入两个参数：查找方式By和值。实际上，它就是find_element_by_id()这种方法的通用函数版本，比如find_element_by_id(id)就等价find_element(By.ID, id)，二者得到的结果完全一致。</span><br></pre></td></tr></table></figure>

<p>3、节点交互</p>
<p>​        Selenium可以驱动浏览器来执行一些操作，也就是说可以让浏览器模拟执行一些动作。比较常见的用法有：输入文字时用<code>send_keys()</code>方法，清空文字时用<code>clear()</code>方法，点击按钮时用<code>click()</code>方法。示例如下：</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> selenium <span class="keyword">import</span> webdriver</span><br><span class="line"><span class="keyword">import</span> time</span><br><span class="line"> </span><br><span class="line">browser = webdriver.Chrome()</span><br><span class="line">browser.get(<span class="string">'https://www.taobao.com'</span>)</span><br><span class="line">input = browser.find_element_by_id(<span class="string">'q'</span>)</span><br><span class="line">input.send_keys(<span class="string">'MAC'</span>)</span><br><span class="line">time.sleep(<span class="number">1</span>)</span><br><span class="line">input.clear()</span><br><span class="line">input.send_keys(<span class="string">'IPhone'</span>)</span><br><span class="line">button = browser.find_element_by_class_name(<span class="string">'btn-search'</span>)</span><br><span class="line">button.click()</span><br><span class="line">browser.quit()</span><br></pre></td></tr></table></figure>

<p>4、动作链</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">在上面的实例中，一些交互动作都是针对某个节点执行的。比如，对于输入框，我们就调用它的输入文字和清空文字方法；对于按钮，就调用它的点击方法。其实，还有另外一些操作，它们没有特定的执行对象，比如鼠标拖曳、键盘按键等，这些动作用另一种方式来执行，那就是动作链。</span><br><span class="line">比如，现在实现一个节点的拖曳操作，将某个节点从一处拖曳到另外一处，可以这样实现</span><br></pre></td></tr></table></figure>

<p>​        将某个节点从一处拖曳到另外一处</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> selenium <span class="keyword">import</span> webdriver</span><br><span class="line"><span class="keyword">from</span> selenium.webdriver <span class="keyword">import</span> ActionChains</span><br><span class="line"><span class="keyword">import</span> time</span><br><span class="line">browser = webdriver.Chrome()</span><br><span class="line">url = <span class="string">'http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'</span></span><br><span class="line">browser.get(url)</span><br><span class="line">browser.switch_to.frame(<span class="string">'iframeResult'</span>)</span><br><span class="line">source = browser.find_element_by_css_selector(<span class="string">'#draggable'</span>)</span><br><span class="line">target = browser.find_element_by_css_selector(<span class="string">'#droppable'</span>)</span><br><span class="line">actions = ActionChains(browser)</span><br><span class="line"><span class="comment"># actions.drag_and_drop(source, target)</span></span><br><span class="line"><span class="comment"># actions.perform() #执行动作链</span></span><br><span class="line">actions.click_and_hold(source)</span><br><span class="line">time.sleep(<span class="number">3</span>)</span><br><span class="line"><span 
class="keyword">for</span> i <span class="keyword">in</span> range(<span class="number">5</span>):</span><br><span class="line">    actions.move_by_offset(xoffset=<span class="number">17</span>,yoffset=<span class="number">0</span>).perform()</span><br><span class="line">    time.sleep(<span class="number">0.5</span>)</span><br><span class="line"></span><br><span class="line">actions.release()</span><br></pre></td></tr></table></figure>

<p>5、执行JavaScript</p>
<p>​        对于某些操作，Selenium API并没有提供。比如，下拉进度条，它可以直接模拟运行JavaScript，此时使用<code>execute_script()</code>方法即可实现，代码如下</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> selenium <span class="keyword">import</span> webdriver</span><br><span class="line"></span><br><span class="line">browser = webdriver.Chrome()</span><br><span class="line">browser.get(<span class="string">'https://www.jd.com/'</span>)</span><br><span class="line">browser.execute_script(<span class="string">'window.scrollTo(0, document.body.scrollHeight)'</span>)</span><br><span class="line">browser.execute_script(<span class="string">'alert("123")'</span>)</span><br></pre></td></tr></table></figure>

<p>6、获取页面源码数据</p>
<p>​        通过<code>page_source</code>属性可以获取网页的源代码，接着就可以使用解析库（如正则表达式、Beautiful Soup、pyquery等）来提取信息了。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">#模拟浏览器的前进后退</span></span><br><span class="line"><span class="keyword">import</span> time</span><br><span class="line"><span class="keyword">from</span> selenium <span class="keyword">import</span> webdriver</span><br><span class="line"> </span><br><span class="line">browser=webdriver.Chrome()</span><br><span class="line">browser.get(<span class="string">'https://www.baidu.com'</span>)</span><br><span class="line">browser.get(<span class="string">'https://www.taobao.com'</span>)</span><br><span class="line">browser.get(<span class="string">'http://www.sina.com.cn/'</span>)</span><br><span class="line"> </span><br><span class="line">browser.back()</span><br><span class="line">time.sleep(<span class="number">10</span>)</span><br><span class="line">browser.forward()</span><br><span class="line">browser.close()</span><br></pre></td></tr></table></figure>

<p>7、selenium操作cookie</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line">使用Selenium，还可以方便地对Cookies进行操作，例如获取、添加、删除Cookies等。示例如下：</span><br><span class="line"></span><br><span class="line">from selenium import webdriver </span><br><span class="line">browser = webdriver.Chrome()</span><br><span class="line">browser.get(&apos;https://www.zhihu.com/explore&apos;)</span><br><span class="line">print(browser.get_cookies())</span><br><span class="line">browser.add_cookie(&#123;&apos;name&apos;: &apos;name&apos;, &apos;domain&apos;: &apos;www.zhihu.com&apos;, &apos;value&apos;: &apos;germey&apos;&#125;)</span><br><span class="line">print(browser.get_cookies())</span><br><span class="line">browser.delete_all_cookies()</span><br><span class="line">print(browser.get_cookies())</span><br></pre></td></tr></table></figure>

<p>8、异常处理</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> selenium <span class="keyword">import</span> webdriver</span><br><span class="line"><span class="keyword">from</span> selenium.common.exceptions <span class="keyword">import</span> TimeoutException,NoSuchElementException,NoSuchFrameException</span><br><span class="line"></span><br><span class="line"><span class="keyword">try</span>:</span><br><span class="line">    browser=webdriver.Chrome()</span><br><span class="line">    browser.get(<span class="string">'http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'</span>)</span><br><span class="line">    browser.switch_to.frame(<span class="string">'iframssseResult'</span>)</span><br><span class="line"></span><br><span class="line"><span class="keyword">except</span> TimeoutException <span class="keyword">as</span> e:</span><br><span class="line">    print(e)</span><br><span class="line"><span class="keyword">except</span> NoSuchFrameException <span class="keyword">as</span> e:</span><br><span class="line">    print(e)</span><br><span class="line"><span class="keyword">finally</span>:</span><br><span class="line">    browser.close()</span><br></pre></td></tr></table></figure>

<h3 id="5-3plantomJS"><a href="#5-3plantomJS" class="headerlink" title="5.3PhantomJS"></a>5.3PhantomJS</h3><figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">PhantomJS是一款无界面的浏览器，其自动化操作流程和上述操作谷歌浏览器是一致的。由于是无界面的，为了能够展示自动化操作流程，PhantomJS为用户提供了一个截屏的功能，使用save_screenshot函数实现。</span><br></pre></td></tr></table></figure>

<p>实例：</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> selenium <span class="keyword">import</span> webdriver</span><br><span class="line"><span class="keyword">import</span> time</span><br><span class="line"></span><br><span class="line"><span class="comment"># phantomjs路径</span></span><br><span class="line">path = <span class="string">r'PhantomJS驱动路径'</span></span><br><span class="line">browser = webdriver.PhantomJS(path)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 打开百度</span></span><br><span class="line">url = <span class="string">'http://www.baidu.com/'</span></span><br><span class="line">browser.get(url)</span><br><span class="line"></span><br><span class="line">time.sleep(<span class="number">3</span>)</span><br><span class="line"></span><br><span 
class="line">browser.save_screenshot(<span class="string">r'phantomjs\baidu.png'</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 查找input输入框</span></span><br><span class="line">my_input = browser.find_element_by_id(<span class="string">'kw'</span>)</span><br><span class="line"><span class="comment"># 往框里面写文字</span></span><br><span class="line">my_input.send_keys(<span class="string">'美女'</span>)</span><br><span class="line">time.sleep(<span class="number">3</span>)</span><br><span class="line"><span class="comment">#截屏</span></span><br><span class="line">browser.save_screenshot(<span class="string">r'phantomjs\meinv.png'</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 查找搜索按钮</span></span><br><span class="line">button = browser.find_elements_by_class_name(<span class="string">'s_btn'</span>)[<span class="number">0</span>]</span><br><span class="line">button.click()</span><br><span class="line"></span><br><span class="line">time.sleep(<span class="number">3</span>)</span><br><span class="line"></span><br><span class="line">browser.save_screenshot(<span class="string">r'phantomjs\show.png'</span>)</span><br><span class="line"></span><br><span class="line">time.sleep(<span class="number">3</span>)</span><br><span class="line"></span><br><span class="line">browser.quit()</span><br></pre></td></tr></table></figure>

<h3 id="5-4无头浏览器"><a href="#5-4无头浏览器" class="headerlink" title="5.4无头浏览器"></a>5.4无头浏览器</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> selenium <span class="keyword">import</span> webdriver</span><br><span class="line"><span class="keyword">from</span> selenium.webdriver.chrome.options <span class="keyword">import</span> Options</span><br><span class="line"><span class="keyword">from</span> time <span class="keyword">import</span> sleep</span><br><span class="line">chrome_options = Options()</span><br><span class="line">chrome_options.add_argument(<span class="string">'--headless'</span>)</span><br><span class="line">chrome_options.add_argument(<span class="string">'--disable-gpu'</span>)</span><br><span class="line"></span><br><span class="line">bro = webdriver.Chrome(executable_path=<span class="string">'chromedriver.exe'</span>, chrome_options=chrome_options)</span><br><span class="line"></span><br><span class="line">bro.get(<span class="string">'https://www.baidu.com'</span>)</span><br><span class="line">sleep(<span class="number">3</span>)</span><br><span class="line">print(bro.page_source)</span><br><span class="line">bro.save_screenshot(<span class="string">'1.png'</span>)</span><br><span class="line"></span><br><span class="line">bro.quit()</span><br></pre></td></tr></table></figure>

<h3 id="5-5-selenium规避检测"><a href="#5-5-selenium规避检测" class="headerlink" title="5.5 selenium规避检测"></a>5.5 selenium规避检测</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">现在不少大网站对selenium采取了检测机制。比如正常情况下我们用浏览器访问淘宝等网站的 window.navigator.webdriver的值为 </span><br><span class="line">undefined。而使用selenium访问则该值为true。那么如何解决这个问题呢？</span><br><span class="line">	只需要设置Chromedriver的启动参数即可解决问题。在启动Chromedriver之前，为Chrome开启实验性功能参数excludeSwitches，它的值为[<span class="string">'enable-automation'</span>]，完整代码如下：</span><br><span class="line">    </span><br><span class="line">在浏览器的console里面输入  window.navigator.webdriver</span><br></pre></td></tr></table></figure>

<p>示例：</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> selenium.webdriver <span class="keyword">import</span> Chrome</span><br><span class="line"><span class="keyword">from</span> selenium.webdriver <span class="keyword">import</span> ChromeOptions</span><br><span class="line"></span><br><span class="line">option = ChromeOptions()</span><br><span class="line">option.add_experimental_option(<span class="string">'excludeSwitches'</span>, [<span class="string">'enable-automation'</span>])</span><br><span class="line">driver = Chrome(options=option)</span><br></pre></td></tr></table></figure>

<h3 id="5-6度自动化设置效果演示"><a href="#5-6度自动化设置效果演示" class="headerlink" title="5.6度自动化设置效果演示"></a>5.6度自动化设置效果演示</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> selenium <span class="keyword">import</span> webdriver</span><br><span class="line"><span class="keyword">from</span> time <span class="keyword">import</span> sleep</span><br><span class="line"></span><br><span class="line"><span class="comment"># 后面是你的浏览器驱动位置，记得前面加r'','r'是防止字符转义的</span></span><br><span class="line">driver = webdriver.Chrome(<span class="string">r'chromedriver.exe'</span>)</span><br><span class="line"><span class="comment"># 
用get打开百度页面</span></span><br><span class="line">driver.get(<span class="string">"http://www.baidu.com"</span>)</span><br><span class="line"><span class="comment"># 查找页面的“设置”选项，并进行点击</span></span><br><span class="line">driver.find_elements_by_link_text(<span class="string">'设置'</span>)[<span class="number">0</span>].click()</span><br><span class="line">sleep(<span class="number">2</span>)</span><br><span class="line"><span class="comment"># # 打开设置后找到“搜索设置”选项，设置为每页显示50条</span></span><br><span class="line">driver.find_elements_by_link_text(<span class="string">'搜索设置'</span>)[<span class="number">0</span>].click()</span><br><span class="line">sleep(<span class="number">2</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 选中每页显示50条</span></span><br><span class="line">m = driver.find_element_by_id(<span class="string">'nr'</span>)</span><br><span class="line">sleep(<span class="number">2</span>)</span><br><span class="line">m.find_element_by_xpath(<span class="string">'//*[@id="nr"]/option[3]'</span>).click()</span><br><span class="line">m.find_element_by_xpath(<span class="string">'.//option[3]'</span>).click()</span><br><span class="line">sleep(<span class="number">2</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 点击保存设置</span></span><br><span class="line">driver.find_elements_by_class_name(<span class="string">"prefpanelgo"</span>)[<span class="number">0</span>].click()</span><br><span class="line">sleep(<span class="number">2</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 处理弹出的警告页面   确定accept() 和 取消dismiss()</span></span><br><span class="line">driver.switch_to_alert().accept()</span><br><span class="line">sleep(<span class="number">2</span>)</span><br><span class="line"><span class="comment"># 找到百度的输入框，并输入 美女</span></span><br><span class="line">driver.find_element_by_id(<span class="string">'kw'</span>).send_keys(<span 
class="string">'美女'</span>)</span><br><span class="line">sleep(<span class="number">2</span>)</span><br><span class="line"><span class="comment"># 点击搜索按钮</span></span><br><span class="line">driver.find_element_by_id(<span class="string">'su'</span>).click()</span><br><span class="line">sleep(<span class="number">2</span>)</span><br><span class="line"><span class="comment"># 在打开的页面中找到“Selenium - 开源中国社区”，并打开这个页面</span></span><br><span class="line">driver.find_elements_by_link_text(<span class="string">'美女_百度图片'</span>)[<span class="number">0</span>].click()</span><br><span class="line">sleep(<span class="number">3</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 关闭浏览器</span></span><br><span class="line">driver.quit()</span><br></pre></td></tr></table></figure>

<h3 id="5-7模拟登陆qq空间"><a href="#5-7模拟登陆qq空间" class="headerlink" title="5.7模拟登陆qq空间"></a>5.7模拟登陆qq空间</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> time</span><br><span class="line"><span class="keyword">from</span> selenium <span class="keyword">import</span>  webdriver</span><br><span class="line">bro = webdriver.Chrome(executable_path=<span class="string">'chromedriver.exe'</span>)</span><br><span class="line"></span><br><span class="line">url = <span class="string">'https://qzone.qq.com'</span></span><br><span class="line">bro.get(url)</span><br><span class="line"></span><br><span class="line">bro.switch_to.frame(<span class="string">'login_frame'</span>)</span><br><span class="line">a_tag = bro.find_element_by_id(<span class="string">'switcher_plogin'</span>)</span><br><span class="line">a_tag.click()</span><br><span class="line"></span><br><span class="line">bro.find_element_by_id(<span class="string">'u'</span>).send_keys(<span class="string">'1298143029'</span>)</span><br><span class="line">bro.find_element_by_id(<span class="string">'p'</span>).send_keys(<span class="string">'129814xi'</span>)</span><br><span class="line">bro.find_element_by_id(<span class="string">'login_button'</span>).click()</span><br><span class="line">time.sleep(<span 
class="number">2</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment">#登陆成功后对应的主页</span></span><br><span class="line">page_text = bro.page_source</span><br><span class="line">bro.close()</span><br></pre></td></tr></table></figure>

<h2 id="六、Pyppeteer"><a href="#六、Pyppeteer" class="headerlink" title="六、Pyppeteer"></a>六、Pyppeteer</h2><h3 id="6-1简介"><a href="#6-1简介" class="headerlink" title="6.1简介"></a>6.1简介</h3><p>1、什么是Pyppeteer</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">Pyppeteer 实际上是 Puppeteer 的 Python 版本的实现，但它不是 Google 开发的，是一位来自于日本的工程师依据 Puppeteer 的一些功能开发出来的非官方版本。</span><br></pre></td></tr></table></figure>

<p>2、为什么要使用Pyppeteer</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">Selenium 在被使用的时候有个麻烦事，就是环境的相关配置，得安装好相关浏览器，比如 Chrome、Firefox 等等，然后还要到官方网站去下载对应的驱动，最重要的还需要安装对应的 Python Selenium 库，确实不是很方便，另外如果要做大规模部署的话，环境配置的一些问题也是个头疼的事情</span><br></pre></td></tr></table></figure>

<p>3、特点</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">Pyppeteer 就是依赖于 Chromium 这个浏览器来运行的</span><br><span class="line">Pyppeteer 是基于 Python 的新特性 async 实现的，所以它的一些执行也支持异步操作，效率相对于 Selenium 来说也提高了</span><br></pre></td></tr></table></figure>

<p>4、安装</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">pip install pyppeteer</span><br></pre></td></tr></table></figure>

<h3 id="6-2具体使用"><a href="#6-2具体使用" class="headerlink" title="6.2具体使用"></a>6.2具体使用</h3><p>1、参数介绍</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">ignoreHTTPSErrors (bool): 是否要忽略 HTTPS 的错误，默认是 <span class="literal">False</span>。</span><br><span class="line">headless (bool): 是否启用 Headless 模式，即无界面模式，如果 devtools 这个参数是 <span class="literal">True</span> 的话，那么该参数就会被设置为 <span class="literal">False</span>，否则为 <span class="literal">True</span>，即默认是开启无界面模式的。</span><br><span class="line">executablePath (str): 可执行文件的路径，如果指定之后就不需要使用默认的 Chromium 了，可以指定为已有的 Chrome 或 Chromium。</span><br><span class="line">args (List[str]): 在执行过程中可以传入的额外参数。</span><br><span class="line">devtools (bool): 是否为每一个页面自动开启调试工具，默认是 <span class="literal">False</span>。如果这个参数设置为 <span class="literal">True</span>，那么 headless 参数就会无效，会被强制设置为 <span class="literal">False</span>。</span><br></pre></td></tr></table></figure>

<p>2、关闭显示条</p>
<p>​    “Chrome 正受到自动测试软件的控制”，这个提示条有点烦，那咋关闭呢？这时候就需要用到 args 参数了，禁用操作如下：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">browser = await launch(headless=False, args=[&apos;--disable-infobars&apos;])</span><br></pre></td></tr></table></figure>

<p>3、访问淘宝首页</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> asyncio</span><br><span class="line"><span class="keyword">from</span> pyppeteer <span class="keyword">import</span> launch</span><br><span class="line"> </span><br><span class="line"><span class="keyword">async</span> <span class="function"><span class="keyword">def</span> <span class="title">main</span><span class="params">()</span>:</span></span><br><span class="line">    browser = <span class="keyword">await</span> launch(headless=<span class="literal">False</span>)</span><br><span class="line">    page = <span class="keyword">await</span> browser.newPage()</span><br><span class="line">    <span class="keyword">await</span> page.goto(<span class="string">'https://www.taobao.com'</span>)</span><br><span class="line">    <span class="keyword">await</span> asyncio.sleep(<span class="number">10</span>)</span><br><span class="line"></span><br><span class="line">asyncio.get_event_loop().run_until_complete(main())</span><br></pre></td></tr></table></figure>

<p>4、发现页面显示出现了问题，需要手动调用setViewport方法设置显示页面的长宽像素</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> asyncio</span><br><span class="line"><span class="keyword">from</span> pyppeteer <span class="keyword">import</span> launch</span><br><span class="line"> </span><br><span class="line">width, height = <span class="number">1366</span>,<span class="number">768</span></span><br><span class="line"> </span><br><span class="line"><span class="keyword">async</span> <span class="function"><span class="keyword">def</span> <span class="title">main</span><span class="params">()</span>:</span></span><br><span class="line">    browser = <span class="keyword">await</span> launch(headless=<span class="literal">False</span>)</span><br><span class="line">    page = <span class="keyword">await</span> browser.newPage()</span><br><span class="line">    <span class="keyword">await</span> page.setViewport(&#123;<span class="string">'width'</span>: width, <span class="string">'height'</span>: height&#125;)</span><br><span class="line">    <span class="keyword">await</span> page.goto(<span class="string">'https://www.taobao.com'</span>)</span><br><span class="line">    <span class="keyword">await</span> asyncio.sleep(<span class="number">3</span>)</span><br><span class="line">    </span><br><span class="line">asyncio.get_event_loop().run_until_complete(main())</span><br></pre></td></tr></table></figure>

<p>5、执行js程序：拖动滚轮。调用evaluate方法</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> asyncio</span><br><span class="line"><span class="keyword">from</span> pyppeteer <span class="keyword">import</span> launch</span><br><span class="line">width, height = <span class="number">1366</span>, <span class="number">768</span></span><br><span class="line"><span class="keyword">async</span> <span class="function"><span class="keyword">def</span> <span class="title">main</span><span class="params">()</span>:</span></span><br><span class="line">    browser = <span class="keyword">await</span> launch(headless=<span class="literal">False</span>)</span><br><span class="line">    page = <span class="keyword">await</span> browser.newPage()</span><br><span class="line">    <span class="keyword">await</span> page.setViewport(&#123;<span class="string">'width'</span>: width, <span class="string">'height'</span>: height&#125;)</span><br><span class="line">    <span class="keyword">await</span> page.goto(<span class="string">'https://movie.douban.com/typerank?type_name=%E5%8A%A8%E4%BD%9C&amp;type=5&amp;interval_id=100:90&amp;action='</span>)</span><br><span class="line">    <span class="keyword">await</span> asyncio.sleep(<span class="number">3</span>)</span><br><span class="line">    <span class="comment">#evaluate可以返回js程序的返回值</span></span><br><span class="line">    dimensions = <span class="keyword">await</span> 
page.evaluate(<span class="string">'window.scrollTo(0,document.body.scrollHeight)'</span>)</span><br><span class="line">    <span class="keyword">await</span> asyncio.sleep(<span class="number">3</span>)</span><br><span class="line">    print(dimensions)</span><br><span class="line">    <span class="keyword">await</span> browser.close()</span><br><span class="line"> </span><br><span class="line">asyncio.get_event_loop().run_until_complete(main())</span><br></pre></td></tr></table></figure>

<p>6、规避webdriver检测</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> asyncio</span><br><span class="line"><span class="keyword">from</span> pyppeteer <span class="keyword">import</span> launch</span><br><span class="line"><span class="keyword">async</span> <span class="function"><span class="keyword">def</span> <span class="title">main</span><span class="params">()</span>:</span></span><br><span class="line">    browser = <span class="keyword">await</span> launch(headless=<span class="literal">False</span>, args=[<span class="string">'--disable-infobars'</span>])</span><br><span class="line">    page = <span class="keyword">await</span> browser.newPage()</span><br><span class="line">    <span class="keyword">await</span> page.goto(<span class="string">'https://login.taobao.com/member/login.jhtml?redirectURL=https://www.taobao.com/'</span>)</span><br><span class="line">    <span class="keyword">await</span> page.evaluate(</span><br><span class="line">    <span class="string">'''() =&gt;&#123; Object.defineProperties(navigator,&#123; webdriver:&#123; get: () =&gt; false &#125; &#125;) &#125;'''</span>)</span><br><span class="line">    <span class="keyword">await</span> asyncio.sleep(<span class="number">10</span>)</span><br><span class="line"> </span><br><span class="line">asyncio.get_event_loop().run_until_complete(main())</span><br></pre></td></tr></table></figure>

<p>7、UA伪装</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">await</span> self.page.setUserAgent(<span class="string">'xxx'</span>)</span><br></pre></td></tr></table></figure>

<p>8、节点交互</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> asyncio</span><br><span class="line"><span class="keyword">from</span> pyppeteer <span class="keyword">import</span> launch</span><br><span class="line"><span class="keyword">async</span> <span class="function"><span class="keyword">def</span> <span class="title">main</span><span class="params">()</span>:</span></span><br><span class="line">    <span class="comment"># headless参数设为False，则变成有头模式</span></span><br><span class="line">    browser = <span class="keyword">await</span> launch(</span><br><span class="line">    headless=<span class="literal">False</span></span><br><span class="line">    )</span><br><span class="line"></span><br><span class="line">    page = <span class="keyword">await</span> browser.newPage()</span><br><span class="line">    <span class="comment"># 设置页面视图大小</span></span><br><span class="line">    <span class="keyword">await</span> page.setViewport(viewport=&#123;<span class="string">'width'</span>: <span class="number">1280</span>, <span class="string">'height'</span>: <span 
class="number">800</span>&#125;)</span><br><span class="line"></span><br><span class="line">    <span class="keyword">await</span> page.goto(<span class="string">'https://www.baidu.com/'</span>)</span><br><span class="line">    <span class="comment">#节点交互</span></span><br><span class="line">    <span class="keyword">await</span> page.type(<span class="string">'#kw'</span>,<span class="string">'周杰伦'</span>,&#123;<span class="string">'delay'</span>: <span class="number">1000</span>&#125;)</span><br><span class="line">    <span class="keyword">await</span> asyncio.sleep(<span class="number">3</span>)</span><br><span class="line">    <span class="keyword">await</span> page.click(<span class="string">'#su'</span>)</span><br><span class="line">    <span class="keyword">await</span> asyncio.sleep(<span class="number">3</span>)</span><br><span class="line">    <span class="comment">#使用选择器选中标签进行点击</span></span><br><span class="line">    alist = <span class="keyword">await</span> page.querySelectorAll(<span class="string">'.s_tab_inner &gt; a'</span>)</span><br><span class="line">    a = alist[<span class="number">3</span>]</span><br><span class="line">    <span class="keyword">await</span> a.click()</span><br><span class="line">    <span class="keyword">await</span> asyncio.sleep(<span class="number">3</span>)</span><br><span class="line">    <span class="keyword">await</span> browser.close()</span><br><span class="line">asyncio.get_event_loop().run_until_complete(main())</span><br></pre></td></tr></table></figure>

<h2 id="6-3小小爬"><a href="#6-3小小爬" class="headerlink" title="6.3小小爬"></a>6.3小小爬</h2><p>1、<a href="http://quotes.toscrape.com/js/" target="_blank" rel="noopener">http://quotes.toscrape.com/js/</a> 全部页面数据</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> asyncio</span><br><span class="line"><span class="keyword">from</span> pyppeteer <span class="keyword">import</span> launch</span><br><span class="line"><span class="keyword">from</span> lxml <span class="keyword">import</span> etree</span><br><span class="line"> </span><br><span class="line"><span class="keyword">async</span> <span class="function"><span class="keyword">def</span> <span class="title">main</span><span class="params">()</span>:</span></span><br><span class="line">    browser = <span class="keyword">await</span> launch()</span><br><span class="line">    page = <span class="keyword">await</span> browser.newPage()</span><br><span class="line">    <span class="keyword">await</span> page.goto(<span class="string">'http://quotes.toscrape.com/js/'</span>)</span><br><span class="line">    page_text = <span class="keyword">await</span> page.content()</span><br><span class="line">    tree = etree.HTML(page_text)</span><br><span class="line">    div_list = tree.xpath(<span class="string">'//div[@class="quote"]'</span>)</span><br><span class="line">    print(len(div_list))</span><br><span class="line">    <span class="keyword">await</span> browser.close()</span><br><span class="line"> </span><br><span 
class="line">asyncio.get_event_loop().run_until_complete(main())</span><br><span class="line"><span class="comment">#创建一个Browser对象</span></span><br><span class="line"><span class="comment">#然后调用Browser对象的newPage 方法相当于浏览器中新建了一个选项卡，同时创建了page对象</span></span><br><span class="line"><span class="comment">#Page 对象调用了 goto 方法就相当于在浏览器中输入了这个 URL</span></span><br><span class="line"><span class="comment">#浏览器跳转到了对应的页面进行加载，加载完成之后再调用 content 方法，返回当前浏览器页面的源代码</span></span><br></pre></td></tr></table></figure>

<p>2、爬取头条和网易的新闻标题</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> asyncio</span><br><span class="line"><span class="keyword">from</span> pyppeteer <span class="keyword">import</span> launch</span><br><span class="line"><span class="keyword">from</span> lxml <span class="keyword">import</span> etree</span><br><span 
class="line"><span class="keyword">async</span> <span class="function"><span class="keyword">def</span> <span class="title">main</span><span class="params">()</span>:</span></span><br><span class="line">    <span class="comment"># headless参数设为False，则变成有头模式</span></span><br><span class="line">    browser = <span class="keyword">await</span> launch(</span><br><span class="line">    headless=<span class="literal">False</span></span><br><span class="line">    )</span><br><span class="line"></span><br><span class="line">    page1 = <span class="keyword">await</span> browser.newPage()</span><br><span class="line"></span><br><span class="line">    <span class="comment"># 设置页面视图大小</span></span><br><span class="line">    <span class="keyword">await</span> page1.setViewport(viewport=&#123;<span class="string">'width'</span>: <span class="number">1280</span>, <span class="string">'height'</span>: <span class="number">800</span>&#125;)</span><br><span class="line"></span><br><span class="line">    <span class="keyword">await</span> page1.goto(<span class="string">'https://www.toutiao.com/'</span>)</span><br><span class="line">    <span class="keyword">await</span> asyncio.sleep(<span class="number">2</span>)</span><br><span class="line">    <span class="comment"># 打印页面文本</span></span><br><span class="line">    page_text = <span class="keyword">await</span> page1.content()</span><br><span class="line"></span><br><span class="line">    page2 = <span class="keyword">await</span> browser.newPage()</span><br><span class="line">    <span class="keyword">await</span> page2.setViewport(viewport=&#123;<span class="string">'width'</span>: <span class="number">1280</span>, <span class="string">'height'</span>: <span class="number">800</span>&#125;)</span><br><span class="line">    <span class="keyword">await</span> page2.goto(<span class="string">'https://news.163.com/domestic/'</span>)</span><br><span class="line">    <span class="keyword">await</span> page2.evaluate(<span 
class="string">'window.scrollTo(0,document.body.scrollHeight)'</span>)</span><br><span class="line">    page_text1 = <span class="keyword">await</span> page2.content()</span><br><span class="line"></span><br><span class="line">    <span class="keyword">await</span> browser.close()</span><br><span class="line"></span><br><span class="line">    <span class="keyword">return</span> &#123;<span class="string">'wangyi'</span>:page_text1,<span class="string">'toutiao'</span>:page_text&#125;</span><br><span class="line"> </span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">parse</span><span class="params">(task)</span>:</span></span><br><span class="line">    content_dic = task.result()</span><br><span class="line">    wangyi = content_dic[<span class="string">'wangyi'</span>]</span><br><span class="line">    toutiao = content_dic[<span class="string">'toutiao'</span>]</span><br><span class="line">    tree = etree.HTML(toutiao)</span><br><span class="line">    a_list = tree.xpath(<span class="string">'//div[@class="title-box"]/a'</span>)</span><br><span class="line">    <span class="keyword">for</span> a <span class="keyword">in</span> a_list:</span><br><span class="line">    title = a.xpath(<span class="string">'./text()'</span>)[<span class="number">0</span>]</span><br><span class="line">    print(<span class="string">'toutiao:'</span>,title)</span><br><span class="line">    tree = etree.HTML(wangyi)</span><br><span class="line">    div_list = tree.xpath(<span class="string">'//div[@class="data_row news_article clearfix "]'</span>)</span><br><span class="line">    print(len(div_list))</span><br><span class="line">    <span class="keyword">for</span> div <span class="keyword">in</span> div_list:</span><br><span class="line">    title = div.xpath(<span class="string">'.//div[@class="news_title"]/h3/a/text()'</span>)[<span class="number">0</span>]</span><br><span class="line">    print(<span 
class="string">'wangyi:'</span>,title)</span><br><span class="line"> </span><br><span class="line">tasks = []</span><br><span class="line">task1 = asyncio.ensure_future(main())</span><br><span class="line">task1.add_done_callback(parse)</span><br><span class="line">tasks.append(task1)</span><br><span class="line">asyncio.get_event_loop().run_until_complete(asyncio.wait(tasks))</span><br></pre></td></tr></table></figure>


      
    </div>
    
    
    

    

    

    
      <div>
        <ul class="post-copyright">
  <li class="post-copyright-author">
    <strong>本文作者：</strong>
    
  </li>
  <li class="post-copyright-link">
    <strong>本文链接：</strong>
    <a href="http://yoursite.com/2019/09/20/【爬虫03】03数据解析/" title="爬虫数据解析">http://yoursite.com/2019/09/20/【爬虫03】03数据解析/</a>
  </li>
  <li class="post-copyright-license">
    <strong>版权声明： </strong>
    本博客所有文章除特别声明外，均采用 <a href="https://creativecommons.org/licenses/by-nc-sa/3.0/" rel="external nofollow" target="_blank">CC BY-NC-SA 3.0</a> 许可协议。转载请注明出处！
  </li>
</ul>

      </div>
    

    <footer class="post-footer">
      
        <div class="post-tags">
          
            <a href="/dxl/tags/爬虫/" rel="tag"># 爬虫</a>
          
        </div>
      

      
      
      

      
        <div class="post-nav">
          <div class="post-nav-next post-nav-item">
            
              <a href="/dxl/2019/09/20/【爬虫02】1UA代理/" rel="next" title="爬虫UA代理">
                <i class="fa fa-chevron-left"></i> 爬虫UA代理
              </a>
            
          </div>

          <span class="post-nav-divider"></span>

          <div class="post-nav-prev post-nav-item">
            
              <a href="/dxl/2019/09/20/【爬虫04】01验证码识别、模拟12306登录/" rel="prev" title="验证码识别、模拟12306登录">
                验证码识别、模拟12306登录 <i class="fa fa-chevron-right"></i>
              </a>
            
          </div>
        </div>
      

      
      
    </footer>
  </div>
  
  
  
  </article>



    <div class="post-spread">
      
    </div>
  </div>


          </div>
          


          

  
    <div class="comments" id="comments">
    </div>
  



        </div>
        
          
  
  <div class="sidebar-toggle">
    <div class="sidebar-toggle-line-wrap">
      <span class="sidebar-toggle-line sidebar-toggle-line-first"></span>
      <span class="sidebar-toggle-line sidebar-toggle-line-middle"></span>
      <span class="sidebar-toggle-line sidebar-toggle-line-last"></span>
    </div>
  </div>

  <aside id="sidebar" class="sidebar">
    
    <div class="sidebar-inner">

      

      
        <ul class="sidebar-nav motion-element">
          <li class="sidebar-nav-toc sidebar-nav-active" data-target="post-toc-wrap">
            文章目录
          </li>
          <li class="sidebar-nav-overview" data-target="site-overview-wrap">
            站点概览
          </li>
        </ul>
      

      <section class="site-overview-wrap sidebar-panel">
        <div class="site-overview">
          <div class="site-author motion-element" itemprop="author" itemscope itemtype="http://schema.org/Person">
            
              <img class="site-author-image" itemprop="image" src="/dxl/images/avatar.png" alt>
            
              <p class="site-author-name" itemprop="name"></p>
              <p class="site-description motion-element" itemprop="description"></p>
          </div>

          <nav class="site-state motion-element">

            
              <div class="site-state-item site-state-posts">
              
                <a href="/dxl/archives">
              
                  <span class="site-state-item-count">43</span>
                  <span class="site-state-item-name">日志</span>
                </a>
              </div>
            

            
              
              
              <div class="site-state-item site-state-categories">
                <a href="/dxl/categories/index.html">
                  <span class="site-state-item-count">6</span>
                  <span class="site-state-item-name">分类</span>
                </a>
              </div>
            

            
              
              
              <div class="site-state-item site-state-tags">
                <a href="/dxl/tags/index.html">
                  <span class="site-state-item-count">6</span>
                  <span class="site-state-item-name">标签</span>
                </a>
              </div>
            

          </nav>

          

          

          
          

          
          
            <div class="links-of-blogroll motion-element links-of-blogroll-inline">
              <div class="links-of-blogroll-title">
                <i class="fa  fa-fw fa-sign-out"></i>
                我的友链
              </div>
              <ul class="links-of-blogroll-list">
                
                  <li class="links-of-blogroll-item">
                    <a href="tencent://message/?Menu=yes&uin=1258517737&Site=QQ%E6%9E%81%E5%AE%A2&Service=300&sigT=45a1e5847943b64c6ff3990f8a9e644d2b31356cb0b4ac6b24663a3c8dd0f8aa12a595b1714f9d45/" title="申请坑位" target="_blank">申请坑位</a>
                  </li>
                
              </ul>
            </div>
          

          

        </div>
      </section>

      
      <!--noindex-->
        <section class="post-toc-wrap motion-element sidebar-panel sidebar-panel-active">
          <div class="post-toc">

            
              
            

            
              <div class="post-toc-content"><ol class="nav"><li class="nav-item nav-level-2"><a class="nav-link" href="#一、数据分析"><span class="nav-number">1.</span> <span class="nav-text">一、数据分析</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#1-1数据解析的作用"><span class="nav-number">1.1.</span> <span class="nav-text">1.1数据解析的作用</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#1-2实现数据解析的方法"><span class="nav-number">1.2.</span> <span class="nav-text">1.2实现数据解析的方法</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#1-3数据解析的通用原理"><span class="nav-number">1.2.1.</span> <span class="nav-text">1.3数据解析的通用原理</span></a></li></ol></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#二、正则解析"><span class="nav-number">2.</span> <span class="nav-text">二、正则解析</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#三、bs4解析"><span class="nav-number">3.</span> <span class="nav-text">三、bs4解析</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#3-1环境安装"><span class="nav-number">3.1.</span> <span class="nav-text">3.1环境安装</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#3-2模块方法的使用"><span class="nav-number">3.2.</span> <span class="nav-text">3.2模块方法的使用</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#四、-Xpath解析"><span class="nav-number">4.</span> <span class="nav-text">四、 Xpath解析</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#4-1介绍"><span class="nav-number">4.1.</span> <span class="nav-text">4.1介绍</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#4-2模块的使用"><span class="nav-number">4.2.</span> <span class="nav-text">4.2模块的使用</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#4-4爬取boss中的岗位信息（岗位名称，薪资，公司名称，岗位描述）"><span class="nav-number">4.3.</span> <span 
class="nav-text">4.4爬取boss中的岗位信息（岗位名称，薪资，公司名称，岗位描述）</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#4-5爬取城市信息"><span class="nav-number">4.4.</span> <span class="nav-text">4.5爬取城市信息</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#4-6总结"><span class="nav-number">4.5.</span> <span class="nav-text">4.6总结</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#五、selenium"><span class="nav-number">5.</span> <span class="nav-text">五、selenium</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#5-1介绍"><span class="nav-number">5.1.</span> <span class="nav-text">5.1介绍</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#5-2简单使用"><span class="nav-number">5.2.</span> <span class="nav-text">5.2简单使用</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#5-3plantomJS"><span class="nav-number">5.3.</span> <span class="nav-text">5.3plantomJS</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#5-4无头浏览器"><span class="nav-number">5.4.</span> <span class="nav-text">5.4无头浏览器</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#5-5-selenium规避检测"><span class="nav-number">5.5.</span> <span class="nav-text">5.5 selenium规避检测</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#5-6度自动化设置效果演示"><span class="nav-number">5.6.</span> <span class="nav-text">5.6度自动化设置效果演示</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#5-7模拟登陆qq空间"><span class="nav-number">5.7.</span> <span class="nav-text">5.7模拟登陆qq空间</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#六、Pyppeteer"><span class="nav-number">6.</span> <span class="nav-text">六、Pyppeteer</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#6-1简介"><span class="nav-number">7.</span> <span class="nav-text">6.1简介</span></a><ol class="nav-child"><li 
class="nav-item nav-level-3"><a class="nav-link" href="#6-2具体使用"><span class="nav-number">7.1.</span> <span class="nav-text">6.2具体使用</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#6-3小小爬"><span class="nav-number">8.</span> <span class="nav-text">6.3小小爬</span></a></li></ol></div>
            

          </div>
        </section>
      <!--/noindex-->
      

      
        <div class="back-to-top">
          <i class="fa fa-arrow-up"></i>
          
            <span id="scrollpercent"><span>0</span>%</span>
          
        </div>
      

    </div>
  </aside>


        
      </div>
    </main>

    <footer id="footer" class="footer">
      <div class="footer-inner">
        <div class="copyright">&copy; <span itemprop="copyrightYear">2019</span>
  <span class="with-love">
    <i class="fa fa-hand-peace-o"></i>
  </span>
  <span class="author" itemprop="copyrightHolder"></span>

  
</div>









        
<div class="busuanzi-count">
  <script async src="https://busuanzi.ibruce.info/busuanzi/2.3/busuanzi.pure.mini.js"></script>

  
    <span class="site-uv">
      本站访客数
      <span class="busuanzi-value" id="busuanzi_value_site_uv"></span>
      人次
    </span>
  

  
    <span class="site-pv">
      本站总访问量
      <span class="busuanzi-value" id="busuanzi_value_site_pv"></span>
      次
    </span>
  
</div>








        
      </div>
    </footer>

    

    

  </div>

  

<script>
  // Legacy-browser guard: if the environment lacks a *native* Promise
  // (detected via Object.prototype.toString, which a plain polyfill object
  // would not satisfy), null it out so a later-loaded polyfill can install
  // a consistent implementation.
  var hasNativePromise =
    Object.prototype.toString.call(window.Promise) === '[object Function]';
  if (!hasNativePromise) {
    window.Promise = null;
  }
</script>









  












  
  
    <script type="text/javascript" src="/dxl/lib/jquery/index.js?v=2.1.3"></script>
  

  
  
    <script type="text/javascript" src="/dxl/lib/fastclick/lib/fastclick.min.js?v=1.0.6"></script>
  

  
  
    <script type="text/javascript" src="/dxl/lib/jquery_lazyload/jquery.lazyload.js?v=1.9.7"></script>
  

  
  
    <script type="text/javascript" src="/dxl/lib/velocity/velocity.min.js?v=1.2.1"></script>
  

  
  
    <script type="text/javascript" src="/dxl/lib/velocity/velocity.ui.min.js?v=1.2.1"></script>
  

  
  
    <script type="text/javascript" src="/dxl/lib/fancybox/source/jquery.fancybox.pack.js?v=2.1.5"></script>
  


  


  <script type="text/javascript" src="/dxl/js/src/utils.js?v=5.1.4"></script>

  <script type="text/javascript" src="/dxl/js/src/motion.js?v=5.1.4"></script>



  
  


  <script type="text/javascript" src="/dxl/js/src/affix.js?v=5.1.4"></script>

  <script type="text/javascript" src="/dxl/js/src/schemes/pisces.js?v=5.1.4"></script>



  
  <script type="text/javascript" src="/dxl/js/src/scrollspy.js?v=5.1.4"></script>
<script type="text/javascript" src="/dxl/js/src/post-details.js?v=5.1.4"></script>



  


  <script type="text/javascript" src="/dxl/js/src/bootstrap.js?v=5.1.4"></script>



  


  




	





  





  










  <script src="https://cdn1.lncld.net/static/js/3.0.4/av-min.js"></script>
  <script src="https://unpkg.com/valine@1.4.14/dist/Valine.min.js"></script>
  
  <script>
    // Valine comment widget bootstrap (backed by LeanCloud).
    // Whitelist the guest-info fields Valine understands before passing
    // them to the widget.
    var GUEST = ['nick', 'mail', 'link'];
    var guest = 'nick,mail,link';
    // function expression (not an arrow) so this still parses in IE11,
    // consistent with the Promise shim this page ships for legacy browsers.
    guest = guest.split(',').filter(function (item) {
      return GUEST.indexOf(item) > -1;
    });
    new Valine({
        el: '#comments',
        verify: false,
        notify: false,
        // NOTE(review): appId/appKey are LeanCloud client (public) keys;
        // real access control must be configured server-side on LeanCloud.
        appId: '13B0JGDuA6ttduN8AQaR8CzF-gzGzoHsz',
        appKey: 'I13r9r5mVgq4jQYpYy6V4gW3',
        placeholder: '欢迎大佬指点~~~',
        avatar: 'mm',
        guest_info: guest,
        // was `'10' || 10`: the `|| 10` branch was dead code because the
        // string '10' is truthy; the option expects the number 10.
        pageSize: 10
    });
  </script>



  





  

  

  

  
  

  

  

  

</body>
</html>
