<!DOCTYPE html>
<html lang="zh-CN">
<head>
  <meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#222">
<meta name="generator" content="Hexo 4.2.0">
  <link rel="apple-touch-icon" sizes="180x180" href="/blog/images/apple-touch-icon-next.png">
  <link rel="icon" type="image/png" sizes="32x32" href="/blog/images/favicon-32x32-next.png">
  <link rel="icon" type="image/png" sizes="16x16" href="/blog/images/favicon-16x16-next.png">
  <link rel="mask-icon" href="/blog/images/logo.svg" color="#222">

<link rel="stylesheet" href="/blog/css/main.css">


<link rel="stylesheet" href="/blog/lib/font-awesome/css/font-awesome.min.css">
  <link rel="stylesheet" href="/blog/lib/pace/pace-theme-bounce.min.css">
  <script src="/blog/lib/pace/pace.min.js"></script>

<script id="hexo-configurations">
    // Global namespace object for the NexT theme's scripts; reuse the existing
    // one if another script already created it.
    var NexT = window.NexT || {};
    // Theme-wide configuration emitted by the Hexo/NexT generator (site root,
    // sidebar behavior, local search, motion transitions, etc.). Read by the
    // theme's JS bundles loaded later in the page; do not edit by hand —
    // regenerate from the Hexo/NexT config files instead.
    var CONFIG = {"hostname":"ipvb.gitee.io","root":"/blog/","scheme":"Gemini","version":"7.7.2","exturl":false,"sidebar":{"position":"left","display":"post","padding":18,"offset":12,"onmobile":false,"b2t":true,"scrollpercent":true},"copycode":{"enable":true,"show_result":true,"style":null},"back2top":{"enable":true,"sidebar":false,"scrollpercent":false},"bookmark":{"enable":false,"color":"#222","save":"auto"},"fancybox":false,"mediumzoom":false,"lazyload":false,"pangu":false,"comments":{"style":"tabs","active":null,"storage":true,"lazyload":false,"nav":null},"algolia":{"hits":{"per_page":10},"labels":{"input_placeholder":"Search for Posts","hits_empty":"We didn't find any results for the search: ${query}","hits_stats":"${hits} results found in ${time} ms"}},"localsearch":{"enable":true,"trigger":"auto","top_n_per_article":1,"unescape":false,"preload":false},"motion":{"enable":true,"async":false,"transition":{"post_block":"fadeIn","post_header":"slideDownIn","post_body":"slideDownIn","coll_header":"slideLeftIn","sidebar":"slideUpIn"}},"path":"search.xml"};
  </script>

  <meta name="description" content="爬虫项目地址：https:&#x2F;&#x2F;gitee.com&#x2F;itchenyumeng&#x2F;Crawler 一、Python爬虫介绍1. 什么是爬虫？网络爬虫也叫网络蜘蛛，如果把互联网比喻成一个蜘蛛网，那么蜘蛛就是在网上爬来爬去的蜘蛛，爬虫程序通过请求url地址，根据响应的内容进行解析采集数据，比如：如果响应内容是html，分析dom结构，进行dom解析、或者正则匹配，如果响应内容是xml&#x2F;json数据，就可以转">
<meta property="og:type" content="article">
<meta property="og:title" content="Python爬虫学习记录">
<meta property="og:url" content="https://ipvb.gitee.io/blog/2020/07/04/Python%E7%88%AC%E8%99%AB%E5%AD%A6%E4%B9%A0%E8%AE%B0%E5%BD%95/index.html">
<meta property="og:site_name" content="Chenyumeng的博客">
<meta property="og:description" content="爬虫项目地址：https:&#x2F;&#x2F;gitee.com&#x2F;itchenyumeng&#x2F;Crawler 一、Python爬虫介绍1. 什么是爬虫？网络爬虫也叫网络蜘蛛，如果把互联网比喻成一个蜘蛛网，那么蜘蛛就是在网上爬来爬去的蜘蛛，爬虫程序通过请求url地址，根据响应的内容进行解析采集数据，比如：如果响应内容是html，分析dom结构，进行dom解析、或者正则匹配，如果响应内容是xml&#x2F;json数据，就可以转">
<meta property="og:locale" content="zh_CN">
<meta property="og:image" content="https://images2015.cnblogs.com/blog/918906/201608/918906-20160830220006980-1873919293.png">
<meta property="article:published_time" content="2020-07-04T08:53:55.000Z">
<meta property="article:modified_time" content="2020-07-04T11:50:01.147Z">
<meta property="article:author" content="Chenyumeng">
<meta property="article:tag" content="Python">
<meta name="twitter:card" content="summary">
<meta name="twitter:image" content="https://images2015.cnblogs.com/blog/918906/201608/918906-20160830220006980-1873919293.png">

<link rel="canonical" href="https://ipvb.gitee.io/blog/2020/07/04/Python%E7%88%AC%E8%99%AB%E5%AD%A6%E4%B9%A0%E8%AE%B0%E5%BD%95/">


<script id="page-configurations">
  // https://hexo.io/docs/variables.html
  // Per-page flags for the NexT theme scripts. Extends the global CONFIG
  // object defined by the theme-wide configuration script earlier in <head>.
  CONFIG.page = {
    sidebar: "",      // per-page sidebar override ("" = use theme default)
    isHome : false,   // this page is not the site index
    isPost : true     // this page renders a single blog post
  };
</script>

  <title>Python爬虫学习记录 | Chenyumeng的博客</title>
  






  <noscript>
  <style>
  .use-motion .brand,
  .use-motion .menu-item,
  .sidebar-inner,
  .use-motion .post-block,
  .use-motion .pagination,
  .use-motion .comments,
  .use-motion .post-header,
  .use-motion .post-body,
  .use-motion .collection-header { opacity: initial; }

  .use-motion .site-title,
  .use-motion .site-subtitle {
    opacity: initial;
    top: initial;
  }

  .use-motion .logo-line-before i { left: initial; }
  .use-motion .logo-line-after i { right: initial; }
  </style>
</noscript>

  <link rel="stylesheet" href="/dist/css/share.min.css">
</head>

<body itemscope itemtype="http://schema.org/WebPage">
  <div class="container use-motion">
    <div class="headband"></div>

    <header class="header" itemscope itemtype="http://schema.org/WPHeader">
      <div class="header-inner"><div class="site-brand-container">
  <div class="site-nav-toggle">
    <div class="toggle" aria-label="切换导航栏">
      <span class="toggle-line toggle-line-first"></span>
      <span class="toggle-line toggle-line-middle"></span>
      <span class="toggle-line toggle-line-last"></span>
    </div>
  </div>

  <div class="site-meta">

    <div>
      <a href="/blog/" class="brand" rel="start">
        <span class="logo-line-before"><i></i></span>
        <span class="site-title">Chenyumeng的博客</span>
        <span class="logo-line-after"><i></i></span>
      </a>
    </div>
        <p class="site-subtitle">热爱自己，热爱生活</p>
  </div>

  <div class="site-nav-right">
    <div class="toggle popup-trigger">
        <i class="fa fa-search fa-fw fa-lg"></i>
    </div>
  </div>
</div>


<nav class="site-nav">
  
  <ul id="menu" class="menu">
        <li class="menu-item menu-item-home">

    <a href="/blog/" rel="section"><i class="fa fa-fw fa-home"></i>首页</a>

  </li>
        <li class="menu-item menu-item-about">

    <a href="/blog/about/" rel="section"><i class="fa fa-fw fa-user"></i>关于</a>

  </li>
        <li class="menu-item menu-item-tags">

    <a href="/blog/tags/" rel="section"><i class="fa fa-fw fa-tags"></i>标签<span class="badge">29</span></a>

  </li>
        <li class="menu-item menu-item-categories">

    <a href="/blog/categories/" rel="section"><i class="fa fa-fw fa-th"></i>分类<span class="badge">1</span></a>

  </li>
        <li class="menu-item menu-item-archives">

    <a href="/blog/archives/" rel="section"><i class="fa fa-fw fa-archive"></i>归档<span class="badge">32</span></a>

  </li>
      <li class="menu-item menu-item-search">
        <a role="button" tabindex="0" class="popup-trigger"><i class="fa fa-search fa-fw"></i>搜索
        </a>
      </li>
  </ul>

</nav>
  <div class="site-search">
    <div class="search-pop-overlay">
  <div class="popup search-popup">
      <div class="search-header">
  <span class="search-icon">
    <i class="fa fa-search"></i>
  </span>
  <div class="search-input-container">
    <input autocomplete="off" autocorrect="off" autocapitalize="off"
           placeholder="搜索..." spellcheck="false"
           type="search" class="search-input">
  </div>
  <span class="popup-btn-close">
    <i class="fa fa-times-circle"></i>
  </span>
</div>
<div id="search-result">
  <div id="no-result">
    <i class="fa fa-spinner fa-pulse fa-5x fa-fw"></i>
  </div>
</div>

  </div>
</div>

  </div>
</div>
    </header>

    
  <div class="back-to-top">
    <i class="fa fa-arrow-up"></i>
    <span>0%</span>
  </div>

  <a href="https://github.com/chen-yumeng" class="github-corner" title="Follow me on GitHub" aria-label="Follow me on GitHub" rel="noopener" target="_blank"><svg width="80" height="80" viewBox="0 0 250 250" aria-hidden="true"><path d="M0,0 L115,115 L130,115 L142,142 L250,250 L250,0 Z"></path><path d="M128.3,109.0 C113.8,99.7 119.0,89.6 119.0,89.6 C122.0,82.7 120.5,78.6 120.5,78.6 C119.2,72.0 123.4,76.3 123.4,76.3 C127.3,80.9 125.5,87.3 125.5,87.3 C122.9,97.6 130.6,101.9 134.4,103.2" fill="currentColor" style="transform-origin: 130px 106px;" class="octo-arm"></path><path d="M115.0,115.0 C114.9,115.1 118.7,116.5 119.8,115.4 L133.7,101.6 C136.9,99.2 139.9,98.4 142.2,98.6 C133.8,88.0 127.5,74.4 143.8,58.0 C148.5,53.4 154.0,51.2 159.7,51.0 C160.3,49.4 163.2,43.6 171.4,40.1 C171.4,40.1 176.1,42.5 178.8,56.2 C183.1,58.6 187.2,61.8 190.9,65.4 C194.5,69.0 197.7,73.2 200.1,77.6 C213.8,80.2 216.3,84.9 216.3,84.9 C212.7,93.1 206.9,96.0 205.4,96.6 C205.1,102.4 203.0,107.8 198.3,112.5 C181.9,128.9 168.3,122.5 157.7,114.1 C157.9,116.9 156.7,120.9 152.7,124.9 L141.0,136.5 C139.8,137.7 141.6,141.9 141.8,141.8 Z" fill="currentColor" class="octo-body"></path></svg></a>


    <main class="main">
      <div class="main-inner">
        <div class="content-wrap">
          

          <div class="content">
            

  <div class="posts-expand">
      
  
  
  <article itemscope itemtype="http://schema.org/Article" class="post-block " lang="zh-CN">
    <link itemprop="mainEntityOfPage" href="https://ipvb.gitee.io/blog/2020/07/04/Python%E7%88%AC%E8%99%AB%E5%AD%A6%E4%B9%A0%E8%AE%B0%E5%BD%95/">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="image" content="/blog/images/touxiang.JPG">
      <meta itemprop="name" content="Chenyumeng">
      <meta itemprop="description" content="用来记录自己学习中所遇到的问题以及如何解决和自己所学知识的理解">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="Chenyumeng的博客">
    </span>
      <header class="post-header">
        <h1 class="post-title" itemprop="name headline">
          Python爬虫学习记录
        </h1>

        <div class="post-meta">
            <span class="post-meta-item">
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              <span class="post-meta-item-text">发表于</span>
              

              <time title="创建时间：2020-07-04 16:53:55 / 修改时间：19:50:01" itemprop="dateCreated datePublished" datetime="2020-07-04T16:53:55+08:00">2020-07-04</time>
            </span>

          
            <span id="/blog/2020/07/04/Python%E7%88%AC%E8%99%AB%E5%AD%A6%E4%B9%A0%E8%AE%B0%E5%BD%95/" class="post-meta-item leancloud_visitors" data-flag-title="Python爬虫学习记录" title="阅读次数">
              <span class="post-meta-item-icon">
                <i class="fa fa-eye"></i>
              </span>
              <span class="post-meta-item-text">阅读次数：</span>
              <span class="leancloud-visitors-count"></span>
            </span><br>
            <span class="post-meta-item" title="本文字数">
              <span class="post-meta-item-icon">
                <i class="fa fa-file-word-o"></i>
              </span>
                <span class="post-meta-item-text">本文字数：</span>
              <span>70k</span>
            </span>
            <span class="post-meta-item" title="阅读时长">
              <span class="post-meta-item-icon">
                <i class="fa fa-clock-o"></i>
              </span>
                <span class="post-meta-item-text">阅读时长 &asymp;</span>
              <span>1:04</span>
            </span>

        </div>
      </header>

    
    
    
    <div class="post-body" itemprop="articleBody">

      
        <p>爬虫项目地址：<a href="https://gitee.com/itchenyumeng/Crawler" target="_blank" rel="noopener">https://gitee.com/itchenyumeng/Crawler</a></p>
<h1 id="一、Python爬虫介绍"><a href="#一、Python爬虫介绍" class="headerlink" title="一、Python爬虫介绍"></a>一、Python爬虫介绍</h1><h2 id="1-什么是爬虫？"><a href="#1-什么是爬虫？" class="headerlink" title="1. 什么是爬虫？"></a>1. 什么是爬虫？</h2><p><strong>网络爬虫</strong>也叫<strong>网络蜘蛛</strong>，如果把互联网比喻成一个蜘蛛网，那么蜘蛛就是在网上爬来爬去的蜘蛛，爬虫程序通过请求url地址，根据响应的内容进行解析采集数据，<br>比如：如果响应内容是html，分析dom结构，进行dom解析、或者正则匹配，如果响应内容是xml/json数据，就可以转数据对象，然后对数据进行解析。</p>
<a id="more"></a>

<h2 id="2-有什么作用？"><a href="#2-有什么作用？" class="headerlink" title="2. 有什么作用？"></a>2. 有什么作用？</h2><p>通过有效的爬虫手段批量采集数据，可以降低人工成本，提高有效数据量，给予运营/销售的数据支撑，加快产品发展。 </p>
<h2 id="3-合法性"><a href="#3-合法性" class="headerlink" title="3. 合法性"></a>3. 合法性</h2><p>爬虫是利用程序进行批量爬取网页上的公开信息，也就是前端显示的数据信息。因为信息是完全公开的，所以是合法的。其实就像浏览器一样，浏览器解析响应内容并渲染为页面，而爬虫解析响应内容采集想要的数据进行存储。</p>
<h2 id="4-爬虫基本套路"><a href="#4-爬虫基本套路" class="headerlink" title="4. 爬虫基本套路"></a>4. 爬虫基本套路</h2><ul>
<li>基本流程<ul>
<li>目标数据</li>
<li>来源地址</li>
<li>结构分析</li>
<li>实现构思</li>
<li>操刀编码</li>
</ul>
</li>
<li>基本手段<ul>
<li>破解请求限制<ul>
<li>请求头设置，如：User-Agent为有效客户端</li>
<li>控制请求频率(根据实际情景)</li>
<li>IP代理</li>
<li>签名/加密参数从html/cookie/js分析</li>
</ul>
</li>
<li>破解登录授权<ul>
<li>请求带上用户cookie信息</li>
</ul>
</li>
<li>破解验证码<ul>
<li>简单的验证码可以使用识图读验证码第三方库</li>
</ul>
</li>
</ul>
</li>
<li>解析数据<ul>
<li>HTML Dom解析<ul>
<li>正则匹配，通过的正则表达式来匹配想要爬取的数据，如：有些数据不是在html 标签里，而是在html的script 标签的js变量中</li>
<li>使用第三方库解析html dom，比较喜欢类jquery的库</li>
</ul>
</li>
<li>数据字符串<ul>
<li>正则匹配(根据情景使用) </li>
<li>转 JSON/XML 对象进行解析</li>
</ul>
</li>
</ul>
</li>
</ul>
<h2 id="5-python爬虫"><a href="#5-python爬虫" class="headerlink" title="5. python爬虫"></a>5. python爬虫</h2><ul>
<li><p>python写爬虫的优势</p>
<ul>
<li>python语法易学，容易上手</li>
<li>社区活跃，实现方案多可参考</li>
<li>各种功能包丰富</li>
<li>少量代码即可完成强大功能</li>
</ul>
</li>
<li><p>涉及模块包</p>
<ul>
<li><p>请求</p>
<ul>
<li><code>urllib</code></li>
<li><code>requests</code></li>
</ul>
</li>
<li><p>多线程</p>
<ul>
<li><code>threading</code></li>
</ul>
</li>
<li><p>解析</p>
<ul>
<li><p>正则</p>
<ul>
<li><code>re</code></li>
</ul>
</li>
<li><p>json解析</p>
<ul>
<li><code>json</code></li>
</ul>
</li>
<li><p>html dom解析</p>
<ul>
<li><code>beautifulsoup</code></li>
</ul>
</li>
<li><p>lxml</p>
<ul>
<li>xpath</li>
</ul>
</li>
<li><p>CSS选择器</p>
<ul>
<li>pyquery</li>
</ul>
</li>
</ul>
</li>
<li><p>操作浏览器</p>
<ul>
<li><code>selenium</code></li>
</ul>
</li>
</ul>
</li>
</ul>
<h1 id="二、工具的使用"><a href="#二、工具的使用" class="headerlink" title="二、工具的使用"></a>二、工具的使用</h1><h2 id="1-常用的工具"><a href="#1-常用的工具" class="headerlink" title="1. 常用的工具"></a>1. 常用的工具</h2><ol>
<li>python</li>
<li>pycharm</li>
<li>浏览器<ol>
<li>chrome（建议使用）</li>
<li>火狐</li>
</ol>
</li>
</ol>
<h1 id="三、爬取数据-urllib库"><a href="#三、爬取数据-urllib库" class="headerlink" title="三、爬取数据-urllib库"></a>三、爬取数据-urllib库</h1><h2 id="1-快速入门"><a href="#1-快速入门" class="headerlink" title="1. 快速入门"></a>1. 快速入门</h2><p>怎样扒网页呢？</p>
<p>其实就是根据URL来获取它的网页信息，虽然我们在浏览器中看到的是一幅幅优美的画面，但是其实是由浏览器解释才呈现出来的，实质它是一段HTML代码，加 JS、CSS，如果把网页比作一个人，那么HTML便是他的骨架，JS便是他的肌肉，CSS便是它的衣服。所以最重要的部分是存在于HTML中的，下面我们就写个例子来扒一个网页下来</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> urllib.request <span class="keyword">import</span> urlopen</span><br><span class="line"></span><br><span class="line"><span class="comment"># 第一个实例</span></span><br><span class="line"></span><br><span class="line">url = <span class="string">'https://www.baidu.com'</span></span><br><span class="line"><span class="comment"># 发送请求</span></span><br><span class="line"><span class="comment"># 第一个参数(必填)url: Union[str, Request],</span></span><br><span class="line"><span class="comment"># 第二个参数data: Optional[bytes] = ...,</span></span><br><span class="line"><span class="comment"># 第三个参数timeout: Optional[float] = ...,</span></span><br><span class="line">response = urlopen(url)</span><br><span class="line"><span class="comment"># 读取内容</span></span><br><span class="line">info = response.read()</span><br><span class="line"><span class="comment"># 打印内容</span></span><br><span class="line"><span class="comment"># decode() 转码</span></span><br><span class="line">print(info.decode())</span><br><span class="line"></span><br><span class="line"><span class="comment"># 打印状态码</span></span><br><span class="line">print(response.getcode())</span><br><span 
class="line"><span class="comment"># 打印真实url</span></span><br><span class="line">print(response.geturl())</span><br><span class="line"><span class="comment"># 打印响应头</span></span><br><span class="line">print(response.info())</span><br></pre></td></tr></table></figure>

<p>看，这个网页的源码已经被我们扒下来了，是不是很酸爽？</p>
<h2 id="2-urllib库常用方法"><a href="#2-urllib库常用方法" class="headerlink" title="2. urllib库常用方法"></a>2. urllib库常用方法</h2><p><a href="https://docs.python.org/zh-cn/3/library/urllib.request.html#module-urllib.request" target="_blank" rel="noopener">urllib文档：https://docs.python.org/zh-cn/3/library/urllib.request.html#module-urllib.request</a></p>
<ul>
<li><p>requset.urlopen(url,data,timeout)</p>
<ul>
<li><p>第一个参数url即为URL，第二个参数data是访问URL时要传送的数据，第三个timeout是设置超时时间。</p>
</li>
<li><p>第二三个参数是可以不传送的，data默认为空None，timeout默认为 socket._GLOBAL_DEFAULT_TIMEOUT</p>
</li>
<li><p>第一个参数URL是必须要传送的，在这个例子里面我们传送了百度的URL，执行urlopen方法之后，返回一个response对象，返回信息便保存在这里面。</p>
</li>
</ul>
</li>
</ul>
<ul>
<li><p>response.read()</p>
<ul>
<li>read()方法就是读取文件里的全部内容，返回bytes类型</li>
</ul>
</li>
<li><p>response.getcode()</p>
<ul>
<li>返回 HTTP的响应码，成功返回200，4开头表示客户端请求出错（如404页面不存在、403权限不够），5开头表示服务器问题</li>
</ul>
</li>
<li><p>response.geturl()</p>
<ul>
<li>返回 返回实际数据的实际URL，防止重定向问题</li>
</ul>
</li>
<li><p>response.info()</p>
<ul>
<li>返回 服务器响应的HTTP报头</li>
</ul>
</li>
</ul>
<h2 id="3-Request对象"><a href="#3-Request对象" class="headerlink" title="3. Request对象"></a>3. Request对象</h2><p> 其实上面的urlopen参数可以传入一个request请求,它其实就是一个Request类的实例，构造时需要传入Url,Data等等的内容。比如上面的两行代码，我们可以这么改写</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> urllib.request <span class="keyword">import</span> urlopen</span><br><span class="line"><span class="keyword">from</span> urllib.request <span class="keyword">import</span> Request</span><br><span class="line"></span><br><span class="line">request = Request(<span class="string">"http://www.baidu.com"</span>)</span><br><span class="line">response = urlopen(request)</span><br><span class="line">print(response.read().decode())</span><br></pre></td></tr></table></figure>

<p>运行结果是完全一样的，只不过中间多了一个request对象，推荐大家这么写，因为在构建请求时还需要加入好多内容，通过构建一个request，服务器响应请求得到应答，这样显得逻辑上清晰明确</p>
<h2 id="4-Get-请求"><a href="#4-Get-请求" class="headerlink" title="4. Get 请求"></a>4. Get 请求</h2><p>大部分被传输到浏览器的html，images，js，css, … 都是通过GET方法发出请求的。它是获取数据的主要方法</p>
<p>例如：<a href="http://www.baidu.com" target="_blank" rel="noopener">www.baidu.com</a> 搜索</p>
<p>Get请求的参数都是在Url中体现的,如果有中文，需要转码，这时我们可使用</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> urllib.parse <span class="keyword">import</span> urlencode</span><br><span class="line"><span class="keyword">from</span> urllib.parse <span class="keyword">import</span> quote</span><br><span class="line"></span><br><span class="line"><span class="comment"># Get请求的参数都是在Url中体现的,如果有中文，需要转码,这时我们可使用</span></span><br><span class="line"><span class="comment"># urllib.parse.quote()</span></span><br><span class="line"><span class="comment"># urllib.parse.urlencode()</span></span><br><span class="line">url = <span class="string">f'https://www.baidu.com/s?<span class="subst">&#123;urlencode(&#123;<span class="string">"wd"</span>: <span class="string">"编程"</span>, <span class="string">"ie"</span>: <span class="string">"utf-8"</span>&#125;</span>)&#125;'</span></span><br><span class="line">url = <span class="string">f'https://www.baidu.com/s?wd=<span class="subst">&#123;quote(<span class="string">"编程"</span>)&#125;</span>'</span></span><br></pre></td></tr></table></figure>

<h2 id="5-Post-请求"><a href="#5-Post-请求" class="headerlink" title="5. Post 请求"></a>5. Post 请求</h2><p>我们说了Request请求对象的里有data参数，它就是用在POST里的，我们要传送的数据就是这个参数data，data是一个字典，里面要匹配键值对</p>
<p>发送请求/响应header头的含义：</p>
<table>
<thead>
<tr>
<th>名称</th>
<th>含义</th>
</tr>
</thead>
<tbody><tr>
<td>Accept</td>
<td>告诉服务器，客户端支持的数据类型</td>
</tr>
<tr>
<td>Accept-Charset</td>
<td>告诉服务器，客户端采用的编码</td>
</tr>
<tr>
<td>Accept-Encoding</td>
<td>告诉服务器，客户机支持的数据压缩格式</td>
</tr>
<tr>
<td>Accept-Language</td>
<td>告诉服务器，客户机的语言环境</td>
</tr>
<tr>
<td>Host</td>
<td>客户机通过这个头告诉服务器，想访问的主机名</td>
</tr>
<tr>
<td>If-Modified-Since</td>
<td>客户机通过这个头告诉服务器，资源的缓存时间</td>
</tr>
<tr>
<td>Referer</td>
<td>客户机通过这个头告诉服务器，它是从哪个资源来访问服务器的。（一般用于防盗链）</td>
</tr>
<tr>
<td>User-Agent</td>
<td>客户机通过这个头告诉服务器，客户机的软件环境</td>
</tr>
<tr>
<td>Cookie</td>
<td>客户机通过这个头告诉服务器，可以向服务器带数据</td>
</tr>
<tr>
<td>Refresh</td>
<td>服务器通过这个头，告诉浏览器隔多长时间刷新一次</td>
</tr>
<tr>
<td>Content-Type</td>
<td>服务器通过这个头，回送数据的类型</td>
</tr>
<tr>
<td>Content-Language</td>
<td>服务器通过这个头，告诉服务器的语言环境</td>
</tr>
<tr>
<td>Server</td>
<td>服务器通过这个头，告诉浏览器服务器的类型</td>
</tr>
<tr>
<td>Content-Encoding</td>
<td>服务器通过这个头，告诉浏览器数据采用的压缩格式</td>
</tr>
<tr>
<td>Content-Length</td>
<td>服务器通过这个头，告诉浏览器回送数据的长度</td>
</tr>
</tbody></table>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> urllib.request <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">from</span> urllib.parse <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">from</span> fake_useragent <span class="keyword">import</span> UserAgent</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="comment"># 关于post请求的获取</span></span><br><span class="line"><span class="comment"># 登录的案例</span></span><br><span class="line"></span><br><span class="line">url = <span class="string">"https://www.w3cschool.cn/checklogin_1"</span></span><br><span class="line"></span><br><span class="line">form_data = &#123;</span><br><span class="line">    <span class="string">"username"</span>: <span class="string">"username"</span>,</span><br><span class="line">    <span class="string">"password"</span>: <span class="string">"password"</span></span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line">headers = &#123;</span><br><span class="line">    <span class="string">"User-agent"</span>: UserAgent().chrome</span><br><span class="line">&#125;</span><br><span 
class="line">request = Request(url, data=urlencode(form_data).encode(), headers=headers)</span><br><span class="line">response = urlopen(request)</span><br><span class="line">print(response.read().decode())</span><br></pre></td></tr></table></figure>

<h2 id="6-响应的编码"><a href="#6-响应的编码" class="headerlink" title="6. 响应的编码"></a>6. 响应的编码</h2><p>响应状态代码有三位数字组成，第一个数字定义了响应的类别，且有五种可能取值。<br>常见状态码：</p>
<table>
<thead>
<tr>
<th>号码</th>
<th>含义</th>
</tr>
</thead>
<tbody><tr>
<td>100~199</td>
<td>表示服务器成功接收部分请求，要求客户端继续提交其余请求才能完成整个处理过程</td>
</tr>
<tr>
<td>200~299</td>
<td>表示服务器成功接收请求并已完成整个处理过程。常用200（OK 请求成功）</td>
</tr>
<tr>
<td>300~399</td>
<td>为完成请求，客户需进一步细化请求。例如：请求的资源已经移动一个新地址、常用302（所请求的页面已经临时转移至新的url）、307和304（使用缓存资源）</td>
</tr>
<tr>
<td>400~499</td>
<td>客户端的请求有错误，常用404（服务器无法找到被请求的页面）、403（服务器拒绝访问，权限不够）</td>
</tr>
<tr>
<td>500~599</td>
<td>服务器端出现错误，常用500（请求未完成。服务器遇到不可预知的情况）</td>
</tr>
</tbody></table>
<h2 id="7-Ajax的请求获取数据"><a href="#7-Ajax的请求获取数据" class="headerlink" title="7. Ajax的请求获取数据"></a>7. Ajax的请求获取数据</h2><p>有些网页内容使用Ajax加载，而Ajax一般返回的是Json,直接对Ajax地址进行post或get，就返回Json数据了</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> urllib.request <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">from</span> fake_useragent <span class="keyword">import</span> *</span><br><span class="line"></span><br><span class="line"><span class="comment"># 关于ajax请求的获取(豆瓣)</span></span><br><span class="line"></span><br><span class="line">headers = &#123;</span><br><span class="line">    <span class="string">"User-agent"</span>: UserAgent().chrome</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line">base_url = <span class="string">"https://movie.douban.com/j/chart/top_list?type=11&amp;interval_id=100%3A90&amp;action=&amp;"</span></span><br><span class="line"></span><br><span class="line">count = <span class="number">1</span></span><br><span class="line">limit = <span class="number">100</span></span><br><span class="line"></span><br><span class="line"><span class="keyword">while</span> <span class="literal">True</span>:</span><br><span class="line">    args = <span 
class="string">f"start=<span class="subst">&#123;count*limit&#125;</span>&amp;limit=<span class="subst">&#123;limit&#125;</span>"</span></span><br><span class="line">    request = Request(<span class="string">f"<span class="subst">&#123;base_url&#125;</span><span class="subst">&#123;args&#125;</span>"</span>, headers=headers)</span><br><span class="line"></span><br><span class="line">    response = urlopen(request)</span><br><span class="line"></span><br><span class="line">    info = response.read().decode()</span><br><span class="line"></span><br><span class="line">    <span class="keyword">if</span> info == <span class="string">"[]"</span> <span class="keyword">or</span> info <span class="keyword">is</span> <span class="literal">None</span>:</span><br><span class="line">        <span class="keyword">break</span></span><br><span class="line">    print(info)</span><br><span class="line">    count += <span class="number">1</span></span><br></pre></td></tr></table></figure>

<h2 id="8-请求-SSL证书验证"><a href="#8-请求-SSL证书验证" class="headerlink" title="8. 请求 SSL证书验证"></a>8. 请求 SSL证书验证</h2><p>现在随处可见 https 开头的网站，urllib可以为 HTTPS 请求验证SSL证书，就像web浏览器一样，如果网站的SSL证书是经过CA认证的，则能够正常访问，如：<a href="https://www.baidu.com/" target="_blank" rel="noopener">https://www.baidu.com/</a></p>
<p>如果SSL证书验证不通过，或者操作系统不信任服务器的安全证书，浏览器在访问某些网站证书是自己做的网站，会警告用户证书不受信任。</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> urllib.request <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">from</span> fake_useragent <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">import</span> ssl</span><br><span class="line"></span><br><span class="line"><span class="comment"># 忽略验证证书案例</span></span><br><span class="line"></span><br><span class="line">url = <span class="string">"https://www.xxx.com"</span></span><br><span class="line"></span><br><span class="line">headers = &#123;</span><br><span class="line">    <span class="string">"User-agent"</span>: UserAgent().chrome</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line">request = Request(url, headers=headers)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 忽略验证证书</span></span><br><span class="line">context = ssl._create_unverified_context()</span><br><span class="line"></span><br><span class="line">response = urlopen(request, context=context)</span><br><span class="line"></span><br><span class="line">print(response.read().decode())</span><br></pre></td></tr></table></figure>

<h2 id="9-伪装自己"><a href="#9-伪装自己" class="headerlink" title="9. 伪装自己"></a>9. 伪装自己</h2><h3 id="9-1-设置请求头"><a href="#9-1-设置请求头" class="headerlink" title="9.1. 设置请求头"></a>9.1. 设置请求头</h3><p>其中<code>User-Agent</code>代表用的哪个请求的浏览器</p>
<p>代码如下：</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> urllib.request <span class="keyword">import</span> *</span><br><span class="line"></span><br><span class="line"><span class="comment"># 请求头的设置</span></span><br><span class="line"></span><br><span class="line">url = <span class="string">'https://www.baidu.com'</span></span><br><span class="line"></span><br><span class="line">headers = &#123;</span><br><span class="line">    <span class="string">"User-agent"</span>: <span class="string">"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36"</span></span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line">request = Request(url, headers=headers)</span><br><span class="line"></span><br><span class="line">print(request.get_header(<span class="string">"User-agent"</span>))</span><br><span class="line"></span><br><span class="line">response = urlopen(request)</span><br><span class="line"></span><br><span class="line">info = response.read()</span><br><span class="line"></span><br><span class="line">print(info.decode())</span><br></pre></td></tr></table></figure>

<p><strong>提示</strong></p>
<blockquote>
<p>在此可以使用多个User-Agent，然后随机选择一个使用</p>
</blockquote>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> urllib.request <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">import</span> random</span><br><span class="line"></span><br><span class="line"><span class="comment"># 请求头的设置</span></span><br><span class="line"></span><br><span class="line">url = <span class="string">'https://www.baidu.com'</span></span><br><span class="line"></span><br><span class="line">ua_list = [</span><br><span class="line">    <span class="string">"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)"</span>,</span><br><span class="line">    <span class="string">"Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1"</span>,</span><br><span class="line">    <span class="string">"Mozilla/5.0 (Windows; U; Windows NT 5.2) AppleWebKit/525.13 (KHTML, like Gecko) Version/3.1"</span>,</span><br><span class="line">    <span class="string">"Mozilla/5.0 (Windows; U; Windows NT 5.2) AppleWebKit/525.13 (KHTML, like Gecko) 
Chrome/0.2.149.27"</span>,</span><br><span class="line">    <span class="string">"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ;  QIHU 360EE)"</span></span><br><span class="line">]</span><br><span class="line"></span><br><span class="line">headers = &#123;</span><br><span class="line">    <span class="string">"User-agent"</span>: random.choice(ua_list)</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line">request = Request(url, headers=headers)</span><br><span class="line"></span><br><span class="line">print(request.get_header(<span class="string">"User-agent"</span>))</span><br><span class="line"></span><br><span class="line">response = urlopen(request)</span><br><span class="line"></span><br><span class="line">info = response.read()</span><br><span class="line"></span><br><span class="line">print(info.decode())</span><br></pre></td></tr></table></figure>

<p>当然，也可以使用fake_useragent库进行选择</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> urllib.request <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">from</span> fake_useragent <span class="keyword">import</span> *</span><br><span class="line"></span><br><span class="line"><span class="comment"># 请求头的设置</span></span><br><span class="line"></span><br><span class="line">url = <span class="string">'https://www.baidu.com'</span></span><br><span class="line"></span><br><span class="line"><span class="comment"># 获取fake_useragent库中的UserAgent</span></span><br><span class="line">agent = UserAgent()</span><br><span class="line"></span><br><span class="line">headers = &#123;</span><br><span class="line">    <span class="string">"User-agent"</span>: agent.chrome,</span><br><span class="line">    <span class="comment"># "User-agent": agent.firefox</span></span><br><span class="line">    <span class="comment"># 随机选择</span></span><br><span class="line">    <span class="comment"># "User-agent": agent.random</span></span><br><span class="line">&#125;</span><br><span 
class="line"></span><br><span class="line">request = Request(url, headers=headers)</span><br><span class="line"></span><br><span class="line">print(request.get_header(<span class="string">"User-agent"</span>))</span><br><span class="line"></span><br><span class="line">response = urlopen(request)</span><br><span class="line"></span><br><span class="line">info = response.read()</span><br><span class="line"></span><br><span class="line">print(info.decode())</span><br></pre></td></tr></table></figure>

<h3 id="9-2-设置代理Proxy"><a href="#9-2-设置代理Proxy" class="headerlink" title="9.2. 设置代理Proxy"></a>9.2. 设置代理Proxy</h3><blockquote>
<p>假如一个网站它会检测某一段时间某个IP 的访问次数，如果访问次数过多，它会禁止你的访问。所以你可以设置一些代理服务器来帮助你做工作，每隔一段时间换一个代理，网站君都不知道是谁在捣鬼了，这酸爽！</p>
</blockquote>
<p><strong>分类：</strong></p>
<p>透明代理：目标网站知道你使用了代理并且知道你的源IP地址，这种代理显然不符合我们这里使用代理的初衷</p>
<p>匿名代理：匿名程度比较低，也就是网站知道你使用了代理，但是并不知道你的源IP地址</p>
<p>高匿代理：这是最保险的方式，目标网站既不知道你使用的代理更不知道你的源IP </p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> urllib.request <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">from</span> fake_useragent <span class="keyword">import</span> *</span><br><span class="line"></span><br><span class="line"><span class="comment"># proxy代理</span></span><br><span class="line"></span><br><span class="line">url = <span class="string">"http://httpbin.org/get"</span></span><br><span class="line"></span><br><span class="line">headers = &#123;</span><br><span class="line">    <span class="string">"User-agent"</span>: UserAgent().chrome</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line">request = Request(url, headers=headers)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 购买的代理，有用户名密码。购买的平台有使用文档</span></span><br><span class="line"><span class="comment"># proxy_handler = ProxyHandler(&#123;"http": "username:password@ip:port"&#125;)</span></span><br><span class="line">proxy_handler = ProxyHandler(&#123;<span class="string">"http"</span>: <span class="string">"175.42.122.249:9999"</span>&#125;)</span><br><span class="line"></span><br><span 
class="line">opener = build_opener(proxy_handler)</span><br><span class="line"></span><br><span class="line">response = opener.open(request)</span><br><span class="line"></span><br><span class="line">print(response.read().decode())</span><br></pre></td></tr></table></figure>

<h1 id="四、Cookie和URLError"><a href="#四、Cookie和URLError" class="headerlink" title="四、Cookie和URLError"></a>四、Cookie和URLError</h1><h2 id="1-Cookie"><a href="#1-Cookie" class="headerlink" title="1. Cookie"></a>1. Cookie</h2><p>为什么要使用Cookie呢？<br>Cookie，指某些网站为了辨别用户身份、进行session跟踪而储存在用户本地终端上的数据（通常经过加密）<br>比如说有些网站需要登录后才能访问某个页面，在登录之前，你想抓取某个页面内容是不允许的。那么我们可以利用Urllib库保存我们登录的Cookie，然后再抓取其他页面就达到目的了。</p>
<h3 id="1-1-Opener"><a href="#1-1-Opener" class="headerlink" title="1.1. Opener"></a>1.1. Opener</h3><p>当你获取一个URL你使用一个opener(一个urllib.OpenerDirector的实例)。在前面，我们都是使用的默认的opener，也就是urlopen。它是一个特殊的opener，可以理解成opener的一个特殊实例，传入的参数仅仅是url，data，timeout。<br>如果我们需要用到Cookie，只用这个opener是不能达到目的的，所以我们需要创建更一般的opener来实现对Cookie的设置</p>
<h3 id="1-2-Cookielib"><a href="#1-2-Cookielib" class="headerlink" title="1.2. Cookielib"></a>1.2. Cookielib</h3><p>cookielib模块的主要作用是提供可存储cookie的对象，以便于与urllib模块配合使用来访问Internet资源。Cookielib模块非常强大，我们可以利用本模块的CookieJar类的对象来捕获cookie并在后续连接请求时重新发送，比如可以实现模拟登录功能。该模块主要的对象有CookieJar、FileCookieJar、MozillaCookieJar、LWPCookieJar</p>
<p><strong>案例1：获取Cookie保存到变量</strong></p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> urllib.request <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">from</span> urllib.parse <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">from</span> fake_useragent <span class="keyword">import</span> *</span><br><span class="line"></span><br><span class="line"><span class="comment"># 携带cookie的使用</span></span><br><span class="line"></span><br><span class="line">form_data = &#123;</span><br><span class="line">    <span class="string">"username"</span>: <span class="string">"username"</span>,</span><br><span class="line">    <span class="string">"password"</span>: <span class="string">"password"</span></span><br><span class="line">&#125;</span><br><span class="line">headers = &#123;</span><br><span class="line">    <span class="string">"User-agent"</span>: UserAgent().chrome,</span><br><span class="line">&#125;</span><br><span class="line"><span class="comment"># cookie</span></span><br><span class="line">processor = 
HTTPCookieProcessor()</span><br><span class="line">opener = build_opener(processor)</span><br><span class="line"><span class="comment"># 登录</span></span><br><span class="line">login_url = <span class="string">"https://www.w3cschool.cn/checklogin_1"</span></span><br><span class="line">request = Request(login_url, data=urlencode(form_data).encode(), headers=headers)</span><br><span class="line">opener.open(request)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 访问页面</span></span><br><span class="line">info_url = <span class="string">"https://www.w3cschool.cn/my"</span></span><br><span class="line">request = Request(info_url, headers=headers)</span><br><span class="line">response = opener.open(request)</span><br><span class="line">print(response.read().decode())</span><br></pre></td></tr></table></figure>

<p>我们使用以上方法将cookie保存到变量中，然后打印出了cookie中的值</p>
<p>以上程序的原理如下</p>
<p>创建一个带有cookie的opener，在访问登录的URL时，将登录后的cookie保存下来，然后利用这个cookie来访问其他网址。</p>
<p><strong>案例2：cookie保存文件的读取</strong></p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> urllib.request <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">from</span> urllib.parse <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">from</span> http.cookiejar <span class="keyword">import</span> *</span><br><span class="line"></span><br><span class="line"><span class="comment"># 保存cookie的使用</span></span><br><span class="line"></span><br><span class="line">headers = &#123;</span><br><span class="line">    <span class="string">"User-agent"</span>: <span 
class="string">"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36"</span>,</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="comment"># 登录</span></span><br><span class="line"><span class="comment"># 保存cookie到文件中</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">save_cookie</span><span class="params">(url, data)</span>:</span></span><br><span class="line">    request = Request(url, data=urlencode(data).encode(), headers=headers)</span><br><span class="line">    cookie_jar = MozillaCookieJar()</span><br><span class="line">    handler = HTTPCookieProcessor(cookie_jar)</span><br><span class="line">    opener = build_opener(handler)</span><br><span class="line">    response = opener.open(request)</span><br><span class="line">    cookie_jar.save(<span class="string">"cookie.txt"</span>, ignore_discard=<span class="literal">True</span>, ignore_expires=<span class="literal">True</span>)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="comment"># 从文件中获取cookie</span></span><br><span class="line"><span class="comment"># 携带cookie访问页面</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">use_cookie</span><span class="params">(url)</span>:</span></span><br><span class="line">    request = Request(url, headers=headers)</span><br><span class="line">    cookie_jar = MozillaCookieJar()</span><br><span class="line">    cookie_jar.load(<span class="string">"cookie.txt"</span>, ignore_discard=<span class="literal">True</span>, ignore_expires=<span class="literal">True</span>)</span><br><span class="line">    handler = HTTPCookieProcessor(cookie_jar)</span><br><span class="line">    opener = build_opener(handler)</span><br><span class="line">    response = 
opener.open(request)</span><br><span class="line">    print(response.read().decode())</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">main</span><span class="params">()</span>:</span></span><br><span class="line">    save_cookie(<span class="string">"https://www.w3cschool.cn/checklogin_1"</span>, &#123;<span class="string">"username"</span>: <span class="string">"username"</span>, <span class="string">"password"</span>: <span class="string">"password"</span>&#125;)</span><br><span class="line">    use_cookie(<span class="string">"https://www.w3cschool.cn/my"</span>)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">'__main__'</span>:</span><br><span class="line">    main()</span><br></pre></td></tr></table></figure>

<h2 id="2-URLError"><a href="#2-URLError" class="headerlink" title="2. URLError"></a>2. URLError</h2><p>首先解释下URLError可能产生的原因：</p>
<ul>
<li>网络无连接，即本机无法上网</li>
<li>连接不到特定的服务器</li>
<li>服务器不存在</li>
</ul>
<p>在代码中，我们需要用try-except语句来包围并捕获相应的异常</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> urllib.request <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">from</span> fake_useragent <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">from</span> urllib.error <span class="keyword">import</span> *</span><br><span class="line"></span><br><span class="line"><span class="comment"># 处理URL异常</span></span><br><span class="line"></span><br><span class="line">url = <span class="string">"http://httpbin.org/get123"</span></span><br><span class="line"></span><br><span class="line">headers = &#123;</span><br><span class="line">    <span class="string">"User-agent"</span>: UserAgent().chrome</span><br><span class="line">&#125;</span><br><span class="line"><span class="keyword">try</span>:</span><br><span class="line">    request = Request(url, headers=headers)</span><br><span class="line">    response = urlopen(request)</span><br><span class="line">    print(response.read().decode())</span><br><span class="line"><span class="keyword">except</span> HTTPError <span class="keyword">and</span> URLError <span class="keyword">as</span> e:</span><br><span 
class="line">    <span class="comment"># HTTPError</span></span><br><span class="line">    <span class="keyword">if</span> e.args:</span><br><span class="line">        print(<span class="string">"HTTPError"</span>)</span><br><span class="line">    <span class="comment"># URLError</span></span><br><span class="line">    <span class="keyword">elif</span> e.code == <span class="number">404</span>:</span><br><span class="line">        print(<span class="string">"URLError"</span>)</span><br></pre></td></tr></table></figure>

<h1 id="五、数据爬取-高级-requests库"><a href="#五、数据爬取-高级-requests库" class="headerlink" title="五、数据爬取(高级)-requests库"></a>五、数据爬取(高级)-requests库</h1><h2 id="1-介绍"><a href="#1-介绍" class="headerlink" title="1. 介绍"></a>1. 介绍</h2><blockquote>
<p>对了解一些爬虫的基本理念，掌握爬虫爬取的流程有所帮助。入门之后，我们就需要学习一些更加高级的内容和工具来方便我们的爬取。那么这一节来简单介绍一下 requests 库的基本用法</p>
</blockquote>
<p><a href="https://requests.readthedocs.io/zh_CN/latest/" target="_blank" rel="noopener">requests库文档：https://requests.readthedocs.io/zh_CN/latest/</a></p>
<h2 id="2-安装"><a href="#2-安装" class="headerlink" title="2. 安装"></a>2. 安装</h2><p>利用 pip 安装</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">pip install requests</span><br></pre></td></tr></table></figure>

<h2 id="3-基本请求"><a href="#3-基本请求" class="headerlink" title="3. 基本请求"></a>3. 基本请求</h2><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> requests</span><br><span class="line"></span><br><span class="line">req = requests.get(<span class="string">"http://www.baidu.com"</span>)</span><br><span class="line">req = requests.post(<span class="string">"http://www.baidu.com"</span>)</span><br><span class="line">req = requests.put(<span class="string">"http://www.baidu.com"</span>)</span><br><span class="line">req = requests.delete(<span class="string">"http://www.baidu.com"</span>)</span><br><span class="line">req = requests.head(<span class="string">"http://www.baidu.com"</span>)</span><br><span class="line">req = requests.options(<span class="string">"http://www.baidu.com"</span>)</span><br></pre></td></tr></table></figure>

<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span 
class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br><span class="line">68</span><br><span class="line">69</span><br><span class="line">70</span><br><span class="line">71</span><br><span class="line">72</span><br><span class="line">73</span><br><span class="line">74</span><br><span class="line">75</span><br><span class="line">76</span><br><span class="line">77</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> requests</span><br><span class="line"><span class="keyword">from</span> fake_useragent <span class="keyword">import</span> *</span><br><span class="line"></span><br><span class="line"><span class="comment"># requests库的使用</span></span><br><span class="line"></span><br><span class="line">headers = &#123;</span><br><span class="line">    <span class="string">"User-agent"</span>: UserAgent().chrome</span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="comment"># get请求</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">get_dmeo</span><span class="params">()</span>:</span></span><br><span class="line">    url = <span class="string">"https://www.baidu.com/s?"</span></span><br><span class="line">    params = &#123;</span><br><span class="line">        <span class="string">"wd"</span>: <span class="string">"B站"</span></span><br><span class="line">    &#125;</span><br><span class="line">    response = requests.get(url, headers=headers, params=params)</span><br><span class="line">    print(response.text)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="comment"># post请求</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span 
class="title">post_demo</span><span class="params">()</span>:</span></span><br><span class="line">    login_url = <span class="string">"https://www.w3cschool.cn/checklogin_1"</span></span><br><span class="line">    data = &#123;</span><br><span class="line">        <span class="string">"username"</span>: <span class="string">"username"</span>,</span><br><span class="line">        <span class="string">"password"</span>: <span class="string">"password"</span></span><br><span class="line">    &#125;</span><br><span class="line">    response = requests.post(login_url, headers=headers, data=data)</span><br><span class="line">    print(response.text)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="comment"># proxy代理</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">proxy_demo</span><span class="params">()</span>:</span></span><br><span class="line">    url = <span class="string">"http://httpbin.org/get"</span></span><br><span class="line">    proxies = &#123;</span><br><span class="line">        <span class="string">"http"</span>: <span class="string">"175.42.122.249:9999"</span>,</span><br><span class="line">        <span class="comment"># "http": "username:password@ip:port"</span></span><br><span class="line">    &#125;</span><br><span class="line">    response = requests.get(url, headers=headers, proxies=proxies)</span><br><span class="line">    print(response.text)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="comment"># ssl证书和字符编码问题的解决</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">ssl_demo</span><span class="params">()</span>:</span></span><br><span class="line">    url = <span class="string">"https://www.12306.cn/index/"</span></span><br><span class="line">    <span class="comment"># 禁用安全请求警告</span></span><br><span class="line">    
requests.packages.urllib3.disable_warnings()</span><br><span class="line">    response = requests.get(url, headers=headers, verify=<span class="literal">False</span>)</span><br><span class="line">    response.encoding = <span class="string">"utf-8"</span></span><br><span class="line">    print(response.text)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="comment"># cookie的使用</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">cookie_demo</span><span class="params">()</span>:</span></span><br><span class="line">    <span class="comment"># 通过session进行请求</span></span><br><span class="line">    session = requests.Session()</span><br><span class="line">    login_url = <span class="string">"https://www.w3cschool.cn/checklogin_1"</span></span><br><span class="line">    data = &#123;</span><br><span class="line">        <span class="string">"username"</span>: <span class="string">"username"</span>,</span><br><span class="line">        <span class="string">"password"</span>: <span class="string">"password"</span></span><br><span class="line">    &#125;</span><br><span class="line">    session.post(login_url, headers=headers, data=data)</span><br><span class="line">    info_url = <span class="string">"https://www.w3cschool.cn/my"</span></span><br><span class="line">    response = session.get(info_url, headers=headers)</span><br><span class="line">    print(response.text)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">mian</span><span class="params">()</span>:</span></span><br><span class="line">    <span class="comment"># get_dmeo()</span></span><br><span class="line">    <span class="comment"># post_demo()</span></span><br><span class="line">    <span class="comment"># proxy_demo()</span></span><br><span class="line">    <span class="comment"># 
ssl_demo()</span></span><br><span class="line">    cookie_demo()</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">'__main__'</span>:</span><br><span class="line">    mian()</span><br></pre></td></tr></table></figure>
<p>可以通过timeout属性设置超时时间，一旦超过这个时间还没获得响应内容，就会提示错误</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">requests.get(&#39;http:&#x2F;&#x2F;github.com&#39;, timeout&#x3D;0.001)</span><br></pre></td></tr></table></figure>

<h2 id="4-获取响应信息"><a href="#4-获取响应信息" class="headerlink" title="4. 获取响应信息"></a>4. 获取响应信息</h2><table>
<thead>
<tr>
<th>代码</th>
<th>含义</th>
</tr>
</thead>
<tbody><tr>
<td>resp.json()</td>
<td>获取响应内容（解析为JSON对象）</td>
</tr>
<tr>
<td>resp.text</td>
<td>获取响应内容 (以字符串)</td>
</tr>
<tr>
<td>resp.content</td>
<td>获取响应内容（以字节的方式）</td>
</tr>
<tr>
<td>resp.headers</td>
<td>获取响应头内容</td>
</tr>
<tr>
<td>resp.url</td>
<td>获取访问地址</td>
</tr>
<tr>
<td>resp.encoding</td>
<td>获取网页编码</td>
</tr>
<tr>
<td>resp.request.headers</td>
<td>请求头内容</td>
</tr>
<tr>
<td>resp.cookies</td>
<td>获取cookie</td>
</tr>
</tbody></table>
<h1 id="六、数据解析-正则"><a href="#六、数据解析-正则" class="headerlink" title="六、数据解析-正则"></a>六、数据解析-正则</h1><h2 id="1-提取数据"><a href="#1-提取数据" class="headerlink" title="1. 提取数据"></a>1. 提取数据</h2><p>在前面我们已经搞定了怎样获取页面的内容，不过还差一步，这么多杂乱的代码夹杂文字我们怎样把它提取出来整理呢？下面就开始介绍一个十分强大的工具，正则表达式！</p>
<blockquote>
<p>正则表达式是对字符串操作的一种逻辑公式，就是用事先定义好的一些特定字符、及这些特定字符的组合，组成一个“规则字符串”，这个“规则字符串”用来表达对字符串的一种过滤逻辑。</p>
</blockquote>
<p>正则表达式是用来匹配字符串非常强大的工具，在其他编程语言中同样有正则表达式的概念，Python同样不例外，利用了正则表达式，我们想要从返回的页面内容提取出我们想要的内容就易如反掌了</p>
<p><a href="https://docs.python.org/zh-cn/3/library/re.html" target="_blank" rel="noopener">re库文档：https://docs.python.org/zh-cn/3/library/re.html</a></p>
<p><strong>规则</strong>：</p>
<table>
<thead>
<tr>
<th>模式</th>
<th>描述</th>
</tr>
</thead>
<tbody><tr>
<td>^</td>
<td>匹配字符串的开头</td>
</tr>
<tr>
<td>$</td>
<td>匹配字符串的末尾</td>
</tr>
<tr>
<td>.</td>
<td>匹配任意字符，除了换行符，当re.DOTALL标记被指定时，则可以匹配包括换行符的任意字符</td>
</tr>
<tr>
<td>[…]</td>
<td>用来表示一组字符,单独列出：[amk] 匹配 ‘a’，’m’或’k’</td>
</tr>
<tr>
<td>[^…]</td>
<td>不在[]中的字符：[^abc] 匹配除了a,b,c之外的字符</td>
</tr>
<tr>
<td>re*</td>
<td>匹配0个或多个的表达式</td>
</tr>
<tr>
<td>re+</td>
<td>匹配1个或多个的表达式</td>
</tr>
<tr>
<td>re?</td>
<td>匹配0个或1个由前面的正则表达式定义的片段，非贪婪方式</td>
</tr>
<tr>
<td>re{ n}</td>
<td>精确匹配 n 个前面的表达式</td>
</tr>
<tr>
<td>re{ n,}</td>
<td>匹配 n 个或多个前面的表达式</td>
</tr>
<tr>
<td>re{ n, m}</td>
<td>匹配 n 到 m 次由前面的正则表达式定义的片段，贪婪方式</td>
</tr>
<tr>
<td>a|b</td>
<td>匹配 a 或 b</td>
</tr>
<tr>
<td>(re)</td>
<td>匹配括号内的表达式，也表示一个组</td>
</tr>
<tr>
<td>(?imx)</td>
<td>正则表达式包含三种可选标志：i, m, 或 x 。只影响括号中的区域</td>
</tr>
<tr>
<td>(?-imx)</td>
<td>正则表达式关闭 i, m, 或 x 可选标志。只影响括号中的区域</td>
</tr>
<tr>
<td>(?: re)</td>
<td>类似 (…), 但是不表示一个组</td>
</tr>
<tr>
<td>(?imx: re)</td>
<td>在括号中使用i, m, 或 x 可选标志</td>
</tr>
<tr>
<td>(?-imx: re)</td>
<td>在括号中不使用i, m, 或 x 可选标志</td>
</tr>
<tr>
<td>(?#…)</td>
<td>注释</td>
</tr>
<tr>
<td>(?= re)</td>
<td>前向肯定界定符。如果所含正则表达式（以 … 表示）在当前位置成功匹配则成功，否则失败。但所含表达式一经尝试，匹配引擎并不前进；模式的剩余部分仍从当前位置继续尝试。</td>
</tr>
<tr>
<td>(?! re)</td>
<td>前向否定界定符。与肯定界定符相反；当所含表达式不能在字符串当前位置匹配时成功</td>
</tr>
<tr>
<td>(?&gt; re)</td>
<td>匹配的独立模式，省去回溯</td>
</tr>
<tr>
<td>\w</td>
<td>匹配字母数字及下划线</td>
</tr>
<tr>
<td>\W</td>
<td>匹配非字母数字及下划线</td>
</tr>
<tr>
<td>\s</td>
<td>匹配任意空白字符，等价于 [ \t\n\r\f\v]</td>
</tr>
<tr>
<td>\S</td>
<td>匹配任意非空字符</td>
</tr>
<tr>
<td>\d</td>
<td>匹配任意数字，等价于 [0-9]</td>
</tr>
<tr>
<td>\D</td>
<td>匹配任意非数字</td>
</tr>
<tr>
<td>\A</td>
<td>匹配字符串开始</td>
</tr>
<tr>
<td>\Z</td>
<td>匹配字符串结束，如果存在换行，只匹配到换行前的结束字符串</td>
</tr>
<tr>
<td>\z</td>
<td>匹配字符串结束</td>
</tr>
<tr>
<td>\G</td>
<td>匹配最后匹配完成的位置</td>
</tr>
<tr>
<td>\b</td>
<td>匹配一个单词边界，也就是指单词和空格间的位置。例如， ‘er\b’ 可以匹配”never” 中的 ‘er’，但不能匹配 “verb” 中的 ‘er’</td>
</tr>
<tr>
<td>\B</td>
<td>匹配非单词边界。’er\B’ 能匹配 “verb” 中的 ‘er’，但不能匹配 “never” 中的 ‘er’</td>
</tr>
<tr>
<td>\n, \t, 等.</td>
<td>匹配一个换行符。匹配一个制表符。等</td>
</tr>
<tr>
<td>\1…\9</td>
<td>匹配第n个分组的内容</td>
</tr>
<tr>
<td>\10</td>
<td>匹配第n个分组的内容，如果它已经匹配。否则指的是八进制字符码的表达式</td>
</tr>
<tr>
<td>[\u4e00-\u9fa5]</td>
<td>中文</td>
</tr>
</tbody></table>
<h2 id="2-正则表达式相关注解"><a href="#2-正则表达式相关注解" class="headerlink" title="2. 正则表达式相关注解"></a>2. 正则表达式相关注解</h2><h3 id="2-1-数量词的贪婪模式与非贪婪模式"><a href="#2-1-数量词的贪婪模式与非贪婪模式" class="headerlink" title="2.1. 数量词的贪婪模式与非贪婪模式"></a>2.1. 数量词的贪婪模式与非贪婪模式</h3><p>正则表达式通常用于在文本中查找匹配的字符串<br>Python里数量词默认是贪婪的（在少数语言里也可能是默认非贪婪），总是尝试匹配尽可能多的字符；非贪婪的则相反，总是尝试匹配尽可能少的字符</p>
<p>例如：正则表达式”ab* ”如果用于查找”abbbc”，将找到”abbb”。而如果使用非贪婪的数量词”ab*?”，将找到”a”</p>
<h3 id="2-2-常用方法"><a href="#2-2-常用方法" class="headerlink" title="2.2. 常用方法"></a>2.2. 常用方法</h3><ul>
<li>re.match<ul>
<li>re.match 尝试从字符串的起始位置匹配一个模式，如果不是起始位置匹配成功的话，match()就返回None</li>
<li>函数语法：re.match(pattern, string, flags=0)</li>
</ul>
</li>
<li>re.search<ul>
<li>re.search 扫描整个字符串并返回第一个成功的匹配。</li>
<li>函数语法：re.search(pattern, string, flags=0)</li>
</ul>
</li>
<li>re.sub<ul>
<li>re.sub 替换字符串re.sub(pattern,replace,string)</li>
</ul>
</li>
<li>re.findall<ul>
<li>re.findall 查找全部re.findall(pattern,string,flags=0)</li>
</ul>
</li>
</ul>
<h2 id="3-正则表达式修饰符-可选标志"><a href="#3-正则表达式修饰符-可选标志" class="headerlink" title="3. 正则表达式修饰符 - 可选标志"></a>3. 正则表达式修饰符 - 可选标志</h2><blockquote>
<p>正则表达式可以包含一些可选标志修饰符来控制匹配的模式。修饰符被指定为一个可选的标志。多个标志可以通过按位 OR(|) 它们来指定。如 re.I | re.M 被设置成 I 和 M 标志：</p>
</blockquote>
<table>
<thead>
<tr>
<th>修饰符</th>
<th>描述</th>
</tr>
</thead>
<tbody><tr>
<td>re.I</td>
<td>使匹配对大小写不敏感</td>
</tr>
<tr>
<td>re.L</td>
<td>做本地化识别（locale-aware）匹配</td>
</tr>
<tr>
<td>re.M</td>
<td>多行匹配，影响 ^ 和 $</td>
</tr>
<tr>
<td>re.S</td>
<td>使 . 匹配包括换行在内的所有字符</td>
</tr>
<tr>
<td>re.U</td>
<td>根据Unicode字符集解析字符。这个标志影响 \w, \W, \b, \B</td>
</tr>
<tr>
<td>re.X</td>
<td>该标志通过给予你更灵活的格式以便你将正则表达式写得更易于理解</td>
</tr>
</tbody></table>
<h1 id="七、数据解析-Beautiful-Soup"><a href="#七、数据解析-Beautiful-Soup" class="headerlink" title="七、数据解析-Beautiful Soup"></a>七、数据解析-Beautiful Soup</h1><h2 id="1-Beautiful-Soup的简介"><a href="#1-Beautiful-Soup的简介" class="headerlink" title="1. Beautiful Soup的简介"></a>1. Beautiful Soup的简介</h2><blockquote>
<p>Beautiful Soup提供一些简单的、python式的函数用来处理导航、搜索、修改分析树等功能。它是一个工具箱，通过解析文档为用户提供需要抓取的数据，因为简单，所以不需要多少代码就可以写出一个完整的应用程序。</p>
</blockquote>
<blockquote>
<p>Beautiful Soup自动将输入文档转换为Unicode编码，输出文档转换为utf-8编码。你不需要考虑编码方式，除非文档没有指定一个编码方式，这时，Beautiful Soup就不能自动识别编码方式了。然后，你仅仅需要说明一下原始编码方式就可以了。</p>
</blockquote>
<blockquote>
<p>Beautiful Soup已成为和lxml、html5lib一样出色的python解释器，为用户灵活地提供不同的解析策略或强劲的速度</p>
</blockquote>
<p><a href="http://beautifulsoup.readthedocs.io/zh_CN/latest/" target="_blank" rel="noopener">官网</a><a href="http://beautifulsoup.readthedocs.io/zh_CN/latest/" target="_blank" rel="noopener">http://beautifulsoup.readthedocs.io/zh_CN/latest/</a></p>
<h2 id="2-Beautiful-Soup-安装"><a href="#2-Beautiful-Soup-安装" class="headerlink" title="2. Beautiful Soup 安装"></a>2. Beautiful Soup 安装</h2><blockquote>
<p>Beautiful Soup 3 目前已经停止开发，推荐在现在的项目中使用Beautiful Soup 4。Beautiful Soup 4 已经被移植到 bs4 包中，也就是说导入时我们需要 import bs4</p>
</blockquote>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">pip install beautifulsoup4</span><br></pre></td></tr></table></figure>

<blockquote>
<p>Beautiful Soup支持Python标准库中的HTML解析器,还支持一些第三方的解析器，如果我们不安装它，则 Python 会使用 Python默认的解析器，lxml 解析器更加强大，速度更快，推荐安装</p>
</blockquote>
<table>
<thead>
<tr>
<th>解析器</th>
<th>使用方法</th>
<th>优势</th>
<th>劣势</th>
</tr>
</thead>
<tbody><tr>
<td>Python标准库</td>
<td>BeautifulSoup(markup, “html.parser”)</td>
<td>1. Python的内置标准库  2. 执行速度适中 3.文档容错能力强</td>
<td>Python 2.7.3 或 3.2.2 之前的版本中文档容错能力差</td>
</tr>
<tr>
<td>lxml HTML 解析器</td>
<td>BeautifulSoup(markup, “lxml”)</td>
<td>1. 速度快 2.文档容错能力强</td>
<td>需要安装C语言库</td>
</tr>
<tr>
<td>lxml XML 解析器</td>
<td>BeautifulSoup(markup, [“lxml”, “xml”])  BeautifulSoup(markup, “xml”)</td>
<td>1. 速度快 2.唯一支持XML的解析器</td>
<td>需要安装C语言库</td>
</tr>
<tr>
<td>html5lib</td>
<td>BeautifulSoup(markup, “html5lib”)</td>
<td>1. 最好的容错性 2.以浏览器的方式解析文档 3.生成HTML5格式的文档</td>
<td>速度慢，不依赖外部扩展</td>
</tr>
</tbody></table>
<h2 id="3-创建-Beautiful-Soup-对象"><a href="#3-创建-Beautiful-Soup-对象" class="headerlink" title="3. 创建 Beautiful Soup 对象"></a>3. 创建 Beautiful Soup 对象</h2><figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">from bs4 import BeautifulSoup</span><br><span class="line"># html为被解析内容</span><br><span class="line">bs &#x3D; BeautifulSoup(html,&quot;lxml&quot;)</span><br></pre></td></tr></table></figure>

<h2 id="4-四大对象种类"><a href="#4-四大对象种类" class="headerlink" title="4. 四大对象种类"></a>4. 四大对象种类</h2><blockquote>
<p>Beautiful Soup将复杂HTML文档转换成一个复杂的树形结构,每个节点都是Python对象,所有对象可以归纳为4种:</p>
</blockquote>
<ul>
<li>Tag</li>
<li>NavigableString</li>
<li>BeautifulSoup</li>
<li>Comment</li>
</ul>
<h3 id="4-1-Tag-是什么？"><a href="#4-1-Tag-是什么？" class="headerlink" title="4.1. Tag 是什么？"></a>4.1. Tag 是什么？</h3><p>通俗点讲就是 HTML 中的一个个标签</p>
<h3 id="4-2-NavigableString"><a href="#4-2-NavigableString" class="headerlink" title="4.2. NavigableString"></a>4.2. NavigableString</h3><p>获取内容</p>
<h3 id="4-3-BeautifulSoup"><a href="#4-3-BeautifulSoup" class="headerlink" title="4.3. BeautifulSoup"></a>4.3. BeautifulSoup</h3><blockquote>
<p>BeautifulSoup 对象表示的是一个文档的全部内容.大部分时候,可以把它当作 Tag 对象,它支持 遍历文档树 和 搜索文档树 中描述的大部分的方法.</p>
</blockquote>
<blockquote>
<p>因为 BeautifulSoup 对象并不是真正的HTML或XML的tag,所以它没有name和attribute属性.但有时查看它的 .name 属性是很方便的,所以 BeautifulSoup 对象包含了一个值为 “[document]” 的特殊属性 .name</p>
</blockquote>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">print(soup.name)</span><br><span class="line">print(soup.head.name)</span><br><span class="line"># [document]</span><br><span class="line"># head</span><br></pre></td></tr></table></figure>

<h3 id="4-4-Comment"><a href="#4-4-Comment" class="headerlink" title="4.4. Comment"></a>4.4. Comment</h3><p>Comment对象是一个特殊类型的 NavigableString 对象，其实输出的内容仍然不包括注释符号，但是如果不好好处理它，可能会对我们的文本处理造成意想不到的麻烦</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">if type(soup.i.string) &#x3D;&#x3D; Comment:</span><br><span class="line">    print(soup.i.string)</span><br><span class="line">    print(soup.i.prettify())</span><br><span class="line">else:</span><br><span class="line">    print(soup.i.text)</span><br></pre></td></tr></table></figure>
<h2 id="5-搜索文档树"><a href="#5-搜索文档树" class="headerlink" title="5. 搜索文档树"></a>5. 搜索文档树</h2><blockquote>
<p>Beautiful Soup定义了很多搜索方法,这里着重介绍2个: find() 和 find_all() .其它方法的参数和用法类似</p>
</blockquote>
<h3 id="5-1-过滤器"><a href="#5-1-过滤器" class="headerlink" title="5.1. 过滤器"></a>5.1. 过滤器</h3><blockquote>
<p>介绍 find_all() 方法前,先介绍一下过滤器的类型 ,这些过滤器贯穿整个搜索的API.过滤器可以被用在tag的name中,节点的属性中,字符串中或他们的混合中</p>
</blockquote>
<h3 id="5-2-字符串"><a href="#5-2-字符串" class="headerlink" title="5.2. 字符串"></a>5.2. 字符串</h3><blockquote>
<p>最简单的过滤器是字符串.在搜索方法中传入一个字符串参数,Beautiful Soup会查找与字符串完整匹配的内容,下面的例子用于查找文档中所有的&lt;div&gt;标签</p>
</blockquote>
<blockquote>
<p>如果传入字节码参数,Beautiful Soup会当作UTF-8编码,可以传入一段Unicode 编码来避免Beautiful Soup解析编码出错</p>
</blockquote>
<h3 id="5-3-正则表达式"><a href="#5-3-正则表达式" class="headerlink" title="5.3. 正则表达式"></a>5.3. 正则表达式</h3><p>如果传入正则表达式作为参数,Beautiful Soup会通过正则表达式的 match() 来匹配内容</p>
<h3 id="5-4-列表"><a href="#5-4-列表" class="headerlink" title="5.4. 列表"></a>5.4. 列表</h3><blockquote>
<p>如果传入列表参数,Beautiful Soup会将与列表中任一元素匹配的内容返回</p>
</blockquote>
<h3 id="5-5-keyword"><a href="#5-5-keyword" class="headerlink" title="5.5. keyword"></a>5.5. keyword</h3><blockquote>
<p>如果一个指定名字的参数不是搜索内置的参数名,搜索时会把该参数当作指定名字tag的属性来搜索,如果包含一个名字为 id 的参数,Beautiful Soup会搜索每个tag的”id”属性</p>
</blockquote>
<h3 id="5-6-True"><a href="#5-6-True" class="headerlink" title="5.6. True"></a>5.6. True</h3><blockquote>
<p>True 可以匹配任何值,下面代码查找到所有的tag,但是不会返回字符串节点</p>
</blockquote>
<h3 id="5-7-按CSS搜索"><a href="#5-7-按CSS搜索" class="headerlink" title="5.7. 按CSS搜索"></a>5.7. 按CSS搜索</h3><blockquote>
<p>按照CSS类名搜索tag的功能非常实用,但标识CSS类名的关键字 class 在Python中是保留字,使用 class 做参数会导致语法错误.从Beautiful Soup的4.1.1版本开始,可以通过 class_ 参数搜索有指定CSS类名的tag</p>
</blockquote>
<h2 id="6-CSS选择器（扩展）"><a href="#6-CSS选择器（扩展）" class="headerlink" title="6. CSS选择器（扩展）"></a>6. CSS选择器（扩展）</h2><p>soup.select(参数)</p>
<table>
<thead>
<tr>
<th>表达式</th>
<th>说明</th>
</tr>
</thead>
<tbody><tr>
<td>tag</td>
<td>选择指定标签</td>
</tr>
<tr>
<td>*</td>
<td>选择所有节点</td>
</tr>
<tr>
<td>#id</td>
<td>选择id为container的节点</td>
</tr>
<tr>
<td>.class</td>
<td>选取所有class包含container的节点</td>
</tr>
<tr>
<td>li a</td>
<td>选取所有li下的所有a节点</td>
</tr>
<tr>
<td>ul + p</td>
<td>(兄弟)选择ul后面的第一个p元素</td>
</tr>
<tr>
<td>div#id &gt; ul</td>
<td>(父子)选取id为id的div的第一个ul子元素</td>
</tr>
<tr>
<td>table ~ div</td>
<td>选取与table相邻的所有div元素</td>
</tr>
<tr>
<td>a[title]</td>
<td>选取所有有title属性的a元素</td>
</tr>
<tr>
<td>a[class=”title”]</td>
<td>选取所有class属性为title值的a</td>
</tr>
<tr>
<td>a[href*=”sxt”]</td>
<td>选取所有href属性包含sxt的a元素</td>
</tr>
<tr>
<td>a[href^=”http”]</td>
<td>选取所有href属性值以http开头的a元素</td>
</tr>
<tr>
<td>a[href$=”.png”]</td>
<td>选取所有href属性值以.png结尾的a元素</td>
</tr>
<tr>
<td>input[type=”radio”]:checked</td>
<td>选取选中的单选框（radio）元素</td>
</tr>
</tbody></table>
<h2 id="代码"><a href="#代码" class="headerlink" title="代码"></a>代码：</h2>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> bs4 <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">from</span> bs4.element <span class="keyword">import</span> *</span><br><span class="line"></span><br><span class="line"><span class="comment"># BeautifulSoup的使用</span></span><br><span class="line"></span><br><span 
class="line">str = <span class="string">'''</span></span><br><span class="line"><span class="string">&lt;title id="id"&gt;Title&lt;/title&gt;</span></span><br><span class="line"><span class="string">&lt;div class="info" style="float: left;"&gt;Welcome to LeiYang&lt;/div&gt;</span></span><br><span class="line"><span class="string">&lt;div class="info" style="float: right;"&gt;</span></span><br><span class="line"><span class="string">    &lt;span&gt;Good city&lt;/span&gt;</span></span><br><span class="line"><span class="string">    &lt;a href="https://www.baidu.com"&gt;百度&lt;/a&gt;</span></span><br><span class="line"><span class="string">    &lt;i&gt;&lt;!--注释--&gt;&lt;/i&gt;</span></span><br><span class="line"><span class="string">&lt;/div&gt;</span></span><br><span class="line"><span class="string">'''</span></span><br><span class="line">soup = BeautifulSoup(str, <span class="string">"lxml"</span>)</span><br><span class="line"><span class="comment"># 获取标签</span></span><br><span class="line">print(soup.title)</span><br><span class="line">print(soup.div)</span><br><span class="line"><span class="comment"># 获取所有属性</span></span><br><span class="line">print(soup.div.attrs)</span><br><span class="line"><span class="comment">#获取单个属性的值</span></span><br><span class="line">print(soup.div.get(<span class="string">"class"</span>))</span><br><span class="line">print(soup.div[<span class="string">"style"</span>])</span><br><span class="line">print(soup.a[<span class="string">"href"</span>])</span><br><span class="line">print(soup.a.get(<span class="string">"href"</span>))</span><br><span class="line"><span class="comment"># 获取内容</span></span><br><span class="line">print(soup.div.text)</span><br><span class="line">print(soup.div.string)</span><br><span class="line"><span class="comment">#Comment对象</span></span><br><span class="line"><span class="keyword">if</span> type(soup.i.string) == Comment:</span><br><span class="line">    print(soup.i.string)</span><br><span class="line">   
 print(soup.i.prettify())</span><br><span class="line"><span class="keyword">else</span>:</span><br><span class="line">    print(soup.i.text)</span><br><span class="line"></span><br><span class="line">print(<span class="string">"------------------find_all---------------------"</span>)</span><br><span class="line">print(soup.find_all(<span class="string">"div"</span>))</span><br><span class="line">print(soup.find_all(id=<span class="string">"id"</span>))</span><br><span class="line">print(soup.find_all(class_=<span class="string">"info"</span>))</span><br><span class="line">print(soup.find_all(attrs=&#123;<span class="string">"style"</span>: <span class="string">"float: left;"</span>&#125;))</span><br><span class="line">print(<span class="string">"------------------css---------------------"</span>)</span><br><span class="line">print(soup.select(<span class="string">"title"</span>))</span><br><span class="line">print(soup.select(<span class="string">"#id"</span>))</span><br><span class="line">print(soup.select(<span class="string">".info"</span>))</span><br><span class="line">print(soup.select(<span class="string">"div span"</span>))</span><br><span class="line">print(soup.select(<span class="string">"div &gt; span"</span>))</span><br><span class="line">print(soup.select(<span class="string">"div"</span>)[<span class="number">1</span>].select(<span class="string">"a"</span>))</span><br><span class="line">print(soup.select(<span class="string">"div"</span>)[<span class="number">0</span>].text)</span><br></pre></td></tr></table></figure>

<h1 id="八、数据解析-Xpath"><a href="#八、数据解析-Xpath" class="headerlink" title="八、数据解析-Xpath"></a>八、数据解析-Xpath</h1><h2 id="1-介绍-1"><a href="#1-介绍-1" class="headerlink" title="1. 介绍"></a>1. 介绍</h2><blockquote>
<p>之前 BeautifulSoup 的用法，这个已经是非常强大的库了，不过还有一些比较流行的解析库，例如 lxml，使用的是 Xpath 语法，同样是效率比较高的解析方法。如果大家对 BeautifulSoup 使用不太习惯的话，可以尝试下 Xpath</p>
</blockquote>
<p><a href="http://lxml.de/index.html" target="_blank" rel="noopener">官网</a> <a href="http://lxml.de/index.html" target="_blank" rel="noopener">http://lxml.de/index.html</a></p>
<h2 id="2-安装-1"><a href="#2-安装-1" class="headerlink" title="2. 安装"></a>2. 安装</h2><figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">pip install lxml</span><br></pre></td></tr></table></figure>

<h2 id="3-XPath语法"><a href="#3-XPath语法" class="headerlink" title="3. XPath语法"></a>3. XPath语法</h2><blockquote>
<p>XPath 是一门在 XML 文档中查找信息的语言。XPath 可用来在 XML 文档中对元素和属性进行遍历。XPath 是 W3C XSLT 标准的主要元素，并且 XQuery 和 XPointer 都构建于 XPath 表达之上</p>
</blockquote>
<h3 id="3-1-节点的关系"><a href="#3-1-节点的关系" class="headerlink" title="3.1. 节点的关系"></a>3.1. 节点的关系</h3><ul>
<li>父（Parent）</li>
<li>子（Children）</li>
<li>同胞（Sibling）</li>
<li>先辈（Ancestor）</li>
<li>后代（Descendant）</li>
</ul>
<h3 id="3-2-选取节点"><a href="#3-2-选取节点" class="headerlink" title="3.2. 选取节点"></a>3.2. 选取节点</h3><h4 id="3-2-1-常用的路径表达式"><a href="#3-2-1-常用的路径表达式" class="headerlink" title="3.2.1. 常用的路径表达式"></a>3.2.1. 常用的路径表达式</h4><table>
<thead>
<tr>
<th>表达式</th>
<th>描述</th>
</tr>
</thead>
<tbody><tr>
<td>nodename</td>
<td>选取此节点的所有子节点</td>
</tr>
<tr>
<td>/</td>
<td>从根节点选取</td>
</tr>
<tr>
<td>//</td>
<td>从匹配选择的当前节点选择文档中的节点，而不考虑它们的位置</td>
</tr>
<tr>
<td>.</td>
<td>选取当前节点</td>
</tr>
<tr>
<td>..</td>
<td>选取当前节点的父节点</td>
</tr>
<tr>
<td>@</td>
<td>选取属性</td>
</tr>
</tbody></table>
<h4 id="3-2-2-通配符"><a href="#3-2-2-通配符" class="headerlink" title="3.2.2. 通配符"></a>3.2.2. 通配符</h4><p>XPath 通配符可用来选取未知的 XML 元素。</p>
<table>
<thead>
<tr>
<th>通配符</th>
<th>描述</th>
<th>举例</th>
<th>结果</th>
</tr>
</thead>
<tbody><tr>
<td>*</td>
<td>匹配任何元素节点</td>
<td>xpath(‘div/*’)</td>
<td>获取div下的所有子节点</td>
</tr>
<tr>
<td>@*</td>
<td>匹配任何属性节点</td>
<td>xpath(‘div[@*]’)</td>
<td>选取所有带属性的div节点</td>
</tr>
<tr>
<td>node()</td>
<td>匹配任何类型的节点</td>
<td></td>
<td></td>
</tr>
</tbody></table>
<h4 id="3-2-3-选取若干路径"><a href="#3-2-3-选取若干路径" class="headerlink" title="3.2.3. 选取若干路径"></a>3.2.3. 选取若干路径</h4><p>通过在路径表达式中使用“|”运算符，您可以选取若干个路径</p>
<table>
<thead>
<tr>
<th>表达式</th>
<th>结果</th>
</tr>
</thead>
<tbody><tr>
<td>xpath(‘//div | //table’)</td>
<td>选取所有的div和table节点</td>
</tr>
</tbody></table>
<h4 id="3-2-4-谓语"><a href="#3-2-4-谓语" class="headerlink" title="3.2.4. 谓语"></a>3.2.4. 谓语</h4><p>谓语被嵌在方括号内，用来查找某个特定的节点或包含某个制定的值的节点</p>
<table>
<thead>
<tr>
<th>表达式</th>
<th>结果</th>
</tr>
</thead>
<tbody><tr>
<td>xpath(‘/body/div[1]’)</td>
<td>选取body下的第一个div节点</td>
</tr>
<tr>
<td>xpath(‘/body/div[last()]’)</td>
<td>选取body下最后一个div节点</td>
</tr>
<tr>
<td>xpath(‘/body/div[last()-1]’)</td>
<td>选取body下倒数第二个div节点</td>
</tr>
<tr>
<td>xpath(‘/body/div[position()&lt;3]’)</td>
<td>选取body下前两个div节点</td>
</tr>
<tr>
<td>xpath(‘/body/div[@class]’)</td>
<td>选取body下带有class属性的div节点</td>
</tr>
<tr>
<td>xpath(‘/body/div[@class=”main”]’)</td>
<td>选取body下class属性为main的div节点</td>
</tr>
<tr>
<td>xpath(‘/body/div[price&gt;35.00]’)</td>
<td>选取body下price元素大于35的div节点</td>
</tr>
</tbody></table>
<h4 id="3-2-5-XPath运算符"><a href="#3-2-5-XPath运算符" class="headerlink" title="3.2.5. XPath运算符"></a>3.2.5. XPath运算符</h4><table>
<thead>
<tr>
<th>运算符</th>
<th>描述</th>
<th>实例</th>
<th>返回值</th>
</tr>
</thead>
<tbody><tr>
<td>|</td>
<td>计算两个节点集</td>
<td>//book | //cd</td>
<td>返回所有拥有 book 和 cd 元素的节点集</td>
</tr>
<tr>
<td>+</td>
<td>加法</td>
<td>6 + 4</td>
<td>10</td>
</tr>
<tr>
<td>–</td>
<td>减法</td>
<td>6 – 4</td>
<td>2</td>
</tr>
<tr>
<td>*</td>
<td>乘法</td>
<td>6 * 4</td>
<td>24</td>
</tr>
<tr>
<td>div</td>
<td>除法</td>
<td>8 div 4</td>
<td>2</td>
</tr>
<tr>
<td>=</td>
<td>等于</td>
<td>price=9.80</td>
<td>如果 price 是 9.80，则返回 true。如果 price 是 9.90，则返回 false。</td>
</tr>
<tr>
<td>!=</td>
<td>不等于</td>
<td>price!=9.80</td>
<td>如果 price 是 9.90，则返回 true。如果 price 是 9.80，则返回 false。</td>
</tr>
<tr>
<td>&lt;</td>
<td>小于</td>
<td>price&lt;9.80</td>
<td>如果 price 是 9.00，则返回 true。如果 price 是 9.90，则返回 false。</td>
</tr>
<tr>
<td>&lt;=</td>
<td>小于或等于</td>
<td>price&lt;=9.80</td>
<td>如果 price 是 9.00，则返回 true。如果 price 是 9.90，则返回 false。</td>
</tr>
<tr>
<td>&gt;</td>
<td>大于</td>
<td>price&gt;9.80</td>
<td>如果 price 是 9.90，则返回 true。如果 price 是 9.80，则返回 false。</td>
</tr>
<tr>
<td>&gt;=</td>
<td>大于或等于</td>
<td>price&gt;=9.80</td>
<td>如果 price 是 9.90，则返回 true。如果 price 是 9.70，则返回 false。</td>
</tr>
<tr>
<td>or</td>
<td>或</td>
<td>price=9.80 or price=9.70</td>
<td>如果 price 是 9.80，则返回 true。如果 price 是 9.50，则返回 false。</td>
</tr>
<tr>
<td>and</td>
<td>与</td>
<td>price&gt;9.00 and price&lt;9.90</td>
<td>如果 price 是 9.80，则返回 true。如果 price 是 8.50，则返回 false。</td>
</tr>
<tr>
<td>mod</td>
<td>计算除法的余数</td>
<td>5 mod 2</td>
<td>1</td>
</tr>
</tbody></table>
<h4 id="3-3-使用"><a href="#3-3-使用" class="headerlink" title="3.3. 使用"></a>3.3. 使用</h4><h5 id="3-3-1-小例子"><a href="#3-3-1-小例子" class="headerlink" title="3.3.1. 小例子"></a>3.3.1. 小例子</h5><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> lxml <span class="keyword">import</span> etree</span><br><span class="line">text = <span class="string">'''</span></span><br><span class="line"><span class="string">&lt;div&gt;</span></span><br><span class="line"><span class="string">    &lt;ul&gt;</span></span><br><span class="line"><span class="string">         &lt;li class="item-0"&gt;&lt;a href="link1.html"&gt;first item&lt;/a&gt;&lt;/li&gt;</span></span><br><span class="line"><span class="string">         &lt;li class="item-1"&gt;&lt;a href="link2.html"&gt;second item&lt;/a&gt;&lt;/li&gt;</span></span><br><span class="line"><span class="string">         &lt;li class="item-inactive"&gt;&lt;a href="link3.html"&gt;third item&lt;/a&gt;&lt;/li&gt;</span></span><br><span class="line"><span class="string">         &lt;li class="item-1"&gt;&lt;a href="link4.html"&gt;fourth item&lt;/a&gt;&lt;/li&gt;</span></span><br><span class="line"><span class="string">         &lt;li class="item-0"&gt;&lt;a href="link5.html"&gt;fifth item&lt;/a&gt;</span></span><br><span class="line"><span class="string">     &lt;/ul&gt;</span></span><br><span class="line"><span class="string"> &lt;/div&gt;</span></span><br><span class="line"><span 
class="string">'''</span></span><br><span class="line">html = etree.HTML(text)</span><br><span class="line">result = etree.tostring(html)</span><br><span class="line">print(result)</span><br></pre></td></tr></table></figure>

<p>首先我们使用lxml的etree库，然后利用 etree.HTML初始化，然后我们将其打印出来。<br>其中，这里体现了lxml的一个非常实用的功能就是自动修正html代码，大家应该注意到了，最后一个li标签，其实我把尾标签删掉了，是不闭合的。不过，lxml因为继承了libxml2的特性，具有自动修正HTML代码的功能。<br>所以输出结果是这样的</p>
<figure class="highlight html"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">html</span>&gt;</span><span class="tag">&lt;<span class="name">body</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">div</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">ul</span>&gt;</span></span><br><span class="line">         <span class="tag">&lt;<span class="name">li</span> <span class="attr">class</span>=<span class="string">"item-0"</span>&gt;</span><span class="tag">&lt;<span class="name">a</span> <span class="attr">href</span>=<span class="string">"link1.html"</span>&gt;</span>first item<span class="tag">&lt;/<span class="name">a</span>&gt;</span><span class="tag">&lt;/<span class="name">li</span>&gt;</span></span><br><span class="line">         <span class="tag">&lt;<span class="name">li</span> <span class="attr">class</span>=<span class="string">"item-1"</span>&gt;</span><span class="tag">&lt;<span class="name">a</span> <span class="attr">href</span>=<span class="string">"link2.html"</span>&gt;</span>second item<span class="tag">&lt;/<span class="name">a</span>&gt;</span><span class="tag">&lt;/<span class="name">li</span>&gt;</span></span><br><span class="line">         <span class="tag">&lt;<span class="name">li</span> <span class="attr">class</span>=<span class="string">"item-inactive"</span>&gt;</span><span class="tag">&lt;<span class="name">a</span> <span class="attr">href</span>=<span class="string">"link3.html"</span>&gt;</span>third item<span 
class="tag">&lt;/<span class="name">a</span>&gt;</span><span class="tag">&lt;/<span class="name">li</span>&gt;</span></span><br><span class="line">         <span class="tag">&lt;<span class="name">li</span> <span class="attr">class</span>=<span class="string">"item-1"</span>&gt;</span><span class="tag">&lt;<span class="name">a</span> <span class="attr">href</span>=<span class="string">"link4.html"</span>&gt;</span>fourth item<span class="tag">&lt;/<span class="name">a</span>&gt;</span><span class="tag">&lt;/<span class="name">li</span>&gt;</span></span><br><span class="line">         <span class="tag">&lt;<span class="name">li</span> <span class="attr">class</span>=<span class="string">"item-0"</span>&gt;</span><span class="tag">&lt;<span class="name">a</span> <span class="attr">href</span>=<span class="string">"link5.html"</span>&gt;</span>fifth item<span class="tag">&lt;/<span class="name">a</span>&gt;</span><span class="tag">&lt;/<span class="name">li</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">ul</span>&gt;</span></span><br><span class="line"> <span class="tag">&lt;/<span class="name">div</span>&gt;</span></span><br><span class="line"></span><br><span class="line"><span class="tag">&lt;/<span class="name">body</span>&gt;</span><span class="tag">&lt;/<span class="name">html</span>&gt;</span></span><br></pre></td></tr></table></figure>

<p>不仅补全了li标签，还添加了body，html标签。<br>文件读取<br>除了直接读取字符串，还支持从文件读取内容。比如我们新建一个文件叫做hello.html，内容为</p>
<figure class="highlight html"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">div</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">ul</span>&gt;</span></span><br><span class="line">         <span class="tag">&lt;<span class="name">li</span> <span class="attr">class</span>=<span class="string">"item-0"</span>&gt;</span><span class="tag">&lt;<span class="name">a</span> <span class="attr">href</span>=<span class="string">"link1.html"</span>&gt;</span>first item<span class="tag">&lt;/<span class="name">a</span>&gt;</span><span class="tag">&lt;/<span class="name">li</span>&gt;</span></span><br><span class="line">         <span class="tag">&lt;<span class="name">li</span> <span class="attr">class</span>=<span class="string">"item-1"</span>&gt;</span><span class="tag">&lt;<span class="name">a</span> <span class="attr">href</span>=<span class="string">"link2.html"</span>&gt;</span>second item<span class="tag">&lt;/<span class="name">a</span>&gt;</span><span class="tag">&lt;/<span class="name">li</span>&gt;</span></span><br><span class="line">         <span class="tag">&lt;<span class="name">li</span> <span class="attr">class</span>=<span class="string">"item-inactive"</span>&gt;</span><span class="tag">&lt;<span class="name">a</span> <span class="attr">href</span>=<span class="string">"link3.html"</span>&gt;</span><span class="tag">&lt;<span class="name">span</span> <span class="attr">class</span>=<span class="string">"bold"</span>&gt;</span>third item<span class="tag">&lt;/<span class="name">span</span>&gt;</span><span class="tag">&lt;/<span 
class="name">a</span>&gt;</span><span class="tag">&lt;/<span class="name">li</span>&gt;</span></span><br><span class="line">         <span class="tag">&lt;<span class="name">li</span> <span class="attr">class</span>=<span class="string">"item-1"</span>&gt;</span><span class="tag">&lt;<span class="name">a</span> <span class="attr">href</span>=<span class="string">"link4.html"</span>&gt;</span>fourth item<span class="tag">&lt;/<span class="name">a</span>&gt;</span><span class="tag">&lt;/<span class="name">li</span>&gt;</span></span><br><span class="line">         <span class="tag">&lt;<span class="name">li</span> <span class="attr">class</span>=<span class="string">"item-0"</span>&gt;</span><span class="tag">&lt;<span class="name">a</span> <span class="attr">href</span>=<span class="string">"link5.html"</span>&gt;</span>fifth item<span class="tag">&lt;/<span class="name">a</span>&gt;</span><span class="tag">&lt;/<span class="name">li</span>&gt;</span></span><br><span class="line">     <span class="tag">&lt;/<span class="name">ul</span>&gt;</span></span><br><span class="line"> <span class="tag">&lt;/<span class="name">div</span>&gt;</span></span><br></pre></td></tr></table></figure>

<p>利用parse方法来读取文件</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> lxml <span class="keyword">import</span> etree</span><br><span class="line">html = etree.parse(<span class="string">'hello.html'</span>)</span><br><span class="line">result = etree.tostring(html, pretty_print=<span class="literal">True</span>)</span><br><span class="line">print(result)</span><br></pre></td></tr></table></figure>

<p>同样可以得到相同的结果</p>
<h5 id="3-3-2-XPath具体使用"><a href="#3-3-2-XPath具体使用" class="headerlink" title="3.3.2. XPath具体使用"></a>3.3.2. XPath具体使用</h5><p>依然以上一段程序为例</p>
<ol>
<li>获取所有的 <code>&lt;li&gt;</code> 标签</li>
</ol>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> lxml <span class="keyword">import</span> etree</span><br><span class="line">html = etree.parse(<span class="string">'hello.html'</span>)</span><br><span class="line"><span class="keyword">print</span> (type(html))</span><br><span class="line">result = html.xpath(<span class="string">'//li'</span>)</span><br><span class="line"><span class="keyword">print</span> (result)</span><br><span class="line"><span class="keyword">print</span> (len(result))</span><br><span class="line"><span class="keyword">print</span> (type(result))</span><br><span class="line"><span class="keyword">print</span> (type(result[<span class="number">0</span>]))</span><br></pre></td></tr></table></figure>

<p>运行结果</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">&lt;type &#39;lxml.etree._ElementTree&#39;&gt;</span><br><span class="line">[&lt;Element li at 0x1014e0e18&gt;, &lt;Element li at 0x1014e0ef0&gt;, &lt;Element li at 0x1014e0f38&gt;, &lt;Element li at 0x1014e0f80&gt;, &lt;Element li at 0x1014e0fc8&gt;]</span><br><span class="line"></span><br><span class="line">&lt;type &#39;list&#39;&gt;</span><br><span class="line">&lt;type &#39;lxml.etree._Element&#39;&gt;</span><br></pre></td></tr></table></figure>

<p>可见，etree.parse 的类型是 ElementTree，通过调用 xpath 以后，得到了一个列表，包含了 5 个 <code>&lt;li&gt;</code> 元素，每个元素都是 Element 类型</p>
<ol start="2">
<li>获取<code>&lt;li&gt;</code>标签的所有 class</li>
</ol>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">result &#x3D; html.xpath(&#39;&#x2F;&#x2F;li&#x2F;@class&#39;)</span><br><span class="line">print (result)</span><br></pre></td></tr></table></figure>

<p>运行结果</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">[&#39;item-0&#39;, &#39;item-1&#39;, &#39;item-inactive&#39;, &#39;item-1&#39;, &#39;item-0&#39;]</span><br></pre></td></tr></table></figure>


<ol start="3">
<li>获取 <code>&lt;li&gt;</code> 标签下 href 为 link1.html 的 <code>&lt;a&gt;</code> 标签</li>
</ol>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">result &#x3D; html.xpath(&#39;&#x2F;&#x2F;li&#x2F;a[@href&#x3D;&quot;link1.html&quot;]&#39;)</span><br><span class="line">print (result)</span><br></pre></td></tr></table></figure>

<p>运行结果</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">[&lt;Element a at 0x10ffaae18&gt;]</span><br></pre></td></tr></table></figure>

<ol start="4">
<li>获取<code>&lt;li&gt;</code>标签下的所有 <code>&lt;span&gt;</code> 标签</li>
</ol>
<p><strong>注意</strong>: 这么写是不对的</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">result &#x3D; html.xpath(&#39;&#x2F;&#x2F;li&#x2F;span&#39;)</span><br><span class="line"></span><br><span class="line">#因为 &#x2F; 是用来获取子元素的，而 &lt;span&gt; 并不是 &lt;li&gt; 的子元素，所以，要用双斜杠</span><br><span class="line">result &#x3D; html.xpath(&#39;&#x2F;&#x2F;li&#x2F;&#x2F;span&#39;)</span><br><span class="line">print(result)</span><br></pre></td></tr></table></figure>

<p>运行结果</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">[&lt;Element span at 0x10d698e18&gt;]</span><br></pre></td></tr></table></figure>

<ol start="5">
<li>获取 <code>&lt;li&gt;</code> 标签下的所有 class，不包括<code>&lt;li&gt;</code></li>
</ol>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">result &#x3D; html.xpath(&#39;&#x2F;&#x2F;li&#x2F;a&#x2F;&#x2F;@class&#39;)</span><br><span class="line">print (result)</span><br><span class="line">#运行结果</span><br><span class="line">[&#39;bold&#39;]</span><br></pre></td></tr></table></figure>

<ol start="6">
<li>获取最后一个 <code>&lt;li&gt;</code> 的 <code>&lt;a&gt;</code> 的 href</li>
</ol>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">result &#x3D; html.xpath(&#39;&#x2F;&#x2F;li[last()]&#x2F;a&#x2F;@href&#39;)</span><br><span class="line">print (result)</span><br></pre></td></tr></table></figure>

<p>运行结果</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">[&#39;link5.html&#39;]</span><br></pre></td></tr></table></figure>


<ol start="7">
<li>获取倒数第二个元素的内容</li>
</ol>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">result &#x3D; html.xpath(&#39;&#x2F;&#x2F;li[last()-1]&#x2F;a&#39;)</span><br><span class="line">print (result[0].text)</span><br></pre></td></tr></table></figure>

<p>运行结果</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">fourth item</span><br></pre></td></tr></table></figure>

<ol start="8">
<li>获取 class 为 bold 的标签名</li>
</ol>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">result &#x3D; html.xpath(&#39;&#x2F;&#x2F;*[@class&#x3D;&quot;bold&quot;]&#39;)</span><br><span class="line">print (result[0].tag)</span><br></pre></td></tr></table></figure>

<p>运行结果</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">span</span><br></pre></td></tr></table></figure>

<h2 id="4-Xpath案例"><a href="#4-Xpath案例" class="headerlink" title="4.Xpath案例"></a>4.Xpath案例</h2><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> lxml <span class="keyword">import</span> etree</span><br><span class="line"><span class="keyword">import</span> requests</span><br><span class="line"><span class="keyword">from</span> fake_useragent <span class="keyword">import</span> *</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="comment"># 
xpath的使用</span></span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">demo01</span><span class="params">()</span>:</span></span><br><span class="line">    url = <span class="string">"https://www.qidian.com/rank/fengyun?style=1"</span></span><br><span class="line">    headers = &#123;</span><br><span class="line">        <span class="string">"User-agent"</span>: UserAgent().chrome</span><br><span class="line">    &#125;</span><br><span class="line">    response = requests.get(url, headers=headers)</span><br><span class="line">    html = etree.HTML(response.text)</span><br><span class="line">    names = html.xpath(<span class="string">"//h4/a/text()"</span>)</span><br><span class="line">    authors = html.xpath(<span class="string">"//p[@class='author']/a[1]/text()"</span>)</span><br><span class="line">    <span class="comment"># print(names)</span></span><br><span class="line">    <span class="comment"># print(authors)</span></span><br><span class="line">    <span class="comment"># for num in range(len(names)):</span></span><br><span class="line">    <span class="comment">#     print(f"&#123;names[num]&#125; : &#123;authors[num]&#125;")</span></span><br><span class="line"></span><br><span class="line">    <span class="keyword">for</span> name, author <span class="keyword">in</span> zip(names, authors):</span><br><span class="line">        print(<span class="string">f"<span class="subst">&#123;name&#125;</span> : <span class="subst">&#123;author&#125;</span>"</span>)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">demo02</span><span class="params">()</span>:</span></span><br><span class="line">    url = <span class="string">"https://www.kuaidaili.com/free/"</span></span><br><span class="line">    headers = &#123;</span><br><span class="line">        <span 
class="string">"User-agent"</span>: UserAgent().chrome</span><br><span class="line">    &#125;</span><br><span class="line">    response = requests.get(url, headers=headers)</span><br><span class="line">    html = etree.HTML(response.text)</span><br><span class="line">    tds = html.xpath(<span class="string">"//tbody/tr/td/text()"</span>)</span><br><span class="line">    trs_len = len(html.xpath(<span class="string">"//tbody/tr"</span>))</span><br><span class="line">    <span class="keyword">for</span> num <span class="keyword">in</span> range(trs_len):</span><br><span class="line">        ip = tds[num * <span class="number">7</span>]</span><br><span class="line">        port = tds[num * <span class="number">7</span> + <span class="number">1</span>]</span><br><span class="line">        type = tds[num * <span class="number">7</span> + <span class="number">3</span>]</span><br><span class="line">        print(<span class="string">f"<span class="subst">&#123;ip&#125;</span>:<span class="subst">&#123;port&#125;</span>   <span class="subst">&#123;type&#125;</span>"</span>)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">'__main__'</span>:</span><br><span class="line">    demo01()</span><br><span class="line">    demo02()</span><br></pre></td></tr></table></figure>

<h1 id="九、数据解析-Json与JsonPath"><a href="#九、数据解析-Json与JsonPath" class="headerlink" title="九、数据解析-Json与JsonPath"></a>九、数据解析-Json与JsonPath</h1><h2 id="1-Json与JsonPath"><a href="#1-Json与JsonPath" class="headerlink" title="1. Json与JsonPath"></a>1. Json与JsonPath</h2><p>JSON(JavaScript Object Notation) 是一种轻量级的数据交换格式，它使得人们很容易的进行阅读和编写。同时也方便了机器进行解析和生成。适用于进行数据交互的场景，比如网站前台与后台之间的数据交互。</p>
<p>JSON和XML的比较可谓不相上下。</p>
<p>Python 中自带了JSON模块，直接import json就可以使用了。</p>
<p>官方文档：<a href="http://docs.python.org/library/json.html" target="_blank" rel="noopener">http://docs.python.org/library/json.html</a></p>
<p>Json在线解析网站：<a href="http://www.json.cn/#" target="_blank" rel="noopener">http://www.json.cn/#</a></p>
<h2 id="2-JSON"><a href="#2-JSON" class="headerlink" title="2. JSON"></a>2. JSON</h2><p>json简单说就是javascript中的对象和数组，所以这两种结构就是对象和数组两种结构，通过这两种结构可以表示各种复杂的结构</p>
<ol>
<li><p>对象：对象在js中表示为{ }括起来的内容，数据结构为 { key：value, key：value, … }的键值对的结构，在面向对象的语言中，key为对象的属性，value为对应的属性值，所以很容易理解，取值方法为 对象.key 获取属性值，这个属性值的类型可以是数字、字符串、数组、对象这几种</p>
</li>
<li><p>数组：数组在js中是中括号[ ]括起来的内容，数据结构为 [“Python”, “javascript”, “C++”, …]，取值方式和所有语言中一样，使用索引获取，字段值的类型可以是 数字、字符串、数组、对象几种</p>
</li>
</ol>
<h2 id="3-Python中的json模块"><a href="#3-Python中的json模块" class="headerlink" title="3. Python中的json模块"></a>3. Python中的json模块</h2><blockquote>
<p>json模块提供了四个功能：dumps、dump、loads、load，用于字符串 和 python数据类型间进行转换</p>
</blockquote>
<h3 id="3-1-json-loads"><a href="#3-1-json-loads" class="headerlink" title="3.1. json.loads()"></a>3.1. json.loads()</h3><blockquote>
<p>把Json格式字符串解码转换成Python对象 </p>
</blockquote>
<p>从json到python的类型转化对照如下：</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> json</span><br><span class="line"></span><br><span class="line">strList = <span class="string">'[1, 2, 3, 4]'</span></span><br><span class="line">strDict = <span class="string">'&#123;"city": "北京", "name": "范爷"&#125;'</span></span><br><span class="line">json.loads(strList) </span><br><span class="line"><span class="comment"># 转换结果 [1, 2, 3, 4]</span></span><br><span class="line">json.loads(strDict) <span class="comment"># json数据自动按Unicode存储</span></span><br><span class="line"><span class="comment"># 转换结果 &#123;u'city': u'\u5317\u4eac', u'name': u'\u5927\u732b'&#125;</span></span><br></pre></td></tr></table></figure>

<h3 id="3-2-json-dumps"><a href="#3-2-json-dumps" class="headerlink" title="3.2. json.dumps()"></a>3.2. json.dumps()</h3><blockquote>
<p>实现python类型转化为json字符串，返回一个str对象 把一个Python对象编码转换成Json字符串</p>
</blockquote>
<p>从python原始类型向json类型的转化对照如下：</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> json</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">listStr = [<span class="number">1</span>, <span class="number">2</span>, <span class="number">3</span>, <span class="number">4</span>]</span><br><span class="line">tupleStr = (<span class="number">1</span>, <span class="number">2</span>, <span class="number">3</span>, <span class="number">4</span>)</span><br><span class="line">dictStr = &#123;<span class="string">"city"</span>: <span class="string">"北京"</span>, <span class="string">"name"</span>: <span class="string">"范爷"</span>&#125;</span><br><span class="line"></span><br><span class="line">json.dumps(listStr)</span><br><span class="line"><span class="comment"># 转换结果 '[1, 2, 3, 4]'</span></span><br><span class="line">json.dumps(tupleStr)</span><br><span class="line"><span class="comment"># 转换结果 '[1, 2, 3, 4]'</span></span><br><span class="line"></span><br><span class="line"><span class="comment"># 注意：json.dumps() 序列化时默认使用的ascii编码</span></span><br><span class="line"><span class="comment"># 添加参数 ensure_ascii=False 禁用ascii编码，按utf-8编码</span></span><br><span class="line"></span><br><span class="line">json.dumps(dictStr) </span><br><span 
class="line"><span class="comment"># 转换结果 '&#123;"city": "\\u5317\\u4eac", "name": "\\u5927\\u5218"&#125;'</span></span><br><span class="line"></span><br><span class="line">print(json.dumps(dictStr, ensure_ascii=<span class="literal">False</span>))</span><br><span class="line"><span class="comment"># 输出结果 &#123;"city": "北京", "name": "范爷"&#125;</span></span><br></pre></td></tr></table></figure>

<h3 id="3-3-json-dump"><a href="#3-3-json-dump" class="headerlink" title="3.3. json.dump()"></a>3.3. json.dump()</h3><blockquote>
<p>将Python内置类型序列化为json对象后写入文件</p>
</blockquote>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> json</span><br><span class="line"></span><br><span class="line">listStr = [&#123;<span class="string">"city"</span>: <span class="string">"北京"</span>&#125;, &#123;<span class="string">"name"</span>: <span class="string">"范爷"</span>&#125;]</span><br><span class="line">json.dump(listStr, open(<span class="string">"listStr.json"</span>,<span class="string">"w"</span>), ensure_ascii=<span class="literal">False</span>)</span><br><span class="line"></span><br><span class="line">dictStr = &#123;<span class="string">"city"</span>: <span class="string">"北京"</span>, <span class="string">"name"</span>: <span class="string">"范爷"</span>&#125;</span><br><span class="line">json.dump(dictStr, open(<span class="string">"dictStr.json"</span>,<span class="string">"w"</span>), ensure_ascii=<span class="literal">False</span>)</span><br></pre></td></tr></table></figure>

<h3 id="3-4-json-load"><a href="#3-4-json-load" class="headerlink" title="3.4. json.load()"></a>3.4. json.load()</h3><blockquote>
<p>读取文件中json形式的字符串元素 转化成python类型</p>
</blockquote>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> json</span><br><span class="line"></span><br><span class="line">strList = json.load(open(<span class="string">"listStr.json"</span>))</span><br><span class="line">print(strList)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 输出结果 [&#123;u'city': u'\u5317\u4eac'&#125;, &#123;u'name': u'\u5927\u5218'&#125;]</span></span><br><span class="line"></span><br><span class="line">strDict = json.load(open(<span class="string">"dictStr.json"</span>))</span><br><span class="line">print(strDict)</span><br><span class="line"><span class="comment"># 输出结果 &#123;u'city': u'\u5317\u4eac', u'name': u'\u5927\u5218'&#125;</span></span><br></pre></td></tr></table></figure>

<h2 id="4-JsonPath"><a href="#4-JsonPath" class="headerlink" title="4. JsonPath"></a>4. JsonPath</h2><p>JsonPath 是一种信息抽取类库，是从JSON文档中抽取指定信息的工具，提供多种语言实现版本，包括：Javascript, Python， PHP 和 Java。</p>
<p>JsonPath 对于 JSON 来说，相当于 XPATH 对于 XML。</p>
<p>安装方法：<code>pip install jsonpath</code></p>
<p>官方文档：<a href="http://goessner.net/articles/JsonPath" target="_blank" rel="noopener">http://goessner.net/articles/JsonPath</a></p>
<h2 id="5-JsonPath与XPath语法对比"><a href="#5-JsonPath与XPath语法对比" class="headerlink" title="5. JsonPath与XPath语法对比"></a>5. JsonPath与XPath语法对比</h2><p>Json结构清晰，可读性高，复杂度低，非常容易匹配，下表中对应了XPath的用法</p>
<table>
<thead>
<tr>
<th>XPath</th>
<th>JSONPath</th>
<th>描述</th>
</tr>
</thead>
<tbody><tr>
<td>/</td>
<td>$</td>
<td>根节点</td>
</tr>
<tr>
<td>.</td>
<td>@</td>
<td>当前节点</td>
</tr>
<tr>
<td>/</td>
<td>. or []</td>
<td>取子节点</td>
</tr>
<tr>
<td>..</td>
<td>n/a</td>
<td>取父节点，Jsonpath未支持</td>
</tr>
<tr>
<td>//</td>
<td>..</td>
<td>就是不管位置，选择所有符合条件的节点</td>
</tr>
<tr>
<td>*</td>
<td>*</td>
<td>匹配所有元素节点</td>
</tr>
<tr>
<td>@</td>
<td>n/a</td>
<td>根据属性访问，Json不支持，因为Json是个Key-value递归结构，不需要。</td>
</tr>
<tr>
<td>[]</td>
<td>[]</td>
<td>迭代器标示（可以在里边做简单的迭代操作，如数组下标，根据内容选值等）</td>
</tr>
<tr>
<td>|</td>
<td>[,]</td>
<td>支持迭代器中做多选。</td>
</tr>
<tr>
<td>[]</td>
<td>?()</td>
<td>支持过滤操作.</td>
</tr>
<tr>
<td>n/a</td>
<td>()</td>
<td>支持表达式计算</td>
</tr>
<tr>
<td>()</td>
<td>n/a</td>
<td>分组，JsonPath不支持</td>
</tr>
</tbody></table>
<h2 id="6-示例"><a href="#6-示例" class="headerlink" title="6. 示例"></a>6. 示例</h2><p>我们以拉勾网城市JSON文件 <a href="http://www.lagou.com/lbs/getAllCitySearchLabels.json" target="_blank" rel="noopener">http://www.lagou.com/lbs/getAllCitySearchLabels.json</a> 为例，获取所有城市</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> jsonpath <span class="keyword">import</span> jsonpath</span><br><span class="line"><span class="keyword">from</span> fake_useragent <span class="keyword">import</span> *</span><br><span class="line"><span class="keyword">import</span> requests</span><br><span class="line"></span><br><span class="line"><span class="comment"># jsonpath的使用</span></span><br><span class="line"></span><br><span class="line">url = <span class="string">"https://www.lagou.com/lbs/getAllCitySearchLabels.json"</span></span><br><span class="line">headers = &#123;</span><br><span class="line">    <span class="string">"User-agent"</span>: UserAgent().chrome</span><br><span class="line">&#125;</span><br><span class="line">response = requests.get(url, headers=headers)</span><br><span class="line">names = jsonpath(response.json(), <span class="string">"$..name"</span>)</span><br><span class="line">codes = jsonpath(response.json(), <span class="string">"$..code"</span>)</span><br><span class="line"></span><br><span class="line"><span class="keyword">for</span> name, code <span class="keyword">in</span> zip(names, codes):</span><br><span class="line">    print(<span class="string">f"<span class="subst">&#123;name&#125;</span> : <span class="subst">&#123;code&#125;</span>"</span>)</span><br></pre></td></tr></table></figure>

<h2 id="7-注意事项"><a href="#7-注意事项" class="headerlink" title="7. 注意事项"></a>7. 注意事项</h2><ul>
<li><p>json.loads() 是把 Json格式字符串解码转换成Python对象，如果在json.loads的时候出错，要注意被解码的Json字符的编码。<br>如果传入的字符串的编码不是UTF-8的话，需要指定字符编码的参数 encoding</p>
 <figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">dataDict &#x3D; json.loads(jsonStrGBK);</span><br></pre></td></tr></table></figure>
</li>
<li><p>dataJsonStr是JSON字符串，假设其编码本身是非UTF-8的话而是GBK 的，那么上述代码会导致出错，改为对应的：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">dataDict &#x3D; json.loads(jsonStrGBK, encoding&#x3D;&quot;GBK&quot;);</span><br></pre></td></tr></table></figure>
</li>
<li><p>如果 dataJsonStr通过encoding指定了合适的编码，但是其中又包含了其他编码的字符，则需要先去将dataJsonStr转换为Unicode，然后再指定编码格式调用json.loads()</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">dataJsonStrUni &#x3D; dataJsonStr.decode(&quot;GB2312&quot;); </span><br><span class="line">dataDict &#x3D; json.loads(dataJsonStrUni, encoding&#x3D;&quot;GB2312&quot;);</span><br></pre></td></tr></table></figure>

</li>
</ul>
<h3 id="7-1-字符串编码转换"><a href="#7-1-字符串编码转换" class="headerlink" title="7.1. 字符串编码转换"></a>7.1. 字符串编码转换</h3><p>这是中国程序员最苦逼的地方，什么乱码之类的几乎都是由汉字引起的</p>
<p>其实编码问题很好搞定，只要记住一点：</p>
<p><strong>任何平台的任何编码 都能和 Unicode 互相转换</strong></p>
<p>UTF-8 与 GBK 互相转换，那就先把UTF-8转换成Unicode，再从Unicode转换成GBK，反之同理。</p>
<h1 id="十、数据解析-PyQuery"><a href="#十、数据解析-PyQuery" class="headerlink" title="十、数据解析-PyQuery"></a>十、数据解析-PyQuery</h1><h2 id="1-pyquery"><a href="#1-pyquery" class="headerlink" title="1. pyquery"></a>1. pyquery</h2><h3 id="1-1-介绍"><a href="#1-1-介绍" class="headerlink" title="1.1. 介绍"></a>1.1. 介绍</h3><blockquote>
<p>如果你对CSS选择器与Jquery有所了解，那么还有个解析库可以适合你–PyQuery</p>
</blockquote>
<blockquote>
<p><a href="https://pythonhosted.org/pyquery/" target="_blank" rel="noopener">官网</a><a href="https://pythonhosted.org/pyquery/" target="_blank" rel="noopener">https://pythonhosted.org/pyquery/</a></p>
</blockquote>
<h3 id="1-2-安装"><a href="#1-2-安装" class="headerlink" title="1.2. 安装"></a>1.2. 安装</h3><figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">pip install pyquery</span><br></pre></td></tr></table></figure>

<h3 id="1-3-使用方式"><a href="#1-3-使用方式" class="headerlink" title="1.3. 使用方式"></a>1.3. 使用方式</h3><h4 id="1-3-1-初始化方式"><a href="#1-3-1-初始化方式" class="headerlink" title="1.3.1. 初始化方式"></a>1.3.1. 初始化方式</h4><ul>
<li>字符串<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> pyquery <span class="keyword">import</span> PyQuery <span class="keyword">as</span> pq</span><br><span class="line">doc = pq(str)</span><br><span class="line">print(doc(tagname))</span><br></pre></td></tr></table></figure></li>
<li>url<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> pyquery <span class="keyword">import</span> PyQuery <span class="keyword">as</span> pq</span><br><span class="line">doc = pq(url=<span class="string">'http://www.baidu.com'</span>)</span><br><span class="line">print(doc(<span class="string">'title'</span>))</span><br></pre></td></tr></table></figure></li>
<li>文件<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> pyquery <span class="keyword">import</span> PyQuery <span class="keyword">as</span> pq</span><br><span class="line">doc = pq(filename=<span class="string">'demo.html'</span>)</span><br><span class="line">print(doc(tagname))</span><br></pre></td></tr></table></figure>

</li>
</ul>
<h4 id="1-3-2-选择节点"><a href="#1-3-2-选择节点" class="headerlink" title="1.3.2. 选择节点"></a>1.3.2. 选择节点</h4><ul>
<li>获取当前节点<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> pyquery <span class="keyword">import</span> PyQuery <span class="keyword">as</span> pq</span><br><span class="line">doc = pq(filename=<span class="string">'demo.html'</span>)</span><br><span class="line">doc(<span class="string">'#main #top'</span>)</span><br></pre></td></tr></table></figure></li>
<li>获取子节点<ul>
<li>在doc中一层层写出来</li>
<li>获取到父标签后使用children方法<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> pyquery <span class="keyword">import</span> PyQuery <span class="keyword">as</span> pq</span><br><span class="line">doc = pq(filename=<span class="string">'demo.html'</span>)</span><br><span class="line">doc(<span class="string">'#main #top'</span>).children()</span><br></pre></td></tr></table></figure></li>
</ul>
</li>
<li>获取父节点<ul>
<li>获取到当前节点后使用parent方法</li>
</ul>
</li>
<li>获取兄弟节点<ul>
<li>获取到当前节点后使用siblings方法<h4 id="1-3-3-获取属性"><a href="#1-3-3-获取属性" class="headerlink" title="1.3.3. 获取属性"></a>1.3.3. 获取属性</h4><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> pyquery <span class="keyword">import</span> PyQuery <span class="keyword">as</span> pq</span><br><span class="line">doc = pq(filename=<span class="string">'demo.html'</span>)</span><br><span class="line">a = doc(<span class="string">'#main #top'</span>)</span><br><span class="line">print(a.attrib[<span class="string">'href'</span>])</span><br></pre></td></tr></table></figure>
<h4 id="1-3-4-获取内容"><a href="#1-3-4-获取内容" class="headerlink" title="1.3.4. 获取内容"></a>1.3.4. 获取内容</h4><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> pyquery <span class="keyword">import</span> PyQuery <span class="keyword">as</span> pq</span><br><span class="line">doc = pq(filename=<span class="string">'demo.html'</span>)</span><br><span class="line">div = doc(<span class="string">'#main #top'</span>)</span><br><span class="line">print(div.html())</span><br><span class="line">print(div.text())</span><br></pre></td></tr></table></figure>

</li>
</ul>
</li>
</ul>
<h4 id="1-3-5-样例"><a href="#1-3-5-样例" class="headerlink" title="1.3.5. 样例"></a>1.3.5. 样例</h4><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span 
class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br><span class="line">68</span><br><span class="line">69</span><br><span class="line">70</span><br><span class="line">71</span><br><span class="line">72</span><br><span class="line">73</span><br><span class="line">74</span><br><span class="line">75</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> pyquery <span class="keyword">import</span> PyQuery <span class="keyword">as</span> pq</span><br><span class="line"><span class="comment"># 1.可加载一段HTML字符串，或一个HTML文件，或是一个url地址，</span></span><br><span class="line">d=pq(<span class="string">"&lt;html&gt;&lt;title&gt;hello&lt;/title&gt;&lt;/html&gt;"</span>)</span><br><span class="line">d=pq(filename=path_to_html_file)</span><br><span class="line">d=pq(url=<span class="string">'http://www.baidu.com'</span>)注意：此处url似乎必须写全</span><br><span class="line"> </span><br><span class="line"><span class="comment"># 2.html()和text() ——获取相应的HTML块或文本块，</span></span><br><span class="line">p=pq(<span class="string">"&lt;head&gt;&lt;title&gt;hello&lt;/title&gt;&lt;/head&gt;"</span>)</span><br><span class="line">p(<span class="string">'head'</span>).html()<span class="comment">#返回&lt;title&gt;hello&lt;/title&gt;</span></span><br><span class="line">p(<span class="string">'head'</span>).text()<span class="comment">#返回hello</span></span><br><span class="line"> </span><br><span class="line"><span class="comment"># 3.根据HTML标签来获取元素，</span></span><br><span class="line">d=pq(<span class="string">'&lt;div&gt;&lt;p&gt;test 1&lt;/p&gt;&lt;p&gt;test 2&lt;/p&gt;&lt;/div&gt;'</span>)</span><br><span class="line">d(<span class="string">'p'</span>)<span 
class="comment">#返回[&lt;p&gt;,&lt;p&gt;]</span></span><br><span class="line"><span class="keyword">print</span> d(<span class="string">'p'</span>)<span class="comment">#返回&lt;p&gt;test 1&lt;/p&gt;&lt;p&gt;test 2&lt;/p&gt;</span></span><br><span class="line"><span class="keyword">print</span> d(<span class="string">'p'</span>).html()<span class="comment">#返回test 1</span></span><br><span class="line"><span class="comment"># 注意：当获取到的元素不只一个时，html()方法只返回首个元素的相应内容块</span></span><br><span class="line"> </span><br><span class="line"><span class="comment"># 4.eq(index) ——根据给定的索引号得到指定元素。接上例，若想得到第二个p标签内的内容，则可以：</span></span><br><span class="line"><span class="keyword">print</span> d(<span class="string">'p'</span>).eq(<span class="number">1</span>).html() <span class="comment">#返回test 2</span></span><br><span class="line"> </span><br><span class="line"><span class="comment"># 5.filter() ——根据类名、id名得到指定元素，例：</span></span><br><span class="line">d=pq(<span class="string">"&lt;div&gt;&lt;p id='1'&gt;test 1&lt;/p&gt;&lt;p class='2'&gt;test 2&lt;/p&gt;&lt;/div&gt;"</span>)</span><br><span class="line">d(<span class="string">'p'</span>).filter(<span class="string">'#1'</span>) <span class="comment">#返回[&lt;p#1&gt;]</span></span><br><span class="line">d(<span class="string">'p'</span>).filter(<span class="string">'.2'</span>) <span class="comment">#返回[&lt;p.2&gt;]</span></span><br><span class="line"> </span><br><span class="line"><span class="comment"># 6.find() ——查找嵌套元素，例：</span></span><br><span class="line">d=pq(<span class="string">"&lt;div&gt;&lt;p id='1'&gt;test 1&lt;/p&gt;&lt;p class='2'&gt;test 2&lt;/p&gt;&lt;/div&gt;"</span>)</span><br><span class="line">d(<span class="string">'div'</span>).find(<span class="string">'p'</span>)<span class="comment">#返回[&lt;p#1&gt;, &lt;p.2&gt;]</span></span><br><span class="line">d(<span class="string">'div'</span>).find(<span class="string">'p'</span>).eq(<span class="number">0</span>)<span 
class="comment">#返回[&lt;p#1&gt;]</span></span><br><span class="line"> </span><br><span class="line"><span class="comment">#7.直接根据类名、id名获取元素，例：</span></span><br><span class="line">d=pq(<span class="string">"&lt;div&gt;&lt;p id='1'&gt;test 1&lt;/p&gt;&lt;p class='2'&gt;test 2&lt;/p&gt;&lt;/div&gt;"</span>)</span><br><span class="line">d(<span class="string">'#1'</span>).html()<span class="comment">#返回test 1</span></span><br><span class="line">d(<span class="string">'.2'</span>).html()<span class="comment">#返回test 2</span></span><br><span class="line"> </span><br><span class="line"><span class="comment"># 8.获取属性值，例：</span></span><br><span class="line">d=pq(<span class="string">"&lt;p id='my_id'&gt;&lt;a href='http://hello.com'&gt;hello&lt;/a&gt;&lt;/p&gt;"</span>)</span><br><span class="line">d(<span class="string">'a'</span>).attr(<span class="string">'href'</span>)<span class="comment">#返回http://hello.com</span></span><br><span class="line">d(<span class="string">'p'</span>).attr(<span class="string">'id'</span>)<span class="comment">#返回my_id</span></span><br><span class="line"> </span><br><span class="line"><span class="comment"># 9.修改属性值，例：</span></span><br><span class="line">d(<span class="string">'a'</span>).attr(<span class="string">'href'</span>, <span class="string">'http://baidu.com'</span>)把href属性修改为了baidu</span><br><span class="line"> </span><br><span class="line"><span class="comment"># 10.addClass(value) ——为元素添加类，例：</span></span><br><span class="line">d=pq(<span class="string">'&lt;div&gt;&lt;/div&gt;'</span>)</span><br><span class="line">d.addClass(<span class="string">'my_class'</span>)<span class="comment">#返回[&lt;div.my_class&gt;]</span></span><br><span class="line"> </span><br><span class="line"><span class="comment"># 11.hasClass(name) #返回判断元素是否包含给定的类，例：</span></span><br><span class="line">d=pq(<span class="string">"&lt;div class='my_class'&gt;&lt;/div&gt;"</span>)</span><br><span class="line">d.hasClass(<span class="string">'my_class'</span>)<span 
class="comment">#返回True</span></span><br><span class="line"> </span><br><span class="line"><span class="comment"># 12.children(selector=None) ——获取子元素，例：</span></span><br><span class="line">d=pq(<span class="string">"&lt;span&gt;&lt;p id='1'&gt;hello&lt;/p&gt;&lt;p id='2'&gt;world&lt;/p&gt;&lt;/span&gt;"</span>)</span><br><span class="line">d.children()<span class="comment">#返回[&lt;p#1&gt;, &lt;p#2&gt;]</span></span><br><span class="line">d.children(<span class="string">'#2'</span>)<span class="comment">#返回[&lt;p#2&gt;]</span></span><br><span class="line"> </span><br><span class="line"><span class="comment"># 13.parents(selector=None)——获取父元素，例：</span></span><br><span class="line">d=pq(<span class="string">"&lt;span&gt;&lt;p id='1'&gt;hello&lt;/p&gt;&lt;p id='2'&gt;world&lt;/p&gt;&lt;/span&gt;"</span>)</span><br><span class="line">d(<span class="string">'p'</span>).parents()<span class="comment">#返回[&lt;span&gt;]</span></span><br><span class="line">d(<span class="string">'#1'</span>).parents(<span class="string">'span'</span>)<span class="comment">#返回[&lt;span&gt;]</span></span><br><span class="line">d(<span class="string">'#1'</span>).parents(<span class="string">'p'</span>)<span class="comment">#返回[]</span></span><br><span class="line"> </span><br><span class="line"><span class="comment"># 14.clone() ——返回一个节点的拷贝</span></span><br><span class="line"> </span><br><span class="line"><span class="comment">#15.empty() ——移除节点内容</span></span><br><span class="line"> </span><br><span class="line"><span class="comment"># 16.nextAll(selector=None) ——返回后面全部的元素块，例：</span></span><br><span class="line">d=pq(<span class="string">"&lt;p id='1'&gt;hello&lt;/p&gt;&lt;p id='2'&gt;world&lt;/p&gt;&lt;img scr='' /&gt;"</span>)</span><br><span class="line">d(<span class="string">'p:first'</span>).nextAll()<span class="comment">#返回[&lt;p#2&gt;, &lt;img&gt;]</span></span><br><span class="line">d(<span class="string">'p:last'</span>).nextAll()<span 
class="comment">#返回[&lt;img&gt;]</span></span><br><span class="line"> </span><br><span class="line"><span class="comment"># 17.not_(selector) ——返回不匹配选择器的元素，例：</span></span><br><span class="line">d=pq(<span class="string">"&lt;p id='1'&gt;test 1&lt;/p&gt;&lt;p id='2'&gt;test 2&lt;/p&gt;"</span>)</span><br><span class="line">d(<span class="string">'p'</span>).not_(<span class="string">'#2'</span>)<span class="comment">#返回[&lt;p#1&gt;]</span></span><br></pre></td></tr></table></figure>

<h1 id="十一、爬虫之多线程"><a href="#十一、爬虫之多线程" class="headerlink" title="十一、爬虫之多线程"></a>十一、爬虫之多线程</h1><h2 id="1-引入"><a href="#1-引入" class="headerlink" title="1. 引入"></a>1. 引入</h2><blockquote>
<p>我们之前写的爬虫都是单个线程的？这怎么够？一旦一个地方卡到不动了，那不就永远等待下去了？为此我们可以使用多线程或者多进程来处理。</p>
</blockquote>
<blockquote>
<p>不建议你用这个，不过还是介绍一下；如果想了解可以看看下面的内容，不想浪费时间的可以直接跳过本节</p>
</blockquote>
<h2 id="2-如何使用"><a href="#2-如何使用" class="headerlink" title="2. 如何使用"></a>2. 如何使用</h2><blockquote>
<p>爬虫使用多线程来处理网络请求，使用线程来处理URL队列中的url，然后将url返回的结果保存在另一个队列中，其它线程再读取这个队列中的数据，然后写到文件中去</p>
</blockquote>
<h2 id="3-主要组成部分"><a href="#3-主要组成部分" class="headerlink" title="3. 主要组成部分"></a>3. 主要组成部分</h2><h3 id="3-1-URL队列和结果队列"><a href="#3-1-URL队列和结果队列" class="headerlink" title="3.1. URL队列和结果队列"></a>3.1. URL队列和结果队列</h3><p>将将要爬取的url放在一个队列中，这里使用标准库Queue。访问url后的结果保存在结果队列中</p>
<p>初始化一个URL队列</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> queue <span class="keyword">import</span> Queue</span><br><span class="line">urls_queue = Queue()</span><br><span class="line">out_queue = Queue()</span><br></pre></td></tr></table></figure>

<h3 id="3-2-请求线程"><a href="#3-2-请求线程" class="headerlink" title="3.2. 请求线程"></a>3.2. 请求线程</h3><p>使用多个线程，不停的取URL队列中的url，并进行处理：</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> threading</span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">ThreadCrawl</span><span class="params">(threading.Thread)</span>:</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">__init__</span><span class="params">(self, queue, out_queue)</span>:</span></span><br><span class="line">        threading.Thread.__init__(self)</span><br><span class="line">        self.queue = queue</span><br><span class="line">        self.out_queue = out_queue</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">run</span><span class="params">(self)</span>:</span></span><br><span class="line">        <span class="keyword">while</span> <span class="literal">True</span>:</span><br><span class="line">            item = self.queue.get()</span><br></pre></td></tr></table></figure>

<p>如果队列为空，线程就会被阻塞，直到队列不为空。处理队列中的一条数据后，就需要通知队列已经处理完该条数据</p>
<h3 id="3-3-处理线程"><a href="#3-3-处理线程" class="headerlink" title="3.3. 处理线程"></a>3.3. 处理线程</h3><p>处理结果队列中的数据，并保存到文件中。如果使用多个线程的话，必须要给文件加上锁</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">lock = threading.Lock()</span><br><span class="line">f = codecs.open(<span class="string">'out.txt'</span>, <span class="string">'w'</span>, <span class="string">'utf8'</span>)</span><br></pre></td></tr></table></figure>

<p>当线程需要写入文件的时候，可以这样处理：</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">with</span> lock:</span><br><span class="line">    f.write(something)</span><br></pre></td></tr></table></figure>


<h2 id="4-Queue模块中的常用方法"><a href="#4-Queue模块中的常用方法" class="headerlink" title="4. Queue模块中的常用方法:"></a>4. Queue模块中的常用方法:</h2><p>Python的Queue模块中提供了同步的、线程安全的队列类，包括FIFO（先入先出）队列Queue，LIFO（后入先出）队列LifoQueue，和优先级队列PriorityQueue。这些队列都实现了锁原语，能够在多线程中直接使用。可以使用队列来实现线程间的同步</p>
<ul>
<li>Queue.qsize() 返回队列的大小</li>
<li>Queue.empty() 如果队列为空，返回True,反之False</li>
<li>Queue.full() 如果队列满了，返回True,反之False</li>
<li>Queue.full 与 maxsize 大小对应</li>
<li>Queue.get([block[, timeout]])获取队列，timeout等待时间</li>
<li>Queue.get_nowait() 相当Queue.get(False)</li>
<li>Queue.put(item[, block[, timeout]]) 写入队列，timeout等待时间</li>
<li>Queue.put_nowait(item) 相当Queue.put(item, False)</li>
<li>Queue.task_done() 在完成一项工作之后，Queue.task_done()函数向任务已经完成的队列发送一个信号</li>
<li>Queue.join() 实际上意味着等到队列为空，再执行别的操作</li>
</ul>
<h2 id="5-糗事百科实例"><a href="#5-糗事百科实例" class="headerlink" title="5. 糗事百科实例"></a>5. 糗事百科实例</h2><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span 
class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> threading <span class="keyword">import</span> Thread</span><br><span class="line"><span class="keyword">from</span> queue <span class="keyword">import</span> Queue</span><br><span class="line"><span class="keyword">from</span> fake_useragent <span class="keyword">import</span> UserAgent</span><br><span class="line"><span class="keyword">import</span> requests</span><br><span class="line"><span class="keyword">from</span> lxml <span class="keyword">import</span> etree</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="comment"># 多线程的使用</span></span><br><span class="line"></span><br><span class="line"><span class="comment"># 爬虫线程类</span></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">CrawlerInfo</span><span class="params">(Thread)</span>:</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">__init__</span><span class="params">(self, url_queue, html_queue)</span>:</span></span><br><span class="line">        Thread.__init__(self)</span><br><span class="line">        self.url_queue = url_queue</span><br><span class="line">        self.html_queue = html_queue</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">run</span><span class="params">(self)</span>:</span></span><br><span class="line">        headers = &#123;</span><br><span class="line">            <span class="string">"User-Agent"</span>: UserAgent().chrome</span><br><span class="line">        
&#125;</span><br><span class="line">        <span class="keyword">while</span> <span class="keyword">not</span> self.url_queue.empty():</span><br><span class="line">            response = requests.get(self.url_queue.get(), headers=headers)</span><br><span class="line">            <span class="keyword">if</span> response.status_code == <span class="number">200</span>:</span><br><span class="line">                self.html_queue.put(response.text)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="comment"># 解析类</span></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">ParseInfo</span><span class="params">(Thread)</span>:</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">__init__</span><span class="params">(self, html_queue)</span>:</span></span><br><span class="line">        Thread.__init__(self)</span><br><span class="line">        self.html_queue = html_queue</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">run</span><span class="params">(self)</span>:</span></span><br><span class="line">        <span class="keyword">while</span> <span class="keyword">not</span> self.html_queue.empty():</span><br><span class="line">            html = etree.HTML(self.html_queue.get())</span><br><span class="line">            span_contents = html.xpath(<span class="string">"//div[@class='content']/span[1]"</span>)</span><br><span class="line">            <span class="keyword">with</span> open(<span class="string">"duanzi.txt"</span>, <span class="string">"a"</span>, encoding=<span class="string">"utf-8"</span>) <span class="keyword">as</span> f:</span><br><span class="line">                <span class="keyword">for</span> span <span class="keyword">in</span> span_contents:</span><br><span class="line">                    info = 
span.xpath(<span class="string">"string(.)"</span>)</span><br><span class="line">                    f.write(info)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">'__main__'</span>:</span><br><span class="line">    <span class="comment"># 存储url队列</span></span><br><span class="line">    url_queue = Queue()</span><br><span class="line">    <span class="comment"># 存储结果内容队列</span></span><br><span class="line">    html_queue = Queue()</span><br><span class="line">    base_url = <span class="string">"https://www.qiushibaike.com/text/page/"</span></span><br><span class="line">    <span class="keyword">for</span> num <span class="keyword">in</span> range(<span class="number">1</span>, <span class="number">16</span>):</span><br><span class="line">        new_url = <span class="string">f"<span class="subst">&#123;base_url&#125;</span><span class="subst">&#123;num&#125;</span>"</span></span><br><span class="line">        url_queue.put(new_url)</span><br><span class="line">    <span class="comment"># 创建一个爬虫类</span></span><br><span class="line">    crawler_list = []</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> range(<span class="number">0</span>, <span class="number">3</span>):</span><br><span class="line">        crawler = CrawlerInfo(url_queue, html_queue)</span><br><span class="line">        crawler_list.append(crawler)</span><br><span class="line">        crawler.start()</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> crawler_list:</span><br><span class="line">        i.join()</span><br><span class="line">    parse_list = []</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> range(<span class="number">0</span>, <span class="number">3</span>):</span><br><span class="line">        parse = 
ParseInfo(html_queue)</span><br><span class="line">        parse_list.append(parse)</span><br><span class="line">        parse.start()</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> parse_list:</span><br><span class="line">        i.join()</span><br></pre></td></tr></table></figure>

<h1 id="十二、Selenium工具"><a href="#十二、Selenium工具" class="headerlink" title="十二、Selenium工具"></a>十二、Selenium工具</h1><h2 id="1-Selenium"><a href="#1-Selenium" class="headerlink" title="1. Selenium"></a>1. Selenium</h2><p>Selenium是一个Web的自动化测试工具，最初是为网站自动化测试而开发的，类似我们玩游戏用的按键精灵，可以按指定的命令自动操作，不同的是Selenium 可以直接运行在浏览器上，它支持所有主流的浏览器（包括PhantomJS这些无界面的浏览器）。<br>Selenium 可以根据我们的指令，让浏览器自动加载页面，获取需要的数据，甚至页面截屏，或者判断网站上某些动作是否发生。<br>Selenium 自己不带浏览器，不支持浏览器的功能，它需要与第三方浏览器结合在一起才能使用。但是我们有时候需要让它内嵌在代码中运行，所以我们可以用一个叫 PhantomJS 的工具代替真实的浏览器。</p>
<p>PyPI网站下载 Selenium库 <a href="https://pypi.python.org/simple/selenium" target="_blank" rel="noopener">https://pypi.python.org/simple/selenium</a> ，也可以用 第三方管理器 </p>
<p>pip用命令安装：<code>pip install selenium</code></p>
<p>Selenium 官方参考文档：<a href="http://selenium-python.readthedocs.io/index.html" target="_blank" rel="noopener">http://selenium-python.readthedocs.io/index.html</a></p>
<h2 id="2-PhantomJS"><a href="#2-PhantomJS" class="headerlink" title="2. PhantomJS"></a>2. PhantomJS</h2><p>PhantomJS 是一个基于Webkit的“无界面”(headless)浏览器，它会把网站加载到内存并执行页面上的 JavaScript，因为不会展示图形界面，所以运行起来比完整的浏览器要高效<br>如果我们把 Selenium 和 PhantomJS 结合在一起，就可以运行一个非常强大的网络爬虫了，这个爬虫可以处理 JavaScript、Cookie、headers，以及任何我们真实用户需要做的事情</p>
<h3 id="2-1-注意：PhantomJS（python2）"><a href="#2-1-注意：PhantomJS（python2）" class="headerlink" title="2.1. 注意：PhantomJS（python2）"></a>2.1. 注意：PhantomJS（python2）</h3><p>只能从它的官方网站<a href="http://phantomjs.org/download.html" target="_blank" rel="noopener">http://phantomjs.org/download.html</a> 下载。 因为 PhantomJS 是一个功能完善(虽然无界面)的浏览器而非一个 Python 库，所以它不需要像 Python 的其他库一样安装，但我们可以通过Selenium调用PhantomJS来直接使用。<br>PhantomJS 官方参考文档：<a href="http://phantomjs.org/documentation" target="_blank" rel="noopener">http://phantomjs.org/documentation</a></p>
<h3 id="2-2-python3使用的浏览器"><a href="#2-2-python3使用的浏览器" class="headerlink" title="2.2. python3使用的浏览器"></a>2.2. python3使用的浏览器</h3><p>随着Python3的普及，Selenium3也跟上了行程。而Selenium3最大的变化是去掉了Selenium RC，另外就是Webdriver从各自浏览器中脱离，必须单独下载</p>
<h4 id="2-2-1-安装Firefox-geckodriver"><a href="#2-2-1-安装Firefox-geckodriver" class="headerlink" title="2.2.1. 安装Firefox geckodriver"></a>2.2.1. 安装Firefox geckodriver</h4><p>安装firefox最新版本，添加Firefox可执行程序到系统环境变量。记得关闭firefox的自动更新</p>
<p>geckodriver下载地址：<a href="https://github.com/mozilla/geckodriver/releases" target="_blank" rel="noopener">https://github.com/mozilla/geckodriver/releases</a></p>
<p>将下载的geckodriver.exe 放到path路径下 D:\Python\Python36\</p>
<h4 id="2-2-2-安装ChromeDriver"><a href="#2-2-2-安装ChromeDriver" class="headerlink" title="2.2.2. 安装ChromeDriver"></a>2.2.2. 安装ChromeDriver</h4><p><a href="http://chromedriver.storage.googleapis.com/index.html" target="_blank" rel="noopener">http://chromedriver.storage.googleapis.com/index.html</a></p>
<blockquote>
<p>注意版本号要对应</p>
</blockquote>
<blockquote>
<p>下载下来的文件解压到<code>Python36\Scripts</code></p>
</blockquote>
<blockquote>
<p>chrome59版本以后可以变成无头的浏览器，加以下参数</p>
</blockquote>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">options &#x3D; webdriver.ChromeOptions()</span><br><span class="line">options.add_argument(&#39;--headless&#39;)</span><br><span class="line">chrome &#x3D; webdriver.Chrome(chrome_options&#x3D;options)</span><br><span class="line">chrome.get(&quot;http:&#x2F;&#x2F;www.baidu.com&quot;)</span><br></pre></td></tr></table></figure>

<h2 id="3-使用方式"><a href="#3-使用方式" class="headerlink" title="3. 使用方式"></a>3. 使用方式</h2><p>Selenium 库里有个叫 WebDriver 的 API。WebDriver 有点儿像可以加载网站的浏览器，但是它也可以像 BeautifulSoup 或者其他 Selector 对象一样用来查找页面元素，与页面上的元素进行交互 (发送文本、点击等)，以及执行其他动作来运行网络爬虫</p>
<h3 id="3-1-简单例子"><a href="#3-1-简单例子" class="headerlink" title="3.1. 简单例子"></a>3.1. 简单例子</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span 
class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br><span class="line">68</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment"># 导入 webdriver</span></span><br><span class="line"><span class="keyword">from</span> selenium <span class="keyword">import</span> webdriver</span><br><span class="line"></span><br><span class="line"><span class="comment"># 要想调用键盘按键操作需要引入keys包</span></span><br><span class="line"><span class="keyword">from</span> selenium.webdriver.common.keys <span class="keyword">import</span> Keys</span><br><span class="line"></span><br><span class="line"><span class="comment"># 调用环境变量指定的PhantomJS浏览器创建浏览器对象</span></span><br><span class="line">driver = webdriver.PhantomJS()</span><br><span class="line"></span><br><span class="line"><span class="comment"># 如果没有在环境变量指定PhantomJS位置</span></span><br><span class="line"><span class="comment"># driver = webdriver.PhantomJS(executable_path="./phantomjs"))</span></span><br><span class="line"></span><br><span class="line"><span class="comment"># get方法会一直等到页面被完全加载，然后才会继续程序，通常测试会在这里选择 time.sleep(2)</span></span><br><span class="line">driver.get(<span class="string">"http://www.baidu.com/"</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 获取页面名为 wrapper的id标签的文本内容</span></span><br><span class="line">data = driver.find_element_by_id(<span class="string">"wrapper"</span>).text</span><br><span class="line"></span><br><span class="line"><span class="comment"># 打印数据内容</span></span><br><span class="line">print(data)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 打印页面标题 "百度一下，你就知道"</span></span><br><span class="line"><span 
class="keyword">print</span>(driver.title)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 生成当前页面快照并保存</span></span><br><span class="line">driver.save_screenshot(<span class="string">"baidu.png"</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># id="kw"是百度搜索输入框，输入字符串"尚学堂"</span></span><br><span class="line">driver.find_element_by_id(<span class="string">"kw"</span>).send_keys(<span class="string">"尚学堂"</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># id="su"是百度搜索按钮，click() 是模拟点击</span></span><br><span class="line">driver.find_element_by_id(<span class="string">"su"</span>).click()</span><br><span class="line"></span><br><span class="line"><span class="comment"># 获取新的页面快照</span></span><br><span class="line">driver.save_screenshot(<span class="string">"尚学.png"</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 打印网页渲染后的源代码</span></span><br><span class="line">print(driver.page_source)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 获取当前页面Cookie</span></span><br><span class="line">print(driver.get_cookies())</span><br><span class="line"></span><br><span class="line"><span class="comment"># ctrl+a 全选输入框内容</span></span><br><span class="line">driver.find_element_by_id(<span class="string">"kw"</span>).send_keys(Keys.CONTROL,<span class="string">'a'</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># ctrl+x 剪切输入框内容</span></span><br><span class="line">driver.find_element_by_id(<span class="string">"kw"</span>).send_keys(Keys.CONTROL,<span class="string">'x'</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 输入框重新输入内容</span></span><br><span class="line">driver.find_element_by_id(<span class="string">"kw"</span>).send_keys(<span class="string">"python爬虫"</span>)</span><br><span class="line"></span><br><span
class="line"><span class="comment"># 模拟Enter回车键</span></span><br><span class="line">driver.find_element_by_id(<span class="string">"su"</span>).send_keys(Keys.RETURN)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 清除输入框内容</span></span><br><span class="line">driver.find_element_by_id(<span class="string">"kw"</span>).clear()</span><br><span class="line"></span><br><span class="line"><span class="comment"># 生成新的页面快照</span></span><br><span class="line">driver.save_screenshot(<span class="string">"python爬虫.png"</span>)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 获取当前url</span></span><br><span class="line">print(driver.current_url)</span><br><span class="line"></span><br><span class="line"><span class="comment"># 关闭当前页面，如果只有一个页面，会关闭浏览器</span></span><br><span class="line"><span class="comment"># driver.close()</span></span><br><span class="line"></span><br><span class="line"><span class="comment"># 关闭浏览器</span></span><br><span class="line">driver.quit()</span><br></pre></td></tr></table></figure>

<h2 id="4-页面操作"><a href="#4-页面操作" class="headerlink" title="4. 页面操作"></a>4. 页面操作</h2><h3 id="4-1-页面交互"><a href="#4-1-页面交互" class="headerlink" title="4.1. 页面交互"></a>4.1. 页面交互</h3><blockquote>
<p>仅仅抓取页面没有多大卵用，我们真正要做的是做到和页面交互，比如点击，输入等等。那么前提就是要找到页面中的元素。WebDriver提供了各种方法来寻找元素。例如下面有一个表单输入框</p>
</blockquote>
<figure class="highlight html"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">input</span> <span class="attr">type</span>=<span class="string">"text"</span> <span class="attr">name</span>=<span class="string">"passwd"</span> <span class="attr">id</span>=<span class="string">"passwd-id"</span> /&gt;</span></span><br></pre></td></tr></table></figure>

<h4 id="4-1-1-获取"><a href="#4-1-1-获取" class="headerlink" title="4.1.1. 获取"></a>4.1.1. <strong>获取</strong></h4><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">element = driver.find_element_by_id(<span class="string">"passwd-id"</span>)</span><br><span class="line">element = driver.find_element_by_name(<span class="string">"passwd"</span>)</span><br><span class="line">element = driver.find_elements_by_tag_name(<span class="string">"input"</span>)</span><br><span class="line">element = driver.find_element_by_xpath(<span class="string">"//input[@id='passwd-id']"</span>)</span><br></pre></td></tr></table></figure>

<p><strong>注意：</strong></p>
<ul>
<li><p>文本必须完全匹配才可以，所以这并不是一个很好的匹配方式</p>
</li>
<li><p>在用 xpath 的时候还需要注意的如果有多个元素匹配了 xpath，它只会返回第一个匹配的元素。如果没有找到，那么会抛出 NoSuchElementException 的异常</p>
</li>
</ul>
<h4 id="4-1-2-输入内容"><a href="#4-1-2-输入内容" class="headerlink" title="4.1.2. 输入内容"></a>4.1.2. 输入内容</h4><figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">element.send_keys(&quot;some text&quot;)</span><br></pre></td></tr></table></figure>

<h4 id="4-1-3-模拟点击某个按键"><a href="#4-1-3-模拟点击某个按键" class="headerlink" title="4.1.3. 模拟点击某个按键"></a>4.1.3. 模拟点击某个按键</h4><figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">element.send_keys(&quot;and some&quot;, Keys.ARROW_DOWN)</span><br></pre></td></tr></table></figure>

<h4 id="4-1-4-清空文本"><a href="#4-1-4-清空文本" class="headerlink" title="4.1.4. 清空文本"></a>4.1.4. 清空文本</h4><figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">element.clear()</span><br></pre></td></tr></table></figure>

<h4 id="4-1-5-元素拖拽"><a href="#4-1-5-元素拖拽" class="headerlink" title="4.1.5. 元素拖拽"></a>4.1.5. 元素拖拽</h4><blockquote>
<p>要完成元素的拖拽，首先你需要指定被拖动的元素和拖动目标元素，然后利用 ActionChains 类来实现</p>
</blockquote>
<p>以下实现元素从 source 拖动到 target 的操作</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">element &#x3D; driver.find_element_by_name(&quot;source&quot;)</span><br><span class="line">target &#x3D; driver.find_element_by_name(&quot;target&quot;)</span><br><span class="line"> </span><br><span class="line">from selenium.webdriver import ActionChains</span><br><span class="line">action_chains &#x3D; ActionChains(driver)</span><br><span class="line">action_chains.drag_and_drop(element, target).perform()</span><br></pre></td></tr></table></figure>

<h4 id="4-1-6-历史记录"><a href="#4-1-6-历史记录" class="headerlink" title="4.1.6. 历史记录"></a>4.1.6. 历史记录</h4><blockquote>
<p>操作页面的前进和后退功能</p>
</blockquote>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">driver.forward()</span><br><span class="line">driver.back()</span><br></pre></td></tr></table></figure>

<h4 id="4-1-7-处理滚动条"><a href="#4-1-7-处理滚动条" class="headerlink" title="4.1.7. 处理滚动条"></a>4.1.7. 处理滚动条</h4><blockquote>
<p>selenium并不是万能的，有时候页面上操作无法实现的，这时候就需要借助JS来完成了</p>
</blockquote>
<p>　　当页面上的元素超过一屏后，想操作屏幕下方的元素，是不能直接定位到的，会报元素不可见的错误。这时候需要借助滚动条来拖动屏幕，使被操作的元素显示在当前的屏幕上。滚动条是无法直接用定位工具来定位的。selenium里面也没有直接的方法去控制滚动条，这时候只能借助JS了，还好selenium提供了一个操作js的方法:execute_script()，可以直接执行js的脚本</p>
<h5 id="一-控制滚动条高度"><a href="#一-控制滚动条高度" class="headerlink" title="一. 控制滚动条高度"></a>一. 控制滚动条高度</h5><p>滚动条回到顶部：</p>
<pre><code>js=&quot;var q=document.getElementById(&apos;id&apos;).scrollTop=0&quot;
driver.execute_script(js)</code></pre><p>滚动条拉到底部：</p>
<pre><code>js=&quot;var q=document.documentElement.scrollTop=10000&quot;
driver.execute_script(js)</code></pre><p>可以修改scrollTop 的值，来定位右侧滚动条的位置，0是最上面，10000是最底部</p>
<p>以上方法在Firefox和IE浏览器上是可以的，但是用Chrome浏览器，发现不管用。Chrome浏览器解决办法：</p>
<pre><code>js = &quot;var q=document.body.scrollTop=0&quot;
driver.execute_script(js)</code></pre><h5 id="二-横向滚动条"><a href="#二-横向滚动条" class="headerlink" title="二.横向滚动条"></a>二.横向滚动条</h5><p><strong>2.1 有时候浏览器页面需要左右滚动（一般屏幕最大化后，左右滚动的情况已经很少见了)</strong></p>
<p><strong>2.2 通过坐标控制横向和纵向滚动条scrollTo(x, y)</strong></p>
<pre><code>js = &quot;window.scrollTo(100,400)&quot;
driver.execute_script(js)</code></pre><h5 id="三-元素聚焦"><a href="#三-元素聚焦" class="headerlink" title="三.元素聚焦"></a>三.元素聚焦</h5><p>虽然用上面的方法可以解决拖动滚动条的位置问题，但是有时候无法确定我需要操作的元素在什么位置，有可能每次打开的页面不一样，元素所在的位置也不一样，怎么办呢？这个时候我们可以先让页面直接跳到元素出现的位置，然后就可以操作了</p>
<p>同样需要借助JS去实现。 具体如下：</p>
<pre><code>target = driver.find_element_by_xxxx()
driver.execute_script(&quot;arguments[0].scrollIntoView();&quot;, target)</code></pre><h5 id="四-参考代码"><a href="#四-参考代码" class="headerlink" title="四. 参考代码"></a>四. 参考代码</h5><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> selenium <span class="keyword">import</span> webdriver</span><br><span class="line"><span class="keyword">from</span> lxml <span class="keyword">import</span> etree</span><br><span class="line"><span class="keyword">import</span> time</span><br><span class="line"></span><br><span class="line">url = <span class="string">"https://search.jd.com/Search?keyword=%E7%AC%94%E8%AE%B0%E6%9C%AC&amp;enc=utf-8&amp;wq=%E7%AC%94%E8%AE%B0%E6%9C%AC&amp;pvid=845d019c94f6476ca5c4ffc24df6865a"</span></span><br><span class="line"><span class="comment"># 加载浏览器</span></span><br><span class="line">wd = webdriver.Firefox()</span><br><span class="line"><span class="comment"># 发送请求</span></span><br><span class="line">wd.get(url)</span><br><span class="line"><span class="comment"># 要执行的js</span></span><br><span class="line">js = <span class="string">"var q = 
document.documentElement.scrollTop=10000"</span></span><br><span class="line"><span class="comment"># 执行js</span></span><br><span class="line">wd.execute_script(js)</span><br><span class="line"></span><br><span class="line">time.sleep(<span class="number">3</span>)</span><br><span class="line"><span class="comment"># 解析数据</span></span><br><span class="line">e = etree.HTML(wd.page_source)</span><br><span class="line"><span class="comment"># 提取数据的xpath</span></span><br><span class="line">price_xpath = <span class="string">'//ul[@class="gl-warp clearfix"]//div[@class="p-price"]/strong/i/text()'</span></span><br><span class="line"><span class="comment"># 提取数据的</span></span><br><span class="line">infos = e.xpath(price_xpath)</span><br><span class="line"></span><br><span class="line">print(len(infos))</span><br><span class="line"><span class="comment"># 关闭浏览器</span></span><br><span class="line">wd.quit()</span><br></pre></td></tr></table></figure>

<h2 id="5-API"><a href="#5-API" class="headerlink" title="5. API"></a>5. API</h2><h3 id="5-1-元素选取"><a href="#5-1-元素选取" class="headerlink" title="5.1. 元素选取"></a>5.1. 元素选取</h3><h4 id="5-1-1-单个元素选取"><a href="#5-1-1-单个元素选取" class="headerlink" title="5.1.1. 单个元素选取"></a>5.1.1. 单个元素选取</h4><ul>
<li>find_element_by_id</li>
<li>find_element_by_name</li>
<li>find_element_by_xpath</li>
<li>find_element_by_link_text</li>
<li>find_element_by_partial_link_text</li>
<li>find_element_by_tag_name</li>
<li>find_element_by_class_name</li>
<li>find_element_by_css_selector</li>
</ul>
<h4 id="5-1-2-多个元素选取"><a href="#5-1-2-多个元素选取" class="headerlink" title="5.1.2. 多个元素选取"></a>5.1.2. 多个元素选取</h4><ul>
<li>find_elements_by_name</li>
<li>find_elements_by_xpath</li>
<li>find_elements_by_link_text</li>
<li>find_elements_by_partial_link_text</li>
<li>find_elements_by_tag_name</li>
<li>find_elements_by_class_name</li>
<li>find_elements_by_css_selector</li>
</ul>
<h4 id="5-1-3-利用-By-类来确定哪种选择方式"><a href="#5-1-3-利用-By-类来确定哪种选择方式" class="headerlink" title="5.1.3. 利用 By 类来确定哪种选择方式"></a>5.1.3. 利用 By 类来确定哪种选择方式</h4><figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">from selenium.webdriver.common.by import By</span><br><span class="line"> </span><br><span class="line">driver.find_element(By.XPATH, &#39;&#x2F;&#x2F;button[text()&#x3D;&quot;Some text&quot;]&#39;)</span><br><span class="line">driver.find_elements(By.XPATH, &#39;&#x2F;&#x2F;button&#39;)</span><br></pre></td></tr></table></figure>

<p>By 类的一些属性如下</p>
<ul>
<li>ID = “id”</li>
<li>XPATH = “xpath”</li>
<li>LINK_TEXT = “link text”</li>
<li>PARTIAL_LINK_TEXT = “partial link text”</li>
<li>NAME = “name”</li>
<li>TAG_NAME = “tag name”</li>
<li>CLASS_NAME = “class name”</li>
<li>CSS_SELECTOR = “css selector”</li>
</ul>
<h2 id="6-等待"><a href="#6-等待" class="headerlink" title="6. 等待"></a>6. 等待</h2><h3 id="6-1-隐式等待"><a href="#6-1-隐式等待" class="headerlink" title="6.1. 隐式等待"></a>6.1. 隐式等待</h3><blockquote>
<p>到了一定的时间发现元素还没有加载，则继续等待我们指定的时间，如果超过了我们指定的时间还没有加载就会抛出异常，如果没有需要等待的时候就已经加载完毕就会立即执行</p>
</blockquote>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line">from selenium import webdriver</span><br><span class="line">url &#x3D; &#39;https:&#x2F;&#x2F;www.guazi.com&#x2F;nj&#x2F;buy&#x2F;&#39;</span><br><span class="line">driver &#x3D; webdriver.Chrome()</span><br><span class="line">driver.get(url)</span><br><span class="line">driver.implicitly_wait(100)</span><br><span class="line">print(driver.find_element_by_class_name(&#39;next&#39;))</span><br><span class="line">print(driver.page_source)</span><br></pre></td></tr></table></figure>

<h3 id="6-2-显示等待"><a href="#6-2-显示等待" class="headerlink" title="6.2. 显式等待"></a>6.2. 显式等待</h3><blockquote>
<p>指定一个等待条件，并且指定一个最长等待时间，会在这个时间内进行判断是否满足等待条件，如果成立就会立即返回，如果不成立，就会一直等待，直到等待你指定的最长等待时间，如果还是不满足，就会抛出异常，如果满足了就会正常返回</p>
</blockquote>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">url &#x3D; &#39;https:&#x2F;&#x2F;www.guazi.com&#x2F;nj&#x2F;buy&#x2F;&#39;</span><br><span class="line">driver &#x3D; webdriver.Chrome()</span><br><span class="line">driver.get(url)</span><br><span class="line">wait &#x3D; WebDriverWait(driver,10)</span><br><span class="line">wait.until(EC.presence_of_element_located((By.CLASS_NAME, &#39;next&#39;)))</span><br><span class="line">print(driver.page_source)</span><br></pre></td></tr></table></figure>

<ul>
<li>presence_of_element_located   <ul>
<li>元素加载出，传入定位元组，如(By.ID, ‘p’)</li>
</ul>
</li>
<li>presence_of_all_elements_located <ul>
<li>所有元素加载出</li>
</ul>
</li>
<li>element_to_be_clickable<ul>
<li>元素可点击</li>
</ul>
</li>
<li>element_located_to_be_selected<ul>
<li>元素可选择，传入定位元组 </li>
</ul>
</li>
</ul>
<h3 id="6-3-强制等待"><a href="#6-3-强制等待" class="headerlink" title="6.3. 强制等待"></a>6.3. 强制等待</h3><blockquote>
<p>使用 time.sleep</p>
</blockquote>
<h1 id="十三、Scrapy-框架"><a href="#十三、Scrapy-框架" class="headerlink" title="十三、Scrapy 框架"></a>十三、Scrapy 框架</h1><h2 id="1-Scrapy-框架介绍"><a href="#1-Scrapy-框架介绍" class="headerlink" title="1. Scrapy 框架介绍"></a>1. Scrapy 框架介绍</h2><ul>
<li><p>Scrapy是Python开发的一个快速,高层次的屏幕抓取和web抓取框架，用于抓取web站点并从页面中提取结构化的数据。Scrapy = Scrach+Python</p>
</li>
<li><p>Scrapy用途广泛，可以用于数据挖掘、监测和自动化测试、信息处理和历史档案等大量应用范围内抽取结构化数据的应用程序框架，广泛用于工业</p>
</li>
<li><p>Scrapy 使用Twisted 这个异步网络库来处理网络通讯，架构清晰，并且包含了各种中间件接口，可以灵活的完成各种需求。Scrapy是由Twisted写的一个受欢迎的Python事件驱动网络框架，它使用的是非堵塞的异步处理</p>
</li>
</ul>
<h3 id="1-1-为什么要使用Scrapy？"><a href="#1-1-为什么要使用Scrapy？" class="headerlink" title="1.1. 为什么要使用Scrapy？"></a>1.1. 为什么要使用Scrapy？</h3><ul>
<li>它更容易构建和大规模的抓取项目</li>
<li>它内置的机制被称为选择器，用于从网站（网页）上提取数据</li>
<li>它异步处理请求，速度十分快</li>
<li>它可以使用自动调节机制自动调整爬行速度</li>
<li>确保开发人员可访问性</li>
</ul>
<h3 id="1-2-Scrapy的特点"><a href="#1-2-Scrapy的特点" class="headerlink" title="1.2. Scrapy的特点"></a>1.2. Scrapy的特点</h3><ul>
<li>Scrapy是一个开源和免费使用的网络爬虫框架</li>
<li>Scrapy生成格式导出如：JSON，CSV和XML</li>
<li>Scrapy内置支持从源代码，使用XPath或CSS表达式的选择器来提取数据</li>
<li>Scrapy基于爬虫，允许以自动方式从网页中提取数据</li>
</ul>
<h3 id="1-3-Scrapy的优点"><a href="#1-3-Scrapy的优点" class="headerlink" title="1.3. Scrapy的优点"></a>1.3. Scrapy的优点</h3><ul>
<li>Scrapy很容易扩展，快速和功能强大；</li>
<li>这是一个跨平台应用程序框架（在Windows，Linux，Mac OS和BSD）。</li>
<li>Scrapy请求调度和异步处理；</li>
<li>Scrapy附带了一个名为Scrapyd的内置服务，它允许使用JSON Web服务上传项目和控制蜘蛛。</li>
<li>也能够抓取任何网站，即使该网站不具有原始数据访问API；</li>
</ul>
<h3 id="1-4-整体架构大致如下"><a href="#1-4-整体架构大致如下" class="headerlink" title="1.4. 整体架构大致如下:"></a>1.4. 整体架构大致如下:</h3><p><img src="https://images2015.cnblogs.com/blog/918906/201608/918906-20160830220006980-1873919293.png" alt="image"></p>
<blockquote>
<p>最简单的单个网页爬取流程是spiders &gt; scheduler &gt; downloader &gt; spiders &gt; item pipeline</p>
</blockquote>
<h3 id="1-5-Scrapy运行流程大概如下："><a href="#1-5-Scrapy运行流程大概如下：" class="headerlink" title="1.5. Scrapy运行流程大概如下："></a>1.5. Scrapy运行流程大概如下：</h3><ol>
<li>引擎从调度器中取出一个链接(URL)用于接下来的抓取</li>
<li>引擎把URL封装成一个请求(Request)传给下载器</li>
<li>下载器把资源下载下来，并封装成应答包(Response)</li>
<li>爬虫解析Response</li>
<li>解析出实体（Item）,则交给实体管道进行进一步的处理</li>
<li>解析出的是链接（URL）,则把URL交给调度器等待抓取</li>
</ol>
<h3 id="1-6-Scrapy主要包括了以下组件："><a href="#1-6-Scrapy主要包括了以下组件：" class="headerlink" title="1.6. Scrapy主要包括了以下组件："></a>1.6. Scrapy主要包括了以下组件：</h3><ul>
<li>引擎(Scrapy)<ul>
<li>用来处理整个系统的数据流处理, 触发事务(框架核心)</li>
</ul>
</li>
<li>调度器(Scheduler)<ul>
<li>用来接受引擎发过来的请求, 压入队列中, 并在引擎再次请求的时候返回. 可以想像成一个URL（抓取网页的网址或者说是链接）的优先队列, 由它来决定下一个要抓取的网址是什么, 同时去除重复的网址</li>
</ul>
</li>
<li>下载器(Downloader)<ul>
<li>用于下载网页内容, 并将网页内容返回给蜘蛛(Scrapy下载器是建立在twisted这个高效的异步模型上的)</li>
</ul>
</li>
<li>爬虫(Spiders)<ul>
<li>爬虫是主要干活的, 用于从特定的网页中提取自己需要的信息, 即所谓的实体(Item)。用户也可以从中提取出链接,让Scrapy继续抓取下一个页面</li>
</ul>
</li>
<li>项目管道(Pipeline)<ul>
<li>负责处理爬虫从网页中抽取的实体，主要的功能是持久化实体、验证实体的有效性、清除不需要的信息。当页面被爬虫解析后，将被发送到项目管道，并经过几个特定的次序处理数据。</li>
</ul>
</li>
<li>下载器中间件(Downloader Middlewares)<ul>
<li>位于Scrapy引擎和下载器之间的框架，主要是处理Scrapy引擎与下载器之间的请求及响应</li>
</ul>
</li>
<li>爬虫中间件(Spider Middlewares)<ul>
<li>介于Scrapy引擎和爬虫之间的框架，主要工作是处理蜘蛛的响应输入和请求输出</li>
</ul>
</li>
<li>调度中间件(Scheduler Middewares)<ul>
<li>介于Scrapy引擎和调度之间的中间件，从Scrapy引擎发送到调度的请求和响应</li>
</ul>
</li>
</ul>
<h2 id="2-安装-2"><a href="#2-安装-2" class="headerlink" title="2. 安装"></a>2. 安装</h2><p>1. 利用 pip install 命令安装 pywin32、pyopenssl，这两个包可在 cmd 中安装成功</p>
<blockquote>
<p>pip install pywin32<br>pip install pyopenssl</p>
</blockquote>
<p>2.scrapy和twisted的whl包</p>
<p><a href="https://www.lfd.uci.edu/~gohlke/pythonlibs/" target="_blank" rel="noopener">https://www.lfd.uci.edu/~gohlke/pythonlibs/</a></p>
<p>twisted的whl包<a href="https://www.lfd.uci.edu/~gohlke/pythonlibs/#twisted" target="_blank" rel="noopener">https://www.lfd.uci.edu/~gohlke/pythonlibs/#twisted</a></p>
<p>scrapy的whl包<a href="https://www.lfd.uci.edu/~gohlke/pythonlibs/#scrapy" target="_blank" rel="noopener">https://www.lfd.uci.edu/~gohlke/pythonlibs/#scrapy</a></p>
<p>将两个包按对应python和32位还是64位的版本号进行下载</p>
<p>下载过两个包后对下载的文件进行 “在文件夹显示”，利用cmd命令中的<br>cd 文件所在路径 进入当前位置</p>
<p>先安装twisted，再安装scrapy</p>
<blockquote>
<p>pip install xxx.whl</p>
</blockquote>
<h2 id="3-使用"><a href="#3-使用" class="headerlink" title="3. 使用"></a>3. 使用</h2><h3 id="3-1-创建项目"><a href="#3-1-创建项目" class="headerlink" title="3.1. 创建项目"></a>3.1. 创建项目</h3><p>运行命令:<br><code>scrapy startproject myfirst（your_project_name）</code></p>
<p><img src="/blog/assets/img/pachong1.png" alt="Scrapy 项目目录结构"></p>
<p>文件说明：</p>
<table>
<thead>
<tr>
<th>名称</th>
<th>作用</th>
</tr>
</thead>
<tbody><tr>
<td>scrapy.cfg</td>
<td>项目的配置信息，主要为Scrapy命令行工具提供一个基础的配置信息。（真正爬虫相关的配置信息在settings.py文件中）</td>
</tr>
<tr>
<td>items.py</td>
<td>设置数据存储模板，用于结构化数据，如：Django的Model</td>
</tr>
<tr>
<td>pipelines</td>
<td>数据处理行为，如：一般结构化的数据持久化</td>
</tr>
<tr>
<td>settings.py</td>
<td>配置文件，如：递归的层数、并发数，延迟下载等</td>
</tr>
<tr>
<td>spiders</td>
<td>爬虫目录，如：创建文件，编写爬虫规则</td>
</tr>
</tbody></table>
<p>注意：一般创建爬虫文件时，以网站域名命名</p>
<h3 id="3-2-编写-spdier"><a href="#3-2-编写-spdier" class="headerlink" title="3.2. 编写 spider"></a>3.2. 编写 spider</h3><p>在spiders目录中新建 baidu_spider.py 文件</p>
<h4 id="3-2-1-注意"><a href="#3-2-1-注意" class="headerlink" title="3.2.1. 注意"></a>3.2.1. 注意</h4><ol>
<li>爬虫文件需要定义一个类，并继承scrapy.spiders.Spider</li>
<li>必须定义name，即爬虫名，如果没有name，会报错。因为源码中是这样定义的</li>
</ol>
<h4 id="3-2-2-编写内容"><a href="#3-2-2-编写内容" class="headerlink" title="3.2.2. 编写内容"></a>3.2.2. 编写内容</h4><blockquote>
<p>在这里可以告诉 scrapy 。要如何查找确切数据，这里必须要定义一些属性</p>
</blockquote>
<ul>
<li>name: 它定义了蜘蛛的唯一名称</li>
<li>allowed_domains: 它包含了蜘蛛抓取的基本URL；</li>
<li>start_urls: 蜘蛛开始爬行的URL列表；</li>
<li>parse(): 这是提取并解析刮下数据的方法；</li>
</ul>
<p>下面的代码演示了蜘蛛代码的样子：</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> scrapy</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">DoubanSpider</span><span class="params">(scrapy.Spider)</span>:</span></span><br><span class="line">    name = <span class="string">'douban'</span></span><br><span class="line">    allwed_url = <span class="string">'douban.com'</span></span><br><span class="line">    start_urls = [</span><br><span class="line">        <span class="string">'https://movie.douban.com/top250/'</span></span><br><span class="line">    ]</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">parse</span><span class="params">(self, response)</span>:</span></span><br><span class="line">        movie_name = response.xpath(<span class="string">"//div[@class='item']//a/span[1]/text()"</span>).extract()</span><br><span class="line">        movie_core = response.xpath(<span class="string">"//div[@class='star']/span[2]/text()"</span>).extract()</span><br><span class="line">        <span class="keyword">yield</span> &#123;</span><br><span class="line">            <span class="string">'movie_name'</span>:movie_name,</span><br><span class="line">            <span 
class="string">'movie_core'</span>:movie_core</span><br><span class="line">        &#125;</span><br></pre></td></tr></table></figure>

<h3 id="其他命令："><a href="#其他命令：" class="headerlink" title="其他命令："></a>其他命令：</h3><ul>
<li><p>创建爬虫</p>
  <figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">scrapy genspider 爬虫名 爬虫的地址</span><br></pre></td></tr></table></figure>
</li>
<li><p>运行爬虫</p>
  <figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">scrapy crawl 爬虫名</span><br></pre></td></tr></table></figure>

<p>  也可以不在命令行运行(可以debug)</p>
  <figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">from</span> scrapy.cmdline <span class="keyword">import</span> execute</span><br><span class="line"></span><br><span class="line">execute(<span class="string">"scrapy crawl 爬虫名"</span>.split())</span><br></pre></td></tr></table></figure>

</li>
</ul>
<h2 id="4-数据提取与保存"><a href="#4-数据提取与保存" class="headerlink" title="4. 数据提取与保存"></a>4. 数据提取与保存</h2><h3 id="4-1-Scrapy提取项目"><a href="#4-1-Scrapy提取项目" class="headerlink" title="4.1. Scrapy提取项目"></a>4.1. Scrapy提取项目</h3><p>从网页中提取数据，Scrapy 使用基于 XPath 和 CSS 表达式的技术叫做选择器。以下是 XPath 表达式的一些例子：</p>
<ul>
<li>这将选择 HTML 文档中的 <code>&lt;head&gt;</code> 元素中的 <code>&lt;title&gt;</code> 元素<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">&#x2F;html&#x2F;head&#x2F;title</span><br></pre></td></tr></table></figure></li>
<li>这将选择 <code>&lt;title&gt;</code> 元素中的文本<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">&#x2F;html&#x2F;head&#x2F;title&#x2F;text()</span><br></pre></td></tr></table></figure></li>
<li>这将选择所有的 <code>&lt;td&gt;</code> 元素<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">&#x2F;&#x2F;td</span><br></pre></td></tr></table></figure></li>
<li>选择所有包含属性 class="slice" 的 div 元素<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">&#x2F;&#x2F;div[@class&#x3D;&quot;slice&quot;]</span><br></pre></td></tr></table></figure>


</li>
</ul>
<p>选择器有四个基本的方法，如下所示：</p>
<table>
<thead>
<tr>
<th>方法</th>
<th>描述</th>
</tr>
</thead>
<tbody><tr>
<td>extract()</td>
<td>它返回一个unicode字符串以及所选数据</td>
</tr>
<tr>
<td>extract_first()</td>
<td>它返回第一个unicode字符串以及所选数据</td>
</tr>
<tr>
<td>re()</td>
<td>它返回Unicode字符串列表，当正则表达式被赋予作为参数时提取</td>
</tr>
<tr>
<td>xpath()</td>
<td>它返回选择器列表，它代表由指定XPath表达式参数选择的节点</td>
</tr>
<tr>
<td>css()</td>
<td>它返回选择器列表，它代表由指定CSS表达式作为参数所选择的节点</td>
</tr>
</tbody></table>
<h3 id="4-2-Scrapy-Shell"><a href="#4-2-Scrapy-Shell" class="headerlink" title="4.2. Scrapy Shell"></a>4.2. Scrapy Shell</h3><p>如果使用选择器想快速地看到效果，我们可以使用Scrapy Shell</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">scrapy shell &quot;http:&#x2F;&#x2F;www.163.com&quot;</span><br></pre></td></tr></table></figure>
<p>注意windows系统必须使用双引号</p>
<h4 id="4-2-1-举例"><a href="#4-2-1-举例" class="headerlink" title="4.2.1. 举例"></a>4.2.1. 举例</h4><p>从一个普通的HTML网站提取数据，查看该网站得到的 XPath 的源代码。检测后，可以看到数据在 ul 标签中，需要选择 li 标签中的元素。</p>
<p>代码的下面行显示了不同类型的数据的提取：</p>
<ul>
<li>选择 li 标签内的数据：<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">response.xpath(&#39;&#x2F;&#x2F;ul&#x2F;li&#39;)</span><br></pre></td></tr></table></figure></li>
<li>对于选择描述：<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">response.xpath(&#39;&#x2F;&#x2F;ul&#x2F;li&#x2F;text()&#39;).extract()</span><br></pre></td></tr></table></figure></li>
<li>对于选择网站标题：<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">response.xpath(&#39;&#x2F;&#x2F;ul&#x2F;li&#x2F;a&#x2F;text()&#39;).extract()</span><br></pre></td></tr></table></figure></li>
<li>对于选择网站的链接：<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">response.xpath(&#39;&#x2F;&#x2F;ul&#x2F;li&#x2F;a&#x2F;@href&#39;).extract()</span><br></pre></td></tr></table></figure>

</li>
</ul>
<h3 id="4-3-数据的提取"><a href="#4-3-数据的提取" class="headerlink" title="4.3. 数据的提取"></a>4.3. 数据的提取</h3><h4 id="4-3-1-控制台打印"><a href="#4-3-1-控制台打印" class="headerlink" title="4.3.1. 控制台打印"></a>4.3.1. 控制台打印</h4><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> scrapy</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">DoubanSpider</span><span class="params">(scrapy.Spider)</span>:</span></span><br><span class="line">    name = <span class="string">'douban'</span></span><br><span class="line">    allwed_url = <span class="string">'douban.com'</span></span><br><span class="line">    start_urls = [</span><br><span class="line">        <span class="string">'https://movie.douban.com/top250/'</span></span><br><span class="line">    ]</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">parse</span><span class="params">(self, response)</span>:</span></span><br><span class="line">        movie_name = response.xpath(<span class="string">"//div[@class='item']//a/span[1]/text()"</span>).extract()</span><br><span class="line">        movie_core = response.xpath(<span class="string">"//div[@class='star']/span[2]/text()"</span>).extract()</span><br><span 
class="line">        <span class="keyword">yield</span> &#123;</span><br><span class="line">            <span class="string">'movie_name'</span>:movie_name,</span><br><span class="line">            <span class="string">'movie_core'</span>:movie_core</span><br><span class="line">        &#125;</span><br></pre></td></tr></table></figure>
<p>执行以上代码，我们可以在控制台看到：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span 
class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br></pre></td><td class="code"><pre><span class="line">2020-07-04 16:35:33 [scrapy.utils.log] INFO: Scrapy 2.2.0 started (bot: ScrapyDemo)</span><br><span class="line">2020-07-04 16:35:33 [scrapy.utils.log] INFO: Versions: lxml 4.5.1.0, libxml2 2.9.5, cssselect 1.1.0, parsel 1.6.0, w3lib 1.22.0, Twisted 20.3.0, Python 3.8.3 (tags&#x2F;v3.8.3:6f8c832, May 13 2020, 22:37:02) [MSC v.1924 64 bit (AMD64)], pyOpenSSL 19.1.0 (OpenSSL 1.1.1g  21 Apr 2020), cryptography 2.9.2, Platform Windows-10-10.0.18362-SP0</span><br><span class="line">2020-07-04 16:35:33 [scrapy.utils.log] DEBUG: Using reactor: twisted.internet.selectreactor.SelectReactor</span><br><span class="line">2020-07-04 16:35:33 [scrapy.crawler] INFO: Overridden settings:</span><br><span class="line">&#123;&#39;BOT_NAME&#39;: &#39;ScrapyDemo&#39;,</span><br><span class="line"> &#39;DOWNLOAD_DELAY&#39;: 3,</span><br><span class="line"> &#39;NEWSPIDER_MODULE&#39;: &#39;ScrapyDemo.spiders&#39;,</span><br><span class="line"> &#39;SPIDER_MODULES&#39;: [&#39;ScrapyDemo.spiders&#39;],</span><br><span class="line"> &#39;USER_AGENT&#39;: &#39;Mozilla&#x2F;5.0 (Windows NT 10.0; Win64; x64) AppleWebKit&#x2F;537.36 &#39;</span><br><span class="line">               &#39;(KHTML, like Gecko) Chrome&#x2F;83.0.4103.116 Safari&#x2F;537.36&#39;&#125;</span><br><span class="line">2020-07-04 16:35:33 [scrapy.extensions.telnet] INFO: Telnet Password: 54878adbf1f92ee6</span><br><span class="line">2020-07-04 16:35:33 [scrapy.middleware] INFO: Enabled extensions:</span><br><span class="line">[&#39;scrapy.extensions.corestats.CoreStats&#39;,</span><br><span class="line"> &#39;scrapy.extensions.telnet.TelnetConsole&#39;,</span><br><span class="line"> &#39;scrapy.extensions.logstats.LogStats&#39;]</span><br><span class="line">2020-07-04 16:35:33 [scrapy.middleware] INFO: Enabled downloader middlewares:</span><br><span 
class="line">[&#39;scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware&#39;,</span><br><span class="line"> &#39;scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware&#39;,</span><br><span class="line"> &#39;scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware&#39;,</span><br><span class="line"> &#39;scrapy.downloadermiddlewares.useragent.UserAgentMiddleware&#39;,</span><br><span class="line"> &#39;scrapy.downloadermiddlewares.retry.RetryMiddleware&#39;,</span><br><span class="line"> &#39;scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware&#39;,</span><br><span class="line"> &#39;scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware&#39;,</span><br><span class="line"> &#39;scrapy.downloadermiddlewares.redirect.RedirectMiddleware&#39;,</span><br><span class="line"> &#39;scrapy.downloadermiddlewares.cookies.CookiesMiddleware&#39;,</span><br><span class="line"> &#39;scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware&#39;,</span><br><span class="line"> &#39;scrapy.downloadermiddlewares.stats.DownloaderStats&#39;]</span><br><span class="line">2020-07-04 16:35:33 [scrapy.middleware] INFO: Enabled spider middlewares:</span><br><span class="line">[&#39;scrapy.spidermiddlewares.httperror.HttpErrorMiddleware&#39;,</span><br><span class="line"> &#39;scrapy.spidermiddlewares.offsite.OffsiteMiddleware&#39;,</span><br><span class="line"> &#39;scrapy.spidermiddlewares.referer.RefererMiddleware&#39;,</span><br><span class="line"> &#39;scrapy.spidermiddlewares.urllength.UrlLengthMiddleware&#39;,</span><br><span class="line"> &#39;scrapy.spidermiddlewares.depth.DepthMiddleware&#39;]</span><br><span class="line">2020-07-04 16:35:33 [scrapy.middleware] INFO: Enabled item pipelines:</span><br><span class="line">[&#39;ScrapyDemo.pipelines.MoviesPipeline&#39;]</span><br><span class="line">2020-07-04 16:35:33 [scrapy.core.engine] INFO: Spider opened</span><br><span class="line">2020-07-04 16:35:33 
[scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages&#x2F;min), scraped 0 items (at 0 items&#x2F;min)</span><br><span class="line">2020-07-04 16:35:33 [scrapy.extensions.telnet] INFO: Telnet console listening on 127.0.0.1:6023</span><br><span class="line">2020-07-04 16:35:34 [scrapy.downloadermiddlewares.redirect] DEBUG: Redirecting (301) to &lt;GET https:&#x2F;&#x2F;movie.douban.com&#x2F;top250&gt; from &lt;GET https:&#x2F;&#x2F;movie.douban.com&#x2F;top250&#x2F;&gt;</span><br><span class="line">2020-07-04 16:35:37 [scrapy.core.engine] DEBUG: Crawled (200) &lt;GET https:&#x2F;&#x2F;movie.douban.com&#x2F;top250&gt; (referer: None)</span><br><span class="line">2020-07-04 16:35:37 [scrapy.core.scraper] DEBUG: Scraped from &lt;200 https:&#x2F;&#x2F;movie.douban.com&#x2F;top250&gt;</span><br><span class="line">&#123;&#39;movie_name&#39;: [&#39;肖申克的救赎&#39;, &#39;霸王别姬&#39;, &#39;阿甘正传&#39;, &#39;这个杀手不太冷&#39;, &#39;美丽人生&#39;, &#39;泰坦尼克号&#39;, &#39;千与千寻&#39;, &#39;辛德勒的名单&#39;, &#39;盗梦空间&#39;, &#39;忠犬八公的故事&#39;, &#39;海上钢琴师&#39;, &#39;楚门的世界&#39;, &#39;三傻大闹宝莱坞&#39;, &#39;机器人总动员&#39;, &#39;放牛班的春天&#39;, &#39;星际穿越&#39;, &#39;大话西游之大圣娶亲&#39;, &#39;熔炉&#39;, &#39;疯狂动物城&#39;, &#39;无间道&#39;, &#39;龙猫&#39;, &#39;教父&#39;, &#39;当幸福来敲门&#39;, &#39;怦然心动&#39;, &#39;触不可及&#39;], &#39;movie_core&#39;: [&#39;9.7&#39;, &#39;9.6&#39;, &#39;9.5&#39;, &#39;9.4&#39;, &#39;9.5&#39;, &#39;9.4&#39;, &#39;9.4&#39;, &#39;9.5&#39;, &#39;9.3&#39;, &#39;9.4&#39;, &#39;9.3&#39;, &#39;9.3&#39;, &#39;9.2&#39;, &#39;9.3&#39;, &#39;9.3&#39;, &#39;9.3&#39;, &#39;9.2&#39;, &#39;9.3&#39;, &#39;9.2&#39;, &#39;9.2&#39;, &#39;9.2&#39;, &#39;9.3&#39;, &#39;9.1&#39;, &#39;9.1&#39;, &#39;9.2&#39;]&#125;</span><br><span class="line">2020-07-04 16:35:37 [scrapy.core.engine] INFO: Closing spider (finished)</span><br><span class="line">2020-07-04 16:35:37 [scrapy.statscollectors] INFO: Dumping Scrapy stats:</span><br><span class="line">&#123;&#39;downloader&#x2F;request_bytes&#39;: 632,</span><br><span class="line"> 
&#39;downloader&#x2F;request_count&#39;: 2,</span><br><span class="line"> &#39;downloader&#x2F;request_method_count&#x2F;GET&#39;: 2,</span><br><span class="line"> &#39;downloader&#x2F;response_bytes&#39;: 12995,</span><br><span class="line"> &#39;downloader&#x2F;response_count&#39;: 2,</span><br><span class="line"> &#39;downloader&#x2F;response_status_count&#x2F;200&#39;: 1,</span><br><span class="line"> &#39;downloader&#x2F;response_status_count&#x2F;301&#39;: 1,</span><br><span class="line"> &#39;elapsed_time_seconds&#39;: 3.94013,</span><br><span class="line"> &#39;finish_reason&#39;: &#39;finished&#39;,</span><br><span class="line"> &#39;finish_time&#39;: datetime.datetime(2020, 7, 4, 8, 35, 37, 892486),</span><br><span class="line"> &#39;item_scraped_count&#39;: 1,</span><br><span class="line"> &#39;log_count&#x2F;DEBUG&#39;: 3,</span><br><span class="line"> &#39;log_count&#x2F;INFO&#39;: 10,</span><br><span class="line"> &#39;response_received_count&#39;: 1,</span><br><span class="line"> &#39;scheduler&#x2F;dequeued&#39;: 2,</span><br><span class="line"> &#39;scheduler&#x2F;dequeued&#x2F;memory&#39;: 2,</span><br><span class="line"> &#39;scheduler&#x2F;enqueued&#39;: 2,</span><br><span class="line"> &#39;scheduler&#x2F;enqueued&#x2F;memory&#39;: 2,</span><br><span class="line"> &#39;start_time&#39;: datetime.datetime(2020, 7, 4, 8, 35, 33, 952356)&#125;</span><br><span class="line">2020-07-04 16:35:37 [scrapy.core.engine] INFO: Spider closed (finished)</span><br></pre></td></tr></table></figure>

<h3 id="4-4-数据以文件的方式输出"><a href="#4-4-数据以文件的方式输出" class="headerlink" title="4.4. 数据以文件的方式输出"></a>4.4. 数据以文件的方式输出</h3><h4 id="4-4-1-python原生方式"><a href="#4-4-1-python原生方式" class="headerlink" title="4.4.1. python原生方式"></a>4.4.1. python原生方式</h4><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">with</span> open(<span class="string">"movie.txt"</span>, <span class="string">'wb'</span>) <span class="keyword">as</span> f:</span><br><span class="line">    <span class="keyword">for</span> n, c <span class="keyword">in</span> zip(movie_name, movie_core):</span><br><span class="line">        str = n+<span class="string">":"</span>+c+<span class="string">"\n"</span></span><br><span class="line">        f.write(str.encode())</span><br></pre></td></tr></table></figure>

<h4 id="4-4-2-以scrapy内置方式"><a href="#4-4-2-以scrapy内置方式" class="headerlink" title="4.4.2. 以scrapy内置方式"></a>4.4.2. 以scrapy内置方式</h4><p>scrapy 内置主要有四种：JSON，JSON lines，CSV，XML</p>
<p>我们将结果用最常用的JSON导出，命令如下：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">scrapy crawl dmoz -o douban.json -t json</span><br></pre></td></tr></table></figure>

<p>-o 后面是导出文件名，-t 后面是导出类型</p>
<h4 id="4-4-3-提取内容的封装Item"><a href="#4-4-3-提取内容的封装Item" class="headerlink" title="4.4.3. 提取内容的封装Item"></a>4.4.3. 提取内容的封装Item</h4><blockquote>
<p>Scrapy进程可通过使用蜘蛛提取来自网页中的数据。Scrapy使用Item类生成输出对象用于收刮数据</p>
</blockquote>
<blockquote>
<p>Item 对象是自定义的python字典，可以使用标准字典语法获取某个属性的值</p>
</blockquote>
<h5 id="4-4-3-1-定义"><a href="#4-4-3-1-定义" class="headerlink" title="4.4.3.1. 定义"></a>4.4.3.1. 定义</h5><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> scrapy</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">InfoItem</span><span class="params">(scrapy.Item)</span>:</span></span><br><span class="line">    <span class="comment"># define the fields for your item here like:</span></span><br><span class="line">    movie_name = scrapy.Field()</span><br><span class="line">    movie_core = scrapy.Field()</span><br></pre></td></tr></table></figure>

<h5 id="4-4-3-2-使用"><a href="#4-4-3-2-使用" class="headerlink" title="4.4.3.2. 使用"></a>4.4.3.2. 使用</h5><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">parse</span><span class="params">(self, response)</span>:</span></span><br><span class="line">    movie_name = response.xpath(<span class="string">"//div[@class='item']//a/span[1]/text()"</span>).extract()</span><br><span class="line">    movie_core = response.xpath(<span class="string">"//div[@class='star']/span[2]/text()"</span>).extract()</span><br><span class="line">    </span><br><span class="line">    <span class="keyword">for</span> n, c <span class="keyword">in</span> zip(movie_name, movie_core):</span><br><span class="line">        movie = InfoItem()</span><br><span class="line">        movie[<span class="string">'movie_name'</span>] = n</span><br><span class="line">        movie[<span class="string">'movie_core'</span>] = c</span><br><span class="line">        <span class="keyword">yield</span> movie</span><br></pre></td></tr></table></figure>

<h2 id="5-Item-Pipeline"><a href="#5-Item-Pipeline" class="headerlink" title="5. Item Pipeline"></a>5. Item Pipeline</h2><h3 id="5-1-Item-Pipeline-介绍"><a href="#5-1-Item-Pipeline-介绍" class="headerlink" title="5.1. Item Pipeline 介绍"></a>5.1. Item Pipeline 介绍</h3><p>当Item 在Spider中被收集之后，就会被传递到Item Pipeline中进行处理</p>
<p>每个item pipeline组件是实现了简单的方法的python类，负责接收到item并通过它执行一些行为，同时也决定此Item是否继续通过pipeline,或者被丢弃而不再进行处理</p>
<p>item pipeline的主要作用：</p>
<ol>
<li>清理html数据</li>
<li>验证爬取的数据</li>
<li>去重并丢弃</li>
<li>将爬取的结果保存到数据库中或文件中</li>
</ol>
<h3 id="5-2-编写自己的item-pipeline"><a href="#5-2-编写自己的item-pipeline" class="headerlink" title="5.2. 编写自己的item pipeline"></a>5.2. 编写自己的item pipeline</h3><h4 id="5-2-1-必须实现的函数"><a href="#5-2-1-必须实现的函数" class="headerlink" title="5.2.1. 必须实现的函数"></a>5.2.1. 必须实现的函数</h4><ul>
<li>process_item(self,item,spider)</li>
</ul>
<p>每个item pipeline组件是一个独立的python类，必须实现process_item(self,item,spider)方法</p>
<p>每个item pipeline组件都需要调用该方法，这个方法必须返回一个具有数据的dict,或者item对象，或者抛出DropItem异常，被丢弃的item将不会被之后的pipeline组件所处理</p>
<h4 id="5-2-2-可以选择实现"><a href="#5-2-2-可以选择实现" class="headerlink" title="5.2.2. 可以选择实现"></a>5.2.2. 可以选择实现</h4><ul>
<li><p>open_spider(self,spider)<br>表示当spider被开启的时候调用这个方法</p>
</li>
<li><p>close_spider(self,spider)<br>当spider关闭时候这个方法被调用</p>
</li>
</ul>
<h4 id="5-2-3-应用到项目"><a href="#5-2-3-应用到项目" class="headerlink" title="5.2.3. 应用到项目"></a>5.2.3. 应用到项目</h4><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> json</span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">MoviePipeline</span><span class="params">(object)</span>:</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">process_item</span><span class="params">(self, item, spider)</span>:</span></span><br><span class="line">        json.dump(dict(item), open(<span class="string">'diban.json'</span>, <span class="string">'a'</span>, encoding=<span class="string">'utf-8'</span>), ensure_ascii=<span class="literal">False</span>)</span><br><span class="line">        <span class="keyword">return</span> item</span><br></pre></td></tr></table></figure>

<h4 id="注意："><a href="#注意：" class="headerlink" title="注意："></a>注意：</h4><p>写到pipeline后，要在settings中设置才可生效</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">ITEM_PIPELINES &#x3D; &#123;</span><br><span class="line">    &#39;spiderdemo1.pipelines.MoviePipeline&#39;: 300</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br></pre></td><td class="code"><pre><span class="line">from pymongo import MongoClient</span><br><span class="line">from middle.settings import HOST</span><br><span class="line">from middle.settings import PORT</span><br><span class="line">from middle.settings import DB_NAME</span><br><span class="line">from middle.settings import SHEET_NAME</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">class MiddlePipeline(object):</span><br><span class="line">    def __init__(self):</span><br><span class="line">        client &#x3D; MongoClient(host&#x3D;HOST, port&#x3D;PORT)</span><br><span class="line">        my_db &#x3D; client[DB_NAME]</span><br><span class="line">        self.sheet &#x3D; my_db[SHEET_NAME]</span><br><span class="line"></span><br><span class="line">    def process_item(self, item, spider):</span><br><span class="line">        self.sheet.insert(dict(item))</span><br><span class="line">        return item</span><br></pre></td></tr></table></figure>

<h2 id="6-Scrapy内置设置"><a href="#6-Scrapy内置设置" class="headerlink" title="6. Scrapy内置设置"></a>6. Scrapy内置设置</h2><p>下面给出scrapy提供的常用内置设置列表,你可以在settings.py文件里面修改这些设置，以应用或者禁用这些设置项</p>
<ul>
<li><p>BOT_NAME</p>
<p>默认: ‘scrapybot’</p>
<p>Scrapy项目实现的bot的名字。用来构造默认 User-Agent，同时也用来log。<br>当你使用 startproject 命令创建项目时其也被自动赋值。</p>
</li>
<li><p>CONCURRENT_ITEMS</p>
<p>默认: 100</p>
<p>Item Processor(即 Item Pipeline) 同时处理(每个response的)item的最大值</p>
</li>
<li><p>CONCURRENT_REQUESTS</p>
<p>默认: 16</p>
<p>Scrapy downloader 并发请求(concurrent requests)的最大值。</p>
</li>
<li><p>CONCURRENT_REQUESTS_PER_DOMAIN</p>
<p>默认: 8</p>
<p>对单个网站进行并发请求的最大值。</p>
</li>
<li><p>CONCURRENT_REQUESTS_PER_IP</p>
<p>默认: 0</p>
<p>对单个IP进行并发请求的最大值。如果非0，则忽略 CONCURRENT_REQUESTS_PER_DOMAIN 设定， 使用该设定。 也就是说，并发限制将针对IP，而不是网站。</p>
<p>该设定也影响 DOWNLOAD_DELAY: 如果 CONCURRENT_REQUESTS_PER_IP 非0，下载延迟应用在IP而不是网站上。</p>
</li>
<li><p>DEFAULT_ITEM_CLASS</p>
<p>默认: ‘scrapy.item.Item’</p>
<p>the Scrapy shell 中实例化item使用的默认类</p>
</li>
<li><p>DEFAULT_REQUEST_HEADERS</p>
<p>默认:</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">&#123;</span><br><span class="line">    &#39;Accept&#39;: &#39;text&#x2F;html,application&#x2F;xhtml+xml,application&#x2F;xml;q&#x3D;0.9,*&#x2F;*;q&#x3D;0.8&#39;,</span><br><span class="line">    &#39;Accept-Language&#39;: &#39;en&#39;,</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>Scrapy HTTP Request使用的默认header。由 DefaultHeadersMiddleware 产生。</p>
</li>
<li><p>DOWNLOADER</p>
<p>默认: ‘scrapy.core.downloader.Downloader’</p>
<p>用于crawl的downloader.</p>
</li>
<li><p>DOWNLOADER_MIDDLEWARES</p>
<p>默认:: {}</p>
<p>保存项目中启用的下载中间件及其顺序的字典</p>
</li>
<li><p>DOWNLOAD_DELAY</p>
<p>默认: 0</p>
<p>下载器在下载同一个网站下一个页面前需要等待的时间。该选项可以用来限制爬取速度， 减轻服务器压力。同时也支持小数</p>
</li>
<li><p>DOWNLOAD_HANDLERS</p>
<p>默认: {}</p>
<p>保存项目中启用的下载处理器(request downloader handler)的字典</p>
</li>
<li><p>DOWNLOAD_TIMEOUT</p>
<p>默认: 180</p>
<p>下载器超时时间(单位: 秒)</p>
</li>
<li><p>EXTENSIONS</p>
<p>默认:{}</p>
<p>保存项目中启用的插件及其顺序的字典</p>
</li>
<li><p>ITEM_PIPELINES</p>
<p>默认: {}</p>
<p>保存项目中启用的pipeline及其顺序的字典。该字典默认为空，值(value)任意。 不过值(value)习惯设定在0-1000范围内</p>
</li>
<li><p>ITEM_PIPELINES_BASE</p>
<p>默认: {}</p>
<p>保存项目中默认启用的pipeline的字典。 永远不要在项目中修改该设定，而是修改 ITEM_PIPELINES</p>
</li>
<li><p>LOG_ENABLED</p>
<p>默认: True</p>
<p>是否启用logging</p>
</li>
<li><p>LOG_ENCODING</p>
<p>默认: ‘utf-8’</p>
<p>logging使用的编码。</p>
</li>
<li><p>LOG_FILE</p>
<p>默认: None</p>
<p>logging输出的文件名。如果为None，则使用标准错误输出(standard error)。</p>
</li>
<li><p>LOG_FORMAT</p>
<p>默认: ‘%(asctime)s [%(name)s] %(levelname)s: %(message)s’</p>
<p>日志的数据格式</p>
</li>
<li><p>LOG_DATEFORMAT</p>
<p>默认: ‘%Y-%m-%d %H:%M:%S’</p>
<p>日志的日期格式</p>
</li>
<li><p>LOG_LEVEL</p>
<p>默认: ‘DEBUG’</p>
<p>log的最低级别。可选的级别有: CRITICAL、 ERROR、WARNING、INFO、DEBUG</p>
</li>
<li><p>LOG_STDOUT</p>
<p>默认: False</p>
<p>如果为 True ，进程所有的标准输出(及错误)将会被重定向到log中</p>
</li>
<li><p>RANDOMIZE_DOWNLOAD_DELAY</p>
<p>默认: True</p>
<p>如果启用，当从相同的网站获取数据时，Scrapy将会等待一个随机的值 (0.5到1.5之间的一个随机值 * DOWNLOAD_DELAY)</p>
<p>该随机值降低了crawler被检测到(接着被block)的机会。某些网站会分析请求， 查找请求之间时间的相似性</p>
</li>
<li><p>REDIRECT_MAX_TIMES</p>
<p>默认: 20</p>
<p>定义request允许重定向的最大次数。超过该限制后该request直接返回获取到的结果。 对某些任务我们使用Firefox默认值</p>
</li>
<li><p>ROBOTSTXT_OBEY</p>
<p>默认: False</p>
<p>是否遵循robots协议</p>
</li>
<li><p>SCHEDULER<br>默认: ‘scrapy.core.scheduler.Scheduler’</p>
<p>用于爬取的调度器</p>
</li>
<li><p>SPIDER_MIDDLEWARES</p>
<p>默认: {}</p>
<p>保存项目中启用的spider中间件及其顺序的字典</p>
</li>
<li><p>USER_AGENT</p>
<p>默认: “Scrapy/VERSION (+<a href="http://scrapy.org)&quot;" target="_blank" rel="noopener">http://scrapy.org)&quot;</a></p>
<p>爬取的默认User-Agent，除非被覆盖</p>
</li>
</ul>
<h3 id="Scrapy默认BASE设置"><a href="#Scrapy默认BASE设置" class="headerlink" title="Scrapy默认BASE设置"></a>Scrapy默认BASE设置</h3><blockquote>
<p>scrapy对某些内部组件进行了默认设置，这些组件通常情况下是不能被修改的，但是我们在自定义了某些组件以后，比如我们设置了自定义的middleware中间件，需要按照一定的顺序把他添加到组件之中，这个时候需要参考scrapy的默认设置，因为这个顺序会影响scrapy的执行，下面列出了scrapy的默认基础设置</p>
</blockquote>
<p>注意：如果你想要修改以下的某些设置，应该避免直接修改下列内容，而是修改其对应的自定义内容，例如，你想修改下面的<code>DOWNLOADER_MIDDLEWARES_BASE</code>的内容，你应该去修改<code>DOWNLOADER_MIDDLEWARES</code>这个内容，只是去掉了_BASE而已，其他的也是类似这样</p>
<ul>
<li>DOWNLOADER_MIDDLEWARES_BASE</li>
</ul>
<p>默认:</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br></pre></td><td class="code"><pre><span class="line">&#123;</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware&#39;: 100,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware&#39;: 300,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware&#39;: 350,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.useragent.UserAgentMiddleware&#39;: 400,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.retry.RetryMiddleware&#39;: 500,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware&#39;: 550,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware&#39;: 580,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware&#39;: 590,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.redirect.RedirectMiddleware&#39;: 600,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.cookies.CookiesMiddleware&#39;: 700,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware&#39;: 750,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.chunked.ChunkedTransferMiddleware&#39;: 830,</span><br><span class="line">    
&#39;scrapy.downloadermiddlewares.stats.DownloaderStats&#39;: 850,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware&#39;: 900,</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>包含Scrapy默认启用的下载中间件的字典。 永远不要在项目中修改该设定，而是修改 DOWNLOADER_MIDDLEWARES 。</p>
<ul>
<li>SPIDER_MIDDLEWARES_BASE</li>
</ul>
<p>默认:</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line">&#123;</span><br><span class="line">    &#39;scrapy.spidermiddlewares.httperror.HttpErrorMiddleware&#39;: 50,</span><br><span class="line">    &#39;scrapy.spidermiddlewares.offsite.OffsiteMiddleware&#39;: 500,</span><br><span class="line">    &#39;scrapy.spidermiddlewares.referer.RefererMiddleware&#39;: 700,</span><br><span class="line">    &#39;scrapy.spidermiddlewares.urllength.UrlLengthMiddleware&#39;: 800,</span><br><span class="line">    &#39;scrapy.spidermiddlewares.depth.DepthMiddleware&#39;: 900,</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>保存项目中默认启用的spider中间件的字典。 永远不要在项目中修改该设定，而是修改 SPIDER_MIDDLEWARES 。<br>EXTENSIONS_BASE</p>
<p>默认:</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre></td><td class="code"><pre><span class="line">&#123;</span><br><span class="line">    &#39;scrapy.extensions.corestats.CoreStats&#39;: 0,</span><br><span class="line">    &#39;scrapy.telnet.TelnetConsole&#39;: 0,</span><br><span class="line">    &#39;scrapy.extensions.memusage.MemoryUsage&#39;: 0,</span><br><span class="line">    &#39;scrapy.extensions.memdebug.MemoryDebugger&#39;: 0,</span><br><span class="line">    &#39;scrapy.extensions.closespider.CloseSpider&#39;: 0,</span><br><span class="line">    &#39;scrapy.extensions.feedexport.FeedExporter&#39;: 0,</span><br><span class="line">    &#39;scrapy.extensions.logstats.LogStats&#39;: 0,</span><br><span class="line">    &#39;scrapy.extensions.spiderstate.SpiderState&#39;: 0,</span><br><span class="line">    &#39;scrapy.extensions.throttle.AutoThrottle&#39;: 0,</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>可用的插件列表。需要注意，有些插件需要通过设定来启用。默认情况下， 该设定包含所有稳定(stable)的内置插件。</p>
<ul>
<li>DOWNLOAD_HANDLERS_BASE</li>
</ul>
<p>默认:</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">&#123;</span><br><span class="line">    &#39;file&#39;: &#39;scrapy.core.downloader.handlers.file.FileDownloadHandler&#39;,</span><br><span class="line">    &#39;http&#39;: &#39;scrapy.core.downloader.handlers.http.HttpDownloadHandler&#39;,</span><br><span class="line">    &#39;https&#39;: &#39;scrapy.core.downloader.handlers.http.HttpDownloadHandler&#39;,</span><br><span class="line">    &#39;s3&#39;: &#39;scrapy.core.downloader.handlers.s3.S3DownloadHandler&#39;,</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>保存项目中默认启用的下载处理器(request downloader handler)的字典。 永远不要在项目中修改该设定，而是修改 DOWNLOAD_HANDLERS 。</p>
<p>如果需要关闭上面的下载处理器，您必须在项目中的 DOWNLOAD_HANDLERS 设定中设置该处理器，并为其赋值为 None 。</p>
<p><strong>说明</strong></p>
<p>即使我们添加了一些我们自定义的组件，scrapy默认的base设置依然会被应用，这样说可能会一头雾水，简单地例子：</p>
<p>假如我们在middlewares.py文件中定义了一个中间件，名称为MyMiddleware，我们把它添加到settings.py文件里面的<code>DOWNLOADER_MIDDLEWARES</code>，且他的执行顺序我们设置为450，最终的设置内容就是：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">DOWNLOADER_MIDDLEWARES &#x3D; &#123;</span><br><span class="line">    &#39;cnblog.middlewares.MyMiddleware&#39;:450,</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>我们再来看一下默认的<code>DOWNLOADER_MIDDLEWARES_BASE</code>的内容：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br></pre></td><td class="code"><pre><span class="line">DOWNLOADER_MIDDLEWARES_BASE &#x3D;&#123;</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware&#39;: 100,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware&#39;: 300,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware&#39;: 350,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.useragent.UserAgentMiddleware&#39;: 400,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.retry.RetryMiddleware&#39;: 500,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware&#39;: 550,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware&#39;: 580,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware&#39;: 590,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.redirect.RedirectMiddleware&#39;: 600,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.cookies.CookiesMiddleware&#39;: 700,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware&#39;: 750,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.chunked.ChunkedTransferMiddleware&#39;: 830,</span><br><span 
class="line">    &#39;scrapy.downloadermiddlewares.stats.DownloaderStats&#39;: 850,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware&#39;: 900,</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>这个时候，scrapy下载中间件的最终的执行顺序就是，把<code>DOWNLOADER_MIDDLEWARES</code>和<code>DOWNLOADER_MIDDLEWARES_BASE</code>里面的中间件按照顺序执行，<code>100&gt;300&gt;350&gt;400&gt;450&gt;500&gt;550&gt;580&gt;590&gt;600&gt;700&gt;750&gt;830&gt;850&gt;900</code>且全部执行，并不会因为我们定义了一个中间件，而使默认的中间件失效，也就是说，最终的结果其实是合并执行。</p>
<p>如果我们不想应用某一个默认的中间件，假如<code>&#39;scrapy.downloadermiddlewares.retry.RetryMiddleware&#39;: 500,</code>那么，就应该在<code>DOWNLOADER_MIDDLEWARES</code>里面把它的值设置为None，像下面这样：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">DOWNLOADER_MIDDLEWARES &#x3D; &#123;</span><br><span class="line">    &#39;cnblog.middlewares.MyMiddleware&#39;:450,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.retry.RetryMiddleware&#39;:None,</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<h2 id="7-Spider-下载中间件-Middleware"><a href="#7-Spider-下载中间件-Middleware" class="headerlink" title="7. Spider 下载中间件(Middleware)"></a>7. Spider 下载中间件(Middleware)</h2><p>Spider 中间件(Middleware)是介入到 Scrapy 的 spider 处理机制的钩子框架，您可以添加代码来处理发送给 Spiders 的 response 及 spider 产生的 item 和 request</p>
<h3 id="7-1-激活一个下载DOWNLOADER-MIDDLEWARES"><a href="#7-1-激活一个下载DOWNLOADER-MIDDLEWARES" class="headerlink" title="7.1. 激活一个下载DOWNLOADER_MIDDLEWARES"></a>7.1. 激活一个下载DOWNLOADER_MIDDLEWARES</h3><p>要激活一个下载器中间件组件，将其添加到 <code>DOWNLOADER_MIDDLEWARES</code>设置中，该设置是一个字典，其键是中间件类路径，它们的值是中间件的顺序(order)</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">DOWNLOADER_MIDDLEWARES &#x3D; &#123;</span><br><span class="line">    &#39;myproject.middlewares.CustomDownloaderMiddleware&#39;: 543,</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>该<code>DOWNLOADER_MIDDLEWARES</code>设置会与 Scrapy 中定义的<code>DOWNLOADER_MIDDLEWARES_BASE</code>设置合并（并不意味着被覆盖），然后按顺序排序，以获得最终的已启用中间件的排序列表：第一个中间件是最靠近引擎的，最后一个是最靠近下载器的。换句话说，每个中间件的<code>process_request()</code>方法将按中间件顺序递增（100,200,300，…）依次被调用，而每个中间件的<code>process_response()</code>方法将按降序被调用</p>
<p>要决定分配给中间件的顺序，请参阅 <code>DOWNLOADER_MIDDLEWARES_BASE</code>设置并根据要插入中间件的位置选择一个值。顺序很重要，因为每个中间件都执行不同的操作，而您的中间件可能依赖于之前（或后续）正在使用的中间件</p>
<p>如果要禁用内置中间件（<code>DOWNLOADER_MIDDLEWARES_BASE</code>默认情况下已定义和启用的中间件 ），则必须在项目<code>DOWNLOADER_MIDDLEWARES</code>设置中定义它，并将 <code>None</code> 作为其值。例如，如果您要禁用用户代理中间件</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">DOWNLOADER_MIDDLEWARES &#x3D; &#123;</span><br><span class="line">    &#39;myproject.middlewares.CustomDownloaderMiddleware&#39;: 543,</span><br><span class="line">    &#39;scrapy.downloadermiddlewares.useragent.UserAgentMiddleware&#39;: None,</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>最后，请记住，某些中间件可能需要通过特定设置启用</p>
<h3 id="7-2-编写你自己的下载中间件"><a href="#7-2-编写你自己的下载中间件" class="headerlink" title="7.2. 编写你自己的下载中间件"></a>7.2. 编写你自己的下载中间件</h3><p>每个中间件组件都是一个Python类，它定义了一个或多个以下方法</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">class scrapy.downloadermiddlewares.DownloaderMiddleware</span><br></pre></td></tr></table></figure>

<blockquote>
<p>任何下载器中间件方法也可能返回一个延迟</p>
</blockquote>
<h4 id="7-2-1-process-request-self-request-spider"><a href="#7-2-1-process-request-self-request-spider" class="headerlink" title="7.2.1 process_request(self, request, spider)"></a>7.2.1 process_request(self, request, spider)</h4><blockquote>
<p>当每个request通过下载中间件时，该方法被调用</p>
</blockquote>
<p>process_request()必须返回其中之一</p>
<ul>
<li>返回 None<ul>
<li>Scrapy 将继续处理该 request，执行其他的中间件的相应方法，直到合适的下载器处理函数(download handler)被调用，该 request 被执行(其 response 被下载)</li>
</ul>
</li>
<li>返回一个 Response 对象<ul>
<li>Scrapy 将不会调用 任何 其他的 process_request()或 process_exception()方法，或相应地下载函数； 其将返回该 response。已安装的中间件的 process_response()方法则会在每个 response 返回时被调用</li>
</ul>
</li>
<li>返回一个 Request 对象<ul>
<li>Scrapy 则停止调用 process_request 方法并重新调度返回的 request。当新返回的 request 被执行后， 相应地中间件链将会根据下载的 response 被调用</li>
</ul>
</li>
<li>raise IgnoreRequest<ul>
<li>如果抛出 一个 IgnoreRequest 异常，则安装的下载中间件的 process_exception() 方法会被调用。如果没有任何一个方法处理该异常， 则 request 的 errback(Request.errback)方法会被调用。如果没有代码处理抛出的异常， 则该异常被忽略且不记录(不同于其他异常那样)</li>
</ul>
</li>
</ul>
<p>参数:</p>
<ul>
<li>request (Request 对象) – 处理的request</li>
<li>spider (Spider 对象) – 该request对应的spider</li>
</ul>
<h4 id="7-2-2-process-response-self-request-response-spider"><a href="#7-2-2-process-response-self-request-response-spider" class="headerlink" title="7.2.2 process_response(self, request, response, spider)"></a>7.2.2 process_response(self, request, response, spider)</h4><blockquote>
<p>当下载器完成http请求，传递响应给引擎的时候调用</p>
</blockquote>
<ul>
<li><p>process_response() 必须返回以下其中之一: 返回一个 Response 对象、 返回一个 Request 对象或raise一个 IgnoreRequest 异常</p>
<ul>
<li><p>如果其返回一个 Response (可以与传入的response相同，也可以是全新的对象)， 该response会被在链中的其他中间件的 process_response() 方法处理。</p>
</li>
<li><p>如果其返回一个 Request 对象，则中间件链停止， 返回的request会被重新调度下载。处理类似于 process_request() 返回request所做的那样。</p>
</li>
<li><p>如果其抛出一个 IgnoreRequest 异常，则调用request的errback(Request.errback)。 如果没有代码处理抛出的异常，则该异常被忽略且不记录(不同于其他异常那样)。</p>
</li>
</ul>
</li>
<li><p>参数:</p>
<ul>
<li>request (Request 对象) – response所对应的request</li>
<li>response (Response 对象) – 被处理的response</li>
<li>spider (Spider 对象) – response所对应的spider</li>
</ul>
</li>
</ul>
<h3 id="7-3-使用代理"><a href="#7-3-使用代理" class="headerlink" title="7.3. 使用代理"></a>7.3. 使用代理</h3><p>settings.py</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line">PROXIES&#x3D;[</span><br><span class="line">    &#123;&quot;ip&quot;:&quot;122.236.158.78:8118&quot;&#125;,</span><br><span class="line">    &#123;&quot;ip&quot;:&quot;112.245.78.90:8118&quot;&#125;</span><br><span class="line">]</span><br><span class="line">DOWNLOADER_MIDDLEWARES &#x3D; &#123;</span><br><span class="line">    #&#39;xiaoshuo.middlewares.XiaoshuoDownloaderMiddleware&#39;: 543,</span><br><span class="line">    &#39;xiaoshuo.proxyMidde.ProxyMidde&#39;:100</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<p>创建一个midderwares</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">from xiaoshuo.settings import PROXIES</span><br><span class="line">import random</span><br><span class="line">class ProxyMidde(object):</span><br><span class="line">    def process_request(self, request, spider):</span><br><span class="line">            proxy &#x3D; random.choice(PROXIES)</span><br><span class="line">            request.meta[&#39;proxy&#39;]&#x3D;&#39;http:&#x2F;&#x2F;&#39;+proxy[&#39;ip&#39;]</span><br></pre></td></tr></table></figure>

<p>写一个spider测试</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line">from scrapy import Spider</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">class ProxyIp(Spider):</span><br><span class="line">    name &#x3D; &#39;ip&#39;</span><br><span class="line">    #http:&#x2F;&#x2F;www.882667.com&#x2F;</span><br><span class="line">    start_urls &#x3D; [&#39;http:&#x2F;&#x2F;ip.cn&#39;]</span><br><span class="line"></span><br><span class="line">    def parse(self, response):</span><br><span class="line">        print(response.text)</span><br></pre></td></tr></table></figure>

<h3 id="7-4-使用动态UA"><a href="#7-4-使用动态UA" class="headerlink" title="7.4. 使用动态UA"></a>7.4. 使用动态UA</h3><figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"># 随机的User-Agent</span><br><span class="line">class RandomUserAgent(object):</span><br><span class="line">    def process_request(self, request, spider):</span><br><span class="line">        useragent &#x3D; random.choice(USER_AGENTS)</span><br><span class="line"></span><br><span class="line">        request.headers.setdefault(&quot;User-Agent&quot;, useragent)</span><br></pre></td></tr></table></figure>

<h2 id="8-Scrapy-Request和Response（请求和响应）"><a href="#8-Scrapy-Request和Response（请求和响应）" class="headerlink" title="8. Scrapy-Request和Response（请求和响应）"></a>8. Scrapy-Request和Response（请求和响应）</h2><p> Scrapy的Request和Response对象用于爬网网站。</p>
<p> 通常，Request对象在爬虫程序中生成并传递到系统，直到它们到达下载程序，后者执行请求并返回一个Response对象，该对象返回到发出请求的爬虫程序。</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">sequenceDiagram</span><br><span class="line">爬虫-&gt;&gt;Request: 创建</span><br><span class="line">Request-&gt;&gt;Response:获取下载数据</span><br><span class="line">Response-&gt;&gt;爬虫:数据</span><br></pre></td></tr></table></figure>

<h3 id="8-1-Request对象"><a href="#8-1-Request对象" class="headerlink" title="8.1. Request对象"></a>8.1. Request对象</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">scrapy</span>.<span class="title">http</span>.<span class="title">Request</span><span class="params">(url[, callback, method=<span class="string">'GET'</span>, headers, body, cookies, meta, encoding=<span class="string">'utf-8'</span>, priority=<span class="number">0</span>, dont_filter=False, errback])</span></span></span><br></pre></td></tr></table></figure>

<p>一个Request对象表示一个HTTP请求，它通常是在爬虫生成，并由下载执行，从而生成Response</p>
<ul>
<li><p>参数</p>
<ul>
<li><p>url（string） - 此请求的网址</p>
</li>
<li><p>callback（callable） - 将使用此请求的响应（一旦下载）作为其第一个参数调用的函数。有关更多信息，请参阅下面的将附加数据传递给回调函数。如果请求没有指定回调，将使用spider的parse()方法。请注意，如果在处理期间引发异常，则会调用errback。</p>
</li>
<li><p>method（string） - 此请求的HTTP方法。默认为’GET’。可设置为”GET”, “POST”, “PUT”等，且保证字符串大写</p>
</li>
<li><p>meta（dict） - 属性的初始值Request.meta,在不同的请求之间传递数据使用</p>
</li>
<li><p>body（str或unicode） - 请求体。如果传递的是unicode，则会使用传入的编码（默认为utf-8）将其编码为str。如果 body没有给出，则存储一个空字符串。不管这个参数的类型，存储的最终值将是一个str（不会是unicode或None）。</p>
</li>
<li><p>headers（dict） - 这个请求的头。dict值可以是字符串（对于单值标头）或列表（对于多值标头）。如果 None作为值传递，则不会发送HTTP头.一般不需要</p>
</li>
<li><p>encoding: 使用默认的 ‘utf-8’ 就行。</p>
</li>
<li><p>cookie（dict或list） - 请求cookie。这些可以以两种形式发送。</p>
<ul>
<li>使用dict：</li>
</ul>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">request_with_cookies = Request(url=<span class="string">"http://xxx/login.html"</span>,)</span><br></pre></td></tr></table></figure>
</li>
<li><p>使用列表：</p>
</li>
</ul>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">request_with_cookies = Request(url=<span class="string">"http://www.example.com"</span>,</span><br><span class="line">                             cookies=[&#123;<span class="string">'name'</span>: <span class="string">'currency'</span>,</span><br><span class="line">                                      <span class="string">'value'</span>: <span class="string">'USD'</span>,</span><br><span class="line">                                      <span class="string">'domain'</span>: <span class="string">'example.com'</span>,</span><br><span class="line">                                      <span class="string">'path'</span>: <span class="string">'/currency'</span>&#125;])</span><br></pre></td></tr></table></figure>

<p>后一种形式允许定制 cookie的属性domain和path属性。这只有在保存Cookie用于以后的请求时才有用</p>
</li>
</ul>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">request_with_cookies = Request(url=<span class="string">"http://www.example.com"</span>,</span><br><span class="line">                               cookies=&#123;<span class="string">'currency'</span>: <span class="string">'USD'</span>, <span class="string">'country'</span>: <span class="string">'UY'</span>&#125;,</span><br><span class="line">                               meta=&#123;<span class="string">'dont_merge_cookies'</span>: <span class="literal">True</span>&#125;)</span><br></pre></td></tr></table></figure>

<h4 id="将附加数据传递给回调函数"><a href="#将附加数据传递给回调函数" class="headerlink" title="将附加数据传递给回调函数"></a>将附加数据传递给回调函数</h4><p>请求的回调是当下载该请求的响应时将被调用的函数。将使用下载的Response对象作为其第一个参数来调用回调函数</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br></pre></td><td class="code"><pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">parse_page1</span><span class="params">(self, response)</span>:</span></span><br><span class="line">    item = MyItem()</span><br><span class="line">    item[<span class="string">'main_url'</span>] = response.url</span><br><span class="line">    request = scrapy.Request(<span class="string">"http://www.example.com/some_page.html"</span>,</span><br><span class="line">                             callback=self.parse_page2)</span><br><span class="line">    request.meta[<span class="string">'item'</span>] = item</span><br><span class="line">    <span class="keyword">return</span> request</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">parse_page2</span><span class="params">(self, response)</span>:</span></span><br><span class="line">    item = response.meta[<span class="string">'item'</span>]</span><br><span class="line">    item[<span class="string">'other_url'</span>] = response.url</span><br><span class="line">    <span class="keyword">return</span> item</span><br></pre></td></tr></table></figure>

<h3 id="8-2-请求子类-FormRequest对象"><a href="#8-2-请求子类-FormRequest对象" class="headerlink" title="8.2. 请求子类 FormRequest对象"></a>8.2. 请求子类 FormRequest对象</h3><p>FormRequest类扩展了Request具有处理HTML表单的功能的基础。它使用lxml.html表单 从Response对象的表单数据预填充表单字段</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">scrapy</span>.<span class="title">http</span>.<span class="title">FormRequest</span><span class="params">(url[, formdata, ...])</span></span></span><br></pre></td></tr></table></figure>

<p>本FormRequest类增加了新的构造函数的参数。其余的参数与Request类相同，这里没有记录</p>
<ul>
<li>参数：formdata（元组的dict或iterable） - 是一个包含HTML Form数据的字典（或（key，value）元组的迭代），它将被url编码并分配给请求的主体。</li>
</ul>
<p>该FormRequest对象支持除标准以下类方法Request的方法：</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">classmethod from_response(response[, formname=<span class="literal">None</span>, formid=<span class="literal">None</span>, formnumber=<span class="number">0</span>, formdata=<span class="literal">None</span>, formxpath=<span class="literal">None</span>, formcss=<span class="literal">None</span>, clickdata=<span class="literal">None</span>, dont_click=<span class="literal">False</span>, ...])</span><br></pre></td></tr></table></figure>

<p>返回一个新FormRequest对象，其表单字段值已使用给定响应中包含的HTML <code>&lt;form&gt;</code> 元素预先填充。</p>
<p>参数：</p>
<ul>
<li>response（Responseobject） - 包含将用于预填充表单字段的HTML表单的响应</li>
<li>formname（string） - 如果给定，将使用name属性设置为此值的表单</li>
<li>formid（string） - 如果给定，将使用id属性设置为此值的表单</li>
<li>formxpath（string） - 如果给定，将使用匹配xpath的第一个表单</li>
<li>formcss（string） - 如果给定，将使用匹配css选择器的第一个表单</li>
<li>formnumber（integer） - 当响应包含多个表单时要使用的表单的数量。第一个（也是默认）是0</li>
<li>formdata（dict） - 要在表单数据中覆盖的字段。如果响应元素中已存在字段，则其值将被在此参数中传递的值覆盖</li>
<li>clickdata（dict） - 查找控件被点击的属性。如果没有提供，表单数据将被提交，模拟第一个可点击元素的点击。除了html属性，控件可以通过其相对于表单中其他提交表单输入的基于零的索引，通过nr属性来标识</li>
<li>dont_click（boolean） - 如果为True，表单数据将在不点击任何元素的情况下提交</li>
</ul>
<h4 id="8-2-1-请求使用示例"><a href="#8-2-1-请求使用示例" class="headerlink" title="8.2.1 请求使用示例"></a>8.2.1 请求使用示例</h4><p>使用FormRequest通过HTTP POST发送数据</p>
<p>如果你想在你的爬虫中模拟HTML表单POST并发送几个键值字段，你可以返回一个FormRequest对象（从你的爬虫）像这样：</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">return</span> [FormRequest(url=<span class="string">"http://www.example.com/post/action"</span>,</span><br><span class="line">                    formdata=&#123;<span class="string">'name'</span>: <span class="string">'John Doe'</span>, <span class="string">'age'</span>: <span class="string">'27'</span>&#125;,</span><br><span class="line">                    callback=self.after_post)]</span><br></pre></td></tr></table></figure>

<p>使用FormRequest.from_response（）来模拟用户登录</p>
<p>网站通常通过 &lt;input type=&quot;hidden&quot;&gt; 元素（例如会话相关数据或认证令牌（用于登录页面））提供预填充的表单字段。进行抓取时，您需要自动保留这些预填充字段，并且只覆盖其中的一些，例如用户名和密码。您可以使用 FormRequest.from_response() 方法完成此工作。这里有一个使用它的爬虫示例：</p>
<figure class="highlight plain"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">&lt;input type&#x3D;&quot;hidden&quot;&gt; FormRequest.from_response()</span><br></pre></td></tr></table></figure>

<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> scrapy</span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">LoginSpider</span><span class="params">(scrapy.Spider)</span>:</span></span><br><span class="line">    name = <span class="string">'example.com'</span></span><br><span class="line">    start_urls = [<span class="string">'http://www.example.com/users/login.php'</span>]</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">parse</span><span class="params">(self, response)</span>:</span></span><br><span class="line">        <span class="keyword">return</span> scrapy.FormRequest.from_response(</span><br><span class="line">            response,</span><br><span class="line">            formdata=&#123;<span class="string">'username'</span>: <span class="string">'john'</span>, <span class="string">'password'</span>: <span class="string">'secret'</span>&#125;,</span><br><span class="line">            callback=self.after_login</span><br><span class="line">        )</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> 
<span class="title">after_login</span><span class="params">(self, response)</span>:</span></span><br><span class="line">        <span class="comment"># check login succeed before going on</span></span><br><span class="line">        <span class="keyword">if</span> <span class="string">"authentication failed"</span> <span class="keyword">in</span> response.body:</span><br><span class="line">            self.logger.error(<span class="string">"Login failed"</span>)</span><br><span class="line">            <span class="keyword">return</span></span><br><span class="line"></span><br><span class="line">        <span class="comment"># continue scraping with authenticated session...</span></span><br></pre></td></tr></table></figure>

<h3 id="8-3-响应对象"><a href="#8-3-响应对象" class="headerlink" title="8.3. 响应对象"></a>8.3. 响应对象</h3><figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">scrapy</span>.<span class="title">http</span>.<span class="title">Response</span><span class="params">(url[, status=<span class="number">200</span>, headers=None, body=<span class="string">b''</span>, flags=None, request=None])</span></span></span><br></pre></td></tr></table></figure>

<p>一个Response对象表示一个HTTP响应，它通常由下载器下载，并交给爬虫进行处理</p>
<p>参数：</p>
<ul>
<li>url（string） - 此响应的URL</li>
<li>status（integer） - 响应的HTTP状态。默认为200</li>
<li>headers（dict） - 这个响应的头。dict值可以是字符串（对于单值标头）或列表（对于多值标头）</li>
<li>body（str） - 响应体。它必须是str，而不是unicode，除非你使用一个编码感知响应子类，如 TextResponse</li>
<li>flags（list） - 是一个包含属性初始值的 Response.flags列表。如果给定，列表将被浅复制</li>
<li>request（Requestobject） - 属性的初始值Response.request。这代表Request生成此响应</li>
</ul>
<h3 id="8-4-模拟登录"><a href="#8-4-模拟登录" class="headerlink" title="8.4. 模拟登录"></a>8.4. 模拟登录</h3><p><strong>用的函数：</strong></p>
<ul>
<li><p>start_requests()可以返回一个请求给爬虫的起始网站，这个返回的请求相当于start_urls，start_requests()返回的请求会替代start_urls里的请求</p>
</li>
<li><p>Request()get请求，可以设置，url、cookie、回调函数</p>
</li>
<li><p>FormRequest.from_response()表单post提交，第一个必须参数，上一次响应cookie的response对象，其他参数，cookie、url、表单内容等</p>
</li>
<li><p>yield Request()可以将一个新的请求返回给爬虫执行</p>
</li>
</ul>
<p><strong>在发送请求时cookie的操作，</strong></p>
<ul>
<li>meta={‘cookiejar’:1}表示开启cookie记录，首次请求时写在Request()里</li>
<li>meta={‘cookiejar’:response.meta[‘cookiejar’]}表示使用上一次response的cookie，写在FormRequest.from_response()里post授权</li>
<li>meta={‘cookiejar’:True}表示使用授权后的cookie访问需要登录查看的页面</li>
</ul>
<p><strong>获取Scrapy框架Cookies</strong></p>
<p><strong>样例代码</strong></p>
<p><code>start_requests()</code>方法，可以返回一个请求给爬虫的起始网站，这个返回的请求相当于start_urls，start_requests()返回的请求会替代start_urls里的请求</p>
<p>在发送请求时cookie的操作</p>
<p><code>meta={&#39;cookiejar&#39;:1}</code>表示开启cookie记录，首次请求时写在Request()里</p>
<p><code>meta={&#39;cookiejar&#39;:response.meta[&#39;cookiejar&#39;]}</code>表示使用上一次response的cookie，写在Request里post授权</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">import</span> scrapy</span><br><span class="line"><span class="keyword">from</span> scrapy <span class="keyword">import</span> Request</span><br><span class="line"><span class="keyword">from</span> scrapy <span class="keyword">import</span> FormRequest</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">SxtSpiderSpider</span><span class="params">(scrapy.Spider)</span>:</span></span><br><span class="line">    name = <span class="string">'demo'</span></span><br><span class="line">    allowed_domains = [<span class="string">'demo'</span>]</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">start_requests</span><span class="params">(self)</span>:</span></span><br><span class="line">        <span class="keyword">return</span> [Request(<span 
class="string">'http://www.xxx/login.html'</span>, meta=&#123;<span class="string">'cookiejar'</span>: <span class="number">1</span>&#125;, callback=self.parse)]</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">parse</span><span class="params">(self, response)</span>:</span></span><br><span class="line">        formdata = &#123;</span><br><span class="line">            <span class="string">"user"</span>: <span class="string">"user"</span>, <span class="string">"password"</span>: <span class="string">"password"</span></span><br><span class="line">        &#125;</span><br><span class="line">        <span class="keyword">return</span> FormRequest(formdata=formdata,</span><br><span class="line">                           url=<span class="string">'http://www.xzxx/login.html'</span>,</span><br><span class="line">                           meta=&#123;<span class="string">'cookiejar'</span>: response.meta[<span class="string">'cookiejar'</span>]&#125;,</span><br><span class="line">                           callback=self.login_after)</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">login_after</span><span class="params">(self, response)</span>:</span></span><br><span class="line">        <span class="keyword">yield</span> scrapy.Request(<span class="string">'http://www.xxxx/user.html'</span>,</span><br><span class="line">                             meta=&#123;<span class="string">"cookiejar"</span>: response.meta[<span class="string">'cookiejar'</span>]&#125;,</span><br><span class="line">                             callback=self.next)</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">next</span><span class="params">(self, response)</span>:</span></span><br><span class="line">        
print(response.text)</span><br></pre></td></tr></table></figure>
    </div>

    
    
    

    
    <div>    
      
        <ul class="post-copyright">
          <li class="post-copyright-link">
          <strong>本文作者：</strong>
          <a href="/" title="欢迎访问 Chenyumeng 的个人博客">Chenyumeng</a>
          </li>

          <li class="post-copyright-link">
            <strong>本文标题：</strong>
            <a href="https://ipvb.gitee.io/blog/2020/07/04/Python%E7%88%AC%E8%99%AB%E5%AD%A6%E4%B9%A0%E8%AE%B0%E5%BD%95/" title="Python爬虫学习记录">Python爬虫学习记录</a>
          </li>

          <li class="post-copyright-link">
            <strong>本文链接：</strong>
            <a href="https://ipvb.gitee.io/blog/2020/07/04/Python%E7%88%AC%E8%99%AB%E5%AD%A6%E4%B9%A0%E8%AE%B0%E5%BD%95/" title="Python爬虫学习记录">https://ipvb.gitee.io/blog/2020/07/04/Python%E7%88%AC%E8%99%AB%E5%AD%A6%E4%B9%A0%E8%AE%B0%E5%BD%95/</a>
          </li>

          <li class="post-copyright-date">
            <strong>发布时间： </strong>2020年7月4日 - 16时07分
          </li>  

          <li class="post-copyright-license">
            <strong>版权声明： </strong>
            本文由 Chenyumeng 原创。
          </li>
        </ul>
      
    </div>
      

        <div class="reward-container">
  <div></div>
  <button onclick="var qr = document.getElementById('qr'); qr.style.display = (qr.style.display === 'none') ? 'block' : 'none';">
    打赏
  </button>
  <div id="qr" style="display: none;">
      
      <div style="display: inline-block;">
        <img src="/blog/images/wechatpay.png" alt="Chenyumeng 微信支付">
        <p>微信支付</p>
      </div>
      
      <div style="display: inline-block;">
        <img src="/blog/images/alipay.png" alt="Chenyumeng 支付宝">
        <p>支付宝</p>
      </div>

  </div>
</div>


      <div>
        
          <div>
    
        <div style="text-align:center;color: #ccc;font-size:14px;">-------------　　　　本文结束　<i class="fa fa-heart"></i>　感谢您的阅读　　　　-------------</div>
    
</div>
        
      </div>

      <footer class="post-footer">
          
          <div class="post-tags">
            
              <a href="/blog/" rel="tag"><i class="fa fa-tag"></i> </a>
            
          </div>

        


        
    <div class="post-nav">
      <div class="post-nav-item">
    <a href="/blog/2020/05/23/Java%20NIO%20Selector/" rel="prev" title="Java NIO Selector">
      <i class="fa fa-chevron-left"></i> Java NIO Selector
    </a></div>
      <div class="post-nav-item"></div>
    </div>
      </footer>
    
  </article>
  
  
  

    <!-- 引入share.js -->
    
      <div data-weibo-title="分享到微博" data-qq-title="分享到QQ" data-douban-title="分享到豆瓣" class="social-share share-component" data-disabled="twitter,facebook" data-description="Share.js - 一键分享到微博，QQ空间，腾讯微博，人人，豆瓣">分享到：</div>
    
  </div>


          </div>
          
    
  <div class="comments">
    <div id="lv-container" data-id="city" data-uid="MTAyMC80OTExNy8yNTYxMg=="></div>
  </div>
  

<script>
  // Restore the previously active comment tab once the tab widgets register.
  window.addEventListener('tabs:register', function () {
    var commentsConfig = CONFIG.comments;
    var active = commentsConfig.activeClass;
    if (commentsConfig.storage) {
      // A stored selection (if any) takes precedence over the configured default.
      var stored = localStorage.getItem('comments_active');
      if (stored) active = stored;
    }
    if (!active) return;
    var tab = document.querySelector('a[href="#comment-' + active + '"]');
    if (tab) {
      tab.click();
    }
  });
  // Persist the clicked comment tab so it can be restored on the next visit.
  if (CONFIG.comments.storage) {
    window.addEventListener('tabs:click', function (event) {
      if (event.target.matches('.tabs-comment .tab-content .tab-pane')) {
        // The second class on the pane identifies the comment provider.
        localStorage.setItem('comments_active', event.target.classList[1]);
      }
    });
  }
</script>

        </div>
          
  
  <div class="toggle sidebar-toggle">
    <span class="toggle-line toggle-line-first"></span>
    <span class="toggle-line toggle-line-middle"></span>
    <span class="toggle-line toggle-line-last"></span>
  </div>

  <aside class="sidebar">
    <div class="sidebar-inner">

      <ul class="sidebar-nav motion-element">
        <li class="sidebar-nav-toc">
          文章目录
        </li>
        <li class="sidebar-nav-overview">
          站点概览
        </li>
      </ul>

      <!--noindex-->
      <div class="post-toc-wrap sidebar-panel">
          <div class="post-toc motion-element"><ol class="nav"><li class="nav-item nav-level-1"><a class="nav-link" href="#一、Python爬虫介绍"><span class="nav-number">1.</span> <span class="nav-text">一、Python爬虫介绍</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#1-什么是爬虫？"><span class="nav-number">1.1.</span> <span class="nav-text">1. 什么是爬虫？</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#2-有什么作用？"><span class="nav-number">1.2.</span> <span class="nav-text">2. 有什么作用？</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#3-合法性"><span class="nav-number">1.3.</span> <span class="nav-text">3. 合法性</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#4-爬虫基本套路"><span class="nav-number">1.4.</span> <span class="nav-text">4. 爬虫基本套路</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#5-python爬虫"><span class="nav-number">1.5.</span> <span class="nav-text">5. python爬虫</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#二、工具的使用"><span class="nav-number">2.</span> <span class="nav-text">二、工具的使用</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#1-常用的工具"><span class="nav-number">2.1.</span> <span class="nav-text">1. 常用的工具</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#三、爬取数据-urllib库"><span class="nav-number">3.</span> <span class="nav-text">三、爬取数据-urllib库</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#1-快速入门"><span class="nav-number">3.1.</span> <span class="nav-text">1. 快速入门</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#2-urllib库常用方法"><span class="nav-number">3.2.</span> <span class="nav-text">2. urllib库常用方法</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#3-Request对象"><span class="nav-number">3.3.</span> <span class="nav-text">3. 
Request对象</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#4-Get-请求"><span class="nav-number">3.4.</span> <span class="nav-text">4. Get 请求</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#5-Post-请求"><span class="nav-number">3.5.</span> <span class="nav-text">5. Post 请求</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#6-响应的编码"><span class="nav-number">3.6.</span> <span class="nav-text">6. 响应的编码</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#7-Ajax的请求获取数据"><span class="nav-number">3.7.</span> <span class="nav-text">7. Ajax的请求获取数据</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#8-请求-SSL证书验证"><span class="nav-number">3.8.</span> <span class="nav-text">8. 请求 SSL证书验证</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#9-伪装自己"><span class="nav-number">3.9.</span> <span class="nav-text">9. 伪装自己</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#9-1-设置请求头"><span class="nav-number">3.9.1.</span> <span class="nav-text">9.1. 设置请求头</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#9-2-设置代理Proxy"><span class="nav-number">3.9.2.</span> <span class="nav-text">9.2. 设置代理Proxy</span></a></li></ol></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#四、Cookie和URLError"><span class="nav-number">4.</span> <span class="nav-text">四、Cookie和URLError</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#1-Cookie"><span class="nav-number">4.1.</span> <span class="nav-text">1. Cookie</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#1-1-Opener"><span class="nav-number">4.1.1.</span> <span class="nav-text">1.1. Opener</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#1-2-Cookielib"><span class="nav-number">4.1.2.</span> <span class="nav-text">1.2. 
Cookielib</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#2-URLError"><span class="nav-number">4.2.</span> <span class="nav-text">2. URLError</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#五、数据爬取-高级-requests库"><span class="nav-number">5.</span> <span class="nav-text">五、数据爬取(高级)-requests库</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#1-介绍"><span class="nav-number">5.1.</span> <span class="nav-text">1. 介绍</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#2-安装"><span class="nav-number">5.2.</span> <span class="nav-text">2. 安装</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#3-基本请求"><span class="nav-number">5.3.</span> <span class="nav-text">3. 基本请求</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#4-获取响应信息"><span class="nav-number">5.4.</span> <span class="nav-text">4. 获取响应信息</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#六、数据解析-正则"><span class="nav-number">6.</span> <span class="nav-text">六、数据解析-正则</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#1-提取数据"><span class="nav-number">6.1.</span> <span class="nav-text">1. 提取数据</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#2-正则表达式相关注解"><span class="nav-number">6.2.</span> <span class="nav-text">2. 正则表达式相关注解</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#2-1-数量词的贪婪模式与非贪婪模式"><span class="nav-number">6.2.1.</span> <span class="nav-text">2.1. 数量词的贪婪模式与非贪婪模式</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#2-2-常用方法"><span class="nav-number">6.2.2.</span> <span class="nav-text">2.2. 常用方法</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#3-正则表达式修饰符-可选标志"><span class="nav-number">6.3.</span> <span class="nav-text">3. 
正则表达式修饰符 - 可选标志</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#七、数据解析-Beautiful-Soup"><span class="nav-number">7.</span> <span class="nav-text">七、数据解析-Beautiful Soup</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#1-Beautiful-Soup的简介"><span class="nav-number">7.1.</span> <span class="nav-text">1. Beautiful Soup的简介</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#2-Beautiful-Soup-安装"><span class="nav-number">7.2.</span> <span class="nav-text">2. Beautiful Soup 安装</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#3-创建-Beautiful-Soup-对象"><span class="nav-number">7.3.</span> <span class="nav-text">3. 创建 Beautiful Soup 对象</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#4-四大对象种类"><span class="nav-number">7.4.</span> <span class="nav-text">4. 四大对象种类</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#4-1-Tag-是什么？"><span class="nav-number">7.4.1.</span> <span class="nav-text">4.1. Tag 是什么？</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#4-2-NavigableString"><span class="nav-number">7.4.2.</span> <span class="nav-text">4.2. NavigableString</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#4-3-BeautifulSoup"><span class="nav-number">7.4.3.</span> <span class="nav-text">4.3. BeautifulSoup</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#4-4-Comment"><span class="nav-number">7.4.4.</span> <span class="nav-text">4.4. Comment</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#5-搜索文档树"><span class="nav-number">7.5.</span> <span class="nav-text">5. 搜索文档树</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#5-1-过滤器"><span class="nav-number">7.5.1.</span> <span class="nav-text">5.1. 
过滤器</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#5-2-字符串"><span class="nav-number">7.5.2.</span> <span class="nav-text">5.2. 字符串</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#5-3-正则表达式"><span class="nav-number">7.5.3.</span> <span class="nav-text">5.3. 正则表达式</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#5-4-列表"><span class="nav-number">7.5.4.</span> <span class="nav-text">5.4. 列表</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#5-5-keyword"><span class="nav-number">7.5.5.</span> <span class="nav-text">5.5. keyword</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#5-6-True"><span class="nav-number">7.5.6.</span> <span class="nav-text">5.6. True</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#5-7-按CSS搜索"><span class="nav-number">7.5.7.</span> <span class="nav-text">5.7. 按CSS搜索</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#6-CSS选择器（扩展）"><span class="nav-number">7.6.</span> <span class="nav-text">6. CSS选择器（扩展）</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#八、数据解析-Xpath"><span class="nav-number">8.</span> <span class="nav-text">八、数据解析-Xpath</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#1-介绍-1"><span class="nav-number">8.1.</span> <span class="nav-text">1. 介绍</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#2-安装-1"><span class="nav-number">8.2.</span> <span class="nav-text">2. 安装</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#3-XPath语法"><span class="nav-number">8.3.</span> <span class="nav-text">3. XPath语法</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#3-1-节点的关系"><span class="nav-number">8.3.1.</span> <span class="nav-text">3.1. 
节点的关系</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#3-2-选取节点"><span class="nav-number">8.3.2.</span> <span class="nav-text">3.2. 选取节点</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#3-2-1-常用的路径表达式"><span class="nav-number">8.3.2.1.</span> <span class="nav-text">3.2.1. 常用的路径表达式</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#3-2-2-通配符"><span class="nav-number">8.3.2.2.</span> <span class="nav-text">3.2.2. 通配符</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#3-2-3-选取若干路径"><span class="nav-number">8.3.2.3.</span> <span class="nav-text">3.2.3. 选取若干路径</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#3-2-4-谓语"><span class="nav-number">8.3.2.4.</span> <span class="nav-text">3.2.4. 谓语</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#3-2-5-XPath运算符"><span class="nav-number">8.3.2.5.</span> <span class="nav-text">3.2.5. XPath运算符</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#3-3-使用"><span class="nav-number">8.3.2.6.</span> <span class="nav-text">3.3. 使用</span></a><ol class="nav-child"><li class="nav-item nav-level-5"><a class="nav-link" href="#3-3-1-小例子"><span class="nav-number">8.3.2.6.1.</span> <span class="nav-text">3.3.1. 小例子</span></a></li><li class="nav-item nav-level-5"><a class="nav-link" href="#3-3-2-XPath具体使用"><span class="nav-number">8.3.2.6.2.</span> <span class="nav-text">3.3.2. 
XPath具体使用</span></a></li></ol></li></ol></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#4-Xpath案例"><span class="nav-number">8.4.</span> <span class="nav-text">4.Xpath案例</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#九、数据解析-Json与JsonPath"><span class="nav-number">9.</span> <span class="nav-text">九、数据解析-Json与JsonPath</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#1-Json与JsonPath"><span class="nav-number">9.1.</span> <span class="nav-text">1. Json与JsonPath</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#2-JSON"><span class="nav-number">9.2.</span> <span class="nav-text">2. JSON</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#3-Python中的json模块"><span class="nav-number">9.3.</span> <span class="nav-text">3. Python中的json模块</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#3-1-json-loads"><span class="nav-number">9.3.1.</span> <span class="nav-text">3.1. json.loads()</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#3-2-json-dumps"><span class="nav-number">9.3.2.</span> <span class="nav-text">3.2. json.dumps()</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#3-3-json-dump"><span class="nav-number">9.3.3.</span> <span class="nav-text">3.3. json.dump()</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#3-4-json-load"><span class="nav-number">9.3.4.</span> <span class="nav-text">3.4. json.load()</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#4-JsonPath"><span class="nav-number">9.4.</span> <span class="nav-text">4. JsonPath</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#5-JsonPath与XPath语法对比"><span class="nav-number">9.5.</span> <span class="nav-text">5. 
JsonPath与XPath语法对比</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#6-示例"><span class="nav-number">9.6.</span> <span class="nav-text">6. 示例</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#7-注意事项"><span class="nav-number">9.7.</span> <span class="nav-text">7. 注意事项</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#7-1-字符串编码转换"><span class="nav-number">9.7.1.</span> <span class="nav-text">7.1. 字符串编码转换</span></a></li></ol></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#十、数据解析-PyQuery"><span class="nav-number">10.</span> <span class="nav-text">十、数据解析-PyQuery</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#1-pyquery"><span class="nav-number">10.1.</span> <span class="nav-text">1. pyquery</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#1-1-介绍"><span class="nav-number">10.1.1.</span> <span class="nav-text">1.1. 介绍</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#1-2-安装"><span class="nav-number">10.1.2.</span> <span class="nav-text">1.2. 安装</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#1-3-使用方式"><span class="nav-number">10.1.3.</span> <span class="nav-text">1.3. 使用方式</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#1-3-1-初始化方式"><span class="nav-number">10.1.3.1.</span> <span class="nav-text">1.3.1. 初始化方式</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#1-3-2-选择节点"><span class="nav-number">10.1.3.2.</span> <span class="nav-text">1.3.2. 选择节点</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#1-3-3-获取属性"><span class="nav-number">10.1.3.3.</span> <span class="nav-text">1.3.3. 
获取属性</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#1-3-4-获取内容"><span class="nav-number">10.1.3.4.</span> <span class="nav-text">1.3.4. 获取内容</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#1-3-5-样例"><span class="nav-number">10.1.3.5.</span> <span class="nav-text">1.3.5. 样例</span></a></li></ol></li></ol></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#十一、爬虫之多线程"><span class="nav-number">11.</span> <span class="nav-text">十一、爬虫之多线程</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#1-引入"><span class="nav-number">11.1.</span> <span class="nav-text">1. 引入</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#2-如何使用"><span class="nav-number">11.2.</span> <span class="nav-text">2. 如何使用</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#3-主要组成部分"><span class="nav-number">11.3.</span> <span class="nav-text">3. 主要组成部分</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#3-1-URL队列和结果队列"><span class="nav-number">11.3.1.</span> <span class="nav-text">3.1. URL队列和结果队列</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#3-2-请求线程"><span class="nav-number">11.3.2.</span> <span class="nav-text">3.2. 请求线程</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#3-3-处理线程"><span class="nav-number">11.3.3.</span> <span class="nav-text">3.3. 处理线程</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#4-Queue模块中的常用方法"><span class="nav-number">11.4.</span> <span class="nav-text">4. Queue模块中的常用方法:</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#5-糗事百科实例"><span class="nav-number">11.5.</span> <span class="nav-text">5. 
糗事百科实例</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#十二、Selenium工具"><span class="nav-number">12.</span> <span class="nav-text">十二、Selenium工具</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#1-Selenium"><span class="nav-number">12.1.</span> <span class="nav-text">1. Selenium</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#2-PhantomJS"><span class="nav-number">12.2.</span> <span class="nav-text">2. PhantomJS</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#2-1-注意：PhantomJS（python2）"><span class="nav-number">12.2.1.</span> <span class="nav-text">2.1. 注意：PhantomJS（python2）</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#2-2-python3使用的浏览器"><span class="nav-number">12.2.2.</span> <span class="nav-text">2.2. python3使用的浏览器</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#2-2-1-安装Firefox-geckodriver"><span class="nav-number">12.2.2.1.</span> <span class="nav-text">2.2.1. 安装Firefox geckodriver</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#2-2-2-安装ChromeDriver"><span class="nav-number">12.2.2.2.</span> <span class="nav-text">2.2.2. 安装ChromeDriver</span></a></li></ol></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#3-使用方式"><span class="nav-number">12.3.</span> <span class="nav-text">3. 使用方式</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#3-1-简单例子"><span class="nav-number">12.3.1.</span> <span class="nav-text">3.1. 简单例子</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#4-页面操作"><span class="nav-number">12.4.</span> <span class="nav-text">4. 页面操作</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#4-1-页面交互"><span class="nav-number">12.4.1.</span> <span class="nav-text">4.1. 
页面交互</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#4-1-1-获取"><span class="nav-number">12.4.1.1.</span> <span class="nav-text">4.1.1. 获取</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#4-1-2-输入内容"><span class="nav-number">12.4.1.2.</span> <span class="nav-text">4.1.2. 输入内容</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#4-1-3-模拟点击某个按键"><span class="nav-number">12.4.1.3.</span> <span class="nav-text">4.1.3. 模拟点击某个按键</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#4-1-4-清空文本"><span class="nav-number">12.4.1.4.</span> <span class="nav-text">4.1.4. 清空文本</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#4-1-5-元素拖拽"><span class="nav-number">12.4.1.5.</span> <span class="nav-text">4.1.5. 元素拖拽</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#4-1-6-历史记录"><span class="nav-number">12.4.1.6.</span> <span class="nav-text">4.1.6. 历史记录</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#4-1-7-处理滚动条"><span class="nav-number">12.4.1.7.</span> <span class="nav-text">4.1.7. 处理滚动条</span></a><ol class="nav-child"><li class="nav-item nav-level-5"><a class="nav-link" href="#一-控制滚动条高度"><span class="nav-number">12.4.1.7.1.</span> <span class="nav-text">一. 控制滚动条高度</span></a></li><li class="nav-item nav-level-5"><a class="nav-link" href="#二-横向滚动条"><span class="nav-number">12.4.1.7.2.</span> <span class="nav-text">二.横向滚动条</span></a></li><li class="nav-item nav-level-5"><a class="nav-link" href="#三-元素聚焦"><span class="nav-number">12.4.1.7.3.</span> <span class="nav-text">三.元素聚焦</span></a></li><li class="nav-item nav-level-5"><a class="nav-link" href="#四-参考代码"><span class="nav-number">12.4.1.7.4.</span> <span class="nav-text">四. 
参考代码</span></a></li></ol></li></ol></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#5-API"><span class="nav-number">12.5.</span> <span class="nav-text">5. API</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#5-1-元素选取"><span class="nav-number">12.5.1.</span> <span class="nav-text">5.1. 元素选取</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#5-1-1-单个元素选取"><span class="nav-number">12.5.1.1.</span> <span class="nav-text">5.1.1. 单个元素选取</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#5-1-2-多个元素选取"><span class="nav-number">12.5.1.2.</span> <span class="nav-text">5.1.2. 多个元素选取</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#5-1-3-利用-By-类来确定哪种选择方式"><span class="nav-number">12.5.1.3.</span> <span class="nav-text">5.1.3. 利用 By 类来确定哪种选择方式</span></a></li></ol></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#6-等待"><span class="nav-number">12.6.</span> <span class="nav-text">6. 等待</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#6-1-隐式等待"><span class="nav-number">12.6.1.</span> <span class="nav-text">6.1. 隐式等待</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#6-2-显示等待"><span class="nav-number">12.6.2.</span> <span class="nav-text">6.2. 显示等待</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#6-3-强制等待"><span class="nav-number">12.6.3.</span> <span class="nav-text">6.3. 强制等待</span></a></li></ol></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#十三、Scrapy-框架"><span class="nav-number">13.</span> <span class="nav-text">十三、Scrapy 框架</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#1-Scrapy-框架介绍"><span class="nav-number">13.1.</span> <span class="nav-text">1. 
Scrapy 框架介绍</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#1-1-为什么要使用Scrapy？"><span class="nav-number">13.1.1.</span> <span class="nav-text">1.1. 为什么要使用Scrapy？</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#1-2-Scrapy的特点"><span class="nav-number">13.1.2.</span> <span class="nav-text">1.2. Scrapy的特点</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#1-3-Scrapy的优点"><span class="nav-number">13.1.3.</span> <span class="nav-text">1.3. Scrapy的优点</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#1-4-整体架构大致如下"><span class="nav-number">13.1.4.</span> <span class="nav-text">1.4. 整体架构大致如下:</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#1-5-Scrapy运行流程大概如下："><span class="nav-number">13.1.5.</span> <span class="nav-text">1.5. Scrapy运行流程大概如下：</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#1-6-Scrapy主要包括了以下组件："><span class="nav-number">13.1.6.</span> <span class="nav-text">1.6. Scrapy主要包括了以下组件：</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#2-安装-2"><span class="nav-number">13.2.</span> <span class="nav-text">2. 安装</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#3-使用"><span class="nav-number">13.3.</span> <span class="nav-text">3. 使用</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#3-1-创建项目"><span class="nav-number">13.3.1.</span> <span class="nav-text">3.1. 创建项目</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#3-2-编写-spdier"><span class="nav-number">13.3.2.</span> <span class="nav-text">3.2. 编写 spdier</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#3-2-1-注意"><span class="nav-number">13.3.2.1.</span> <span class="nav-text">3.2.1. 
注意</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#3-2-2-编写内容"><span class="nav-number">13.3.2.2.</span> <span class="nav-text">3.2.2. 编写内容</span></a></li></ol></li><li class="nav-item nav-level-3"><a class="nav-link" href="#其他命令："><span class="nav-number">13.3.3.</span> <span class="nav-text">其他命令：</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#4-数据提取与保存"><span class="nav-number">13.4.</span> <span class="nav-text">4. 数据提取与保存</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#4-1-Scrapy提取项目"><span class="nav-number">13.4.1.</span> <span class="nav-text">4.1. Scrapy提取项目</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#4-2-Scrapy-Shell"><span class="nav-number">13.4.2.</span> <span class="nav-text">4.2. Scrapy Shell</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#4-2-1-举例"><span class="nav-number">13.4.2.1.</span> <span class="nav-text">4.2.1. 举例</span></a></li></ol></li><li class="nav-item nav-level-3"><a class="nav-link" href="#4-3-数据的提取"><span class="nav-number">13.4.3.</span> <span class="nav-text">4.3. 数据的提取</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#4-3-1-控制台打印"><span class="nav-number">13.4.3.1.</span> <span class="nav-text">4.3.1. 控制台打印</span></a></li></ol></li><li class="nav-item nav-level-3"><a class="nav-link" href="#4-4-数据以文件的方式输出"><span class="nav-number">13.4.4.</span> <span class="nav-text">4.4. 数据以文件的方式输出</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#4-4-1-python原生方式"><span class="nav-number">13.4.4.1.</span> <span class="nav-text">4.4.1. python原生方式</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#4-4-2-以scrapy内置方式"><span class="nav-number">13.4.4.2.</span> <span class="nav-text">4.4.2. 
以scrapy内置方式</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#4-4-3-提取内容的封装Item"><span class="nav-number">13.4.4.3.</span> <span class="nav-text">4.4.3. 提取内容的封装Item</span></a><ol class="nav-child"><li class="nav-item nav-level-5"><a class="nav-link" href="#4-4-3-1-定义"><span class="nav-number">13.4.4.3.1.</span> <span class="nav-text">4.4.3.1. 定义</span></a></li><li class="nav-item nav-level-5"><a class="nav-link" href="#4-4-3-2-使用"><span class="nav-number">13.4.4.3.2.</span> <span class="nav-text">4.4.3.2. 使用</span></a></li></ol></li></ol></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#5-Item-Pipeline"><span class="nav-number">13.5.</span> <span class="nav-text">5. Item Pipeline</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#5-1-Item-Pipeline-介绍"><span class="nav-number">13.5.1.</span> <span class="nav-text">5.1. Item Pipeline 介绍</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#5-2-编写自己的item-pipeline"><span class="nav-number">13.5.2.</span> <span class="nav-text">5.2. 编写自己的item pipeline</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#5-2-1-必须实现的函数"><span class="nav-number">13.5.2.1.</span> <span class="nav-text">5.2.1. 必须实现的函数</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#5-2-2-可以选择实现"><span class="nav-number">13.5.2.2.</span> <span class="nav-text">5.2.2. 可以选择实现</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#5-2-3-应用到项目"><span class="nav-number">13.5.2.3.</span> <span class="nav-text">5.2.3. 应用到项目</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#注意："><span class="nav-number">13.5.2.4.</span> <span class="nav-text">注意：</span></a></li></ol></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#6-Scrapy内置设置"><span class="nav-number">13.6.</span> <span class="nav-text">6. 
Scrapy内置设置</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#Scrapy默认BASE设置"><span class="nav-number">13.6.1.</span> <span class="nav-text">Scrapy默认BASE设置</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#7-Spider-下载中间件-Middleware"><span class="nav-number">13.7.</span> <span class="nav-text">7. Spider 下载中间件(Middleware)</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#7-1-激活一个下载DOWNLOADER-MIDDLEWARES"><span class="nav-number">13.7.1.</span> <span class="nav-text">7.1. 激活一个下载DOWNLOADER_MIDDLEWARES</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#7-2-编写你自己的下载中间件"><span class="nav-number">13.7.2.</span> <span class="nav-text">7.2. 编写你自己的下载中间件</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#7-2-1-process-request-self-request-spider"><span class="nav-number">13.7.2.1.</span> <span class="nav-text">7.2.1 process_request(self, request, spider)</span></a></li><li class="nav-item nav-level-4"><a class="nav-link" href="#7-2-2-process-response-self-request-response-spider"><span class="nav-number">13.7.2.2.</span> <span class="nav-text">7.2.2 process_response(self, request, response, spider)</span></a></li></ol></li><li class="nav-item nav-level-3"><a class="nav-link" href="#7-3-使用代理"><span class="nav-number">13.7.3.</span> <span class="nav-text">7.3. 使用代理</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#7-4-使用动态UA"><span class="nav-number">13.7.4.</span> <span class="nav-text">7.4. 使用动态UA</span></a></li></ol></li><li class="nav-item nav-level-2"><a class="nav-link" href="#8-Scrapy-Request和Response（请求和响应）"><span class="nav-number">13.8.</span> <span class="nav-text">8. 
Scrapy-Request和Response（请求和响应）</span></a><ol class="nav-child"><li class="nav-item nav-level-3"><a class="nav-link" href="#8-1-Request对象"><span class="nav-number">13.8.1.</span> <span class="nav-text">8.1. Request对象</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#将附加数据传递给回调函数"><span class="nav-number">13.8.1.1.</span> <span class="nav-text">将附加数据传递给回调函数</span></a></li></ol></li><li class="nav-item nav-level-3"><a class="nav-link" href="#8-2-请求子类-FormRequest对象"><span class="nav-number">13.8.2.</span> <span class="nav-text">8.2. 请求子类 FormRequest对象</span></a><ol class="nav-child"><li class="nav-item nav-level-4"><a class="nav-link" href="#8-2-1-请求使用示例"><span class="nav-number">13.8.2.1.</span> <span class="nav-text">8.2.1 请求使用示例</span></a></li></ol></li><li class="nav-item nav-level-3"><a class="nav-link" href="#8-3-响应对象"><span class="nav-number">13.8.3.</span> <span class="nav-text">8.3. 响应对象</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#8-4-模拟登录"><span class="nav-number">13.8.4.</span> <span class="nav-text">8.4. 模拟登录</span></a></li></ol></li></ol></li></ol></div>
      </div>
      <!--/noindex-->

      <div class="site-overview-wrap sidebar-panel">
        <!-- Author card: avatar, name and description, marked up as a schema.org Person -->
        <div class="site-author motion-element" itemprop="author" itemscope itemtype="http://schema.org/Person">
    <img class="site-author-image" itemprop="image" alt="Chenyumeng"
      src="/blog/images/touxiang.JPG">
  <p class="site-author-name" itemprop="name">Chenyumeng</p>
  <div class="site-description" itemprop="description">用来记录自己学习中所遇到的问题以及如何解决和自己所学知识的理解</div>
</div>
<!-- Site statistics: post / category / tag counts, each linking to its archive page -->
<div class="site-state-wrap motion-element">
  <nav class="site-state">
      <div class="site-state-item site-state-posts">
          <a href="/blog/archives/">
        
          <span class="site-state-item-count">32</span>
          <span class="site-state-item-name">日志</span>
        </a>
      </div>
      <div class="site-state-item site-state-categories">
            <a href="/blog/categories/">
          
        <span class="site-state-item-count">1</span>
        <span class="site-state-item-name">分类</span></a>
      </div>
      <div class="site-state-item site-state-tags">
            <a href="/blog/tags/">
          
        <span class="site-state-item-count">29</span>
        <span class="site-state-item-name">标签</span></a>
      </div>
  </nav>
</div>
  <!-- Author's external profiles (open in a new tab with rel="noopener") -->
  <div class="links-of-author motion-element">
      <span class="links-of-author-item">
        <a href="https://github.com/chen-yumeng" title="GitHub → https:&#x2F;&#x2F;github.com&#x2F;chen-yumeng" rel="noopener" target="_blank"><i class="fa fa-fw fa-github"></i>GitHub</a>
      </span>
      <span class="links-of-author-item">
        <a href="https://weibo.com/u/3123221441" title="Weibo → https:&#x2F;&#x2F;weibo.com&#x2F;u&#x2F;3123221441" rel="noopener" target="_blank"><i class="fa fa-fw fa-weibo"></i>Weibo</a>
      </span>
  </div>


  <!-- Blogroll: friend/reference links configured in the theme -->
  <div class="links-of-blogroll motion-element">
    <div class="links-of-blogroll-title">
      <i class="fa fa-fw fa-link"></i>
      Links
    </div>
    <ul class="links-of-blogroll-list">
        <li class="links-of-blogroll-item">
          <a href="https://github.com/" title="https:&#x2F;&#x2F;github.com&#x2F;" rel="noopener" target="_blank">Github</a>
        </li>
    </ul>
  </div>

      </div>

    </div>
  </aside>
  <div id="sidebar-dimmer"></div>


      </div>
    </main>

    <footer class="footer">
      <div class="footer-inner">
        

<!-- Footer copyright line plus theme-generated site totals (word count, reading time) -->
<div class="copyright">
  
  &copy; 
  <span itemprop="copyrightYear">2020</span>
  <span class="with-love">
    <i class="fa fa-heart"></i>
  </span>
  <span class="author" itemprop="copyrightHolder">Chenyumeng</span>
    <span class="post-meta-divider">|</span>
    <span class="post-meta-item-icon">
      <i class="fa fa-area-chart"></i>
    </span>
    <span title="站点总字数">390k</span>
    <span class="post-meta-divider">|</span>
    <span class="post-meta-item-icon">
      <i class="fa fa-coffee"></i>
    </span>
    <span title="站点阅读时长">5:55</span>
</div>

        






  <script>
  // LeanCloud visitor counter.
  // NOTE(review): the route-lookup appId below and the X-LC-Id / X-LC-Key headers
  // are all empty strings, so every API call will fail until the LeanCloud
  // credentials are configured in the site/theme config.

  // Returns the counter <span> inside the element whose id is the URI-encoded
  // page url, or null when either element is missing.
  function leancloudSelector(url) {
    var container = document.getElementById(encodeURI(url));
    return container && container.querySelector('.leancloud-visitors-count');
  }

  // Post page: look up this page's record, display count+1, and atomically
  // increment it server-side (creating the record on the first visit).
  // Both handlers are declared unconditionally (rather than inside if/else
  // blocks as before): block-level function declarations rely on sloppy-mode
  // Annex B semantics and break under strict mode / ES modules.
  function addCount(Counter) {
    var visitors = document.querySelector('.leancloud_visitors');
    var url = decodeURI(visitors.id);
    var title = visitors.dataset.flagTitle;

    Counter('get', '/classes/Counter?where=' + encodeURIComponent(JSON.stringify({ url })))
      .then(response => response.json())
      .then(({ results }) => {
        if (results.length > 0) {
          var counter = results[0];
          var target = leancloudSelector(url);
          // Guard against a missing counter span so the server-side increment
          // below still runs.
          if (target) target.innerText = counter.time + 1;
          Counter('put', '/classes/Counter/' + counter.objectId, { time: { '__op': 'Increment', 'amount': 1 } })
            .then(response => response.json())
            .catch(error => {
              console.error('Failed to save visitor count', error);
            });
        } else {
          Counter('post', '/classes/Counter', { title, url, time: 1 })
            .then(response => response.json())
            .then(() => {
              var target = leancloudSelector(url);
              if (target) target.innerText = 1;
            })
            .catch(error => {
              console.error('Failed to create', error);
            });
        }
      })
      .catch(error => {
        console.error('LeanCloud Counter Error', error);
      });
  }

  // List page: fetch the stored counts for every post entry in one query and
  // write each into its counter span (0 when no record exists yet).
  function showTime(Counter) {
    var visitors = document.querySelectorAll('.leancloud_visitors');
    var entries = [...visitors].map(element => decodeURI(element.id));

    Counter('get', '/classes/Counter?where=' + encodeURIComponent(JSON.stringify({ url: { '$in': entries } })))
      .then(response => response.json())
      .then(({ results }) => {
        for (let url of entries) {
          let record = results.find(item => item.url === url);
          let span = leancloudSelector(url);
          if (span) span.innerText = record ? record.time : 0;
        }
      })
      .catch(error => {
        console.error('LeanCloud Counter Error', error);
      });
  }

  // Resolve the region-specific API server via the LeanCloud app router, build
  // a thin fetch wrapper over the REST API, then run the handler that matches
  // this page type.
  fetch('https://app-router.leancloud.cn/2/route?appId=')
    .then(response => response.json())
    .then(({ api_server }) => {
      var Counter = (method, url, data) => {
        return fetch(`https://${api_server}/1.1${url}`, {
          method,
          headers: {
            'X-LC-Id'     : '',
            'X-LC-Key'    : '',
            'Content-Type': 'application/json',
          },
          body: JSON.stringify(data)
        });
      };
      if (CONFIG.page.isPost) {
        // Only count visits on the canonical host (skip local previews/mirrors).
        if (CONFIG.hostname !== location.hostname) return;
        addCount(Counter);
      } else if (document.querySelectorAll('.post-title-link').length >= 1) {
        showTime(Counter);
      }
    });
  </script>


      </div>
    </footer>
  </div>

  
  <script src="/blog/lib/anime.min.js"></script>
  <script src="/blog/lib/velocity/velocity.min.js"></script>
  <script src="/blog/lib/velocity/velocity.ui.min.js"></script>

<script src="/blog/js/utils.js"></script>

<script src="/blog/js/motion.js"></script>


<script src="/blog/js/schemes/pisces.js"></script>


<script src="/blog/js/next-boot.js"></script>




  




  
<script src="/blog/js/local-search.js"></script>













  

  

<script>
// Lazily bootstrap the LiveRe comment widget once its container scrolls into view.
NexT.utils.loadComments(document.querySelector('#lv-container'), () => {
  // LiveRe reads this global to identify the page: the site-root-relative path.
  window.livereOptions = {
    refer: location.pathname.replace(CONFIG.root, '').replace('index.html', '')
  };
  // Inject the LiveRe embed script exactly once, before the first script on the page.
  (function (doc, tagName) {
    var firstScript = doc.getElementsByTagName(tagName)[0];
    if (typeof LivereTower === 'function') { return; }
    var loader = doc.createElement(tagName);
    loader.src = 'https://cdn-city.livere.com/js/embed.dist.js';
    loader.async = true;
    firstScript.parentNode.insertBefore(loader, firstScript);
  })(document, 'script');
});
</script>

  <script src="/dist/js/social-share.min.js"></script>
</body>
</html>
