

<!DOCTYPE html>
<html class="writer-html5" lang="zh">
<head>
  <meta charset="utf-8">

  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  <title>蜘蛛 &mdash; Scrapy 2.3.0 文档</title>

  <!-- NOTE(review): tooltipster.custom.css loads before tooltipster.bundle.min.css,
       so the bundle's rules can override the customizations — confirm intended. -->
  <link rel="stylesheet" href="../_static/css/theme.css">
  <link rel="stylesheet" href="../_static/pygments.css">
  <link rel="stylesheet" href="../_static/css/tooltipster.custom.css">
  <link rel="stylesheet" href="../_static/css/tooltipster.bundle.min.css">
  <link rel="stylesheet" href="../_static/css/tooltipster-sideTip-shadow.min.css">
  <link rel="stylesheet" href="../_static/css/tooltipster-sideTip-punk.min.css">
  <link rel="stylesheet" href="../_static/css/tooltipster-sideTip-noir.min.css">
  <link rel="stylesheet" href="../_static/css/tooltipster-sideTip-light.min.css">
  <link rel="stylesheet" href="../_static/css/tooltipster-sideTip-borderless.min.css">
  <link rel="stylesheet" href="../_static/css/micromodal.css">
  <link rel="stylesheet" href="../_static/css/sphinx_rtd_theme.css">

  <!-- HTML5 element support for legacy IE only. -->
  <!--[if lt IE 9]>
    <script src="../_static/js/html5shiv.min.js"></script>
  <![endif]-->

  <!-- Scripts are order-dependent (jQuery must precede its plugins and
       theme.js); kept synchronous and in the original order. -->
  <script id="documentation_options" data-url_root="../" src="../_static/documentation_options.js"></script>
  <script src="../_static/jquery.js"></script>
  <script src="../_static/underscore.js"></script>
  <script src="../_static/doctools.js"></script>
  <script src="../_static/language_data.js"></script>
  <script src="../_static/js/hoverxref.js"></script>
  <script src="../_static/js/tooltipster.bundle.min.js"></script>
  <script src="../_static/js/micromodal.min.js"></script>
  <script src="../_static/js/theme.js"></script>

  <link rel="index" title="索引" href="../genindex.html">
  <link rel="search" title="搜索" href="../search.html">
  <link rel="next" title="选择器" href="selectors.html">
  <link rel="prev" title="命令行工具" href="commands.html">
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search">
          <!-- alt is not a valid attribute on <a>; use aria-label for the
               accessible name instead. -->
          <a href="../index.html" class="icon icon-home" aria-label="Documentation Home"> Scrapy</a>

          <div class="version">
            2.3
          </div>

          <div role="search">
            <form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
              <!-- placeholder alone is not a label; aria-label gives the
                   field an accessible name. -->
              <input type="text" name="q" placeholder="Search docs" aria-label="Search docs">
              <input type="hidden" name="check_keywords" value="yes">
              <input type="hidden" name="area" value="default">
            </form>
          </div>
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <p class="caption"><span class="caption-text">第一步</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../intro/overview.html">Scrapy一目了然</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intro/install.html">安装指南</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intro/tutorial.html">Scrapy 教程</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intro/examples.html">实例</a></li>
</ul>
<p class="caption"><span class="caption-text">基本概念</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="commands.html">命令行工具</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">蜘蛛</a><ul>
<li class="toctree-l2"><a class="reference internal" href="#scrapy-spider">scrapy.Spider</a></li>
<li class="toctree-l2"><a class="reference internal" href="#spider-arguments">蜘蛛论点</a></li>
<li class="toctree-l2"><a class="reference internal" href="#generic-spiders">类蜘蛛</a><ul>
<li class="toctree-l3"><a class="reference internal" href="#crawlspider">CrawlSpider</a><ul>
<li class="toctree-l4"><a class="reference internal" href="#crawling-rules">爬行规则</a></li>
<li class="toctree-l4"><a class="reference internal" href="#crawlspider-example">爬行蜘蛛示例</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="#xmlfeedspider">XMLFeedSpider</a><ul>
<li class="toctree-l4"><a class="reference internal" href="#xmlfeedspider-example">XmlFeedSpider示例</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="#csvfeedspider">CSVFeedSpider</a><ul>
<li class="toctree-l4"><a class="reference internal" href="#csvfeedspider-example">CSVFeedspider示例</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="#sitemapspider">SitemapSpider</a><ul>
<li class="toctree-l4"><a class="reference internal" href="#sitemapspider-examples">SiteMapSpider示例</a></li>
</ul>
</li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="selectors.html">选择器</a></li>
<li class="toctree-l1"><a class="reference internal" href="items.html">项目</a></li>
<li class="toctree-l1"><a class="reference internal" href="loaders.html">项目加载器</a></li>
<li class="toctree-l1"><a class="reference internal" href="shell.html">Scrapy shell</a></li>
<li class="toctree-l1"><a class="reference internal" href="item-pipeline.html">项目管道</a></li>
<li class="toctree-l1"><a class="reference internal" href="feed-exports.html">Feed 导出</a></li>
<li class="toctree-l1"><a class="reference internal" href="request-response.html">请求和响应</a></li>
<li class="toctree-l1"><a class="reference internal" href="link-extractors.html">链接提取器</a></li>
<li class="toctree-l1"><a class="reference internal" href="settings.html">设置</a></li>
<li class="toctree-l1"><a class="reference internal" href="exceptions.html">例外情况</a></li>
</ul>
<p class="caption"><span class="caption-text">内置服务</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="logging.html">登录</a></li>
<li class="toctree-l1"><a class="reference internal" href="stats.html">统计数据集合</a></li>
<li class="toctree-l1"><a class="reference internal" href="email.html">发送电子邮件</a></li>
<li class="toctree-l1"><a class="reference internal" href="telnetconsole.html">远程登录控制台</a></li>
<li class="toctree-l1"><a class="reference internal" href="webservice.html">Web服务</a></li>
</ul>
<p class="caption"><span class="caption-text">解决具体问题</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../faq.html">常见问题</a></li>
<li class="toctree-l1"><a class="reference internal" href="debug.html">调试spiders</a></li>
<li class="toctree-l1"><a class="reference internal" href="contracts.html">蜘蛛合约</a></li>
<li class="toctree-l1"><a class="reference internal" href="practices.html">常用做法</a></li>
<li class="toctree-l1"><a class="reference internal" href="broad-crawls.html">宽爬行</a></li>
<li class="toctree-l1"><a class="reference internal" href="developer-tools.html">使用浏览器的开发人员工具进行抓取</a></li>
<li class="toctree-l1"><a class="reference internal" href="dynamic-content.html">选择动态加载的内容</a></li>
<li class="toctree-l1"><a class="reference internal" href="leaks.html">调试内存泄漏</a></li>
<li class="toctree-l1"><a class="reference internal" href="media-pipeline.html">下载和处理文件和图像</a></li>
<li class="toctree-l1"><a class="reference internal" href="deploy.html">部署蜘蛛</a></li>
<li class="toctree-l1"><a class="reference internal" href="autothrottle.html">AutoThrottle 扩展</a></li>
<li class="toctree-l1"><a class="reference internal" href="benchmarking.html">标杆管理</a></li>
<li class="toctree-l1"><a class="reference internal" href="jobs.html">作业：暂停和恢复爬行</a></li>
<li class="toctree-l1"><a class="reference internal" href="coroutines.html">协同程序</a></li>
<li class="toctree-l1"><a class="reference internal" href="asyncio.html">asyncio</a></li>
</ul>
<p class="caption"><span class="caption-text">扩展Scrapy</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="architecture.html">体系结构概述</a></li>
<li class="toctree-l1"><a class="reference internal" href="downloader-middleware.html">下载器中间件</a></li>
<li class="toctree-l1"><a class="reference internal" href="spider-middleware.html">蜘蛛中间件</a></li>
<li class="toctree-l1"><a class="reference internal" href="extensions.html">扩展</a></li>
<li class="toctree-l1"><a class="reference internal" href="api.html">核心API</a></li>
<li class="toctree-l1"><a class="reference internal" href="signals.html">信号</a></li>
<li class="toctree-l1"><a class="reference internal" href="exporters.html">条目导出器</a></li>
</ul>
<p class="caption"><span class="caption-text">其余所有</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../news.html">发行说明</a></li>
<li class="toctree-l1"><a class="reference internal" href="../contributing.html">为 Scrapy 贡献</a></li>
<li class="toctree-l1"><a class="reference internal" href="../versioning.html">版本控制和API稳定性</a></li>
</ul>

            
          
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" aria-label="top navigation">
        
          <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
          <a href="../index.html">Scrapy</a>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        
          















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    <!-- Icon-only link: aria-label supplies the accessible name. -->
    <li><a href="../index.html" class="icon icon-home" aria-label="首页"></a> &raquo;</li>
    <li>蜘蛛</li>
    <li class="wy-breadcrumbs-aside">
    </li>
  </ul>

  <hr>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="spiders">
<span id="topics-spiders"></span><h1>蜘蛛<a class="headerlink" href="#spiders" title="永久链接至标题">¶</a></h1>
<p>spider是定义一个特定站点（或一组站点）如何被抓取的类，包括如何执行抓取（即跟踪链接）以及如何从页面中提取结构化数据（即抓取项）。换言之，spider是为特定站点（或者在某些情况下，一组站点）定义爬行和解析页面的自定义行为的地方。</p>
<p>对于蜘蛛来说，抓取周期是这样的：</p><script async src="https://pagead2.googlesyndication.com/pagead/js/adsbygoogle.js"></script>
<ins class="adsbygoogle"
     style="display:block; text-align:center;"
     data-ad-layout="in-article"
     data-ad-format="fluid"
     data-ad-client="ca-pub-1466963416408457"
     data-ad-slot="8850786025"></ins>
<script>
     (adsbygoogle = window.adsbygoogle || []).push({});
</script>
<ol class="arabic">
<li><p>首先生成对第一个URL进行爬网的初始请求，然后指定一个回调函数，该函数使用从这些请求下载的响应进行调用。</p>
<p>要执行的第一个请求是通过调用 <a class="reference internal" href="#scrapy.spiders.Spider.start_requests" title="scrapy.spiders.Spider.start_requests"><code class="xref py py-meth docutils literal notranslate"><span class="pre">start_requests()</span></code></a> （默认）生成的方法 <a class="reference internal" href="request-response.html#scrapy.http.Request" title="scrapy.http.Request"><code class="xref py py-class docutils literal notranslate"><span class="pre">Request</span></code></a> 对于中指定的URL <a class="reference internal" href="#scrapy.spiders.Spider.start_urls" title="scrapy.spiders.Spider.start_urls"><code class="xref py py-attr docutils literal notranslate"><span class="pre">start_urls</span></code></a> 以及 <a class="reference internal" href="#scrapy.spiders.Spider.parse" title="scrapy.spiders.Spider.parse"><code class="xref py py-attr docutils literal notranslate"><span class="pre">parse</span></code></a> 方法作为请求的回调函数。</p>
</li>
<li><p>在回调函数中，解析响应（网页）并返回 <a class="reference internal" href="items.html#topics-items"><span class="std std-ref">item objects</span></a> ， <a class="reference internal" href="request-response.html#scrapy.http.Request" title="scrapy.http.Request"><code class="xref py py-class docutils literal notranslate"><span class="pre">Request</span></code></a> 对象，或由这些对象组成的可迭代对象。这些请求还将包含回调（可能相同），随后由 Scrapy 下载，然后由指定的回调处理它们的响应。</p></li>
<li><p>在回调函数中，解析页面内容，通常使用 <a class="reference internal" href="selectors.html#topics-selectors"><span class="std std-ref">选择器</span></a> （但您也可以使用beautifulsoup、lxml或任何您喜欢的机制）并使用解析的数据生成项。</p></li>
<li><p>最后，从spider返回的项目通常被持久化到数据库（在某些 <a class="reference internal" href="item-pipeline.html#topics-item-pipeline"><span class="std std-ref">Item Pipeline</span></a> ）或者使用 <a class="reference internal" href="feed-exports.html#topics-feed-exports"><span class="std std-ref">Feed 导出</span></a> .</p></li>
</ol>
<p>尽管这个循环（或多或少）适用于任何类型的蜘蛛，但是为了不同的目的，Scrapy 捆绑了不同类型的默认蜘蛛。我们将在这里讨论这些类型。</p>
<span class="target" id="module-scrapy.spiders"></span><div class="section" id="scrapy-spider">
<span id="topics-spiders-ref"></span><h2>scrapy.Spider<a class="headerlink" href="#scrapy-spider" title="永久链接至标题">¶</a></h2>
<dl class="py class">
<dt id="scrapy.spiders.Spider">
<em class="property">class </em><code class="sig-prename descclassname">scrapy.spiders.</code><code class="sig-name descname">Spider</code><a class="reference internal" href="../_modules/scrapy/spiders.html#Spider"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.Spider" title="永久链接至目标">¶</a></dt>
<dd><p>这是最简单的蜘蛛，也是每个蜘蛛都必须继承的蜘蛛（包括与 Scrapy 捆绑在一起的蜘蛛，还有你自己写的蜘蛛）。它不提供任何特殊功能。它只是提供了一个默认值 <a class="reference internal" href="#scrapy.spiders.Spider.start_requests" title="scrapy.spiders.Spider.start_requests"><code class="xref py py-meth docutils literal notranslate"><span class="pre">start_requests()</span></code></a> 从发送请求的实现 <a class="reference internal" href="#scrapy.spiders.Spider.start_urls" title="scrapy.spiders.Spider.start_urls"><code class="xref py py-attr docutils literal notranslate"><span class="pre">start_urls</span></code></a> spider属性并调用spider的方法 <code class="docutils literal notranslate"><span class="pre">parse</span></code> 对于每个结果响应。</p>
<dl class="py attribute">
<dt id="scrapy.spiders.Spider.name">
<code class="sig-name descname">name</code><a class="headerlink" href="#scrapy.spiders.Spider.name" title="永久链接至目标">¶</a></dt>
<dd><p>定义此蜘蛛名称的字符串。spider 名称是 Scrapy 定位（和实例化）spider 的方式，因此它必须是唯一的。但是，没有什么可以阻止您实例化同一个蜘蛛的多个实例。这是最重要的蜘蛛属性，也是必需的。</p>
<p>如果蜘蛛爬取一个域，通常的做法是在域后命名蜘蛛，无论有没有 <a class="reference external" href="https://en.wikipedia.org/wiki/Top-level_domain">TLD</a>。例如，爬取 <code class="docutils literal notranslate"><span class="pre">mywebsite.com</span></code> 的蜘蛛通常被称为 <code class="docutils literal notranslate"><span class="pre">mywebsite</span></code>。</p>
</dd></dl>

<dl class="py attribute">
<dt id="scrapy.spiders.Spider.allowed_domains">
<code class="sig-name descname">allowed_domains</code><a class="headerlink" href="#scrapy.spiders.Spider.allowed_domains" title="永久链接至目标">¶</a></dt>
<dd><p>包含允许此蜘蛛爬行的域的字符串的可选列表。对于不属于此列表（或其子域）中指定的域名的URL请求，如果 <a class="reference internal" href="spider-middleware.html#scrapy.spidermiddlewares.offsite.OffsiteMiddleware" title="scrapy.spidermiddlewares.offsite.OffsiteMiddleware"><code class="xref py py-class docutils literal notranslate"><span class="pre">OffsiteMiddleware</span></code></a> 启用。</p>
<p>假设您的目标URL是 <code class="docutils literal notranslate"><span class="pre">https://www.example.com/1.html</span></code> 然后添加 <code class="docutils literal notranslate"><span class="pre">'example.com'</span></code> 列在名单上。</p>
</dd></dl>

<dl class="py attribute">
<dt id="scrapy.spiders.Spider.start_urls">
<code class="sig-name descname">start_urls</code><a class="headerlink" href="#scrapy.spiders.Spider.start_urls" title="永久链接至目标">¶</a></dt>
<dd><p>当没有指定特定的URL时，蜘蛛将从中开始爬行的URL列表。所以，下载的第一页将是这里列出的那些。随后 <a class="reference internal" href="request-response.html#scrapy.http.Request" title="scrapy.http.Request"><code class="xref py py-class docutils literal notranslate"><span class="pre">Request</span></code></a> 将从包含在起始URL中的数据依次生成。</p>
</dd></dl>

<dl class="py attribute">
<dt id="scrapy.spiders.Spider.custom_settings">
<code class="sig-name descname">custom_settings</code><a class="headerlink" href="#scrapy.spiders.Spider.custom_settings" title="永久链接至目标">¶</a></dt>
<dd><p>运行此spider时，将从项目范围配置中重写的设置字典。它必须被定义为类属性，因为在实例化之前更新了设置。</p>
<p>有关可用内置设置的列表，请参阅： <a class="reference internal" href="settings.html#topics-settings-ref"><span class="std std-ref">内置设置参考</span></a> .</p>
</dd></dl>

<dl class="py attribute">
<dt id="scrapy.spiders.Spider.crawler">
<code class="sig-name descname">crawler</code><a class="headerlink" href="#scrapy.spiders.Spider.crawler" title="永久链接至目标">¶</a></dt>
<dd><p>此属性由 <a class="reference internal" href="item-pipeline.html#from_crawler" title="from_crawler"><code class="xref py py-meth docutils literal notranslate"><span class="pre">from_crawler()</span></code></a> 初始化类后的类方法，并链接到 <a class="reference internal" href="api.html#scrapy.crawler.Crawler" title="scrapy.crawler.Crawler"><code class="xref py py-class docutils literal notranslate"><span class="pre">Crawler</span></code></a> 此蜘蛛实例绑定到的对象。</p>
<p>Crawler封装了项目中的许多组件，用于它们的单入口访问（例如扩展、中间件、信号管理器等）。见 <a class="reference internal" href="api.html#topics-api-crawler"><span class="std std-ref">爬虫API</span></a> 了解更多。</p>
</dd></dl>

<dl class="py attribute">
<dt id="scrapy.spiders.Spider.settings">
<code class="sig-name descname">settings</code><a class="headerlink" href="#scrapy.spiders.Spider.settings" title="永久链接至目标">¶</a></dt>
<dd><p>用于运行此蜘蛛的配置。这是一个 <a class="reference internal" href="api.html#scrapy.settings.Settings" title="scrapy.settings.Settings"><code class="xref py py-class docutils literal notranslate"><span class="pre">Settings</span></code></a> 实例，请参见 <a class="reference internal" href="settings.html#topics-settings"><span class="std std-ref">设置</span></a> 有关此主题的详细介绍。</p>
</dd></dl>

<dl class="py attribute">
<dt id="scrapy.spiders.Spider.logger">
<code class="sig-name descname">logger</code><a class="headerlink" href="#scrapy.spiders.Spider.logger" title="永久链接至目标">¶</a></dt>
<dd><p>用蜘蛛创建的python记录器 <a class="reference internal" href="#scrapy.spiders.Spider.name" title="scrapy.spiders.Spider.name"><code class="xref py py-attr docutils literal notranslate"><span class="pre">name</span></code></a> . 您可以使用它通过它发送日志消息，如中所述 <a class="reference internal" href="logging.html#topics-logging-from-spiders"><span class="std std-ref">从蜘蛛记录</span></a> .</p>
</dd></dl>

<dl class="py method">
<dt id="scrapy.spiders.Spider.from_crawler">
<code class="sig-name descname">from_crawler</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">crawler</span></em>, <em class="sig-param"><span class="o">*</span><span class="n">args</span></em>, <em class="sig-param"><span class="o">**</span><span class="n">kwargs</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/scrapy/spiders.html#Spider.from_crawler"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.Spider.from_crawler" title="永久链接至目标">¶</a></dt>
<dd><p>这是Scrapy用来创建蜘蛛的类方法。</p>
<p>您可能不需要直接重写它，因为默认实现充当 <code class="xref py py-meth docutils literal notranslate"><span class="pre">__init__()</span></code> 方法，使用给定参数调用它 <code class="docutils literal notranslate"><span class="pre">args</span></code> 和命名参数 <code class="docutils literal notranslate"><span class="pre">kwargs</span></code> .</p>
<p>尽管如此，此方法设置了 <a class="reference internal" href="#scrapy.spiders.Spider.crawler" title="scrapy.spiders.Spider.crawler"><code class="xref py py-attr docutils literal notranslate"><span class="pre">crawler</span></code></a> 和 <a class="reference internal" href="#scrapy.spiders.Spider.settings" title="scrapy.spiders.Spider.settings"><code class="xref py py-attr docutils literal notranslate"><span class="pre">settings</span></code></a> 新实例中的属性，以便稍后在蜘蛛代码中访问它们。</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>crawler</strong> (<a class="reference internal" href="api.html#scrapy.crawler.Crawler" title="scrapy.crawler.Crawler"><code class="xref py py-class docutils literal notranslate"><span class="pre">Crawler</span></code></a> instance) -- 蜘蛛将被绑到的爬行器</p></li>
<li><p><strong>args</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#list" title="(在 Python v3.9)"><em>list</em></a>) -- 传递给的参数 <code class="xref py py-meth docutils literal notranslate"><span class="pre">__init__()</span></code> 方法</p></li>
<li><p><strong>kwargs</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#dict" title="(在 Python v3.9)"><em>dict</em></a>) -- 传递给的关键字参数 <code class="xref py py-meth docutils literal notranslate"><span class="pre">__init__()</span></code> 方法</p></li>
</ul>
</dd>
</dl>
</dd></dl>

<dl class="py method">
<dt id="scrapy.spiders.Spider.start_requests">
<code class="sig-name descname">start_requests</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="../_modules/scrapy/spiders.html#Spider.start_requests"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.Spider.start_requests" title="永久链接至目标">¶</a></dt>
<dd><p>此方法必须返回一个可迭代对象（iterable），其中包含对此 spider 进行爬取的第一批请求。当蜘蛛被打开进行爬取时，Scrapy 会调用此方法。Scrapy 只会调用它一次，因此可以安全地将 <a class="reference internal" href="#scrapy.spiders.Spider.start_requests" title="scrapy.spiders.Spider.start_requests"><code class="xref py py-meth docutils literal notranslate"><span class="pre">start_requests()</span></code></a> 实现为生成器。</p>
<p>默认实现生成 <code class="docutils literal notranslate"><span class="pre">Request(url,</span> <span class="pre">dont_filter=True)</span></code> 对于每个URL <a class="reference internal" href="#scrapy.spiders.Spider.start_urls" title="scrapy.spiders.Spider.start_urls"><code class="xref py py-attr docutils literal notranslate"><span class="pre">start_urls</span></code></a> .</p>
<p>如果要更改用于开始抓取域的请求，这是要重写的方法。例如，如果您需要从使用POST请求登录开始，可以执行以下操作：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">class</span> <span class="nc">MySpider</span><span class="p">(</span><span class="n">scrapy</span><span class="o">.</span><span class="n">Spider</span><span class="p">):</span>
    <span class="n">name</span> <span class="o">=</span> <span class="s1">&#39;myspider&#39;</span>

    <span class="k">def</span> <span class="nf">start_requests</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="p">[</span><span class="n">scrapy</span><span class="o">.</span><span class="n">FormRequest</span><span class="p">(</span><span class="s2">&quot;http://www.example.com/login&quot;</span><span class="p">,</span>
                                   <span class="n">formdata</span><span class="o">=</span><span class="p">{</span><span class="s1">&#39;user&#39;</span><span class="p">:</span> <span class="s1">&#39;john&#39;</span><span class="p">,</span> <span class="s1">&#39;pass&#39;</span><span class="p">:</span> <span class="s1">&#39;secret&#39;</span><span class="p">},</span>
                                   <span class="n">callback</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">logged_in</span><span class="p">)]</span>

    <span class="k">def</span> <span class="nf">logged_in</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">response</span><span class="p">):</span>
        <span class="c1"># here you would extract links to follow and return Requests for</span>
        <span class="c1"># each of them, with another callback</span>
        <span class="k">pass</span>
</pre></div>
</div>
</dd></dl>

<dl class="py method">
<dt id="scrapy.spiders.Spider.parse">
<code class="sig-name descname">parse</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">response</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/scrapy/spiders.html#Spider.parse"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.Spider.parse" title="永久链接至目标">¶</a></dt>
<dd><p>这是Scrapy在请求未指定回调时用来处理下载响应的默认回调。</p>
<p>这个 <code class="docutils literal notranslate"><span class="pre">parse</span></code> 方法负责处理响应，并返回爬取的数据和/或更多要跟进的URL。其他请求回调与 <a class="reference internal" href="#scrapy.spiders.Spider" title="scrapy.spiders.Spider"><code class="xref py py-class docutils literal notranslate"><span class="pre">Spider</span></code></a> 类具有相同的要求。</p>
<p>此方法以及任何其他请求回调都必须返回 <a class="reference internal" href="request-response.html#scrapy.http.Request" title="scrapy.http.Request"><code class="xref py py-class docutils literal notranslate"><span class="pre">Request</span></code></a> 和/或 <a class="reference internal" href="items.html#topics-items"><span class="std std-ref">item objects</span></a> .</p>
<dl class="field-list simple">
<dt class="field-odd">参数</dt>
<dd class="field-odd"><p><strong>response</strong> (<a class="reference internal" href="request-response.html#scrapy.http.Response" title="scrapy.http.Response"><code class="xref py py-class docutils literal notranslate"><span class="pre">Response</span></code></a>) -- 解析的响应</p>
</dd>
</dl>
</dd></dl>

<dl class="py method">
<dt id="scrapy.spiders.Spider.log">
<code class="sig-name descname">log</code><span class="sig-paren">(</span><em class="sig-param">message</em><span class="optional">[</span>, <em class="sig-param">level</em>, <em class="sig-param">component</em><span class="optional">]</span><span class="sig-paren">)</span><a class="reference internal" href="../_modules/scrapy/spiders.html#Spider.log"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.Spider.log" title="永久链接至目标">¶</a></dt>
<dd><p>通过Spider的 <a class="reference internal" href="#scrapy.spiders.Spider.logger" title="scrapy.spiders.Spider.logger"><code class="xref py py-attr docutils literal notranslate"><span class="pre">logger</span></code></a> ，保持向后兼容性。有关详细信息，请参阅 <a class="reference internal" href="logging.html#topics-logging-from-spiders"><span class="std std-ref">从蜘蛛记录</span></a> .</p>
</dd></dl>

<dl class="py method">
<dt id="scrapy.spiders.Spider.closed">
<code class="sig-name descname">closed</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">reason</span></em><span class="sig-paren">)</span><a class="headerlink" href="#scrapy.spiders.Spider.closed" title="永久链接至目标">¶</a></dt>
<dd><p>蜘蛛关闭时调用。此方法为 <a class="reference internal" href="signals.html#std-signal-spider_closed"><code class="xref std std-signal docutils literal notranslate"><span class="pre">spider_closed</span></code></a> 信号。</p>
</dd></dl>

</dd></dl>

<p>我们来看一个例子：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">scrapy</span>


<span class="k">class</span> <span class="nc">MySpider</span><span class="p">(</span><span class="n">scrapy</span><span class="o">.</span><span class="n">Spider</span><span class="p">):</span>
    <span class="n">name</span> <span class="o">=</span> <span class="s1">&#39;example.com&#39;</span>
    <span class="n">allowed_domains</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;example.com&#39;</span><span class="p">]</span>
    <span class="n">start_urls</span> <span class="o">=</span> <span class="p">[</span>
        <span class="s1">&#39;http://www.example.com/1.html&#39;</span><span class="p">,</span>
        <span class="s1">&#39;http://www.example.com/2.html&#39;</span><span class="p">,</span>
        <span class="s1">&#39;http://www.example.com/3.html&#39;</span><span class="p">,</span>
    <span class="p">]</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">response</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">logger</span><span class="o">.</span><span class="n">info</span><span class="p">(</span><span class="s1">&#39;A response from </span><span class="si">%s</span><span class="s1"> just arrived!&#39;</span><span class="p">,</span> <span class="n">response</span><span class="o">.</span><span class="n">url</span><span class="p">)</span>
</pre></div>
</div>
<p>从单个回调返回多个请求和项目：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">scrapy</span>

<span class="k">class</span> <span class="nc">MySpider</span><span class="p">(</span><span class="n">scrapy</span><span class="o">.</span><span class="n">Spider</span><span class="p">):</span>
    <span class="n">name</span> <span class="o">=</span> <span class="s1">&#39;example.com&#39;</span>
    <span class="n">allowed_domains</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;example.com&#39;</span><span class="p">]</span>
    <span class="n">start_urls</span> <span class="o">=</span> <span class="p">[</span>
        <span class="s1">&#39;http://www.example.com/1.html&#39;</span><span class="p">,</span>
        <span class="s1">&#39;http://www.example.com/2.html&#39;</span><span class="p">,</span>
        <span class="s1">&#39;http://www.example.com/3.html&#39;</span><span class="p">,</span>
    <span class="p">]</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">response</span><span class="p">):</span>
        <span class="k">for</span> <span class="n">h3</span> <span class="ow">in</span> <span class="n">response</span><span class="o">.</span><span class="n">xpath</span><span class="p">(</span><span class="s1">&#39;//h3&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">getall</span><span class="p">():</span>
            <span class="k">yield</span> <span class="p">{</span><span class="s2">&quot;title&quot;</span><span class="p">:</span> <span class="n">h3</span><span class="p">}</span>

        <span class="k">for</span> <span class="n">href</span> <span class="ow">in</span> <span class="n">response</span><span class="o">.</span><span class="n">xpath</span><span class="p">(</span><span class="s1">&#39;//a/@href&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">getall</span><span class="p">():</span>
            <span class="k">yield</span> <span class="n">scrapy</span><span class="o">.</span><span class="n">Request</span><span class="p">(</span><span class="n">response</span><span class="o">.</span><span class="n">urljoin</span><span class="p">(</span><span class="n">href</span><span class="p">),</span> <span class="bp">self</span><span class="o">.</span><span class="n">parse</span><span class="p">)</span>
</pre></div>
</div>
<p>你可以不用 <a class="reference internal" href="#scrapy.spiders.Spider.start_urls" title="scrapy.spiders.Spider.start_urls"><code class="xref py py-attr docutils literal notranslate"><span class="pre">start_urls</span></code></a> 而直接使用 <a class="reference internal" href="#scrapy.spiders.Spider.start_requests" title="scrapy.spiders.Spider.start_requests"><code class="xref py py-meth docutils literal notranslate"><span class="pre">start_requests()</span></code></a> ；要给数据更多的结构，你可以使用 <a class="reference internal" href="items.html#scrapy.item.Item" title="scrapy.item.Item"><code class="xref py py-class docutils literal notranslate"><span class="pre">Item</span></code></a> 对象：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">scrapy</span>
<span class="kn">from</span> <span class="nn">myproject.items</span> <span class="kn">import</span> <span class="n">MyItem</span>

<span class="k">class</span> <span class="nc">MySpider</span><span class="p">(</span><span class="n">scrapy</span><span class="o">.</span><span class="n">Spider</span><span class="p">):</span>
    <span class="n">name</span> <span class="o">=</span> <span class="s1">&#39;example.com&#39;</span>
    <span class="n">allowed_domains</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;example.com&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="nf">start_requests</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">yield</span> <span class="n">scrapy</span><span class="o">.</span><span class="n">Request</span><span class="p">(</span><span class="s1">&#39;http://www.example.com/1.html&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">parse</span><span class="p">)</span>
        <span class="k">yield</span> <span class="n">scrapy</span><span class="o">.</span><span class="n">Request</span><span class="p">(</span><span class="s1">&#39;http://www.example.com/2.html&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">parse</span><span class="p">)</span>
        <span class="k">yield</span> <span class="n">scrapy</span><span class="o">.</span><span class="n">Request</span><span class="p">(</span><span class="s1">&#39;http://www.example.com/3.html&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">parse</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">response</span><span class="p">):</span>
        <span class="k">for</span> <span class="n">h3</span> <span class="ow">in</span> <span class="n">response</span><span class="o">.</span><span class="n">xpath</span><span class="p">(</span><span class="s1">&#39;//h3&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">getall</span><span class="p">():</span>
            <span class="k">yield</span> <span class="n">MyItem</span><span class="p">(</span><span class="n">title</span><span class="o">=</span><span class="n">h3</span><span class="p">)</span>

        <span class="k">for</span> <span class="n">href</span> <span class="ow">in</span> <span class="n">response</span><span class="o">.</span><span class="n">xpath</span><span class="p">(</span><span class="s1">&#39;//a/@href&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">getall</span><span class="p">():</span>
            <span class="k">yield</span> <span class="n">scrapy</span><span class="o">.</span><span class="n">Request</span><span class="p">(</span><span class="n">response</span><span class="o">.</span><span class="n">urljoin</span><span class="p">(</span><span class="n">href</span><span class="p">),</span> <span class="bp">self</span><span class="o">.</span><span class="n">parse</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="section" id="spider-arguments">
<span id="spiderargs"></span><h2>蜘蛛参数<a class="headerlink" href="#spider-arguments" title="永久链接至标题">¶</a></h2>
<p>蜘蛛可以接受改变其行为的参数。spider参数的一些常见用途是定义起始URL或将爬行限制在站点的某些部分，但它们可以用于配置spider的任何功能。</p>
<p>蜘蛛参数通过 <a class="reference internal" href="commands.html#std-command-crawl"><code class="xref std std-command docutils literal notranslate"><span class="pre">crawl</span></code></a> 命令使用 <code class="docutils literal notranslate"><span class="pre">-a</span></code> 选项传递。例如：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">scrapy</span> <span class="n">crawl</span> <span class="n">myspider</span> <span class="o">-</span><span class="n">a</span> <span class="n">category</span><span class="o">=</span><span class="n">electronics</span>
</pre></div>
</div>
<p>蜘蛛在它们的 <cite>__init__</cite> 方法中接收这些参数，例如：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">scrapy</span>

<span class="k">class</span> <span class="nc">MySpider</span><span class="p">(</span><span class="n">scrapy</span><span class="o">.</span><span class="n">Spider</span><span class="p">):</span>
    <span class="n">name</span> <span class="o">=</span> <span class="s1">&#39;myspider&#39;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">category</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">MySpider</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">start_urls</span> <span class="o">=</span> <span class="p">[</span><span class="sa">f</span><span class="s1">&#39;http://www.example.com/categories/</span><span class="si">{</span><span class="n">category</span><span class="si">}</span><span class="s1">&#39;</span><span class="p">]</span>
        <span class="c1"># ...</span>
</pre></div>
</div>
<p>默认的 <cite>__init__</cite> 方法将获取所有蜘蛛参数，并将其作为属性复制到蜘蛛上。上面的例子也可以写成如下形式：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">scrapy</span>

<span class="k">class</span> <span class="nc">MySpider</span><span class="p">(</span><span class="n">scrapy</span><span class="o">.</span><span class="n">Spider</span><span class="p">):</span>
    <span class="n">name</span> <span class="o">=</span> <span class="s1">&#39;myspider&#39;</span>

    <span class="k">def</span> <span class="nf">start_requests</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">yield</span> <span class="n">scrapy</span><span class="o">.</span><span class="n">Request</span><span class="p">(</span><span class="sa">f</span><span class="s1">&#39;http://www.example.com/categories/</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">category</span><span class="si">}</span><span class="s1">&#39;</span><span class="p">)</span>
</pre></div>
</div>
<p>请记住，spider参数只是字符串。蜘蛛本身不会进行任何解析。如果你要设置 <code class="docutils literal notranslate"><span class="pre">start_urls</span></code> 属性来自命令行，您必须使用类似的 <a class="reference external" href="https://docs.python.org/3/library/ast.html#ast.literal_eval" title="(在 Python v3.9)"><code class="xref py py-func docutils literal notranslate"><span class="pre">ast.literal_eval()</span></code></a> 或 <a class="reference external" href="https://docs.python.org/3/library/json.html#json.loads" title="(在 Python v3.9)"><code class="xref py py-func docutils literal notranslate"><span class="pre">json.loads()</span></code></a> 然后将其设置为属性。否则，您将在 <code class="docutils literal notranslate"><span class="pre">start_urls</span></code> 字符串（一个非常常见的Python陷阱），导致每个字符被视为一个单独的URL。</p>
<p>一个有效的用例是设置 <a class="reference internal" href="downloader-middleware.html#scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware" title="scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware"><code class="xref py py-class docutils literal notranslate"><span class="pre">HttpAuthMiddleware</span></code></a> 使用的HTTP认证凭据，或 <a class="reference internal" href="downloader-middleware.html#scrapy.downloadermiddlewares.useragent.UserAgentMiddleware" title="scrapy.downloadermiddlewares.useragent.UserAgentMiddleware"><code class="xref py py-class docutils literal notranslate"><span class="pre">UserAgentMiddleware</span></code></a> 使用的用户代理：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">scrapy</span> <span class="n">crawl</span> <span class="n">myspider</span> <span class="o">-</span><span class="n">a</span> <span class="n">http_user</span><span class="o">=</span><span class="n">myuser</span> <span class="o">-</span><span class="n">a</span> <span class="n">http_pass</span><span class="o">=</span><span class="n">mypassword</span> <span class="o">-</span><span class="n">a</span> <span class="n">user_agent</span><span class="o">=</span><span class="n">mybot</span>
</pre></div>
</div>
<p>蜘蛛参数也可以通过scrapyD传递 <code class="docutils literal notranslate"><span class="pre">schedule.json</span></code> 应用程序编程接口。见 <a class="reference external" href="https://scrapyd.readthedocs.io/en/latest/">Scrapyd documentation</a> .</p>
</div>
<div class="section" id="generic-spiders">
<span id="builtin-spiders"></span><h2>通用蜘蛛<a class="headerlink" href="#generic-spiders" title="永久链接至标题">¶</a></h2>
<p>Scrapy附带了一些有用的通用蜘蛛，您可以使用它们来对蜘蛛进行子类化。他们的目标是为一些常见的抓取案例提供方便的功能，比如根据特定规则跟踪站点上的所有链接，从 <a class="reference external" href="https://www.sitemaps.org/index.html">Sitemaps</a> 或分析XML/CSV源。</p>
<p>对于以下蜘蛛中使用的示例，我们假设您有一个项目 <code class="docutils literal notranslate"><span class="pre">TestItem</span></code> 宣布为 <code class="docutils literal notranslate"><span class="pre">myproject.items</span></code> 模块：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">scrapy</span>

<span class="k">class</span> <span class="nc">TestItem</span><span class="p">(</span><span class="n">scrapy</span><span class="o">.</span><span class="n">Item</span><span class="p">):</span>
    <span class="nb">id</span> <span class="o">=</span> <span class="n">scrapy</span><span class="o">.</span><span class="n">Field</span><span class="p">()</span>
    <span class="n">name</span> <span class="o">=</span> <span class="n">scrapy</span><span class="o">.</span><span class="n">Field</span><span class="p">()</span>
    <span class="n">description</span> <span class="o">=</span> <span class="n">scrapy</span><span class="o">.</span><span class="n">Field</span><span class="p">()</span>
</pre></div>
</div>
<div class="section" id="crawlspider">
<h3>CrawlSpider<a class="headerlink" href="#crawlspider" title="永久链接至标题">¶</a></h3>
<dl class="py class">
<dt id="scrapy.spiders.CrawlSpider">
<em class="property">class </em><code class="sig-prename descclassname">scrapy.spiders.</code><code class="sig-name descname">CrawlSpider</code><a class="reference internal" href="../_modules/scrapy/spiders/crawl.html#CrawlSpider"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.CrawlSpider" title="永久链接至目标">¶</a></dt>
<dd><p>这是最常用的爬行常规网站的蜘蛛，因为它通过定义一组规则为跟踪链接提供了一种方便的机制。它可能不是最适合您的特定网站或项目的，但它对于某些情况来说已经足够通用了，因此您可以从它开始，并根据需要覆盖它以获得更多的自定义功能，或者只实现您自己的蜘蛛。</p>
<p>除了从spider继承的属性（必须指定），这个类还支持一个新的属性：</p>
<dl class="py attribute">
<dt id="scrapy.spiders.CrawlSpider.rules">
<code class="sig-name descname">rules</code><a class="headerlink" href="#scrapy.spiders.CrawlSpider.rules" title="永久链接至目标">¶</a></dt>
<dd><p>这是一个包含一个（或多个） <a class="reference internal" href="#scrapy.spiders.Rule" title="scrapy.spiders.Rule"><code class="xref py py-class docutils literal notranslate"><span class="pre">Rule</span></code></a> 对象的列表。每个 <a class="reference internal" href="#scrapy.spiders.Rule" title="scrapy.spiders.Rule"><code class="xref py py-class docutils literal notranslate"><span class="pre">Rule</span></code></a> 定义对网站进行爬行的特定行为。规则对象如下所述。如果多个规则与同一链接匹配，则按照在该属性中定义的顺序使用第一个匹配的规则。</p>
</dd></dl>

<p>这个蜘蛛还公开了一个可重写的方法：</p>
<dl class="py method">
<dt id="scrapy.spiders.CrawlSpider.parse_start_url">
<code class="sig-name descname">parse_start_url</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">response</span></em>, <em class="sig-param"><span class="o">**</span><span class="n">kwargs</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/scrapy/spiders/crawl.html#CrawlSpider.parse_start_url"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.CrawlSpider.parse_start_url" title="永久链接至目标">¶</a></dt>
<dd><p>为spider中的url生成的每个响应调用此方法 <code class="docutils literal notranslate"><span class="pre">start_urls</span></code> 属性。它允许解析初始响应，并且必须返回 <a class="reference internal" href="items.html#topics-items"><span class="std std-ref">item object</span></a> ，A <a class="reference internal" href="request-response.html#scrapy.http.Request" title="scrapy.http.Request"><code class="xref py py-class docutils literal notranslate"><span class="pre">Request</span></code></a> 对象，或包含任何对象的iterable。</p>
</dd></dl>

</dd></dl>

<div class="section" id="crawling-rules">
<h4>爬行规则<a class="headerlink" href="#crawling-rules" title="永久链接至标题">¶</a></h4>
<dl class="py class">
<dt id="scrapy.spiders.Rule">
<em class="property">class </em><code class="sig-prename descclassname">scrapy.spiders.</code><code class="sig-name descname">Rule</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">link_extractor</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">callback</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">cb_kwargs</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">follow</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">process_links</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">process_request</span><span class="o">=</span><span class="default_value">None</span></em>, <em class="sig-param"><span class="n">errback</span><span class="o">=</span><span class="default_value">None</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/scrapy/spiders/crawl.html#Rule"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.Rule" title="永久链接至目标">¶</a></dt>
<dd><p><code class="docutils literal notranslate"><span class="pre">link_extractor</span></code> 是一个 <a class="reference internal" href="link-extractors.html#topics-link-extractors"><span class="std std-ref">Link Extractor</span></a> 对象，该对象定义如何从每个已爬网页提取链接。每个生成的链接将用于生成 <a class="reference internal" href="request-response.html#scrapy.http.Request" title="scrapy.http.Request"><code class="xref py py-class docutils literal notranslate"><span class="pre">Request</span></code></a> 对象，其中包含链接的文本 <code class="docutils literal notranslate"><span class="pre">meta</span></code> 字典（在 <code class="docutils literal notranslate"><span class="pre">link_text</span></code> 键）。如果省略，将使用没有参数创建的默认链接提取器，从而导致提取所有链接。</p>
<p><code class="docutils literal notranslate"><span class="pre">callback</span></code> 对用指定的链接提取程序提取的每个链接调用的可调用或字符串（在这种情况下，将使用具有该名称的spider对象中的方法）。此回调接收 <a class="reference internal" href="request-response.html#scrapy.http.Response" title="scrapy.http.Response"><code class="xref py py-class docutils literal notranslate"><span class="pre">Response</span></code></a> 作为第一个参数，必须返回 <a class="reference internal" href="items.html#topics-items"><span class="std std-ref">item objects</span></a> 和/或 <a class="reference internal" href="request-response.html#scrapy.http.Request" title="scrapy.http.Request"><code class="xref py py-class docutils literal notranslate"><span class="pre">Request</span></code></a> 对象（或其任何子类）。如上所述，所收到的 <a class="reference internal" href="request-response.html#scrapy.http.Response" title="scrapy.http.Response"><code class="xref py py-class docutils literal notranslate"><span class="pre">Response</span></code></a> 对象将包含生成它的 <a class="reference internal" href="request-response.html#scrapy.http.Request" title="scrapy.http.Request"><code class="xref py py-class docutils literal notranslate"><span class="pre">Request</span></code></a> 的链接文本，保存在其 <code class="docutils literal notranslate"><span class="pre">meta</span></code> 字典的 <code class="docutils literal notranslate"><span class="pre">link_text</span></code> 键下。</p>
<p><code class="docutils literal notranslate"><span class="pre">cb_kwargs</span></code> 是包含要传递给回调函数的关键字参数的dict。</p>
<p><code class="docutils literal notranslate"><span class="pre">follow</span></code> 是一个布尔值，用于指定是否从使用此规则提取的每个响应中遵循链接。如果 <code class="docutils literal notranslate"><span class="pre">callback</span></code> 没有 <code class="docutils literal notranslate"><span class="pre">follow</span></code> 默认为 <code class="docutils literal notranslate"><span class="pre">True</span></code> ，否则默认为 <code class="docutils literal notranslate"><span class="pre">False</span></code> .</p>
<p><code class="docutils literal notranslate"><span class="pre">process_links</span></code> 是一个可调用的，或一个字符串（在这种情况下，将使用具有该名称的蜘蛛对象中的方法），对于使用指定的 <code class="docutils literal notranslate"><span class="pre">link_extractor</span></code> . 这主要用于过滤目的。</p>
<p><code class="docutils literal notranslate"><span class="pre">process_request</span></code> 是可调用的（或字符串，在这种情况下，将使用具有该名称的spider对象中的方法），它将为 <a class="reference internal" href="request-response.html#scrapy.http.Request" title="scrapy.http.Request"><code class="xref py py-class docutils literal notranslate"><span class="pre">Request</span></code></a> 按此规则提取。此可调用文件应将所述请求作为第一个参数，并且 <a class="reference internal" href="request-response.html#scrapy.http.Response" title="scrapy.http.Response"><code class="xref py py-class docutils literal notranslate"><span class="pre">Response</span></code></a> 从中发出请求作为第二个参数。它必须返回 <code class="docutils literal notranslate"><span class="pre">Request</span></code> 对象或 <code class="docutils literal notranslate"><span class="pre">None</span></code> （过滤掉请求）。</p>
<p><code class="docutils literal notranslate"><span class="pre">errback</span></code> 在处理规则生成的请求时引发任何异常时要调用的可调用或字符串（在这种情况下，将使用来自spider对象的具有该名称的方法）。它收到一个 <a class="reference external" href="https://twistedmatrix.com/documents/current/api/twisted.python.failure.Failure.html" title="(在 Twisted v2.0)"><code class="xref py py-class docutils literal notranslate"><span class="pre">Twisted</span> <span class="pre">Failure</span></code></a> 实例作为第一个参数。</p>
</dd></dl>

<div class="admonition warning">
<p class="admonition-title">警告</p>
<p>由于其内部实现，在编写时必须显式设置新请求的回调 <a class="reference internal" href="#scrapy.spiders.CrawlSpider" title="scrapy.spiders.CrawlSpider"><code class="xref py py-class docutils literal notranslate"><span class="pre">CrawlSpider</span></code></a> -基于蜘蛛；否则会发生意外行为。</p>
<div class="versionadded">
<p><span class="versionmodified added">2.0 新版功能: </span>新增 <em>errback</em> 参数。</p>
</div>
</div>
</div>
<div class="section" id="crawlspider-example">
<h4>爬行蜘蛛示例<a class="headerlink" href="#crawlspider-example" title="永久链接至标题">¶</a></h4>
<p>现在让我们来看一个例子，爬行蜘蛛的规则是：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">scrapy</span>
<span class="kn">from</span> <span class="nn">scrapy.spiders</span> <span class="kn">import</span> <span class="n">CrawlSpider</span><span class="p">,</span> <span class="n">Rule</span>
<span class="kn">from</span> <span class="nn">scrapy.linkextractors</span> <span class="kn">import</span> <span class="n">LinkExtractor</span>

<span class="k">class</span> <span class="nc">MySpider</span><span class="p">(</span><span class="n">CrawlSpider</span><span class="p">):</span>
    <span class="n">name</span> <span class="o">=</span> <span class="s1">&#39;example.com&#39;</span>
    <span class="n">allowed_domains</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;example.com&#39;</span><span class="p">]</span>
    <span class="n">start_urls</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;http://www.example.com&#39;</span><span class="p">]</span>

    <span class="n">rules</span> <span class="o">=</span> <span class="p">(</span>
        <span class="c1"># Extract links matching &#39;category.php&#39; (but not matching &#39;subsection.php&#39;)</span>
        <span class="c1"># and follow links from them (since no callback means follow=True by default).</span>
        <span class="n">Rule</span><span class="p">(</span><span class="n">LinkExtractor</span><span class="p">(</span><span class="n">allow</span><span class="o">=</span><span class="p">(</span><span class="s1">&#39;category\.php&#39;</span><span class="p">,</span> <span class="p">),</span> <span class="n">deny</span><span class="o">=</span><span class="p">(</span><span class="s1">&#39;subsection\.php&#39;</span><span class="p">,</span> <span class="p">))),</span>

        <span class="c1"># Extract links matching &#39;item.php&#39; and parse them with the spider&#39;s method parse_item</span>
        <span class="n">Rule</span><span class="p">(</span><span class="n">LinkExtractor</span><span class="p">(</span><span class="n">allow</span><span class="o">=</span><span class="p">(</span><span class="s1">&#39;item\.php&#39;</span><span class="p">,</span> <span class="p">)),</span> <span class="n">callback</span><span class="o">=</span><span class="s1">&#39;parse_item&#39;</span><span class="p">),</span>
    <span class="p">)</span>

    <span class="k">def</span> <span class="nf">parse_item</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">response</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">logger</span><span class="o">.</span><span class="n">info</span><span class="p">(</span><span class="s1">&#39;Hi, this is an item page! </span><span class="si">%s</span><span class="s1">&#39;</span><span class="p">,</span> <span class="n">response</span><span class="o">.</span><span class="n">url</span><span class="p">)</span>
        <span class="n">item</span> <span class="o">=</span> <span class="n">scrapy</span><span class="o">.</span><span class="n">Item</span><span class="p">()</span>
        <span class="n">item</span><span class="p">[</span><span class="s1">&#39;id&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">response</span><span class="o">.</span><span class="n">xpath</span><span class="p">(</span><span class="s1">&#39;//td[@id=&quot;item_id&quot;]/text()&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">re</span><span class="p">(</span><span class="sa">r</span><span class="s1">&#39;ID: (\d+)&#39;</span><span class="p">)</span>
        <span class="n">item</span><span class="p">[</span><span class="s1">&#39;name&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">response</span><span class="o">.</span><span class="n">xpath</span><span class="p">(</span><span class="s1">&#39;//td[@id=&quot;item_name&quot;]/text()&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">get</span><span class="p">()</span>
        <span class="n">item</span><span class="p">[</span><span class="s1">&#39;description&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">response</span><span class="o">.</span><span class="n">xpath</span><span class="p">(</span><span class="s1">&#39;//td[@id=&quot;item_description&quot;]/text()&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">get</span><span class="p">()</span>
        <span class="n">item</span><span class="p">[</span><span class="s1">&#39;link_text&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">response</span><span class="o">.</span><span class="n">meta</span><span class="p">[</span><span class="s1">&#39;link_text&#39;</span><span class="p">]</span>
        <span class="n">url</span> <span class="o">=</span> <span class="n">response</span><span class="o">.</span><span class="n">xpath</span><span class="p">(</span><span class="s1">&#39;//td[@id=&quot;additional_data&quot;]/@href&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">get</span><span class="p">()</span>
        <span class="k">return</span> <span class="n">response</span><span class="o">.</span><span class="n">follow</span><span class="p">(</span><span class="n">url</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">parse_additional_page</span><span class="p">,</span> <span class="n">cb_kwargs</span><span class="o">=</span><span class="nb">dict</span><span class="p">(</span><span class="n">item</span><span class="o">=</span><span class="n">item</span><span class="p">))</span>

    <span class="k">def</span> <span class="nf">parse_additional_page</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">response</span><span class="p">,</span> <span class="n">item</span><span class="p">):</span>
        <span class="n">item</span><span class="p">[</span><span class="s1">&#39;additional_data&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">response</span><span class="o">.</span><span class="n">xpath</span><span class="p">(</span><span class="s1">&#39;//p[@id=&quot;additional_data&quot;]/text()&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">get</span><span class="p">()</span>
        <span class="k">return</span> <span class="n">item</span>
</pre></div>
</div>
<p>这个蜘蛛会开始对example.com的主页进行爬行，收集类别链接和项目链接，并用 <code class="docutils literal notranslate"><span class="pre">parse_item</span></code> 方法。对于每个项目响应，将使用xpath从HTML中提取一些数据，并且 <a class="reference internal" href="items.html#scrapy.item.Item" title="scrapy.item.Item"><code class="xref py py-class docutils literal notranslate"><span class="pre">Item</span></code></a> 会装满它的。</p>
</div>
</div>
<div class="section" id="xmlfeedspider">
<h3>XMLFeedSpider<a class="headerlink" href="#xmlfeedspider" title="永久链接至标题">¶</a></h3>
<dl class="py class">
<dt id="scrapy.spiders.XMLFeedSpider">
<em class="property">class </em><code class="sig-prename descclassname">scrapy.spiders.</code><code class="sig-name descname">XMLFeedSpider</code><a class="reference internal" href="../_modules/scrapy/spiders/feed.html#XMLFeedSpider"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.XMLFeedSpider" title="永久链接至目标">¶</a></dt>
<dd><p>XMLFeedSpider是为解析XML提要而设计的，它通过使用特定的节点名对这些提要进行迭代。迭代器可以从以下选项中选择： <code class="docutils literal notranslate"><span class="pre">iternodes</span></code> ， <code class="docutils literal notranslate"><span class="pre">xml</span></code> 和 <code class="docutils literal notranslate"><span class="pre">html</span></code> .  建议使用 <code class="docutils literal notranslate"><span class="pre">iternodes</span></code> 由于性能原因，迭代器 <code class="docutils literal notranslate"><span class="pre">xml</span></code> 和 <code class="docutils literal notranslate"><span class="pre">html</span></code> 迭代器一次生成整个DOM以便解析它。然而，使用 <code class="docutils literal notranslate"><span class="pre">html</span></code> 因为迭代器在分析带有错误标记的XML时可能很有用。</p>
<p>要设置迭代器和标记名，必须定义以下类属性：</p>
<dl class="py attribute">
<dt id="scrapy.spiders.XMLFeedSpider.iterator">
<code class="sig-name descname">iterator</code><a class="headerlink" href="#scrapy.spiders.XMLFeedSpider.iterator" title="永久链接至目标">¶</a></dt>
<dd><p>定义要使用的迭代器的字符串。它可以是：</p>
<blockquote>
<div><ul class="simple">
<li><p><code class="docutils literal notranslate"><span class="pre">'iternodes'</span></code> -基于正则表达式的快速迭代器</p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">'html'</span></code> -使用的迭代器 <a class="reference internal" href="selectors.html#scrapy.selector.Selector" title="scrapy.selector.Selector"><code class="xref py py-class docutils literal notranslate"><span class="pre">Selector</span></code></a> . 请记住，这使用了DOM解析，必须将所有的DOM加载到内存中，这对于大型提要来说可能是一个问题。</p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">'xml'</span></code> -使用的迭代器 <a class="reference internal" href="selectors.html#scrapy.selector.Selector" title="scrapy.selector.Selector"><code class="xref py py-class docutils literal notranslate"><span class="pre">Selector</span></code></a> . 请记住，这使用了DOM解析，必须将所有的DOM加载到内存中，这对于大型提要来说可能是一个问题。</p></li>
</ul>
</div></blockquote>
<p>默认为： <code class="docutils literal notranslate"><span class="pre">'iternodes'</span></code> .</p>
</dd></dl>

<dl class="py attribute">
<dt id="scrapy.spiders.XMLFeedSpider.itertag">
<code class="sig-name descname">itertag</code><a class="headerlink" href="#scrapy.spiders.XMLFeedSpider.itertag" title="永久链接至目标">¶</a></dt>
<dd><p>具有要迭代的节点（或元素）名称的字符串。例如：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">itertag</span> <span class="o">=</span> <span class="s1">&#39;product&#39;</span>
</pre></div>
</div>
</dd></dl>

<dl class="py attribute">
<dt id="scrapy.spiders.XMLFeedSpider.namespaces">
<code class="sig-name descname">namespaces</code><a class="headerlink" href="#scrapy.spiders.XMLFeedSpider.namespaces" title="永久链接至目标">¶</a></dt>
<dd><p>列表 <code class="docutils literal notranslate"><span class="pre">(prefix,</span> <span class="pre">uri)</span></code> 定义该文档中可用的命名空间的元组，这些命名空间将使用此蜘蛛进行处理。这个 <code class="docutils literal notranslate"><span class="pre">prefix</span></code> 和 <code class="docutils literal notranslate"><span class="pre">uri</span></code> 将用于使用 <a class="reference internal" href="selectors.html#scrapy.selector.Selector.register_namespace" title="scrapy.selector.Selector.register_namespace"><code class="xref py py-meth docutils literal notranslate"><span class="pre">register_namespace()</span></code></a> 方法。</p>
<p>然后，可以在 <a class="reference internal" href="#scrapy.spiders.XMLFeedSpider.itertag" title="scrapy.spiders.XMLFeedSpider.itertag"><code class="xref py py-attr docutils literal notranslate"><span class="pre">itertag</span></code></a> 属性。</p>
<p>例如：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">class</span> <span class="nc">YourSpider</span><span class="p">(</span><span class="n">XMLFeedSpider</span><span class="p">):</span>

    <span class="n">namespaces</span> <span class="o">=</span> <span class="p">[(</span><span class="s1">&#39;n&#39;</span><span class="p">,</span> <span class="s1">&#39;http://www.sitemaps.org/schemas/sitemap/0.9&#39;</span><span class="p">)]</span>
    <span class="n">itertag</span> <span class="o">=</span> <span class="s1">&#39;n:url&#39;</span>
    <span class="c1"># ...</span>
</pre></div>
</div>
</dd></dl>

<p>除了这些新属性之外，这个蜘蛛还具有以下可重写的方法：</p>
<dl class="py method">
<dt id="scrapy.spiders.XMLFeedSpider.adapt_response">
<code class="sig-name descname">adapt_response</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">response</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/scrapy/spiders/feed.html#XMLFeedSpider.adapt_response"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.XMLFeedSpider.adapt_response" title="永久链接至目标">¶</a></dt>
<dd><p>一种方法，当响应从蜘蛛中间件到达时，在蜘蛛开始解析它之前，立即接收响应。它可以用于在解析响应体之前对其进行修改。此方法接收响应并返回响应（可以是相同的或另一个响应）。</p>
</dd></dl>

<dl class="py method">
<dt id="scrapy.spiders.XMLFeedSpider.parse_node">
<code class="sig-name descname">parse_node</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">response</span></em>, <em class="sig-param"><span class="n">selector</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/scrapy/spiders/feed.html#XMLFeedSpider.parse_node"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.XMLFeedSpider.parse_node" title="永久链接至目标">¶</a></dt>
<dd><p>对每个与提供的标记名 (<code class="docutils literal notranslate"><span class="pre">itertag</span></code>) 匹配的节点都会调用此方法，并接收该节点对应的响应和 <a class="reference internal" href="selectors.html#scrapy.selector.Selector" title="scrapy.selector.Selector"><code class="xref py py-class docutils literal notranslate"><span class="pre">Selector</span></code></a> 。必须重写此方法，否则你的蜘蛛将无法工作。此方法必须返回 <a class="reference internal" href="items.html#topics-items"><span class="std std-ref">item object</span></a> 、<a class="reference internal" href="request-response.html#scrapy.http.Request" title="scrapy.http.Request"><code class="xref py py-class docutils literal notranslate"><span class="pre">Request</span></code></a> 对象，或包含上述任一对象的可迭代对象。</p>
</dd></dl>

<dl class="py method">
<dt id="scrapy.spiders.XMLFeedSpider.process_results">
<code class="sig-name descname">process_results</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">response</span></em>, <em class="sig-param"><span class="n">results</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/scrapy/spiders/feed.html#XMLFeedSpider.process_results"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.XMLFeedSpider.process_results" title="永久链接至目标">¶</a></dt>
<dd><p>这个方法是为spider返回的每个结果（项或请求）调用的，它用于在将结果返回到框架核心之前执行所需的任何最后一次处理，例如设置项id。它接收结果列表和产生这些结果的响应。它必须返回结果列表（项或请求）。</p>
</dd></dl>

</dd></dl>

<div class="admonition warning">
<p class="admonition-title">警告</p>
<p>由于其内部实现，在编写基于 <a class="reference internal" href="#scrapy.spiders.XMLFeedSpider" title="scrapy.spiders.XMLFeedSpider"><code class="xref py py-class docutils literal notranslate"><span class="pre">XMLFeedSpider</span></code></a> 的蜘蛛时，必须显式设置新请求的回调；否则会发生意外行为。</p>
</div>
<div class="section" id="xmlfeedspider-example">
<h4>XmlFeedSpider示例<a class="headerlink" href="#xmlfeedspider-example" title="永久链接至标题">¶</a></h4>
<p>这些蜘蛛很容易使用，让我们来看一个例子：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">scrapy.spiders</span> <span class="kn">import</span> <span class="n">XMLFeedSpider</span>
<span class="kn">from</span> <span class="nn">myproject.items</span> <span class="kn">import</span> <span class="n">TestItem</span>

<span class="k">class</span> <span class="nc">MySpider</span><span class="p">(</span><span class="n">XMLFeedSpider</span><span class="p">):</span>
    <span class="n">name</span> <span class="o">=</span> <span class="s1">&#39;example.com&#39;</span>
    <span class="n">allowed_domains</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;example.com&#39;</span><span class="p">]</span>
    <span class="n">start_urls</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;http://www.example.com/feed.xml&#39;</span><span class="p">]</span>
    <span class="n">iterator</span> <span class="o">=</span> <span class="s1">&#39;iternodes&#39;</span>  <span class="c1"># This is actually unnecessary, since it&#39;s the default value</span>
    <span class="n">itertag</span> <span class="o">=</span> <span class="s1">&#39;item&#39;</span>

    <span class="k">def</span> <span class="nf">parse_node</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">response</span><span class="p">,</span> <span class="n">node</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">logger</span><span class="o">.</span><span class="n">info</span><span class="p">(</span><span class="s1">&#39;Hi, this is a &lt;</span><span class="si">%s</span><span class="s1">&gt; node!: </span><span class="si">%s</span><span class="s1">&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">itertag</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">node</span><span class="o">.</span><span class="n">getall</span><span class="p">()))</span>

        <span class="n">item</span> <span class="o">=</span> <span class="n">TestItem</span><span class="p">()</span>
        <span class="n">item</span><span class="p">[</span><span class="s1">&#39;id&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">node</span><span class="o">.</span><span class="n">xpath</span><span class="p">(</span><span class="s1">&#39;@id&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">get</span><span class="p">()</span>
        <span class="n">item</span><span class="p">[</span><span class="s1">&#39;name&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">node</span><span class="o">.</span><span class="n">xpath</span><span class="p">(</span><span class="s1">&#39;name&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">get</span><span class="p">()</span>
        <span class="n">item</span><span class="p">[</span><span class="s1">&#39;description&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">node</span><span class="o">.</span><span class="n">xpath</span><span class="p">(</span><span class="s1">&#39;description&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">get</span><span class="p">()</span>
        <span class="k">return</span> <span class="n">item</span>
</pre></div>
</div>
<p>基本上，我们所做的就是创建一个蜘蛛，从给定的 <code class="docutils literal notranslate"><span class="pre">start_urls</span></code> 下载一个订阅源，然后遍历其中的每个 <code class="docutils literal notranslate"><span class="pre">item</span></code> 标签，将其打印出来，并将一些随机数据存储在 <a class="reference internal" href="items.html#scrapy.item.Item" title="scrapy.item.Item"><code class="xref py py-class docutils literal notranslate"><span class="pre">Item</span></code></a> 中。</p>
</div>
</div>
<div class="section" id="csvfeedspider">
<h3>CSVFeedSpider<a class="headerlink" href="#csvfeedspider" title="永久链接至标题">¶</a></h3>
<dl class="py class">
<dt id="scrapy.spiders.CSVFeedSpider">
<em class="property">class </em><code class="sig-prename descclassname">scrapy.spiders.</code><code class="sig-name descname">CSVFeedSpider</code><a class="reference internal" href="../_modules/scrapy/spiders/feed.html#CSVFeedSpider"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.CSVFeedSpider" title="永久链接至目标">¶</a></dt>
<dd><p>这个 spider 与 XMLFeedSpider 非常相似，只是它迭代的是行，而不是节点。在每次迭代中被调用的方法是 <a class="reference internal" href="#scrapy.spiders.CSVFeedSpider.parse_row" title="scrapy.spiders.CSVFeedSpider.parse_row"><code class="xref py py-meth docutils literal notranslate"><span class="pre">parse_row()</span></code></a> 。</p>
<dl class="py attribute">
<dt id="scrapy.spiders.CSVFeedSpider.delimiter">
<code class="sig-name descname">delimiter</code><a class="headerlink" href="#scrapy.spiders.CSVFeedSpider.delimiter" title="永久链接至目标">¶</a></dt>
<dd><p>包含 csv 文件中字段分隔符的字符串，默认为 <code class="docutils literal notranslate"><span class="pre">','</span></code> （逗号）。</p>
</dd></dl>

<dl class="py attribute">
<dt id="scrapy.spiders.CSVFeedSpider.quotechar">
<code class="sig-name descname">quotechar</code><a class="headerlink" href="#scrapy.spiders.CSVFeedSpider.quotechar" title="永久链接至目标">¶</a></dt>
<dd><p>包含 csv 文件中每个字段的包裹（引用）字符的字符串，默认为 <code class="docutils literal notranslate"><span class="pre">'&quot;'</span></code> （引号）。</p>
</dd></dl>

<dl class="py attribute">
<dt id="scrapy.spiders.CSVFeedSpider.headers">
<code class="sig-name descname">headers</code><a class="headerlink" href="#scrapy.spiders.CSVFeedSpider.headers" title="永久链接至目标">¶</a></dt>
<dd><p>csv文件中的列名列表。</p>
</dd></dl>

<dl class="py method">
<dt id="scrapy.spiders.CSVFeedSpider.parse_row">
<code class="sig-name descname">parse_row</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">response</span></em>, <em class="sig-param"><span class="n">row</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/scrapy/spiders/feed.html#CSVFeedSpider.parse_row"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.CSVFeedSpider.parse_row" title="永久链接至目标">¶</a></dt>
<dd><p>接收一个响应和一个 dict（代表每一行），其中为 csv 文件每个提供的（或检测到的）表头都有一个键。这个蜘蛛还提供了重写 <code class="docutils literal notranslate"><span class="pre">adapt_response</span></code> 和 <code class="docutils literal notranslate"><span class="pre">process_results</span></code> 方法的机会，用于预处理和后处理。</p>
</dd></dl>

</dd></dl>

<div class="section" id="csvfeedspider-example">
<h4>CSVFeedspider示例<a class="headerlink" href="#csvfeedspider-example" title="永久链接至标题">¶</a></h4>
<p>我们来看一个与前一个类似的例子，但是使用 <a class="reference internal" href="#scrapy.spiders.CSVFeedSpider" title="scrapy.spiders.CSVFeedSpider"><code class="xref py py-class docutils literal notranslate"><span class="pre">CSVFeedSpider</span></code></a> ：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">scrapy.spiders</span> <span class="kn">import</span> <span class="n">CSVFeedSpider</span>
<span class="kn">from</span> <span class="nn">myproject.items</span> <span class="kn">import</span> <span class="n">TestItem</span>

<span class="k">class</span> <span class="nc">MySpider</span><span class="p">(</span><span class="n">CSVFeedSpider</span><span class="p">):</span>
    <span class="n">name</span> <span class="o">=</span> <span class="s1">&#39;example.com&#39;</span>
    <span class="n">allowed_domains</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;example.com&#39;</span><span class="p">]</span>
    <span class="n">start_urls</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;http://www.example.com/feed.csv&#39;</span><span class="p">]</span>
    <span class="n">delimiter</span> <span class="o">=</span> <span class="s1">&#39;;&#39;</span>
    <span class="n">quotechar</span> <span class="o">=</span> <span class="s2">&quot;&#39;&quot;</span>
    <span class="n">headers</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;id&#39;</span><span class="p">,</span> <span class="s1">&#39;name&#39;</span><span class="p">,</span> <span class="s1">&#39;description&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="nf">parse_row</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">response</span><span class="p">,</span> <span class="n">row</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">logger</span><span class="o">.</span><span class="n">info</span><span class="p">(</span><span class="s1">&#39;Hi, this is a row!: </span><span class="si">%r</span><span class="s1">&#39;</span><span class="p">,</span> <span class="n">row</span><span class="p">)</span>

        <span class="n">item</span> <span class="o">=</span> <span class="n">TestItem</span><span class="p">()</span>
        <span class="n">item</span><span class="p">[</span><span class="s1">&#39;id&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">row</span><span class="p">[</span><span class="s1">&#39;id&#39;</span><span class="p">]</span>
        <span class="n">item</span><span class="p">[</span><span class="s1">&#39;name&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">row</span><span class="p">[</span><span class="s1">&#39;name&#39;</span><span class="p">]</span>
        <span class="n">item</span><span class="p">[</span><span class="s1">&#39;description&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">row</span><span class="p">[</span><span class="s1">&#39;description&#39;</span><span class="p">]</span>
        <span class="k">return</span> <span class="n">item</span>
</pre></div>
</div>
</div>
</div>
<div class="section" id="sitemapspider">
<h3>SitemapSpider<a class="headerlink" href="#sitemapspider" title="永久链接至标题">¶</a></h3>
<dl class="py class">
<dt id="scrapy.spiders.SitemapSpider">
<em class="property">class </em><code class="sig-prename descclassname">scrapy.spiders.</code><code class="sig-name descname">SitemapSpider</code><a class="reference internal" href="../_modules/scrapy/spiders/sitemap.html#SitemapSpider"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.SitemapSpider" title="永久链接至目标">¶</a></dt>
<dd><p>SitemapSpider 允许您通过 <a class="reference external" href="https://www.sitemaps.org/index.html">Sitemaps</a> 发现 URL 来爬取站点。</p>
<p>它支持嵌套的站点地图，并支持从 <a class="reference external" href="https://www.robotstxt.org/">robots.txt</a> 中发现站点地图 URL。</p>
<dl class="py attribute">
<dt id="scrapy.spiders.SitemapSpider.sitemap_urls">
<code class="sig-name descname">sitemap_urls</code><a class="headerlink" href="#scrapy.spiders.SitemapSpider.sitemap_urls" title="永久链接至目标">¶</a></dt>
<dd><p>指向要爬网其URL的网站地图的URL列表。</p>
<p>您也可以指向一个 <a class="reference external" href="https://www.robotstxt.org/">robots.txt</a> ，它将被解析以从中提取站点地图 URL。</p>
</dd></dl>

<dl class="py attribute">
<dt id="scrapy.spiders.SitemapSpider.sitemap_rules">
<code class="sig-name descname">sitemap_rules</code><a class="headerlink" href="#scrapy.spiders.SitemapSpider.sitemap_rules" title="永久链接至目标">¶</a></dt>
<dd><p>元组列表 <code class="docutils literal notranslate"><span class="pre">(regex,</span> <span class="pre">callback)</span></code> ，其中：</p>
<ul class="simple">
<li><p><code class="docutils literal notranslate"><span class="pre">regex</span></code> 是一个正则表达式，用于匹配从站点地图中提取的URL。 <code class="docutils literal notranslate"><span class="pre">regex</span></code> 可以是str或已编译的regex对象。</p></li>
<li><p>回调是用于处理与正则表达式匹配的URL的回调。 <code class="docutils literal notranslate"><span class="pre">callback</span></code> 可以是字符串（指示spider方法的名称）或可调用的。</p></li>
</ul>
<p>例如：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">sitemap_rules</span> <span class="o">=</span> <span class="p">[(</span><span class="s1">&#39;/product/&#39;</span><span class="p">,</span> <span class="s1">&#39;parse_product&#39;</span><span class="p">)]</span>
</pre></div>
</div>
<p>规则按顺序应用，只使用第一个匹配的规则。</p>
<p>如果省略此属性，则在站点地图中找到的所有URL都将使用 <code class="docutils literal notranslate"><span class="pre">parse</span></code> 回调。</p>
</dd></dl>

<dl class="py attribute">
<dt id="scrapy.spiders.SitemapSpider.sitemap_follow">
<code class="sig-name descname">sitemap_follow</code><a class="headerlink" href="#scrapy.spiders.SitemapSpider.sitemap_follow" title="永久链接至目标">¶</a></dt>
<dd><p>应遵循的站点地图正则表达式列表。这只适用于使用 <a class="reference external" href="https://www.sitemaps.org/protocol.html#index">Sitemap index files</a> 指向其他站点地图文件的站点地图。</p>
<p>默认情况下，将遵循所有站点地图。</p>
</dd></dl>

<dl class="py attribute">
<dt id="scrapy.spiders.SitemapSpider.sitemap_alternate_links">
<code class="sig-name descname">sitemap_alternate_links</code><a class="headerlink" href="#scrapy.spiders.SitemapSpider.sitemap_alternate_links" title="永久链接至目标">¶</a></dt>
<dd><p>指定是否应跟踪同一个 <code class="docutils literal notranslate"><span class="pre">url</span></code> 块中传递的备用链接。这些链接是同一网站另一种语言版本的链接，在同一个 <code class="docutils literal notranslate"><span class="pre">url</span></code> 块内传递。</p>
<p>例如：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="o">&lt;</span><span class="n">url</span><span class="o">&gt;</span>
    <span class="o">&lt;</span><span class="n">loc</span><span class="o">&gt;</span><span class="n">http</span><span class="p">:</span><span class="o">//</span><span class="n">example</span><span class="o">.</span><span class="n">com</span><span class="o">/&lt;/</span><span class="n">loc</span><span class="o">&gt;</span>
    <span class="o">&lt;</span><span class="n">xhtml</span><span class="p">:</span><span class="n">link</span> <span class="n">rel</span><span class="o">=</span><span class="s2">&quot;alternate&quot;</span> <span class="n">hreflang</span><span class="o">=</span><span class="s2">&quot;de&quot;</span> <span class="n">href</span><span class="o">=</span><span class="s2">&quot;http://example.com/de&quot;</span><span class="o">/&gt;</span>
<span class="o">&lt;/</span><span class="n">url</span><span class="o">&gt;</span>
</pre></div>
</div>
<p>启用 <code class="docutils literal notranslate"><span class="pre">sitemap_alternate_links</span></code> 时，这两个 URL 都会被检索；禁用 <code class="docutils literal notranslate"><span class="pre">sitemap_alternate_links</span></code> 时，只会检索 <code class="docutils literal notranslate"><span class="pre">http://example.com/</span></code> 。</p>
<p><code class="docutils literal notranslate"><span class="pre">sitemap_alternate_links</span></code> 默认是禁用的。</p>
</dd></dl>

<dl class="py method">
<dt id="scrapy.spiders.SitemapSpider.sitemap_filter">
<code class="sig-name descname">sitemap_filter</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">entries</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/scrapy/spiders/sitemap.html#SitemapSpider.sitemap_filter"><span class="viewcode-link">[源代码]</span></a><a class="headerlink" href="#scrapy.spiders.SitemapSpider.sitemap_filter" title="永久链接至目标">¶</a></dt>
<dd><p>这是一个过滤器函数，可以重写该函数以根据其属性选择站点地图条目。</p>
<p>例如：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="o">&lt;</span><span class="n">url</span><span class="o">&gt;</span>
    <span class="o">&lt;</span><span class="n">loc</span><span class="o">&gt;</span><span class="n">http</span><span class="p">:</span><span class="o">//</span><span class="n">example</span><span class="o">.</span><span class="n">com</span><span class="o">/&lt;/</span><span class="n">loc</span><span class="o">&gt;</span>
    <span class="o">&lt;</span><span class="n">lastmod</span><span class="o">&gt;</span><span class="mi">2005</span><span class="o">-</span><span class="mi">01</span><span class="o">-</span><span class="mi">01</span><span class="o">&lt;/</span><span class="n">lastmod</span><span class="o">&gt;</span>
<span class="o">&lt;/</span><span class="n">url</span><span class="o">&gt;</span>
</pre></div>
</div>
<p>我们可以定义一个 <code class="docutils literal notranslate"><span class="pre">sitemap_filter</span></code> 函数，按日期筛选 <code class="docutils literal notranslate"><span class="pre">entries</span></code> ：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">datetime</span> <span class="kn">import</span> <span class="n">datetime</span>
<span class="kn">from</span> <span class="nn">scrapy.spiders</span> <span class="kn">import</span> <span class="n">SitemapSpider</span>

<span class="k">class</span> <span class="nc">FilteredSitemapSpider</span><span class="p">(</span><span class="n">SitemapSpider</span><span class="p">):</span>
    <span class="n">name</span> <span class="o">=</span> <span class="s1">&#39;filtered_sitemap_spider&#39;</span>
    <span class="n">allowed_domains</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;example.com&#39;</span><span class="p">]</span>
    <span class="n">sitemap_urls</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;http://example.com/sitemap.xml&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="nf">sitemap_filter</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">entries</span><span class="p">):</span>
        <span class="k">for</span> <span class="n">entry</span> <span class="ow">in</span> <span class="n">entries</span><span class="p">:</span>
            <span class="n">date_time</span> <span class="o">=</span> <span class="n">datetime</span><span class="o">.</span><span class="n">strptime</span><span class="p">(</span><span class="n">entry</span><span class="p">[</span><span class="s1">&#39;lastmod&#39;</span><span class="p">],</span> <span class="s1">&#39;%Y-%m-</span><span class="si">%d</span><span class="s1">&#39;</span><span class="p">)</span>
            <span class="k">if</span> <span class="n">date_time</span><span class="o">.</span><span class="n">year</span> <span class="o">&gt;=</span> <span class="mi">2005</span><span class="p">:</span>
                <span class="k">yield</span> <span class="n">entry</span>
</pre></div>
</div>
<p>这将只检索在 2005 年及之后修改过的 <code class="docutils literal notranslate"><span class="pre">entries</span></code> 。</p>
<p>条目是从站点地图文档中提取的dict对象。通常，键是标记名，值是其中的文本。</p>
<p>重要的是要注意：</p>
<ul class="simple">
<li><p>由于loc属性是必需的，因此不带此标记的条目将被丢弃。</p></li>
<li><p>备用链接用键存储在列表中 <code class="docutils literal notranslate"><span class="pre">alternate</span></code> （见 <code class="docutils literal notranslate"><span class="pre">sitemap_alternate_links</span></code> ）</p></li>
<li><p>命名空间会被移除，因此名为 <code class="docutils literal notranslate"><span class="pre">{namespace}tagname</span></code> 的标记只会变成 <code class="docutils literal notranslate"><span class="pre">tagname</span></code></p></li>
</ul>
<p>如果省略此方法，则将处理站点地图中找到的所有条目，同时观察其他属性及其设置。</p>
</dd></dl>

</dd></dl>

<div class="section" id="sitemapspider-examples">
<h4>SiteMapSpider示例<a class="headerlink" href="#sitemapspider-examples" title="永久链接至标题">¶</a></h4>
<p>最简单的示例：使用 <code class="docutils literal notranslate"><span class="pre">parse</span></code> 回调处理通过站点地图发现的所有 URL：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">scrapy.spiders</span> <span class="kn">import</span> <span class="n">SitemapSpider</span>

<span class="k">class</span> <span class="nc">MySpider</span><span class="p">(</span><span class="n">SitemapSpider</span><span class="p">):</span>
    <span class="n">sitemap_urls</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;http://www.example.com/sitemap.xml&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">response</span><span class="p">):</span>
        <span class="k">pass</span> <span class="c1"># ... scrape item here ...</span>
</pre></div>
</div>
<p>使用特定回调处理某些 URL，使用其他回调处理其余 URL：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">scrapy.spiders</span> <span class="kn">import</span> <span class="n">SitemapSpider</span>

<span class="k">class</span> <span class="nc">MySpider</span><span class="p">(</span><span class="n">SitemapSpider</span><span class="p">):</span>
    <span class="n">sitemap_urls</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;http://www.example.com/sitemap.xml&#39;</span><span class="p">]</span>
    <span class="n">sitemap_rules</span> <span class="o">=</span> <span class="p">[</span>
        <span class="p">(</span><span class="s1">&#39;/product/&#39;</span><span class="p">,</span> <span class="s1">&#39;parse_product&#39;</span><span class="p">),</span>
        <span class="p">(</span><span class="s1">&#39;/category/&#39;</span><span class="p">,</span> <span class="s1">&#39;parse_category&#39;</span><span class="p">),</span>
    <span class="p">]</span>

    <span class="k">def</span> <span class="nf">parse_product</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">response</span><span class="p">):</span>
        <span class="k">pass</span> <span class="c1"># ... scrape product ...</span>

    <span class="k">def</span> <span class="nf">parse_category</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">response</span><span class="p">):</span>
        <span class="k">pass</span> <span class="c1"># ... scrape category ...</span>
</pre></div>
</div>
<p>跟踪 <a class="reference external" href="https://www.robotstxt.org/">robots.txt</a> 文件中定义的站点地图，且仅跟踪 URL 包含 <code class="docutils literal notranslate"><span class="pre">/sitemap_shop</span></code> 的站点地图：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">scrapy.spiders</span> <span class="kn">import</span> <span class="n">SitemapSpider</span>

<span class="k">class</span> <span class="nc">MySpider</span><span class="p">(</span><span class="n">SitemapSpider</span><span class="p">):</span>
    <span class="n">sitemap_urls</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;http://www.example.com/robots.txt&#39;</span><span class="p">]</span>
    <span class="n">sitemap_rules</span> <span class="o">=</span> <span class="p">[</span>
        <span class="p">(</span><span class="s1">&#39;/shop/&#39;</span><span class="p">,</span> <span class="s1">&#39;parse_shop&#39;</span><span class="p">),</span>
    <span class="p">]</span>
    <span class="n">sitemap_follow</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;/sitemap_shops&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="nf">parse_shop</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">response</span><span class="p">):</span>
        <span class="k">pass</span> <span class="c1"># ... scrape shop here ...</span>
</pre></div>
</div>
<p>将 SitemapSpider 与其他 URL 源合并：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">scrapy.spiders</span> <span class="kn">import</span> <span class="n">SitemapSpider</span>

<span class="k">class</span> <span class="nc">MySpider</span><span class="p">(</span><span class="n">SitemapSpider</span><span class="p">):</span>
    <span class="n">sitemap_urls</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;http://www.example.com/robots.txt&#39;</span><span class="p">]</span>
    <span class="n">sitemap_rules</span> <span class="o">=</span> <span class="p">[</span>
        <span class="p">(</span><span class="s1">&#39;/shop/&#39;</span><span class="p">,</span> <span class="s1">&#39;parse_shop&#39;</span><span class="p">),</span>
    <span class="p">]</span>

    <span class="n">other_urls</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;http://www.example.com/about&#39;</span><span class="p">]</span>

    <span class="k">def</span> <span class="nf">start_requests</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="n">requests</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="nb">super</span><span class="p">(</span><span class="n">MySpider</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="n">start_requests</span><span class="p">())</span>
        <span class="n">requests</span> <span class="o">+=</span> <span class="p">[</span><span class="n">scrapy</span><span class="o">.</span><span class="n">Request</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">parse_other</span><span class="p">)</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">other_urls</span><span class="p">]</span>
        <span class="k">return</span> <span class="n">requests</span>

    <span class="k">def</span> <span class="nf">parse_shop</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">response</span><span class="p">):</span>
        <span class="k">pass</span> <span class="c1"># ... scrape shop here ...</span>

    <span class="k">def</span> <span class="nf">parse_other</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">response</span><span class="p">):</span>
        <span class="k">pass</span> <span class="c1"># ... scrape other here ...</span>
</pre></div>
</div>
</div>
</div>
</div>
</div>


           </div>
           
          </div>
          <footer>
  
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="selectors.html" class="btn btn-neutral float-right" title="选择器" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="commands.html" class="btn btn-neutral float-left" title="命令行工具" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> Previous</a>
      
    </div>
  

  <hr/>

  <div role="contentinfo">
    <p>
        
        &copy; 版权所有 2008–2020, Scrapy developers
      <span class="lastupdated">
        最后更新于 10月 18, 2020.
      </span>

    </p>
  </div>
    
    
    
    Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a
    
    <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a>
    
    provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>

        </div>
      </div>

    </section>

  </div>
  

  <script type="text/javascript">
      // Enable the RTD theme's sticky navigation once the DOM is ready.
      jQuery(document).ready(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
  
 
<script type="text/javascript">
// Segment analytics.js bootstrap snippet (vendor-provided, minified; do not
// hand-edit the minified line). It installs a queueing stub on
// window.analytics, then asynchronously loads the real analytics.js library
// from cdn.segment.com using the write key passed to analytics.load().
!function(){var analytics=window.analytics=window.analytics||[];if(!analytics.initialize)if(analytics.invoked)window.console&&console.error&&console.error("Segment snippet included twice.");else{analytics.invoked=!0;analytics.methods=["trackSubmit","trackClick","trackLink","trackForm","pageview","identify","reset","group","track","ready","alias","page","once","off","on"];analytics.factory=function(t){return function(){var e=Array.prototype.slice.call(arguments);e.unshift(t);analytics.push(e);return analytics}};for(var t=0;t<analytics.methods.length;t++){var e=analytics.methods[t];analytics[e]=analytics.factory(e)}analytics.load=function(t){var e=document.createElement("script");e.type="text/javascript";e.async=!0;e.src=("https:"===document.location.protocol?"https://":"http://")+"cdn.segment.com/analytics.js/v1/"+t+"/analytics.min.js";var n=document.getElementsByTagName("script")[0];n.parentNode.insertBefore(e,n)};analytics.SNIPPET_VERSION="3.1.0";
analytics.load("8UDQfnf3cyFSTsM4YANnW5sXmgZVILbA");
analytics.page();
}}();

// Set up Google Analytics cross-domain auto-linking after analytics.js loads.
// NOTE(review): `ga` is not defined by any script visible on this page —
// presumably the GA integration loaded via Segment defines it; confirm,
// otherwise this callback throws a ReferenceError when it fires.
analytics.ready(function () {
    ga('require', 'linker');
    ga('linker:autoLink', ['scrapinghub.com', 'crawlera.com']);
});
</script>


</body>
</html>