

<!DOCTYPE html>
<html class="writer-html5" lang="zh" >
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>选择动态加载的内容 &mdash; Scrapy 2.3.0 文档</title>
  

  
  <link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
  <link rel="stylesheet" href="../_static/css/tooltipster.custom.css" type="text/css" />
  <link rel="stylesheet" href="../_static/css/tooltipster.bundle.min.css" type="text/css" />
  <link rel="stylesheet" href="../_static/css/tooltipster-sideTip-shadow.min.css" type="text/css" />
  <link rel="stylesheet" href="../_static/css/tooltipster-sideTip-punk.min.css" type="text/css" />
  <link rel="stylesheet" href="../_static/css/tooltipster-sideTip-noir.min.css" type="text/css" />
  <link rel="stylesheet" href="../_static/css/tooltipster-sideTip-light.min.css" type="text/css" />
  <link rel="stylesheet" href="../_static/css/tooltipster-sideTip-borderless.min.css" type="text/css" />
  <link rel="stylesheet" href="../_static/css/micromodal.css" type="text/css" />
  <link rel="stylesheet" href="../_static/css/sphinx_rtd_theme.css" type="text/css" />

  
  
  
  

  
  <!--[if lt IE 9]>
    <script src="../_static/js/html5shiv.min.js"></script>
  <![endif]-->
  
    
      <script type="text/javascript" id="documentation_options" data-url_root="../" src="../_static/documentation_options.js"></script>
        <script src="../_static/jquery.js"></script>
        <script src="../_static/underscore.js"></script>
        <script src="../_static/doctools.js"></script>
        <script src="../_static/language_data.js"></script>
        <script src="../_static/js/hoverxref.js"></script>
        <script src="../_static/js/tooltipster.bundle.min.js"></script>
        <script src="../_static/js/micromodal.min.js"></script>
    
    <script type="text/javascript" src="../_static/js/theme.js"></script>

    
    <link rel="index" title="索引" href="../genindex.html" />
    <link rel="search" title="搜索" href="../search.html" />
    <link rel="next" title="调试内存泄漏" href="leaks.html" />
    <link rel="prev" title="使用浏览器的开发人员工具进行抓取" href="developer-tools.html" /> 
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >
          

          
            <a href="../index.html" class="icon icon-home" aria-label="Documentation Home"> Scrapy
          

          
          </a>

          
            
            
              <div class="version">
                2.3
              </div>
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <p class="caption"><span class="caption-text">第一步</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../intro/overview.html">Scrapy一目了然</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intro/install.html">安装指南</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intro/tutorial.html">Scrapy 教程</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intro/examples.html">实例</a></li>
</ul>
<p class="caption"><span class="caption-text">基本概念</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="commands.html">命令行工具</a></li>
<li class="toctree-l1"><a class="reference internal" href="spiders.html">蜘蛛</a></li>
<li class="toctree-l1"><a class="reference internal" href="selectors.html">选择器</a></li>
<li class="toctree-l1"><a class="reference internal" href="items.html">项目</a></li>
<li class="toctree-l1"><a class="reference internal" href="loaders.html">项目加载器</a></li>
<li class="toctree-l1"><a class="reference internal" href="shell.html">Scrapy shell</a></li>
<li class="toctree-l1"><a class="reference internal" href="item-pipeline.html">项目管道</a></li>
<li class="toctree-l1"><a class="reference internal" href="feed-exports.html">Feed 导出</a></li>
<li class="toctree-l1"><a class="reference internal" href="request-response.html">请求和响应</a></li>
<li class="toctree-l1"><a class="reference internal" href="link-extractors.html">链接提取器</a></li>
<li class="toctree-l1"><a class="reference internal" href="settings.html">设置</a></li>
<li class="toctree-l1"><a class="reference internal" href="exceptions.html">例外情况</a></li>
</ul>
<p class="caption"><span class="caption-text">内置服务</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="logging.html">日志记录</a></li>
<li class="toctree-l1"><a class="reference internal" href="stats.html">统计数据集合</a></li>
<li class="toctree-l1"><a class="reference internal" href="email.html">发送电子邮件</a></li>
<li class="toctree-l1"><a class="reference internal" href="telnetconsole.html">远程登录控制台</a></li>
<li class="toctree-l1"><a class="reference internal" href="webservice.html">Web服务</a></li>
</ul>
<p class="caption"><span class="caption-text">解决具体问题</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="../faq.html">常见问题</a></li>
<li class="toctree-l1"><a class="reference internal" href="debug.html">调试spiders</a></li>
<li class="toctree-l1"><a class="reference internal" href="contracts.html">蜘蛛合约</a></li>
<li class="toctree-l1"><a class="reference internal" href="practices.html">常用做法</a></li>
<li class="toctree-l1"><a class="reference internal" href="broad-crawls.html">宽爬行</a></li>
<li class="toctree-l1"><a class="reference internal" href="developer-tools.html">使用浏览器的开发人员工具进行抓取</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">选择动态加载的内容</a><ul>
<li class="toctree-l2"><a class="reference internal" href="#finding-the-data-source">查找数据源</a></li>
<li class="toctree-l2"><a class="reference internal" href="#inspecting-the-source-code-of-a-webpage">检查网页的源代码</a></li>
<li class="toctree-l2"><a class="reference internal" href="#reproducing-requests">复制请求</a></li>
<li class="toctree-l2"><a class="reference internal" href="#handling-different-response-formats">处理不同的响应格式</a></li>
<li class="toctree-l2"><a class="reference internal" href="#parsing-javascript-code">分析javascript代码</a></li>
<li class="toctree-l2"><a class="reference internal" href="#pre-rendering-javascript">预渲染JavaScript</a></li>
<li class="toctree-l2"><a class="reference internal" href="#using-a-headless-browser">使用无头浏览器</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="leaks.html">调试内存泄漏</a></li>
<li class="toctree-l1"><a class="reference internal" href="media-pipeline.html">下载和处理文件和图像</a></li>
<li class="toctree-l1"><a class="reference internal" href="deploy.html">部署蜘蛛</a></li>
<li class="toctree-l1"><a class="reference internal" href="autothrottle.html">AutoThrottle 扩展</a></li>
<li class="toctree-l1"><a class="reference internal" href="benchmarking.html">标杆管理</a></li>
<li class="toctree-l1"><a class="reference internal" href="jobs.html">作业：暂停和恢复爬行</a></li>
<li class="toctree-l1"><a class="reference internal" href="coroutines.html">协同程序</a></li>
<li class="toctree-l1"><a class="reference internal" href="asyncio.html">asyncio</a></li>
</ul>
<p class="caption"><span class="caption-text">扩展Scrapy</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="architecture.html">体系结构概述</a></li>
<li class="toctree-l1"><a class="reference internal" href="downloader-middleware.html">下载器中间件</a></li>
<li class="toctree-l1"><a class="reference internal" href="spider-middleware.html">蜘蛛中间件</a></li>
<li class="toctree-l1"><a class="reference internal" href="extensions.html">扩展</a></li>
<li class="toctree-l1"><a class="reference internal" href="api.html">核心API</a></li>
<li class="toctree-l1"><a class="reference internal" href="signals.html">信号</a></li>
<li class="toctree-l1"><a class="reference internal" href="exporters.html">条目导出器</a></li>
</ul>
<p class="caption"><span class="caption-text">其余所有</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../news.html">发行说明</a></li>
<li class="toctree-l1"><a class="reference internal" href="../contributing.html">为 Scrapy 贡献</a></li>
<li class="toctree-l1"><a class="reference internal" href="../versioning.html">版本控制和API稳定性</a></li>
</ul>

            
          
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" aria-label="top navigation">
        
          <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
          <a href="../index.html">Scrapy</a>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        
          















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    
      <li><a href="../index.html" class="icon icon-home"></a> &raquo;</li>
        
      <li>选择动态加载的内容</li>
    
    
      <li class="wy-breadcrumbs-aside">
        
            
        
      </li>
    
  </ul>

  
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="selecting-dynamically-loaded-content">
<span id="topics-dynamic-content"></span><h1>选择动态加载的内容<a class="headerlink" href="#selecting-dynamically-loaded-content" title="永久链接至标题">¶</a></h1>
<p>某些网页在Web浏览器中加载时会显示所需的数据。但是，当您使用 Scrapy 下载它们时，您无法使用 <a class="reference internal" href="selectors.html#topics-selectors"><span class="std std-ref">selectors</span></a> .</p>
<p>当这种情况发生时，建议的方法是 <a class="reference internal" href="#topics-finding-data-source"><span class="std std-ref">find the data source</span></a> 从中提取数据。</p><script async src="https://pagead2.googlesyndication.com/pagead/js/adsbygoogle.js"></script>
<ins class="adsbygoogle"
     style="display:block; text-align:center;"
     data-ad-layout="in-article"
     data-ad-format="fluid"
     data-ad-client="ca-pub-1466963416408457"
     data-ad-slot="8850786025"></ins>
<script>
     (adsbygoogle = window.adsbygoogle || []).push({});
</script>
<p>如果您未能做到这一点，并且仍然可以通过 <a class="reference internal" href="developer-tools.html#topics-livedom"><span class="std std-ref">DOM</span></a> 从Web浏览器中，请参见 <a class="reference internal" href="#topics-javascript-rendering"><span class="std std-ref">预渲染JavaScript</span></a> .</p>
<div class="section" id="finding-the-data-source">
<span id="topics-finding-data-source"></span><h2>查找数据源<a class="headerlink" href="#finding-the-data-source" title="永久链接至标题">¶</a></h2>
<p>要提取所需的数据，必须首先找到其源位置。</p>
<p>如果数据是非基于文本的格式，如图像或PDF文档，请使用 <a class="reference internal" href="developer-tools.html#topics-network-tool"><span class="std std-ref">network tool</span></a> 找到相应的请求，以及 <a class="reference internal" href="#topics-reproducing-requests"><span class="std std-ref">reproduce it</span></a> .</p>
<p>如果您的Web浏览器允许您选择所需的数据作为文本，则数据可以在嵌入的javascript代码中定义，也可以从基于文本格式的外部资源加载。</p>
<p>在这种情况下，您可以使用类似 <a class="reference external" href="https://github.com/stav/wgrep">wgrep</a> 以查找该资源的URL。</p>
<p>如果数据原来来自原始URL本身，则必须 <a class="reference internal" href="#topics-inspecting-source"><span class="std std-ref">inspect the source code of the webpage</span></a> 以确定数据的位置。</p>
<p>如果数据来自不同的URL，则需要 <a class="reference internal" href="#topics-reproducing-requests"><span class="std std-ref">reproduce the corresponding request</span></a> .</p>
</div>
<div class="section" id="inspecting-the-source-code-of-a-webpage">
<span id="topics-inspecting-source"></span><h2>检查网页的源代码<a class="headerlink" href="#inspecting-the-source-code-of-a-webpage" title="永久链接至标题">¶</a></h2>
<p>有时您需要检查网页的源代码（而不是 <a class="reference internal" href="developer-tools.html#topics-livedom"><span class="std std-ref">DOM</span></a> ）确定所需数据的位置。</p>
<p>使用Scrapy's <a class="reference internal" href="commands.html#std-command-fetch"><code class="xref std std-command docutils literal notranslate"><span class="pre">fetch</span></code></a> 命令下载Scrapy看到的网页内容：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">scrapy</span> <span class="n">fetch</span> <span class="o">--</span><span class="n">nolog</span> <span class="n">https</span><span class="p">:</span><span class="o">//</span><span class="n">example</span><span class="o">.</span><span class="n">com</span> <span class="o">&gt;</span> <span class="n">response</span><span class="o">.</span><span class="n">html</span>
</pre></div>
</div>
<p>如果所需数据位于 <code class="docutils literal notranslate"><span class="pre">&lt;script/&gt;</span></code> 元素，请参见 <a class="reference internal" href="#topics-parsing-javascript"><span class="std std-ref">分析javascript代码</span></a> .</p>
<p>如果你找不到想要的数据，首先要确保问题不仅仅出在 Scrapy 上：用HTTP客户端下载网页，比如 <a class="reference external" href="https://curl.haxx.se/">curl</a> 或 <a class="reference external" href="https://www.gnu.org/software/wget/">wget</a> ，看看这些信息是否可以在它们得到的响应中找到。</p>
<p>如果他们得到所需数据的响应，请修改您的  Scrapy    <a class="reference internal" href="request-response.html#scrapy.http.Request" title="scrapy.http.Request"><code class="xref py py-class docutils literal notranslate"><span class="pre">Request</span></code></a> 以匹配其他HTTP客户端。例如，尝试使用相同的用户代理字符串 (<a class="reference internal" href="settings.html#std-setting-USER_AGENT"><code class="xref std std-setting docutils literal notranslate"><span class="pre">USER_AGENT</span></code></a> ）或者同样的 <a class="reference internal" href="request-response.html#scrapy.http.Request.headers" title="scrapy.http.Request.headers"><code class="xref py py-attr docutils literal notranslate"><span class="pre">headers</span></code></a> .</p>
<p>如果他们也得到了没有所需数据的响应，那么您需要采取措施使您的请求更类似于Web浏览器的请求。见 <a class="reference internal" href="#topics-reproducing-requests"><span class="std std-ref">复制请求</span></a> .</p>
</div>
<div class="section" id="reproducing-requests">
<span id="topics-reproducing-requests"></span><h2>复制请求<a class="headerlink" href="#reproducing-requests" title="永久链接至标题">¶</a></h2>
<p>有时，我们需要以Web浏览器执行请求的方式重新生成请求。</p>
<p>使用 <a class="reference internal" href="developer-tools.html#topics-network-tool"><span class="std std-ref">network tool</span></a> 查看Web浏览器如何执行所需的请求，并尝试用scrapy重新生成该请求。</p>
<p>它可能足以产生 <a class="reference internal" href="request-response.html#scrapy.http.Request" title="scrapy.http.Request"><code class="xref py py-class docutils literal notranslate"><span class="pre">Request</span></code></a> 使用相同的HTTP方法和URL。但是，您可能还需要复制body、headers和form参数（请参见 <a class="reference internal" href="request-response.html#scrapy.http.FormRequest" title="scrapy.http.FormRequest"><code class="xref py py-class docutils literal notranslate"><span class="pre">FormRequest</span></code></a> ）关于那个请求。</p>
<p>因为所有主流浏览器都允许在中导出请求 <a class="reference external" href="https://curl.haxx.se/">cURL</a> 格式，Scrapy合并的方法 <a class="reference internal" href="request-response.html#scrapy.http.Request.from_curl" title="scrapy.http.Request.from_curl"><code class="xref py py-meth docutils literal notranslate"><span class="pre">from_curl()</span></code></a> 生成等价物 <a class="reference internal" href="request-response.html#scrapy.http.Request" title="scrapy.http.Request"><code class="xref py py-class docutils literal notranslate"><span class="pre">Request</span></code></a> 从cURL命令。欲了解更多信息，请访问 <a class="reference internal" href="developer-tools.html#requests-from-curl"><span class="std std-ref">request from curl</span></a> 在“网络工具”部分中。</p>
<p>一旦得到预期的响应，您就可以 <a class="reference internal" href="#topics-handling-response-formats"><span class="std std-ref">extract the desired data from it</span></a> .</p>
<p>你可以用Scrapy复制任何请求。但是，有时复制所有必需的请求在开发人员时间内似乎不高效。如果是这样，爬行速度对你来说不是主要的问题，你也可以考虑 <a class="reference internal" href="#topics-javascript-rendering"><span class="std std-ref">JavaScript pre-rendering</span></a> .</p>
<p>如果你 <cite>有时</cite> 能得到预期的响应，但并非总是如此，问题可能不在于您的请求，而在于目标服务器。目标服务器可能有问题、过载或正在 <a class="reference internal" href="practices.html#bans"><span class="std std-ref">banning</span></a> 你的一些请求。</p>
<p>注意，要将cURL命令转换为Scrapy请求，可以使用 <a class="reference external" href="https://michael-shub.github.io/curl2scrapy/">curl2scrapy</a> .</p>
</div>
<div class="section" id="handling-different-response-formats">
<span id="topics-handling-response-formats"></span><h2>处理不同的响应格式<a class="headerlink" href="#handling-different-response-formats" title="永久链接至标题">¶</a></h2>
<p>一旦对所需数据进行响应，如何从中提取所需数据取决于响应类型：</p>
<ul>
<li><p>如果响应是HTML或XML，请使用 <a class="reference internal" href="selectors.html#topics-selectors"><span class="std std-ref">selectors</span></a> 像往常一样。</p></li>
<li><p>如果响应是json，则使用 <a class="reference external" href="https://docs.python.org/3/library/json.html#json.loads" title="(在 Python v3.9)"><code class="xref py py-func docutils literal notranslate"><span class="pre">json.loads()</span></code></a> 从中加载所需数据 <a class="reference internal" href="request-response.html#scrapy.http.TextResponse.text" title="scrapy.http.TextResponse.text"><code class="xref py py-attr docutils literal notranslate"><span class="pre">response.text</span></code></a> ：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">data</span> <span class="o">=</span> <span class="n">json</span><span class="o">.</span><span class="n">loads</span><span class="p">(</span><span class="n">response</span><span class="o">.</span><span class="n">text</span><span class="p">)</span>
</pre></div>
</div>
<p>如果所需数据位于嵌入在JSON数据中的HTML或XML代码内，则可以将该HTML或XML代码加载到 <a class="reference internal" href="selectors.html#scrapy.selector.Selector" title="scrapy.selector.Selector"><code class="xref py py-class docutils literal notranslate"><span class="pre">Selector</span></code></a> 然后 <a class="reference internal" href="selectors.html#topics-selectors"><span class="std std-ref">use it</span></a> 和往常一样：</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">selector</span> <span class="o">=</span> <span class="n">Selector</span><span class="p">(</span><span class="n">data</span><span class="p">[</span><span class="s1">&#39;html&#39;</span><span class="p">])</span>
</pre></div>
</div>
</li>
<li><p>如果响应是javascript，或HTML <code class="docutils literal notranslate"><span class="pre">&lt;script/&gt;</span></code> 包含所需数据的元素，请参见 <a class="reference internal" href="#topics-parsing-javascript"><span class="std std-ref">分析javascript代码</span></a> .</p></li>
<li><p>如果响应是css，请使用 <a class="reference external" href="https://docs.python.org/3/library/re.html" title="(在 Python v3.9)"><span class="xref std std-doc">regular expression</span></a> 从中提取所需数据 <a class="reference internal" href="request-response.html#scrapy.http.TextResponse.text" title="scrapy.http.TextResponse.text"><code class="xref py py-attr docutils literal notranslate"><span class="pre">response.text</span></code></a> .</p></li>
</ul>
<ul id="topics-parsing-images">
<li><p>如果响应是图像或其他基于图像的格式（例如PDF），则从 <code class="xref py py-attr docutils literal notranslate"><span class="pre">response.body</span></code> 读取响应的字节，并使用OCR解决方案将所需数据提取为文本。</p>
<p>例如，您可以使用 <a class="reference external" href="https://github.com/madmaze/pytesseract">pytesseract</a>. 要从PDF中读取表格， <a class="reference external" href="https://github.com/chezou/tabula-py">tabula-py</a> 可能是更好的选择。</p>
</li>
<li><p>如果响应是SVG，或者带有包含所需数据的嵌入式SVG的HTML，则可以使用 <a class="reference internal" href="selectors.html#topics-selectors"><span class="std std-ref">selectors</span></a> ，因为SVG是基于XML的。</p>
<p>否则，可能需要将SVG代码转换为栅格图像，并且 <a class="reference internal" href="#topics-parsing-images"><span class="std std-ref">handle that raster image</span></a> .</p>
</li>
</ul>
</div>
<div class="section" id="parsing-javascript-code">
<span id="topics-parsing-javascript"></span><h2>分析javascript代码<a class="headerlink" href="#parsing-javascript-code" title="永久链接至标题">¶</a></h2>
<p>如果所需数据是用javascript硬编码的，则首先需要获取javascript代码：</p>
<ul class="simple">
<li><p>如果javascript代码在javascript文件中，只需读取 <a class="reference internal" href="request-response.html#scrapy.http.TextResponse.text" title="scrapy.http.TextResponse.text"><code class="xref py py-attr docutils literal notranslate"><span class="pre">response.text</span></code></a> .</p></li>
<li><p>如果javascript代码在 <code class="docutils literal notranslate"><span class="pre">&lt;script/&gt;</span></code> HTML页的元素，使用 <a class="reference internal" href="selectors.html#topics-selectors"><span class="std std-ref">selectors</span></a> 提取其中的文本 <code class="docutils literal notranslate"><span class="pre">&lt;script/&gt;</span></code> 元素。</p></li>
</ul>
<p>一旦有了包含javascript代码的字符串，就可以从中提取所需的数据：</p>
<ul>
<li><p>你可能会使用 <a class="reference external" href="https://docs.python.org/3/library/re.html" title="(在 Python v3.9)"><span class="xref std std-doc">regular expression</span></a> 以JSON格式提取所需数据，然后可以使用 <a class="reference external" href="https://docs.python.org/3/library/json.html#json.loads" title="(在 Python v3.9)"><code class="xref py py-func docutils literal notranslate"><span class="pre">json.loads()</span></code></a> .</p>
<p>例如，如果javascript代码包含类似 <code class="docutils literal notranslate"><span class="pre">var</span> <span class="pre">data</span> <span class="pre">=</span> <span class="pre">{{&quot;field&quot;:</span> <span class="pre">&quot;value&quot;}};</span></code> 您可以按如下方式提取该数据：</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">pattern</span> <span class="o">=</span> <span class="sa">r</span><span class="s1">&#39;\bvar\s+data\s*=\s*(\{.*?\})\s*;\s*\n&#39;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">json_data</span> <span class="o">=</span> <span class="n">response</span><span class="o">.</span><span class="n">css</span><span class="p">(</span><span class="s1">&#39;script::text&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">re_first</span><span class="p">(</span><span class="n">pattern</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">json</span><span class="o">.</span><span class="n">loads</span><span class="p">(</span><span class="n">json_data</span><span class="p">)</span>
<span class="go">{&#39;field&#39;: &#39;value&#39;}</span>
</pre></div>
</div>
</li>
<li><p><a class="reference external" href="https://github.com/Nykakin/chompjs">chompjs</a> 提供将JavaScript对象解析为 <a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#dict" title="(在 Python v3.9)"><code class="xref py py-class docutils literal notranslate"><span class="pre">dict</span></code></a> .</p>
<p>例如，如果javascript代码包含 <code class="docutils literal notranslate"><span class="pre">var</span> <span class="pre">data</span> <span class="pre">=</span> <span class="pre">{{field:</span> <span class="pre">&quot;value&quot;,</span> <span class="pre">secondField:</span> <span class="pre">&quot;second</span> <span class="pre">value&quot;}};</span></code> 您可以按如下方式提取该数据：</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">chompjs</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">javascript</span> <span class="o">=</span> <span class="n">response</span><span class="o">.</span><span class="n">css</span><span class="p">(</span><span class="s1">&#39;script::text&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">get</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">data</span> <span class="o">=</span> <span class="n">chompjs</span><span class="o">.</span><span class="n">parse_js_object</span><span class="p">(</span><span class="n">javascript</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">data</span>
<span class="go">{&#39;field&#39;: &#39;value&#39;, &#39;secondField&#39;: &#39;second value&#39;}</span>
</pre></div>
</div>
</li>
<li><p>否则，使用 <a class="reference external" href="https://github.com/scrapinghub/js2xml">js2xml</a> 要将javascript代码转换为XML文档，可以使用 <a class="reference internal" href="selectors.html#topics-selectors"><span class="std std-ref">selectors</span></a> .</p>
<p>例如，如果javascript代码包含 <code class="docutils literal notranslate"><span class="pre">var</span> <span class="pre">data</span> <span class="pre">=</span> <span class="pre">{{field:</span> <span class="pre">&quot;value&quot;}};</span></code> 您可以按如下方式提取该数据：</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">js2xml</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">import</span> <span class="nn">lxml.etree</span>
<span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">parsel</span> <span class="kn">import</span> <span class="n">Selector</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">javascript</span> <span class="o">=</span> <span class="n">response</span><span class="o">.</span><span class="n">css</span><span class="p">(</span><span class="s1">&#39;script::text&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">get</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">xml</span> <span class="o">=</span> <span class="n">lxml</span><span class="o">.</span><span class="n">etree</span><span class="o">.</span><span class="n">tostring</span><span class="p">(</span><span class="n">js2xml</span><span class="o">.</span><span class="n">parse</span><span class="p">(</span><span class="n">javascript</span><span class="p">),</span> <span class="n">encoding</span><span class="o">=</span><span class="s1">&#39;unicode&#39;</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">selector</span> <span class="o">=</span> <span class="n">Selector</span><span class="p">(</span><span class="n">text</span><span class="o">=</span><span class="n">xml</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">selector</span><span class="o">.</span><span class="n">css</span><span class="p">(</span><span class="s1">&#39;var[name=&quot;data&quot;]&#39;</span><span class="p">)</span><span class="o">.</span><span class="n">get</span><span class="p">()</span>
<span class="go">&#39;&lt;var name=&quot;data&quot;&gt;&lt;object&gt;&lt;property name=&quot;field&quot;&gt;&lt;string&gt;value&lt;/string&gt;&lt;/property&gt;&lt;/object&gt;&lt;/var&gt;&#39;</span>
</pre></div>
</div>
</li>
</ul>
</div>
<div class="section" id="pre-rendering-javascript">
<span id="topics-javascript-rendering"></span><h2>预渲染JavaScript<a class="headerlink" href="#pre-rendering-javascript" title="永久链接至标题">¶</a></h2>
<p>在从其他请求中获取数据的网页上，复制包含所需数据的请求是首选方法。这项工作通常是值得的：结构化的、完整的数据，最少的解析时间和网络传输。</p>
<p>然而，有时很难重现某些请求。或者你可能需要一些没有请求可以提供给你的东西，比如网页的屏幕截图，就像在网页浏览器中看到的那样。</p>
<p>在这些情况下，使用 <a class="reference external" href="https://github.com/scrapinghub/splash">Splash</a> JavaScript呈现服务，以及 <a class="reference external" href="https://github.com/scrapy-plugins/scrapy-splash">scrapy-splash</a> 实现无缝集成。</p>
<p>Splash 以 HTML 形式返回网页的 <a class="reference internal" href="developer-tools.html#topics-livedom"><span class="std std-ref">DOM</span></a> ，这样你就可以用 <a class="reference internal" href="selectors.html#topics-selectors"><span class="std std-ref">selectors</span></a> 对其进行解析。它通过 <a class="reference external" href="https://splash.readthedocs.io/en/stable/api.html">configuration</a> 或 <a class="reference external" href="https://splash.readthedocs.io/en/stable/scripting-tutorial.html">scripting</a> 提供了极大的灵活性。</p>
<p>如果您需要Splash提供的以外的东西，例如从python代码即时与DOM交互而不是使用以前编写的脚本，或者处理多个Web浏览器窗口，您可能需要 <a class="reference internal" href="#topics-headless-browsing"><span class="std std-ref">use a headless browser</span></a> 相反。</p>
</div>
<div class="section" id="using-a-headless-browser">
<span id="topics-headless-browsing"></span><h2>使用无头浏览器<a class="headerlink" href="#using-a-headless-browser" title="永久链接至标题">¶</a></h2>
<p><a class="reference external" href="https://en.wikipedia.org/wiki/Headless_browser">无头浏览器（headless browser）</a> 是一种特殊的Web浏览器，为自动化提供API。</p>
<p>将无头浏览器与Scrapy一起使用的最简单方法是使用 <a class="reference external" href="https://www.selenium.dev/">Selenium</a>, 随着 <a class="reference external" href="https://github.com/clemfromspace/scrapy-selenium">scrapy-selenium</a> 实现无缝集成。</p>
</div>
</div>


           </div>
           
          </div>
          <footer>
  
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="leaks.html" class="btn btn-neutral float-right" title="调试内存泄漏" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="developer-tools.html" class="btn btn-neutral float-left" title="使用浏览器的开发人员工具进行抓取" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> Previous</a>
      
    </div>
  

  <hr/>

  <div role="contentinfo">
    <p>
        
        &copy; 版权所有 2008–2020, Scrapy developers
      <span class="lastupdated">
        最后更新于 10月 18, 2020.
      </span>

    </p>
  </div>
    
    
    
    Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a
    
    <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a>
    
    provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>

        </div>
      </div>

    </section>

  </div>
  

  <script type="text/javascript">
      // Enable the RTD theme's sidebar navigation behavior once the DOM is
      // ready (jQuery and SphinxRtdTheme are loaded by the <script> tags in
      // <head> above). The `true` argument is passed straight through to
      // Navigation.enable; presumably it toggles sticky navigation —
      // NOTE(review): confirm against theme.js.
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
  
 
<script type="text/javascript">
// Segment analytics.js bootstrap snippet (SNIPPET_VERSION 3.1.0), kept
// byte-identical as shipped: it creates a stub `window.analytics` object
// that queues method calls, then asynchronously injects the real
// analytics.min.js bundle from cdn.segment.com. The double-inclusion guard
// logs "Segment snippet included twice." if the snippet runs again.
!function(){var analytics=window.analytics=window.analytics||[];if(!analytics.initialize)if(analytics.invoked)window.console&&console.error&&console.error("Segment snippet included twice.");else{analytics.invoked=!0;analytics.methods=["trackSubmit","trackClick","trackLink","trackForm","pageview","identify","reset","group","track","ready","alias","page","once","off","on"];analytics.factory=function(t){return function(){var e=Array.prototype.slice.call(arguments);e.unshift(t);analytics.push(e);return analytics}};for(var t=0;t<analytics.methods.length;t++){var e=analytics.methods[t];analytics[e]=analytics.factory(e)}analytics.load=function(t){var e=document.createElement("script");e.type="text/javascript";e.async=!0;e.src=("https:"===document.location.protocol?"https://":"http://")+"cdn.segment.com/analytics.js/v1/"+t+"/analytics.min.js";var n=document.getElementsByTagName("script")[0];n.parentNode.insertBefore(e,n)};analytics.SNIPPET_VERSION="3.1.0";
// Load the analytics bundle for this write key, then record a pageview.
analytics.load("8UDQfnf3cyFSTsM4YANnW5sXmgZVILbA");
analytics.page();
}}();

// After the analytics bundle has finished loading, configure cross-domain
// tracking between the listed domains via the Google Analytics linker plugin.
// NOTE(review): `ga` is assumed to be defined globally by the loaded bundle —
// if it is not, this callback throws a ReferenceError; confirm before reuse.
analytics.ready(function () {
    ga('require', 'linker');
    ga('linker:autoLink', ['scrapinghub.com', 'crawlera.com']);
});
</script>


</body>
</html>