<!DOCTYPE html>
<html lang="zh-CN">

<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <meta name="theme-color" content="#222">
  <meta name="generator" content="Hexo 4.2.1">
  <link rel="apple-touch-icon" sizes="180x180" href="/images/apple-touch-icon-next.png">
  <link rel="icon" type="image/png" sizes="32x32" href="/images/favicon-32x32-next.png">
  <link rel="icon" type="image/png" sizes="16x16" href="/images/favicon-16x16-next.png">
  <link rel="mask-icon" href="/images/safari-pinned-tab.svg" color="#222">
  <link rel="stylesheet" href="/css/main.css">
  <link rel="stylesheet" href="/lib/font-awesome/css/all.min.css">
  <link rel="stylesheet" href="/lib/pace/pace-theme-minimal.min.css">
  <script src="/lib/pace/pace.min.js"></script>
  <script id="hexo-configurations">
    var NexT = window.NexT ||
    {};
    var CONFIG = {
      "hostname": "cuiqingcai.com",
      "root": "/",
      "scheme": "Pisces",
      "version": "7.8.0",
      "exturl": false,
      "sidebar":
      {
        "position": "right",
        "width": 360,
        "display": "post",
        "padding": 18,
        "offset": 12,
        "onmobile": false,
        "widgets": [
          {
            "type": "image",
            "name": "阿布云",
            "enable": false,
            "url": "https://www.abuyun.com/http-proxy/introduce.html",
            "src": "https://qiniu.cuiqingcai.com/88au8.jpg",
            "width": "100%"
      },
          {
            "type": "image",
            "name": "天验",
            "enable": true,
            "url": "https://tutorial.lengyue.video/?coupon=12ef4b1a-a3db-11ea-bb37-0242ac130002_cqx_850",
            "src": "https://qiniu.cuiqingcai.com/bco2a.png",
            "width": "100%"
      },
          {
            "type": "image",
            "name": "华为云",
            "enable": false,
            "url": "https://activity.huaweicloud.com/2020_618_promotion/index.html?bpName=5f9f98a29e2c40b780c1793086f29fe2&bindType=1&salesID=wangyubei",
            "src": "https://qiniu.cuiqingcai.com/y42ik.jpg",
            "width": "100%"
      },
          {
            "type": "image",
            "name": "张小鸡",
            "enable": false,
            "url": "http://www.zxiaoji.com/",
            "src": "https://qiniu.cuiqingcai.com/fm72f.png",
            "width": "100%"
      },
          {
            "type": "image",
            "name": "Luminati",
            "src": "https://qiniu.cuiqingcai.com/ikkq9.jpg",
            "url": "https://luminati-china.io/?affiliate=ref_5fbbaaa9647883f5c6f77095",
            "width": "100%",
            "enable": false
      },
          {
            "type": "image",
            "name": "IPIDEA",
            "url": "http://www.ipidea.net/?utm-source=cqc&utm-keyword=?cqc",
            "src": "https://qiniu.cuiqingcai.com/0ywun.png",
            "width": "100%",
            "enable": true
      },
          {
            "type": "tags",
            "name": "标签云",
            "enable": true
      },
          {
            "type": "categories",
            "name": "分类",
            "enable": true
      },
          {
            "type": "friends",
            "name": "友情链接",
            "enable": true
      },
          {
            "type": "hot",
            "name": "猜你喜欢",
            "enable": true
      }]
      },
      "copycode":
      {
        "enable": true,
        "show_result": true,
        "style": "mac"
      },
      "back2top":
      {
        "enable": true,
        "sidebar": false,
        "scrollpercent": true
      },
      "bookmark":
      {
        "enable": false,
        "color": "#222",
        "save": "auto"
      },
      "fancybox": false,
      "mediumzoom": false,
      "lazyload": false,
      "pangu": true,
      "comments":
      {
        "style": "tabs",
        "active": "gitalk",
        "storage": true,
        "lazyload": false,
        "nav": null,
        "activeClass": "gitalk"
      },
      "algolia":
      {
        "hits":
        {
          "per_page": 10
        },
        "labels":
        {
          "input_placeholder": "Search for Posts",
          "hits_empty": "We didn't find any results for the search: ${query}",
          "hits_stats": "${hits} results found in ${time} ms"
        }
      },
      "localsearch":
      {
        "enable": true,
        "trigger": "auto",
        "top_n_per_article": 10,
        "unescape": false,
        "preload": false
      },
      "motion":
      {
        "enable": false,
        "async": false,
        "transition":
        {
          "post_block": "bounceDownIn",
          "post_header": "slideDownIn",
          "post_body": "slideDownIn",
          "coll_header": "slideLeftIn",
          "sidebar": "slideUpIn"
        }
      },
      "path": "search.xml"
    };

  </script>
  <meta name="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
  <meta property="og:type" content="website">
  <meta property="og:title" content="静觅">
  <meta property="og:url" content="https://cuiqingcai.com/page/22/index.html">
  <meta property="og:site_name" content="静觅">
  <meta property="og:description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
  <meta property="og:locale" content="zh_CN">
  <meta property="article:author" content="崔庆才">
  <meta property="article:tag" content="崔庆才">
  <meta property="article:tag" content="静觅">
  <meta property="article:tag" content="PHP">
  <meta property="article:tag" content="Java">
  <meta property="article:tag" content="Python">
  <meta property="article:tag" content="Spider">
  <meta property="article:tag" content="爬虫">
  <meta property="article:tag" content="Web">
  <meta property="article:tag" content="Kubernetes">
  <meta property="article:tag" content="深度学习">
  <meta property="article:tag" content="机器学习">
  <meta property="article:tag" content="数据分析">
  <meta property="article:tag" content="网络">
  <meta property="article:tag" content="IT">
  <meta property="article:tag" content="技术">
  <meta property="article:tag" content="博客">
  <meta name="twitter:card" content="summary">
  <link rel="canonical" href="https://cuiqingcai.com/page/22/">
  <script id="page-configurations">
    // https://hexo.io/docs/variables.html
    CONFIG.page = {
      sidebar: "",
      isHome: true,
      isPost: false,
      lang: 'zh-CN'
    };

  </script>
  <title>静觅丨崔庆才的个人站点</title>
  <meta name="google-site-verification" content="p_bIcnvirkFzG2dYKuNDivKD8-STet5W7D-01woA2fc">
  <noscript>
    <style>
      .use-motion .brand,
      .use-motion .menu-item,
      .sidebar-inner,
      .use-motion .post-block,
      .use-motion .pagination,
      .use-motion .comments,
      .use-motion .post-header,
      .use-motion .post-body,
      .use-motion .collection-header
      {
        opacity: initial;
      }

      .use-motion .site-title,
      .use-motion .site-subtitle
      {
        opacity: initial;
        top: initial;
      }

      .use-motion .logo-line-before i
      {
        left: initial;
      }

      .use-motion .logo-line-after i
      {
        right: initial;
      }

    </style>
  </noscript>
  <link rel="alternate" href="/atom.xml" title="静觅" type="application/atom+xml">
</head>

<body itemscope itemtype="http://schema.org/WebPage">
  <div class="container">
    <div class="headband"></div>
    <header class="header" itemscope itemtype="http://schema.org/WPHeader">
      <div class="header-inner">
        <div class="site-brand-container">
          <div class="site-nav-toggle">
            <div class="toggle" aria-label="切换导航栏">
              <span class="toggle-line toggle-line-first"></span>
              <span class="toggle-line toggle-line-middle"></span>
              <span class="toggle-line toggle-line-last"></span>
            </div>
          </div>
          <div class="site-meta">
            <a href="/" class="brand" rel="start">
              <span class="logo-line-before"><i></i></span>
              <h1 class="site-title">静觅 <span class="site-subtitle"> 崔庆才的个人站点 </span>
              </h1>
              <span class="logo-line-after"><i></i></span>
            </a>
          </div>
          <div class="site-nav-right">
            <div class="toggle popup-trigger">
              <i class="fa fa-search fa-fw fa-lg"></i>
            </div>
          </div>
        </div>
        <nav class="site-nav">
          <ul id="menu" class="main-menu menu">
            <li class="menu-item menu-item-home">
              <a href="/" rel="section">首页</a>
            </li>
            <li class="menu-item menu-item-archives">
              <a href="/archives/" rel="section">文章列表</a>
            </li>
            <li class="menu-item menu-item-tags">
              <a href="/tags/" rel="section">文章标签</a>
            </li>
            <li class="menu-item menu-item-categories">
              <a href="/categories/" rel="section">文章分类</a>
            </li>
            <li class="menu-item menu-item-about">
              <a href="/about/" rel="section">关于博主</a>
            </li>
            <li class="menu-item menu-item-message">
              <a href="/message/" rel="section">给我留言</a>
            </li>
            <li class="menu-item menu-item-search">
              <a role="button" tabindex="0" class="popup-trigger">搜索 </a>
            </li>
          </ul>
        </nav>
        <div class="search-pop-overlay">
          <div class="popup search-popup">
            <div class="search-header">
              <span class="search-icon">
                <i class="fa fa-search"></i>
              </span>
              <div class="search-input-container">
                <input autocomplete="off" autocapitalize="off" placeholder="搜索..." spellcheck="false" type="search" class="search-input">
              </div>
              <span class="popup-btn-close">
                <i class="fa fa-times-circle"></i>
              </span>
            </div>
            <div id="search-result">
              <div id="no-result">
                <i class="fa fa-spinner fa-pulse fa-5x fa-fw"></i>
              </div>
            </div>
          </div>
        </div>
      </div>
    </header>
    <div class="back-to-top">
      <i class="fa fa-arrow-up"></i>
      <span>0%</span>
    </div>
    <div class="reading-progress-bar"></div>
    <main class="main">
      <div class="main-inner">
        <div class="content-wrap">
          <div class="content index posts-expand">
            <div class="carousel">
              <div id="wowslider-container">
                <div class="ws_images">
                  <ul>
                    <li><a target="_blank" rel="noopener" href="https://cuiqingcai.com/5052.html"><img title="Python3网络爬虫开发实战教程" alt="Python3网络爬虫开发实战教程" src="https://qiniu.cuiqingcai.com/ipy96.jpg"></a></li>
                    <li><a target="_blank" rel="noopener" href="https://t.lagou.com/fRCBRsRCSN6FA"><img title="52讲轻松搞定网络爬虫" alt="52讲轻松搞定网络爬虫" src="https://qiniu.cuiqingcai.com/fqq5e.png"></a></li>
                    <li><a target="_blank" rel="noopener" href="https://brightdata.grsm.io/cuiqingcai"><img title="亮网络解锁器" alt="亮网络解锁器" src="https://qiniu.cuiqingcai.com/6qnb7.png"></a></li>
                    <li><a target="_blank" rel="noopener" href="https://cuiqingcai.com/4320.html"><img title="Python3网络爬虫开发视频教程" alt="Python3网络爬虫开发视频教程" src="https://qiniu.cuiqingcai.com/bjrny.jpg"></a></li>
                    <li><a target="_blank" rel="noopener" href="https://cuiqingcai.com/5094.html"><img title="爬虫代理哪家强？十大付费代理详细对比评测出炉！" alt="爬虫代理哪家强？十大付费代理详细对比评测出炉！" src="https://qiniu.cuiqingcai.com/nifs6.jpg"></a></li>
                  </ul>
                </div>
                <div class="ws_thumbs">
                  <div>
                    <a target="_blank" href="#"><img src="https://qiniu.cuiqingcai.com/ipy96.jpg" alt=""></a>
                    <a target="_blank" href="#"><img src="https://qiniu.cuiqingcai.com/fqq5e.png" alt=""></a>
                    <a target="_blank" href="#"><img src="https://qiniu.cuiqingcai.com/6qnb7.png" alt=""></a>
                    <a target="_blank" href="#"><img src="https://qiniu.cuiqingcai.com/bjrny.jpg" alt=""></a>
                    <a target="_blank" href="#"><img src="https://qiniu.cuiqingcai.com/nifs6.jpg" alt=""></a>
                  </div>
                </div>
                <div class="ws_shadow"></div>
              </div>
            </div>
            <link rel="stylesheet" href="/lib/wowslide/slide.css">
            <script src="/lib/wowslide/jquery.min.js"></script>
            <script src="/lib/wowslide/slider.js"></script>
            <script>
              jQuery("#wowslider-container").wowSlider(
              {
                effect: "cube",
                prev: "",
                next: "",
                duration: 20 * 100,
                delay: 20 * 100,
                width: 716,
                height: 297,
                autoPlay: true,
                playPause: true,
                stopOnHover: false,
                loop: false,
                bullets: 0,
                caption: true,
                captionEffect: "slide",
                controls: true,
                onBeforeStep: 0,
                images: 0
              });

            </script>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/4465.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> Python <i class="label-arrow"></i>
                  </a>
                  <a href="/4465.html" class="post-title-link" itemprop="url">免登录新浪微博爬虫系列之第一篇 单博主微博及评论数据</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <img itemprop="contentUrl" class="random">
                </div>
                <div class="excerpt">
                  <p>
                  <blockquote>
                    <p>我的GITHUB地址：<a href="https://github.com/xiaosimao/weibo_spider" target="_blank" rel="noopener">https://github.com/xiaosimao/weibo_spider</a> 2017.05.04 更新： 感谢哥本哈根小树对于获取containnerid的指教，多谢。</p>
                    <pre><code>_**大家好，我是新人四毛，大家可以叫我小四毛，至于为什么，在家排行老四，农村人，就是那么任性。**_
</code></pre>
                  </blockquote>
                  <pre><code>好，自我介绍完毕，开始今天的学（zhuang）习（bi）之路。
</code></pre>
                  <blockquote>
                    <p><strong>说明：本文针对的是有一些爬虫基础的同学，所以看不太懂的同学先补一下基础。</strong></p>
                    <p><strong>本文的全部代码并没有上传到GITHUB中，而且本文的code部分给出的代码也是指导性的，大部分还是要靠大家自己动手完成。待后几篇博客出来以后，代码会放到上面。</strong></p>
                    <p><strong>大家如果有问题交流的话，欢迎在下面进行评论，或者可以加我QQ:549411552(加的话麻烦注明来自静觅)，欢迎大佬拍砖指错，大家共同进步。</strong></p>
                  </blockquote>
                  <pre><code>前几天，大才发布了一个视频，主要讲的是通过维护一个新浪微博 Cookies池，抓取新浪微博的相关数据，爬取的站点是weibo.cn。相关的代码在大才的Github里【大才的视频教程真的很用心，视频高清无码，希望大家可以支持大才，毕竟写了那么多精彩的教程真心不易】。

然而，如果你只是想简单的搞点数据，对技术一点兴趣都没有，又或者某宝搜来搜去都没有买到账号，又或者装个模拟登陆需要的模块都想跳楼，有没有除此之外其他的办法呢？你有没有想过在免登陆的情况下就可以获得你想要的数据呢？如果你这么想过而又没有做出来，那么接下来，让我们一起搞（qi）事（fei）吧。
</code></pre>
                  <p><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ图片20170205084843.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ图片20170205084843.jpg" alt=""></a></p>
                  <p>本文重点提供解决问题的思路，会把最关键的点标示出来，代码基本没有。有什么不对或不足之处，还望大家指出，共同进步。</p>
                  <h3 id="1-前期准备"><a href="#1-前期准备" class="headerlink" title="1.前期准备"></a>1.前期准备</h3>
                  <pre><code> 代理IP。虽说本文介绍的方法不需要Cookies，但是代理IP还是需要的，要不然也是被新浪分分钟的403（我测试的时候会出现）。如果你连403都不知道是什么，那么还是去看看大才的爬虫基础课程，或者不想看文字的话直接来报大才的视频课程课，哈哈（大才，今晚得加两个菜啊，我这吆喝的）。
</code></pre>
                  <p><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/兔子.gif" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/兔子.gif" alt=""></a></p>
                  <h3 id="2-思路分析"><a href="#2-思路分析" class="headerlink" title="2.思路分析"></a>2.思路分析</h3>
                  <pre><code>一般做爬虫爬取网站，首选的都是m站，其次是wap站，最后考虑PC站。当然，这不是绝对的，有的时候PC站的信息最全，而你又恰好需要全部的信息，那么PC站是你的首选。一般m站都以m开头后接域名，试一下 就好了，实在找不到，上网搜。

所以本文开搞的网址就是 m.weibo.cn。但是当你在浏览器中输入这个网址时，你得到的应该是下面这个页面，如果不是，说明你的浏览器保留了你最近登录微博的cookie，这个时候，清空浏览器保存的数据，再次打开这个网页，就应该也是这个界面了：
</code></pre>
                  <p><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/login-1.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/login-1.jpg" alt=""></a> <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/login.jpg" target="_blank" rel="noopener"></a> 我滴天，是的，你没看错，就是这个登录界面。你不是说不需要登录吗？怎么TM的还是这个万恶的界面？怎么破？WTF?</p>
                  <p><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/表情2.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/表情2.jpg" alt=""></a></p>
                  <pre><code>哈哈，其实一开始我也不知道，后来经人指点，才发现只要在后面加入一些东西之后就不会看到这个界面了。那么是什么呢？
</code></pre>
                  <p><em><strong>当当当当！！！！！！！！！！</strong></em></p>
                  <blockquote>
                    <p><strong><a href="http://m.weibo.cn/u/1713926427" target="_blank" rel="noopener">http://m.weibo.cn/u/1713926427</a></strong></p>
                  </blockquote>
                  <p> <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/表情1.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/表情1-300x300.jpg" alt=""></a></p>
                  <pre><code>当你看到这个网址的时候，憋说话，一定要用心去感受，这个时候说话你的嘴都是咧着的，别问我为什么知道，我就是知道。

**用心去感受，真的。**

对了，上面网址最后的数字是博主的数字ID，在weibo.com的源码里可以找到，这里不做说明了。

打开上述网址， 界面变成这个样子，是不是很厉害的样子（大手勿喷），拨云见日，对于老手来说，下面的他们就可以不看了，可以去抓包写代码了，但是对于一头雾水的小伙伴请接着往下看：
</code></pre>
                  <p> <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/home_page-1.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/home_page-1.jpg" alt=""></a></p>
                  <pre><code>这就是本文爬虫的入口，没错，就说牛逼的榜姐，入口选一些质量高的，比如你想爬新闻方面信息，那么你就去找澎湃新闻，新浪新闻之类的。

通过该入口，我们可以抓取该博主的所有微博及评论信息，以及该博主关注的人的微博及评论信息，依次往后，循环不断。

在这里谈一点经验：
</code></pre>
                  <p> <strong>其实做爬虫，最基础的当然是写代码的能力，抓包什么的都不是什么困难的事，抓包很简单很简单。我觉得最难的是找到入口，找到一个最适合的入口。怎么定义这个最适合呢？就是要去尝试，依照一般的顺序，先找找M站，再找找wap站，最后再去看PC站，找到一个合适的入口，往往会事半功倍。前几天抓取途牛网的相关游记信息，爬PC站分分钟的302，但是爬M站，全是接口，全程无阻。</strong></p>
                  <pre><code>因大多数人都是采集微博信息以及评论信息，所以下面将以这两方面为主。

剧透一下，在这里可以抓到的信息：

(1) **博主信息 （没发现有价值的信息，下面抓包过程不讲）**

(2) **博主微博信息（下文抓包讲解）**

(3) **微博评论信息（下文抓包讲解）**

(4) **热门微博信息（小时榜，日榜，周榜，月榜）（下文抓包未讲解，大家可以摸索一下）**

       。。。。。。还有很多我没有细看，等待各位细细研究吧。
</code></pre>
                  <h3 id="3-抓包分析"><a href="#3-抓包分析" class="headerlink" title="3. 抓包分析"></a>3. 抓包分析</h3>
                  <pre><code>首先，得会抓包，一般的浏览器的Network够用了。
</code></pre>
                  <p> <strong>(1) 微博正文抓包</strong></p>
                  <pre><code>点击 上图中的微博然后往下拉，抓包出现下图：
</code></pre>
                  <p><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/post_content_zhuabao-1.jpg" target="_blank" rel="noopener"></a><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/post_content_zhuabao-1-1.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/post_content_zhuabao-1-1.jpg" alt=""></a></p>
                  <p><strong>分析：</strong></p>
                  <blockquote>
                    <pre><code>可以看到，服务器返回的数据为json格式，这个是做爬虫的最喜欢的了。返回的数据包括很多的字段，图中也以及做了标示，相信大家都能看的懂，看不懂那也没办法了。
</code></pre>
                  </blockquote>
                  <pre><code>最后放上抓包的数据：
</code></pre>
                  <blockquote>
                    <ol>
                      <li>
                        <p><strong>Request URL</strong>:</p>
                        <p><a href="http://m.weibo.cn/api/container/getIndex?type=uid&amp;value=1713926427&amp;containerid=1076031713926427&amp;page=2" target="_blank" rel="noopener">http://m.weibo.cn/api/container/getIndex?type=uid&amp;value=1713926427&amp;containerid=1076031713926427&amp;page=2</a></p>
                      </li>
                      <li>
                        <p><strong>Request Method</strong>:</p>
                        <p>GET</p>
                      </li>
                      <li>
                        <p><strong>Query String Parameters</strong></p>
                        <p>type: uid</p>
                        <p>value: 1713926427</p>
                        <p>containerid: 1076031713926427</p>
                        <p>page: 2</p>
                      </li>
                    </ol>
                  </blockquote>
                  <pre><code>**(2) 微博评论抓包**

单击微博内容，就可以抓包成功，如下图：
</code></pre>
                  <p><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/comment_zhuabao.jpg" target="_blank" rel="noopener"></a><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/comment_zhuabao-1.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/comment_zhuabao-1.jpg" alt=""></a> <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/comment_zhuabao.jpg" target="_blank" rel="noopener"></a> <strong> 分析：</strong></p>
                  <blockquote>
                    <pre><code>从上面可以看出，这里的数据依然还是很好获取的。
</code></pre>
                  </blockquote>
                  <pre><code>最后放上抓包的数据：
</code></pre>
                  <blockquote>
                    <ol>
                      <li>
                        <p><strong>Request URL</strong>:</p>
                        <p><a href="http://m.weibo.cn/api/comments/show?id=4103388327019042&amp;page=1" target="_blank" rel="noopener">http://m.weibo.cn/api/comments/show?id=4103388327019042&amp;page=1</a></p>
                      </li>
                      <li>
                        <p><strong>Request Method</strong>:</p>
                        <p>GET</p>
                      </li>
                      <li>
                        <p><strong>Query String Parameters</strong></p>
                        <p>id: 4103388327019042</p>
                        <p>page: 1</p>
                      </li>
                    </ol>
                  </blockquote>
                  <p> <strong>再次分析：</strong></p>
                  <blockquote>
                    <pre><code>通过抓包的数据可以发现，获取微博评论必须首先获得这条微博的ID。所以，目前还是要对微博正文的抓包过程进行分析。
</code></pre>
                  </blockquote>
                  <h3 id="4-思路解析"><a href="#4-思路解析" class="headerlink" title="4. 思路解析"></a>4. 思路解析</h3>
                  <pre><code>在上面的微博正文中发现需要提交以下数据：
</code></pre>
                  <blockquote>
                    <p>type: uid</p>
                    <p>value: 1713926427</p>
                    <p>containerid: 1076031713926427</p>
                    <p>page: 2</p>
                  </blockquote>
                  <pre><code>其中：**type**(固定值)、**value**(博主微博ID)、**containerid**(意义不明确，但是带了个id在里面，应该代表的是一个唯一性的一个标识)、**page**(页码)。页码在返回的数据中可以获得。

那么分析到这里，containerid就是我们要找的最重要的信息。这个字段信息是不会凭空出现的，肯定产生于某一个请求之中，所以这时候，我们再回到开头，回到我们的初始。刷新入口网址，抓包发现了下面3个网址，见下图：
</code></pre>
                  <p><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/home_zhuabao.jpg" target="_blank" rel="noopener"></a><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/home_zhuabao-1.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/home_zhuabao-1.jpg" alt=""></a> <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/home_zhuabao.jpg" target="_blank" rel="noopener"></a> <strong> 分析：</strong></p>
                  <blockquote>
                    <pre><code>这3个网址的格式一模一样，所以点进去看一下里面到底什么情况。
</code></pre>
                  </blockquote>
                  <p> 下面的先点开<strong>网址1</strong>看看：</p>
                  <p><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/user_infojpg.jpg" target="_blank" rel="noopener"></a><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/user_infojpg-1.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/user_infojpg-1.jpg" alt=""></a> <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/user_infojpg.jpg" target="_blank" rel="noopener"></a> <strong> 分析：</strong></p>
                  <blockquote>
                    <pre><code>从返回的数据中，可以看到第1个网址的主要内容为 user_Info，即博主的个人信息，相关的字段在图中已经标示出来。最令人惊喜的是查找我们需要的containerid时，发现数据竟然就在其中，那么可以肯定我们需要的containerid就是在这个请求的返回值中，那么问题再次出现，这个请求的网址中又出现了一个containerid，我们似乎又回到了原点，而且在用户的首页抓包中，在这个请求之前，也没有什么有意义的请求了，到这里是不是就进入死胡同了呢？其实不然，在这里我们就要进行多方面的尝试了，当我们将第一个网址中的containerid删掉以后，重新请求一次，发现返回的依然是这些数据，具体见下图：
</code></pre>
                  </blockquote>
                  <p><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/no_containid-1.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/05/no_containid-1.jpg" alt=""></a></p>
                  <p> <strong>分析：</strong></p>
                  <blockquote>
                    <pre><code>而当我将第三个网址，也就是微博正文的网址中的containerid去掉后，返回的数据就是博主的个人信息了，而不是我们需要的微博正文，所以可以肯定第一个网址中的containerid并不是必须的，而对于网址3，这个字段则是必须的。
</code></pre>
                  </blockquote>
                  <pre><code>为了让这个爬虫可以顺着一个初始用户爬取到其他用户的相关信息，甚至全网的信息，那么我们就需要让爬虫自己去添加待爬任务。本文选择的初始用户有3000多万的粉丝数，就是人们常说的微博大V。在做这一类的信息爬取时，我们往往关注的是数据的质量，所以我们选择初始用户的关注用户作为下一级的用户。在下一级中，这些用户将被作为初始用户。这样周而复始，最理想的情况当然就是可以把微博全站的质量还不错的博主的微博以及下面的评论都抓取了。但是在实际的操作过程中会发现微博的用户质量真的是参差不齐，所以我们在筛选后面的用户时，可以加一些限制条件，如用户的粉丝数等等。在这里找寻初始用户关注用户信息的这一过程就省略了，留给大家探索一下，很简单。
</code></pre>
                  <p>所以到这里，我们的整个流程就理清了（单个博主，如需循环，则只需要找到下一级用户的ID即可，相信这对于聪明的大家肯定不难的）：</p>
                  <blockquote>
                    <p>请求用户主页网址—&gt;得到containerid，请求微博正文网址—&gt;保存博文相关信息，取出博文ID，请求评论网址—&gt;得到评论信息</p>
                  </blockquote>
                  <h3 id="5-CODE-TIME"><a href="#5-CODE-TIME" class="headerlink" title="5. CODE TIME"></a>5. CODE TIME</h3>
                  <pre><code>思路已经理清了，那么下面就是CODE TIME了，毕竟:
</code></pre>
                  <blockquote>
                    <p>TALK IS CHEAP,SHOW ME YOUR CODE</p>
                  </blockquote>
                  <pre><code>本文采用scrapy编写，重写个proxy中间件，即可实现每一个request带一个随机IP，减少被封禁的概率，同时尽量把重试的次数设置大一些。

想要保存哪些信息，根据自身的业务需求而定，具体的信息，能找到的都可以在每一个请求返回的内容中找到，都是json格式的，所以这里的代码只是将上面讲的流程实现了一遍，其他的都没有实现。
</code></pre>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="comment"># -*- coding: utf-8 -*-</span></span><br><span class="line">import scrapy</span><br><span class="line">import json</span><br><span class="line"></span><br><span class="line">class SinaSpider(scrapy.Spider):</span><br><span class="line">    name = <span class="string">"sina"</span></span><br><span class="line">    allowed_domains = [<span class="string">"m.weibo.cn"</span>]</span><br><span class="line">    # root id</span><br><span class="line">    first_id = <span class="string">'1713926427'</span></span><br><span class="line"></span><br><span class="line">    def start_requests(self):</span><br><span class="line">        # <span class="keyword">to</span> <span class="builtin-name">get</span> containerid</span><br><span class="line">        url = <span class="string">'http://m.weibo.cn/api/container/getIndex?type=uid&amp;value=&#123;&#125;'</span>.format(self.first_id)</span><br><span class="line">        yield scrapy.Request(<span class="attribute">url</span>=url, <span class="attribute">callback</span>=self.get_containerid)</span><br><span class="line"></span><br><span class="line">    def get_containerid(self,response):</span><br><span class="line">        content = json.loads(response.body)</span><br><span class="line">        # here, we can <span class="builtin-name">get</span> containerid</span><br><span class="line">        containerid = None</span><br><span class="line">        <span class="keyword">for</span> data <span class="keyword">in</span>  content.<span class="builtin-name">get</span>(<span class="string">'tabsInfo'</span>).<span class="builtin-name">get</span>(<span class="string">'tabs'</span>):</span><br><span class="line">            <span class="keyword">if</span> data.<span class="builtin-name">get</span>(<span class="string">'tab_type'</span>) == <span class="string">'weibo'</span>:</span><br><span class="line">                containerid = data.<span 
class="builtin-name">get</span>(<span class="string">'containerid'</span>)</span><br><span class="line">                <span class="builtin-name">print</span> <span class="string">'weibo request url containerid is %s'</span> % containerid</span><br><span class="line"></span><br><span class="line">        # construct the wei bo request url</span><br><span class="line">        <span class="keyword">if</span> containerid:</span><br><span class="line">            weibo_url = response.url + <span class="string">'&amp;containerid=%s'</span>%containerid</span><br><span class="line">            yield scrapy.Request(<span class="attribute">url</span>=weibo_url, <span class="attribute">callback</span>=self.get_weibo_id)</span><br><span class="line">        <span class="keyword">else</span>:</span><br><span class="line">            <span class="builtin-name">print</span> <span class="string">'sorry, do not get containerid'</span></span><br><span class="line"></span><br><span class="line">    def get_weibo_id(self, response):</span><br><span class="line">        content = json.loads(response.body)</span><br><span class="line">        # <span class="builtin-name">get</span> weibo id ,you can also save some other data <span class="keyword">if</span> you need</span><br><span class="line">        <span class="keyword">for</span> data <span class="keyword">in</span> content.<span class="builtin-name">get</span>(<span class="string">'cards'</span>):</span><br><span class="line">            <span class="keyword">if</span> data.<span class="builtin-name">get</span>(<span class="string">'card_type'</span>) == 9:</span><br><span class="line">                single_weibo_id = data.<span class="builtin-name">get</span>(<span class="string">'mblog'</span>).<span class="builtin-name">get</span>(<span class="string">'id'</span>)</span><br><span class="line">                <span class="builtin-name">print</span> single_weibo_id</span><br><span class="line">                # here ,<span 
class="keyword">if</span> you want <span class="keyword">to</span> <span class="builtin-name">get</span> comment <span class="builtin-name">info</span> ,you can construct the comment url just the same as wei bo url</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <h3 id="6-总结"><a href="#6-总结" class="headerlink" title="6.总结"></a>6.总结</h3>
                  <pre><code>本文写到这里就算结束了，我一直信奉授人以鱼不如授人以渔，在这篇文章中，我并没有把全部的代码展示出来，而是通过分析的过程来让大家知道怎么去处理这类问题，在文中也留了好几个可以让大家发挥的地方，如用户关注用户怎么获取？按照关键词搜索的信息怎么抓取？等等。我相信大家通过一步步的抓包以及分析一定可以解决这些问题的。这些问题，在以后的博客中我也会继续更新的。

第一次写这样的博客，感觉还是驾驭不了，还是得多多练习。写博客真的很累，向大才致敬，感谢他无私地为我们奉献了这么多精彩的教程。
</code></pre>
                  </p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/四毛" class="author" itemprop="url" rel="index">四毛</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2017-05-04 12:56:51" itemprop="dateCreated datePublished" datetime="2017-05-04T12:56:51+08:00">2017-05-04</time>
                </span>
                <span id="/4465.html" class="post-meta-item leancloud_visitors" data-flag-title="免登录新浪微博爬虫系列之第一篇  单博主微博及评论数据" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <!-- was "fa fa-eye" (Font Awesome 4 prefix); normalized to the FA5
                         "far" (regular) style used by every sibling icon in this block -->
                    <i class="far fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>5.4k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>5 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <!-- schema.org Article microdata: the hidden author/publisher spans feed
                   search-engine rich results; the "hidden" attribute keeps them out of
                   the visual flow and the accessibility tree. -->
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/4421.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <!-- NOTE(review): href-less <a> is a valid inert placeholder, but if this
                       category label is meant to navigate (e.g. to /categories/Python/) it
                       needs an href — confirm against the theme template. -->
                  <a class="label"> Python <i class="label-arrow"></i>
                  </a>
                  <a href="/4421.html" class="post-title-link" itemprop="url">小白进阶之Scrapy第四篇（图片下载管道篇）</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <!-- NOTE(review): no src in the static markup — presumably injected by
                       theme JS via the "random" class; confirm. alt="" added so the
                       purely decorative thumbnail is ignored by screen readers instead
                       of being announced by its (eventual) file name. -->
                  <img itemprop="contentUrl" class="random" alt="">
                </div>
                <div class="excerpt">
                  <p>
                  <p><strong>PS： 爬虫不进入img_url函数的小伙伴儿 请尝试将将代码复制到你新建的py文件中。</strong> 2017/8/30 更新解决了网站防盗链导致下载图片失败的问题 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161021225948.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161021225948.jpg" alt=""></a> 这几天一直有小伙伴而给我吐槽说，由于妹子图站长把www.mzitu.com/all这个地址取消了。导致原来的那个采集爬虫不能用啦。 正好也有小伙伴儿问Scrapy中的图片下载管道是怎么用的。 就凑合在一起把mzitu.com给重新写了一下。 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/9555112.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/9555112.jpg" alt=""></a> 首先确保你的Python环境已安装 Scrapy!!!!!!!! 命令行下进入你需要存放项目的目录并创建项目： 比如我放在了D:\PycharmProjects</p>
                  <!-- Hexo/highlight.js generated block: left <td> is the line-number
                       gutter, right <td> holds the highlighted shell commands that
                       scaffold the mzitu_scrapy project. The <span> structure is emitted
                       by the highlighter — do not hand-edit it. -->
                  <figure class="highlight properties">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attr">D</span>:<span class="string"></span></span><br><span class="line"><span class="attr">cd</span> <span class="string">PycharmProjects</span></span><br><span class="line"><span class="attr">scrapy</span> <span class="string">startproject mzitu_scrapy</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>我是Windows！其余系统的伙伴儿自己看着办哈。 这都不会的小伙伴儿，快去洗洗睡吧。养足了精神从头看一遍教程哈！ 在PyCharm中打开我们的项目目录。 在mzitu_scrapy目录创建run.py。写入以下内容：</p>
                  <figure class="highlight smali">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">from scrapy.cmdline import execute</span><br><span class="line">execute(['scrapy', 'crawl', 'mzitu'])</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>其中的 mzitu 就为待会儿spider.py文件中的name属性。这点请务必记住哦！不然是跑不起来的。 在mzitu_scrapy\spider目录中创建spider.py。文件作为爬虫文件。 好了！现在我们来想想，怎么来抓mzitu.com了。 首先我们的目标是当然是全站的妹子图片！！！ 但是问题来了，站长把之前那个mzitu.com\all 这个URL地址给取消了，我们没办法弄到全部的套图地址了！ 我们可以去仔细观察一下站点所有套图的地址都是：<a href="http://www.mzitu.com/几位数字结尾的。" target="_blank" rel="noopener">http://www.mzitu.com/几位数字结尾的。</a> 这种格式地址。 有木有小伙伴儿想到了啥？ <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161022193315.gif" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161022193315.gif" alt=""></a> CrawlSpider ！！！就是这玩儿！！ 有了它我们就能追踪“<a href="http://www.mzitu.com/几位数字结尾的”这种格式的URL了。" target="_blank" rel="noopener">http://www.mzitu.com/几位数字结尾的”这种格式的URL了。</a> Go Go Go Go！开始搞事。 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ图片20170205084843.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ图片20170205084843.jpg" alt=""></a> 首先在item.py中新建我们需要的字段。我们需要啥？我们需要套图的名字和图片地址！！ 那我们新建三个字段：</p>
                  <figure class="highlight mipsasm">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">import <span class="keyword">scrapy</span></span><br><span class="line"><span class="keyword"></span></span><br><span class="line"><span class="keyword"></span></span><br><span class="line"><span class="keyword">class </span>MzituScrapyItem(<span class="keyword">scrapy.Item):</span></span><br><span class="line"><span class="keyword"> </span>   <span class="comment"># define the fields for your item here like:</span></span><br><span class="line">    <span class="comment"># name = scrapy.Field()</span></span><br><span class="line">    name = <span class="keyword">scrapy.Field()</span></span><br><span class="line"><span class="keyword"> </span>   image_urls = <span class="keyword">scrapy.Field()</span></span><br><span class="line"><span class="keyword"> </span>   url = <span class="keyword">scrapy.Field()</span></span><br><span class="line"><span class="keyword"> </span>   pass</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p> 第一步完成啦！开始写spider.py啦！ 首先导入我们需要的包：</p>
                  <figure class="highlight pgsql">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> scrapy <span class="keyword">import</span> Request</span><br><span class="line"><span class="keyword">from</span> scrapy.spider <span class="keyword">import</span> CrawlSpider, <span class="keyword">Rule</span></span><br><span class="line"><span class="keyword">from</span> scrapy.linkextractors <span class="keyword">import</span> LinkExtractor</span><br><span class="line"><span class="keyword">from</span> mzitu_scrapy.items <span class="keyword">import</span> MzituScrapyItem</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>都是干啥的我不说了哈！不知道的小伙伴儿自己去翻翻官方文档。 接下来是：</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">class Spider(CrawlSpider):</span><br><span class="line">    name = <span class="string">'mzitu'</span></span><br><span class="line">    allowed_domains = [<span class="string">'mzitu.com'</span>]</span><br><span class="line">    start_urls = [<span class="string">'http://www.mzitu.com/'</span>]</span><br><span class="line">    img_urls = []</span><br><span class="line">    rules = (</span><br><span class="line">        Rule(LinkExtractor(allow=(<span class="string">'http://www.mzitu.com/\d&#123;1,6&#125;'</span>,), deny=(<span class="string">'http://www.mzitu.com/\d&#123;1,6&#125;/\d&#123;1,6&#125;'</span>)), <span class="attribute">callback</span>=<span class="string">'parse_item'</span>, <span class="attribute">follow</span>=<span class="literal">True</span>),</span><br><span class="line">    )</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>第五行的img_urls=[] 这个列表是我们之后用来存储每个套图的全部图片的URL地址的。 rules中的语句是：匹配<a href="http://www.mzitu.com/1至6位数的的URL（\\d：数字；{1,6}匹配1至6次。就能匹配出1到6位数）" target="_blank" rel="noopener">http://www.mzitu.com/1至6位数的的URL（\\d：数字；{1,6}匹配1至6次。就能匹配出1到6位数）</a> 但是我们会发现网页中除了<a href="http://www.mzitu.com/XXXXXXX" target="_blank" rel="noopener">http://www.mzitu.com/XXXXXXX</a> 这种格式的URL之外；还有 <a href="http://www.mzitu.com/XXXX/XXXX" target="_blank" rel="noopener">http://www.mzitu.com/XXXX/XXXX</a> 这个格式的URL。所以我们需要设置 deny来不匹配<a href="http://www.mzitu.com/XXXX/XXXX这种格式的URL。" target="_blank" rel="noopener">http://www.mzitu.com/XXXX/XXXX这种格式的URL。</a> 然后将匹配到的网页交给parse_item来处理。并且持续追踪 <strong>看这儿敲黑板！！划重点！！：：：</strong></p>
                  <h2 id="重点说明！！！！不能parse函数！！这是CrawlSpider进行匹配调用的函数，你要是使用了！rules就没法进行匹配啦！！！"><a href="#重点说明！！！！不能parse函数！！这是CrawlSpider进行匹配调用的函数，你要是使用了！rules就没法进行匹配啦！！！" class="headerlink" title="重点说明！！！！不能parse函数！！这是CrawlSpider进行匹配调用的函数，你要是使用了！rules就没法进行匹配啦！！！"></a><strong>重点说明！！！！不能parse函数！！这是CrawlSpider进行匹配调用的函数，你要是使用了！rules就没法进行匹配啦！！！</strong></h2>
                  <p>现在spider.py是这样的：</p>
                  <figure class="highlight python">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> scrapy <span class="keyword">import</span> Request</span><br><span class="line"><span class="keyword">from</span> scrapy.spider <span class="keyword">import</span> CrawlSpider, Rule</span><br><span class="line"><span class="keyword">from</span> scrapy.linkextractors <span class="keyword">import</span> LinkExtractor</span><br><span class="line"><span class="keyword">from</span> mzitu_scrapy.items <span class="keyword">import</span> MzituScrapyItem</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">Spider</span><span class="params">(CrawlSpider)</span>:</span></span><br><span class="line">    name = <span class="string">'mzitu'</span></span><br><span class="line">    allowed_domains = [<span class="string">'mzitu.com'</span>]</span><br><span class="line">    start_urls = [<span class="string">'http://www.mzitu.com/'</span>]</span><br><span class="line">    img_urls = []</span><br><span class="line">    rules = (</span><br><span class="line">        Rule(LinkExtractor(allow=(<span class="string">'http://www.mzitu.com/\d&#123;1,6&#125;'</span>,), deny=(<span class="string">'http://www.mzitu.com/\d&#123;1,6&#125;/\d&#123;1,6&#125;'</span>)), callback=<span class="string">'parse_item'</span>, follow=<span class="literal">True</span>),</span><br><span class="line">    )</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">parse_item</span><span class="params">(self, response)</span>:</span></span><br><span class="line">        print(response.url)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>来跑一下试试 别忘了怎么测试的哈！！上面新建的那个run.py！ <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/04/mzitu01.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/04/mzitu01.png" alt=""></a> Good！！真棒！全是我们想要的！！！ 现在干啥？啥？你不知道？EXM你没逗我吧！ <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/04/e6a6e85d131a484b8034606c0f6a504a_th.gif" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/04/e6a6e85d131a484b8034606c0f6a504a_th.gif" alt=""></a> 当然是解析我们拿到的response了！从里面找我们要的套图名称和所有的图片地址了！ 我们随便打开一个URL。 首先用xpath取套图名称： 啥？你不知道怎么用xpath？？少年少女 你走吧。出去别说看过我的博文。 ./*//div[@class=’main’]/div[1]/h2/text() 这段xpath就是套图名称的xpath了！看不懂的少年少女赶快去<a href="http://www.w3school.com.cn/看看xpath的教程！" target="_blank" rel="noopener">http://www.w3school.com.cn/看看xpath的教程！</a> 当然你直接用Chrome拷贝出来的那个xpath也行。（有一定的概率不能使） 现在来找图片地址了，怎么找我在 小白爬虫第一弹中已经写过了哈！这就不详细赘述了！ 首先找到每套图有多少张图片： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/04/mzitu02.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/04/mzitu02.png" alt=""></a> 就是红框中的那个东东。 Xpath这样写：</p>
                  <figure class="highlight delphi">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">descendant::<span class="keyword">div</span>[@<span class="keyword">class</span>=<span class="string">'main'</span>]/<span class="keyword">div</span>[@<span class="keyword">class</span>=<span class="string">'content'</span>]/<span class="keyword">div</span>[@<span class="keyword">class</span>=<span class="string">'pagenavi'</span>]/a[last()-<span class="number">1</span>]/span/text()</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p> 意思是选取根节点下面所有后代标签，在其中选取出 div[@class=’main’]下面的div[@class=’content’]下面的/div[@class=’pagenavi’]下面的倒数第二个a标签 下面的span标签中的文本。（有点长哈哈哈哈哈！其实还可以短一些，我懒就不改了） 然后循环拼接处每张图片的的网页地址，现在spider.py是这样：</p>
                  <figure class="highlight python">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> scrapy <span class="keyword">import</span> Request</span><br><span class="line"><span class="keyword">from</span> scrapy.spider <span class="keyword">import</span> CrawlSpider, Rule</span><br><span class="line"><span class="keyword">from</span> scrapy.linkextractors <span class="keyword">import</span> LinkExtractor</span><br><span class="line"><span class="keyword">from</span> mzitu_scrapy.items <span class="keyword">import</span> MzituScrapyItem</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">Spider</span><span class="params">(CrawlSpider)</span>:</span></span><br><span class="line">    name = <span class="string">'mzitu'</span></span><br><span class="line">    allowed_domains = [<span class="string">'mzitu.com'</span>]</span><br><span class="line">    start_urls = [<span class="string">'http://www.mzitu.com/'</span>]</span><br><span class="line">    img_urls = []</span><br><span class="line">    rules = (</span><br><span class="line">        Rule(LinkExtractor(allow=(<span class="string">'http://www.mzitu.com/\d&#123;1,6&#125;'</span>,), deny=(<span class="string">'http://www.mzitu.com/\d&#123;1,6&#125;/\d&#123;1,6&#125;'</span>)), callback=<span class="string">'parse_item'</span>, follow=<span class="literal">True</span>),</span><br><span class="line">    )</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">parse_item</span><span class="params">(self, response)</span>:</span></span><br><span class="line">        <span class="string">"""</span></span><br><span class="line"><span class="string">        :param response: 下载器返回的response</span></span><br><span class="line"><span class="string">        :return:</span></span><br><span class="line"><span 
class="string">        """</span></span><br><span class="line">        item = MzituScrapyItem()</span><br><span class="line">        <span class="comment"># max_num为页面最后一张图片的位置</span></span><br><span class="line">        max_num = response.xpath(<span class="string">"descendant::div[@class='main']/div[@class='content']/div[@class='pagenavi']/a[last()-1]/span/text()"</span>).extract_first(default=<span class="string">"N/A"</span>)</span><br><span class="line">        item[<span class="string">'name'</span>] = response.xpath(<span class="string">"./*//div[@class='main']/div[1]/h2/text()"</span>).extract_first(default=<span class="string">"N/A"</span>)</span><br><span class="line">        <span class="keyword">for</span> num <span class="keyword">in</span> range(<span class="number">1</span>, int(max_num)):</span><br><span class="line">            <span class="comment"># page_url 为每张图片所在的页面地址</span></span><br><span class="line">            page_url = response.url + <span class="string">'/'</span> + str(num)</span><br><span class="line">            <span class="keyword">yield</span> Request(page_url, callback=self.img_url)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>extract_first(default=”N/A”)的意思是：取xpath返回值的第一个元素。如果xpath没有取到值，则返回N/A 然后调用函数img_url来提取每个网页中的图片地址。img_url长这样：</p>
                  <figure class="highlight python">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">img_url</span><span class="params">(self, response,)</span>:</span></span><br><span class="line">    <span class="string">"""取出图片URL 并添加进self.img_urls列表中</span></span><br><span class="line"><span class="string">    :param response:</span></span><br><span class="line"><span class="string">    :param img_url 为每张图片的真实地址</span></span><br><span class="line"><span class="string">    """</span></span><br><span class="line">    img_urls = response.xpath(<span class="string">"descendant::div[@class='main-image']/descendant::img/@src"</span>).extract()</span><br><span class="line">    <span class="keyword">for</span> img_url <span class="keyword">in</span> img_urls:</span><br><span class="line">        self.img_urls.append(img_url)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p> descendant::div[@class=’main-image’]/descendant::img/@src这段xpath取出div[@class=’main-image’]下面所有的img标签的src属性（有的套图一个页面有好几张图） .extract()不跟上[0]返回的是列表 完整的spider.py如下：</p>
                  <figure class="highlight python">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> scrapy <span class="keyword">import</span> Request</span><br><span class="line"><span class="keyword">from</span> scrapy.spider <span class="keyword">import</span> CrawlSpider, Rule</span><br><span class="line"><span class="keyword">from</span> scrapy.linkextractors <span class="keyword">import</span> LinkExtractor</span><br><span class="line"><span class="keyword">from</span> mzitu_scrapy.items <span class="keyword">import</span> MzituScrapyItem</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">Spider</span><span class="params">(CrawlSpider)</span>:</span></span><br><span class="line">    name = <span class="string">'mzitu'</span></span><br><span class="line">    allowed_domains = [<span class="string">'mzitu.com'</span>]</span><br><span class="line">    start_urls = [<span class="string">'http://www.mzitu.com/'</span>]</span><br><span class="line">    img_urls = []</span><br><span class="line">    rules = (</span><br><span class="line">        Rule(LinkExtractor(allow=(<span class="string">'http://www.mzitu.com/\d&#123;1,6&#125;'</span>,), deny=(<span class="string">'http://www.mzitu.com/\d&#123;1,6&#125;/\d&#123;1,6&#125;'</span>)), callback=<span class="string">'parse_item'</span>, follow=<span class="literal">True</span>),</span><br><span class="line">    )</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">parse_item</span><span class="params">(self, response)</span>:</span></span><br><span class="line">        <span class="string">"""</span></span><br><span class="line"><span class="string">        :param response: 下载器返回的response</span></span><br><span class="line"><span class="string">        :return:</span></span><br><span class="line"><span 
class="string">        """</span></span><br><span class="line">        item = MzituScrapyItem()</span><br><span class="line">        <span class="comment"># max_num为页面最后一张图片的位置</span></span><br><span class="line">        max_num = response.xpath(<span class="string">"descendant::div[@class='main']/div[@class='content']/div[@class='pagenavi']/a[last()-1]/span/text()"</span>).extract_first(default=<span class="string">"N/A"</span>)</span><br><span class="line">        item[<span class="string">'name'</span>] = response.xpath(<span class="string">"./*//div[@class='main']/div[1]/h2/text()"</span>).extract_first(default=<span class="string">"N/A"</span>)</span><br><span class="line">        item[<span class="string">'url'</span>] = response.url</span><br><span class="line">        <span class="keyword">for</span> num <span class="keyword">in</span> range(<span class="number">1</span>, int(max_num)):</span><br><span class="line">            <span class="comment"># page_url 为每张图片所在的页面地址</span></span><br><span class="line">            page_url = response.url + <span class="string">'/'</span> + str(num)</span><br><span class="line">            <span class="keyword">yield</span> Request(page_url, callback=self.img_url)</span><br><span class="line">        item[<span class="string">'image_urls'</span>] = self.img_urls</span><br><span class="line">        <span class="keyword">yield</span> item</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">img_url</span><span class="params">(self, response,)</span>:</span></span><br><span class="line">        <span class="string">"""取出图片URL 并添加进self.img_urls列表中</span></span><br><span class="line"><span class="string">        :param response:</span></span><br><span class="line"><span class="string">        :param img_url 为每张图片的真实地址</span></span><br><span class="line"><span class="string">        """</span></span><br><span 
class="line">        img_urls = response.xpath(<span class="string">"descendant::div[@class='main-image']/descendant::img/@src"</span>).extract()</span><br><span class="line">        <span class="keyword">for</span> img_url <span class="keyword">in</span> img_urls:</span><br><span class="line">            self.img_urls.append(img_url)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p> 下面开始把图片弄回本地啦！！ 开写我们的pipelines.py 首先根据官方文档说明我们如果需要使用图片管道 则需要使用ImagesPipeline： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/04/mzitu03.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/04/mzitu03.png" alt=""></a> 我们可以依葫芦画瓢写一个。但是这样有一个很麻烦的问题就是，这样下载下来的图片没有分类，很是难看啊！ 所以 我们需要重写一下ImagesPipeline中的file_path方法！ 具体如下：</p>
                  <figure class="highlight python">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span 
class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="comment"># -*- coding: utf-8 -*-</span></span><br><span class="line"></span><br><span class="line"><span class="comment"># Define your item pipelines here</span></span><br><span class="line"><span class="comment">#</span></span><br><span class="line"><span class="comment"># Don't forget to add your pipeline to the ITEM_PIPELINES setting</span></span><br><span class="line"><span class="comment"># See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html</span></span><br><span class="line"><span class="keyword">from</span> scrapy <span class="keyword">import</span> Request</span><br><span class="line"><span class="keyword">from</span> scrapy.pipelines.images <span class="keyword">import</span> ImagesPipeline</span><br><span class="line"><span class="keyword">from</span> scrapy.exceptions <span class="keyword">import</span> DropItem</span><br><span class="line"><span class="keyword">import</span> re</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">MzituScrapyPipeline</span><span class="params">(ImagesPipeline)</span>:</span></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">file_path</span><span class="params">(self, request, response=None, info=None)</span>:</span></span><br><span class="line">        <span class="string">"""</span></span><br><span class="line"><span class="string">        :param request: 每一个图片下载管道请求</span></span><br><span class="line"><span class="string">        :param response:</span></span><br><span class="line"><span class="string">        :param info:</span></span><br><span class="line"><span class="string">        :param strip :清洗Windows系统的文件夹非法字符，避免无法创建目录</span></span><br><span class="line"><span class="string">        :return: 每套图的分类目录</span></span><br><span 
class="line"><span class="string">        """</span></span><br><span class="line">        item = request.meta[<span class="string">'item'</span>]</span><br><span class="line">        folder = item[<span class="string">'name'</span>]</span><br><span class="line">        folder_strip = strip(folder)</span><br><span class="line">        image_guid = request.url.split(<span class="string">'/'</span>)[<span class="number">-1</span>]</span><br><span class="line">        filename = <span class="string">u'full/&#123;0&#125;/&#123;1&#125;'</span>.format(folder_strip, image_guid)</span><br><span class="line">        <span class="keyword">return</span> filename</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">get_media_requests</span><span class="params">(self, item, info)</span>:</span></span><br><span class="line">        <span class="string">"""</span></span><br><span class="line"><span class="string">        :param item: spider.py中返回的item</span></span><br><span class="line"><span class="string">        :param info:</span></span><br><span class="line"><span class="string">        :return:</span></span><br><span class="line"><span class="string">        """</span></span><br><span class="line">        <span class="keyword">for</span> img_url <span class="keyword">in</span> item[<span class="string">'image_urls'</span>]:</span><br><span class="line">            referer = item[<span class="string">'url'</span>]</span><br><span class="line">            <span class="keyword">yield</span> Request(img_url, meta=&#123;<span class="string">'item'</span>: item,</span><br><span class="line">                                         <span class="string">'referer'</span>: referer&#125;)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">item_completed</span><span 
class="params">(self, results, item, info)</span>:</span></span><br><span class="line">        image_paths = [x[<span class="string">'path'</span>] <span class="keyword">for</span> ok, x <span class="keyword">in</span> results <span class="keyword">if</span> ok]</span><br><span class="line">        <span class="keyword">if</span> <span class="keyword">not</span> image_paths:</span><br><span class="line">            <span class="keyword">raise</span> DropItem(<span class="string">"Item contains no images"</span>)</span><br><span class="line">        <span class="keyword">return</span> item</span><br><span class="line"></span><br><span class="line">    <span class="comment"># def process_item(self, item, spider):</span></span><br><span class="line">    <span class="comment">#     return item</span></span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">strip</span><span class="params">(path)</span>:</span></span><br><span class="line">    <span class="string">"""</span></span><br><span class="line"><span class="string">    :param path: 需要清洗的文件夹名字</span></span><br><span class="line"><span class="string">    :return: 清洗掉Windows系统非法文件夹名字的字符串</span></span><br><span class="line"><span class="string">    """</span></span><br><span class="line">    path = re.sub(<span class="string">r'[？\*|“&lt;&gt;:/]'</span>, <span class="string">''</span>, str(path))</span><br><span class="line">    <span class="keyword">return</span> path</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">"__main__"</span>:</span><br><span class="line">    a = <span class="string">'我是一个？*|“&lt;&gt;:/错误的字符串'</span></span><br><span class="line">    print(strip(a))</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p> 写一个中间件来处理图片下载的防盗链：</p>
                  <figure class="highlight python">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">MeiZiTu</span><span class="params">(object)</span>:</span></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">process_request</span><span class="params">(self, request, spider)</span>:</span></span><br><span class="line">        <span class="string">'''设置headers和切换请求头</span></span><br><span class="line"><span class="string">        :param request: 请求体</span></span><br><span class="line"><span class="string">        :param spider: spider对象</span></span><br><span class="line"><span class="string">        :return: None</span></span><br><span class="line"><span class="string">        '''</span></span><br><span class="line">        referer = request.meta.get(<span class="string">'referer'</span>, <span class="literal">None</span>)</span><br><span class="line">        <span class="keyword">if</span> referer:</span><br><span class="line">            request.headers[<span class="string">'referer'</span>] = referer</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p> 最后一步设置ImagesPipeline的存储目录！ 在settings.py中写入：</p>
                  <figure class="highlight ini">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attr">IMAGES_STORE</span> = <span class="string">'F:\mzitu\\'</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>则ImagesPipeline将所有下载的图片放置在此目录下！ 设置图片实效性： 图像管道避免下载最近已经下载的图片。使用 <a href="http://scrapy-chs.readthedocs.io/zh_CN/1.0/topics/media-pipeline.html#std:setting-FILES_EXPIRES" target="_blank" rel="noopener"><code>FILES_EXPIRES</code></a> (或 <a href="http://scrapy-chs.readthedocs.io/zh_CN/1.0/topics/media-pipeline.html#std:setting-IMAGES_EXPIRES" target="_blank" rel="noopener"><code>IMAGES_EXPIRES</code></a>) 设置可以调整失效期限，可以用天数来指定: 在settings.py中写入以下配置。</p>
                  <figure class="highlight ini">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="comment"># 30 days of delay for images expiration</span></span><br><span class="line"><span class="attr">IMAGES_EXPIRES</span> = <span class="number">30</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p> settings.py中开启item_pipelines:</p>
                  <figure class="highlight ebnf">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attribute">ITEM_PIPELINES</span> = &#123;</span><br><span class="line">   <span class="string">'mzitu_scrapy.pipelines.MzituScrapyPipeline'</span>: 300,</span><br><span class="line">&#125;</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>settings.py中开启DOWNLOADER_MIDDLEWARES</p>
                  <figure class="highlight ebnf">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attribute">DOWNLOADER_MIDDLEWARES</span> = &#123;</span><br><span class="line">   <span class="string">'mzitu_scrapy.middlewares.MeiZiTu'</span>: 543,</span><br><span class="line">&#125;</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p> 如果你需要缩略图之类的请参考官方文档： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/04/mzitu05.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/04/mzitu05.png" alt=""></a> 将其写入settings.py文件中。 至此完毕！！！ 来看看效果： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/04/mzitu04.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/04/mzitu04.png" alt=""></a> 下载速度简直飞起！！友情提示：请务必配置代理哦！ 可以参考大才哥的<a href="http://cuiqingcai.com/3443.html" target="_blank" rel="noopener">http://cuiqingcai.com/3443.html</a>做一个代理，就不需要重写Scrapy中间件啦！更能避免免费代理总是不能用的坑爹行为。 总之省事省时又省心啊！ github地址：<a href="https://github.com/thsheep/mzitu_scrapy" target="_blank" rel="noopener">https://github.com/thsheep/mzitu_scrapy</a></p>
                  </p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/哎哟卧槽" class="author" itemprop="url" rel="index">哎哟卧槽</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2017-04-24 00:37:29" itemprop="dateCreated datePublished" datetime="2017-04-24T00:37:29+08:00">2017-04-24</time>
                </span>
                <span id="/4421.html" class="post-meta-item leancloud_visitors" data-flag-title="小白进阶之Scrapy第四篇（图片下载管道篇）" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>8.5k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>8 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/4380.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> Python <i class="label-arrow"></i>
                  </a>
                  <a href="/4380.html" class="post-title-link" itemprop="url">利用Scrapy爬取知乎用户详细信息并存至MongoDB</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <img itemprop="contentUrl" class="random">
                </div>
                <div class="excerpt">
                  <p>
                  <p>本节分享一下爬取知乎用户信息的Scrapy爬虫实战。</p>
                  <h2 id="本节目标"><a href="#本节目标" class="headerlink" title="本节目标"></a>本节目标</h2>
                  <p>本节要实现的内容有：</p>
                  <ul>
                    <li>从一个大V用户开始，通过递归抓取粉丝列表和关注列表，实现知乎所有用户的详细信息的抓取。</li>
                    <li>将抓取到的结果存储到MongoDB，并进行去重操作。</li>
                  </ul>
                  <h2 id="思路分析"><a href="#思路分析" class="headerlink" title="思路分析"></a>思路分析</h2>
                  <p>我们都知道每个人都有关注列表和粉丝列表，尤其对于大V来说，粉丝和关注尤其更多。 如果我们从一个大V开始，首先可以获取他的个人信息，然后我们获取他的粉丝列表和关注列表，然后遍历列表中的每一个用户，进一步抓取每一个用户的信息还有他们各自的粉丝列表和关注列表，然后再进一步遍历获取到的列表中的每一个用户，进一步抓取他们的信息和关注粉丝列表，循环往复，不断递归，这样就可以做到一爬百，百爬万，万爬百万，通过社交关系自然形成了一个爬取网，这样就可以爬到所有的用户信息了。当然零粉丝零关注的用户就忽略他们吧～ 爬取的信息怎样来获得呢？不用担心，通过分析知乎的请求就可以得到相关接口，通过请求接口就可以拿到用户详细信息和粉丝、关注列表了。 接下来我们开始实战爬取。</p>
                  <h2 id="环境需求"><a href="#环境需求" class="headerlink" title="环境需求"></a>环境需求</h2>
                  <h3 id="Python3"><a href="#Python3" class="headerlink" title="Python3"></a>Python3</h3>
                  <p>本项目使用的Python版本是Python3，项目开始之前请确保你已经安装了Python3。</p>
                  <h3 id="Scrapy"><a href="#Scrapy" class="headerlink" title="Scrapy"></a>Scrapy</h3>
                  <p>Scrapy是一个强大的爬虫框架，安装方式如下：</p>
                  <figure class="highlight cmake">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">pip3 <span class="keyword">install</span> scrapy</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <h3 id="MongoDB"><a href="#MongoDB" class="headerlink" title="MongoDB"></a>MongoDB</h3>
                  <p>非关系型数据库，项目开始之前请先安装好MongoDB并启动服务。</p>
                  <h3 id="PyMongo"><a href="#PyMongo" class="headerlink" title="PyMongo"></a>PyMongo</h3>
                  <p>Python的MongoDB连接库，安装方式如下：</p>
                  <figure class="highlight cmake">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">pip3 <span class="keyword">install</span> pymongo</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <h2 id="创建项目"><a href="#创建项目" class="headerlink" title="创建项目"></a>创建项目</h2>
                  <p>安装好以上环境之后，我们便可以开始我们的项目了。 在项目开始之首先我们用命令行创建一个项目：</p>
                  <figure class="highlight ebnf">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attribute">scrapy startproject zhihuuser</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <h2 id="创建爬虫"><a href="#创建爬虫" class="headerlink" title="创建爬虫"></a>创建爬虫</h2>
                  <p>接下来我们需要创建一个spider，同样利用命令行，不过这次命令行需要进入到项目里运行。</p>
                  <figure class="highlight css">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="selector-tag">cd</span> <span class="selector-tag">zhihuuser</span></span><br><span class="line"><span class="selector-tag">scrapy</span> <span class="selector-tag">genspider</span> <span class="selector-tag">zhihu</span> <span class="selector-tag">www</span><span class="selector-class">.zhihu</span><span class="selector-class">.com</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <h2 id="禁止ROBOTSTXT-OBEY"><a href="#禁止ROBOTSTXT-OBEY" class="headerlink" title="禁止ROBOTSTXT_OBEY"></a>禁止ROBOTSTXT_OBEY</h2>
                  <p>接下来你需要打开settings.py文件，将ROBOTSTXT_OBEY修改为False。</p>
                  <figure class="highlight ini">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attr">ROBOTSTXT_OBEY</span> = <span class="literal">False</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>它默认为True，就是要遵守robots.txt 的规则，那么 robots.txt 是个什么东西呢？ 通俗来说， robots.txt 是遵循 Robot 协议的一个文件，它保存在网站的服务器中，它的作用是，告诉搜索引擎爬虫，本网站哪些目录下的网页 不希望 你进行爬取收录。在Scrapy启动后，会在第一时间访问网站的 robots.txt 文件，然后决定该网站的爬取范围。 当然，我们并不是在做搜索引擎，而且在某些情况下我们想要获取的内容恰恰是被 robots.txt 所禁止访问的。所以，某些时候，我们就要将此配置项设置为 False ，拒绝遵守 Robot协议 ！ 所以在这里设置为False。当然可能本次爬取不一定会被它限制，但是我们一般来说会首先选择禁止它。</p>
                  <h2 id="尝试最初的爬取"><a href="#尝试最初的爬取" class="headerlink" title="尝试最初的爬取"></a>尝试最初的爬取</h2>
                  <p>接下来我们什么代码也不修改，执行爬取，运行如下命令：</p>
                  <figure class="highlight ebnf">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attribute">scrapy crawl zhihu</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>你会发现爬取结果会出现这样的一个错误：</p>
                  <figure class="highlight basic">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="symbol">500 </span>Internal Server <span class="keyword">Error</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>访问知乎得到的状态码是500，这说明爬取并没有成功，其实这是因为我们没有加入请求头，知乎识别User-Agent发现不是浏览器，就返回错误的响应了。 所以接下来的一步我们需要加入请求headers信息，你可以在Request的参数里加，也可以在spider里面的custom_settings里面加，当然最简单的方法莫过于在全局settings里面加了。 我们打开settings.py文件，取消DEFAULT_REQUEST_HEADERS的注释，加入如下的内容：</p>
                  <figure class="highlight 1c">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">DEFAULT_REQUEST_HEADERS = &#123;</span><br><span class="line">    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.<span class="number">2924.87</span> Safari/537.36'</span><br><span class="line">&#125;</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>这个是为你的请求添加请求头，如果你没有设置headers的话，它就会使用这个请求头请求，添加了User-Agent信息，所以这样我们的爬虫就可以伪装浏览器了。 接下来重新运行爬虫。</p>
                  <figure class="highlight ebnf">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attribute">scrapy crawl zhihu</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>这时你就会发现得到的返回状态码就正常了。 解决了这个问题，我们接下来就可以分析页面逻辑来正式实现爬虫了。</p>
                  <h2 id="爬取流程"><a href="#爬取流程" class="headerlink" title="爬取流程"></a>爬取流程</h2>
                  <p>接下来我们需要先探寻获取用户详细信息和获取关注列表的接口。 回到网页，打开浏览器的控制台，切换到Network监听模式。 我们首先要做的是寻找一个大V，以轮子哥为例吧，它的个人信息页面网址是：<a href="https://www.zhihu.com/people/excited-vczh" target="_blank" rel="noopener">https://www.zhihu.com/people/excited-vczh</a> 首先打开轮子哥的首页 <img src="https://ww4.sinaimg.cn/large/006tKfTcly1femrd0w9qwj31kw145n1v.jpg" alt=""> 我们可以看到这里就是他的一些基本信息，我们需要抓取的就是这些，比如名字、签名、职业、关注数、赞同数等等。 接下来我们需要探索一下关注列表接口在哪里，我们点击关注选项卡，然后下拉，点击翻页，我们会在下面的请求中发现出现了 followees开头的Ajax请求。这个就是获取关注列表的接口。 <img src="https://ww1.sinaimg.cn/large/006tKfTcly1femrhdtpkoj31kw0y7jw5.jpg" alt=""> 我们观察一下这个请求结构 <img src="https://ww3.sinaimg.cn/large/006tKfTcly1femrk47kt9j31ho0sejwx.jpg" alt=""> 首先它是一个Get类型的请求，请求的URL是<a href="https://www.zhihu.com/api/v4/members/excited-vczh/followees" target="_blank" rel="noopener">https://www.zhihu.com/api/v4/members/excited-vczh/followees</a>，后面跟了三个参数，一个是include，一个是offset，一个是limit。 观察后可以发现，include是一些获取关注的人的基本信息的查询参数，包括回答数、文章数等等。 offset是偏移量，我们现在分析的是第3页的关注列表内容，offset当前为40。 limit为每一页的数量，这里是20，所以结合上面的offset可以推断，当offset为0时，获取到的是第一页关注列表，当offset为20时，获取到的是第二页关注列表，依次类推。 然后接下来看下返回结果： <img src="https://ww3.sinaimg.cn/large/006tKfTcly1femrpgchhpj31ec0ss0wb.jpg" alt=""> 可以看到有data和paging两个字段，data就是数据，包含20个内容，这些就是用户的基本信息，也就是关注列表的用户信息。 paging里面又有几个字段，is_end表示当前翻页是否结束，next是下一页的链接，所以在判读分页的时候，我们可以先利用is_end判断翻页是否结束，然后再获取next链接，请求下一页。 这样我们的关注列表就可以通过接口获取到了。 接下来我们再看下用户详情接口在哪里，我们将鼠标放到关注列表任意一个头像上面，观察下网络请求，可以发现又会出现一个Ajax请求。 <img src="https://ww3.sinaimg.cn/large/006tKfTcly1femrumazrij31kw0zjk1e.jpg" alt=""> 可以看到这次的请求链接为<a href="https://www.zhihu.com/api/v4/members/lu-jun-ya-1" target="_blank" rel="noopener">https://www.zhihu.com/api/v4/members/lu-jun-ya-1</a> 后面又一个参数include，include是一些查询参数，与刚才的接口类似，不过这次参数非常全，几乎可以把所有详情获取下来，另外接口的最后是加了用户的用户名，这个其实是url_token，上面的那个接口其实也是，在返回数据中是可以获得的。 <img src="https://ww4.sinaimg.cn/large/006tKfTcly1femrxhb8ptj313w0qy76m.jpg" alt=""> 所以综上所述：</p>
                  <ul>
                    <li>要获取用户的关注列表，我们需要请求类似 <a href="https://www.zhihu.com/api/v4/members/%7Buser%7D/followees?include={include}&amp;offset={offset}&amp;limit={limit}" target="_blank" rel="noopener">https://www.zhihu.com/api/v4/members/{user}/followees?include={include}&amp;offset={offset}&amp;limit={limit}</a> 这样的接口，其中user就是该用户的url_token，include是固定的查询参数，offset是分页偏移量，limit是一页取多少个。</li>
                    <li>要获取用户的详细信息，我们需要请求类似 <a href="https://www.zhihu.com/api/v4/members/%7Buser%7D?include={include}" target="_blank" rel="noopener">https://www.zhihu.com/api/v4/members/{user}?include={include}</a> 这样的接口，其中user就是该用户的url_token，include是查询参数。</li>
                  </ul>
                  <p>理清了如上接口逻辑后，我们就可以开始构造请求了。</p>
                  <h2 id="生成第一步请求"><a href="#生成第一步请求" class="headerlink" title="生成第一步请求"></a>生成第一步请求</h2>
                  <p>接下来我们要做的第一步当然是请求轮子哥的基本信息，然后获取轮子哥的关注列表了，我们首先构造一个格式化的url，将一些可变参数提取出来，然后需要重写start_requests方法，生成第一步的请求，接下来我们还需要根据获取到到关注列表做进一步的分析。</p>
                  <figure class="highlight reasonml">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">import json</span><br><span class="line">from scrapy import Spider, Request</span><br><span class="line">from zhihuuser.items import UserItem</span><br><span class="line"></span><br><span class="line"><span class="keyword">class</span> <span class="constructor">ZhihuSpider(Spider)</span>:</span><br><span class="line">    name = <span class="string">"zhihu"</span></span><br><span class="line">    allowed_domains = <span class="literal">["<span class="identifier">www</span>.<span class="identifier">zhihu</span>.<span class="identifier">com</span>"]</span></span><br><span class="line">    user_url = 'https:<span class="comment">//www.zhihu.com/api/v4/members/&#123;user&#125;?include=&#123;include&#125;'</span></span><br><span class="line">    follows_url = 'https:<span class="comment">//www.zhihu.com/api/v4/members/&#123;user&#125;/followees?include=&#123;include&#125;&amp;amp;offset=&#123;offset&#125;&amp;amp;limit=&#123;limit&#125;'</span></span><br><span class="line">    start_user = 'excited-vczh'</span><br><span class="line">    user_query = 'locations,employments,gender,educations,business,voteup_count,thanked_Count,follower_count,following_count,cover_url,following_topic_count,following_question_count,following_favlists_count,following_columns_count,answer_count,articles_count,pins_count,question_count,commercial_question_count,favorite_count,favorited_count,logs_count,marked_answers_count,marked_answers_text,message_thread_token,account_status,is_active,is_force_renamed,is_bind_sina,sina_weibo_url,sina_weibo_name,show_sina_weibo,is_blocking,is_blocked,is_following,is_followed,mutual_followees_count,vote_to_count,vote_from_count,thank_to_count,thank_from_count,thanked_count,description,hosted_live_count,participated_live_count,allow_message,industry_category,org_name,org_homepage,badge<span class="literal">[?(<span class="identifier">type</span>=<span 
class="identifier">best_answerer</span>)]</span>.topics'</span><br><span class="line">    follows_query = 'data<span class="literal">[<span class="operator">*</span>]</span>.answer_count,articles_count,gender,follower_count,is_followed,is_following,badge<span class="literal">[?(<span class="identifier">type</span>=<span class="identifier">best_answerer</span>)]</span>.topics'</span><br><span class="line"></span><br><span class="line">    def start<span class="constructor">_requests(<span class="params">self</span>)</span>:</span><br><span class="line">        yield <span class="constructor">Request(<span class="params">self</span>.<span class="params">user_url</span>.<span class="params">format</span>(<span class="params">user</span>=<span class="params">self</span>.<span class="params">start_user</span>, <span class="params">include</span>=<span class="params">self</span>.<span class="params">user_query</span>)</span>, self.parse_user)</span><br><span class="line">        yield <span class="constructor">Request(<span class="params">self</span>.<span class="params">follows_url</span>.<span class="params">format</span>(<span class="params">user</span>=<span class="params">self</span>.<span class="params">start_user</span>, <span class="params">include</span>=<span class="params">self</span>.<span class="params">follows_query</span>, <span class="params">limit</span>=20, <span class="params">offset</span>=0)</span>,</span><br><span class="line">                      self.parse_follows)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>然后我们实现一下两个解析方法parse_user和parse_follows。</p>
                  <figure class="highlight ruby">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">parse_user</span><span class="params">(<span class="keyword">self</span>, response)</span></span><span class="symbol">:</span></span><br><span class="line">    print(response.text)</span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">parse_follows</span><span class="params">(<span class="keyword">self</span>, response)</span></span><span class="symbol">:</span></span><br><span class="line">    print(response.text)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>最简单的实现方式是先将它们接收到的结果直接输出，然后运行观察结果。</p>
                  <figure class="highlight ebnf">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attribute">scrapy crawl zhihu</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>这时你会发现出现了</p>
                  <figure class="highlight basic">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="symbol">401 </span>HTTP status code is <span class="keyword">not</span> handled <span class="keyword">or</span> <span class="keyword">not</span> allowed</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>访问被禁止了，这时我们观察下浏览器请求，发现它相比之前的请求多了一个OAuth请求头。 <img src="https://ww3.sinaimg.cn/large/006tKfTcly1femsckhmbxj30zy0qsaem.jpg" alt=""></p>
                  <h2 id="OAuth"><a href="#OAuth" class="headerlink" title="OAuth"></a>OAuth</h2>
                  <p>它是Open Authorization的缩写。 OAUTH_token:OAUTH进行到最后一步得到的一个“令牌”，通过此“令牌”请求，就可以去拥有资源的网站抓取任意有权限可以被抓取的资源。 在这里我并没有登录知乎，此时请求头中的OAuth值是</p>
                  <figure class="highlight llvm">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">oauth <span class="keyword">c</span><span class="number">3</span>cef<span class="number">7</span><span class="keyword">c</span><span class="number">66</span>a<span class="number">1843</span>f<span class="number">8</span>b<span class="number">3</span>a<span class="number">9e6</span>a<span class="number">1e3160</span>e<span class="number">20</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>经过我长久的观察，这个一直不会改变，所以可以长久使用，我们将它配置到DEFAULT_REQUEST_HEADERS里，这样它就变成了：</p>
                  <figure class="highlight 1c">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">DEFAULT_REQUEST_HEADERS = &#123;</span><br><span class="line">    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.<span class="number">2924.87</span> Safari/537.36',</span><br><span class="line">    'authorization': 'oauth c3cef7c66a<span class="number">1843</span>f8b3a9e6a1e<span class="number">3160</span>e20',</span><br><span class="line">&#125;</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>接下来如果我们重新运行爬虫，就可以发现可以正常爬取了。</p>
                  <h2 id="parse-user"><a href="#parse-user" class="headerlink" title="parse_user"></a>parse_user</h2>
                  <p>接下来我们处理一下用户基本信息，首先我们查看一下接口信息会返回一些什么数据。 <img src="https://ww3.sinaimg.cn/large/006tKfTcly1femsgc32lzj31900r241a.jpg" alt=""> 可以看到返回的结果非常全，在这里我们直接声明一个Item全保存下就好了。 在items里新声明一个UserItem</p>
                  <figure class="highlight angelscript">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> scrapy <span class="keyword">import</span> Item, Field</span><br><span class="line"></span><br><span class="line"><span class="keyword">class</span> <span class="symbol">UserItem</span>(<span class="symbol">Item</span>):</span><br><span class="line">    # <span class="symbol">define</span> <span class="symbol">the</span> <span class="symbol">fields</span> <span class="symbol">for</span> <span class="symbol">your</span> <span class="symbol">item</span> <span class="symbol">here</span> <span class="symbol">like:</span></span><br><span class="line">    <span class="symbol">id</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">name</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">avatar_url</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">headline</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">description</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">url</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">url_token</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">gender</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">cover_url</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">type</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">badge</span> = <span class="symbol">Field</span>()</span><br><span class="line"></span><br><span class="line">    <span class="symbol">answer_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">articles_count</span> = 
<span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">commercial_question_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">favorite_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">favorited_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">follower_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">following_columns_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">following_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">pins_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">question_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">thank_from_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">thank_to_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">thanked_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">vote_from_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">vote_to_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">voteup_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">following_favlists_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">following_question_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">following_topic_count</span> = <span 
class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">marked_answers_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">mutual_followees_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">hosted_live_count</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">participated_live_count</span> = <span class="symbol">Field</span>()</span><br><span class="line"></span><br><span class="line">    <span class="symbol">locations</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">educations</span> = <span class="symbol">Field</span>()</span><br><span class="line">    <span class="symbol">employments</span> = <span class="symbol">Field</span>()</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>所以在解析方法里面我们解析得到的response内容，然后转为json对象，然后依次判断字段是否存在，赋值就好了。</p>
                  <figure class="highlight livecodeserver">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="built_in">result</span> = json.loads(response.<span class="keyword">text</span>)</span><br><span class="line"><span class="keyword">item</span> = UserItem()</span><br><span class="line"><span class="keyword">for</span> field <span class="keyword">in</span> <span class="keyword">item</span>.fields:</span><br><span class="line">    <span class="keyword">if</span> field <span class="keyword">in</span> <span class="built_in">result</span>.<span class="built_in">keys</span>():</span><br><span class="line">        <span class="keyword">item</span>[field] = <span class="built_in">result</span>.<span class="built_in">get</span>(field)</span><br><span class="line">yield <span class="keyword">item</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>得到item后通过yield返回就好了。 这样保存用户基本信息就完成了。 接下来我们还需要在这里获取这个用户的关注列表，所以我们需要再重新发起一个获取关注列表的Request。在parse_user后面再添加如下代码：</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">yield Request(</span><br><span class="line">            self.follows_url.format(<span class="attribute">user</span>=result.get('url_token'), <span class="attribute">include</span>=self.follows_query, <span class="attribute">limit</span>=20, <span class="attribute">offset</span>=0),</span><br><span class="line">            self.parse_follows)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>这样我们又生成了获取该用户关注列表的请求。</p>
                  <h2 id="parse-follows"><a href="#parse-follows" class="headerlink" title="parse_follows"></a>parse_follows</h2>
                  <p>接下来我们处理一下关注列表，首先也是解析response的文本，然后要做两件事：</p>
                  <ul>
                    <li>通过关注列表的每一个用户，对每一个用户发起请求，获取其详细信息。</li>
                    <li>处理分页，判断paging内容，获取下一页关注列表。</li>
                  </ul>
                  <p>所以在这里将parse_follows改写如下：</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">results = json.loads(response.text)</span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> <span class="string">'data'</span> <span class="keyword">in</span> results.keys():</span><br><span class="line">    <span class="keyword">for</span> result <span class="keyword">in</span> results.<span class="builtin-name">get</span>(<span class="string">'data'</span>):</span><br><span class="line">        yield Request(self.user_url.format(<span class="attribute">user</span>=result.get('url_token'), <span class="attribute">include</span>=self.user_query),</span><br><span class="line">                      self.parse_user)</span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> <span class="string">'paging'</span> <span class="keyword">in</span> results.keys() <span class="keyword">and</span> results.<span class="builtin-name">get</span>(<span class="string">'paging'</span>).<span class="builtin-name">get</span>(<span class="string">'is_end'</span>) == <span class="literal">False</span>:</span><br><span class="line">    next_page = results.<span class="builtin-name">get</span>(<span class="string">'paging'</span>).<span class="builtin-name">get</span>(<span class="string">'next'</span>)</span><br><span class="line">    yield Request(next_page,</span><br><span class="line">                  self.parse_follows)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>这样，整体代码如下：</p>
                  <figure class="highlight ruby">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="comment"># -*- coding: utf-8 -*-</span></span><br><span class="line">import json</span><br><span class="line"></span><br><span class="line">from scrapy import Spider, Request</span><br><span class="line">from zhihuuser.items import UserItem</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">ZhihuSpider</span>(<span class="title">Spider</span>):</span></span><br><span class="line">    name = <span class="string">"zhihu"</span></span><br><span class="line">    allowed_domains = [<span class="string">"www.zhihu.com"</span>]</span><br><span class="line">    user_url = <span class="string">'https://www.zhihu.com/api/v4/members/&#123;user&#125;?include=&#123;include&#125;'</span></span><br><span class="line">    follows_url = <span class="string">'https://www.zhihu.com/api/v4/members/&#123;user&#125;/followees?include=&#123;include&#125;&amp;amp;offset=&#123;offset&#125;&amp;amp;limit=&#123;limit&#125;'</span></span><br><span class="line">    start_user = <span class="string">'excited-vczh'</span></span><br><span class="line">    user_query = <span 
class="string">'locations,employments,gender,educations,business,voteup_count,thanked_Count,follower_count,following_count,cover_url,following_topic_count,following_question_count,following_favlists_count,following_columns_count,answer_count,articles_count,pins_count,question_count,commercial_question_count,favorite_count,favorited_count,logs_count,marked_answers_count,marked_answers_text,message_thread_token,account_status,is_active,is_force_renamed,is_bind_sina,sina_weibo_url,sina_weibo_name,show_sina_weibo,is_blocking,is_blocked,is_following,is_followed,mutual_followees_count,vote_to_count,vote_from_count,thank_to_count,thank_from_count,thanked_count,description,hosted_live_count,participated_live_count,allow_message,industry_category,org_name,org_homepage,badge[?(type=best_answerer)].topics'</span></span><br><span class="line">    follows_query = <span class="string">'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'</span></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">start_requests</span><span class="params">(<span class="keyword">self</span>)</span></span><span class="symbol">:</span></span><br><span class="line">        <span class="keyword">yield</span> Request(<span class="keyword">self</span>.user_url.format(user=<span class="keyword">self</span>.start_user, <span class="keyword">include</span>=<span class="keyword">self</span>.user_query), <span class="keyword">self</span>.parse_user)</span><br><span class="line">        <span class="keyword">yield</span> Request(<span class="keyword">self</span>.follows_url.format(user=<span class="keyword">self</span>.start_user, <span class="keyword">include</span>=<span class="keyword">self</span>.follows_query, limit=<span class="number">20</span>, offset=<span class="number">0</span>),</span><br><span class="line">                      <span 
class="keyword">self</span>.parse_follows)</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">parse_user</span><span class="params">(<span class="keyword">self</span>, response)</span></span><span class="symbol">:</span></span><br><span class="line">        result = json.loads(response.text)</span><br><span class="line">        item = UserItem()</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">        <span class="keyword">for</span> field <span class="keyword">in</span> item.<span class="symbol">fields:</span></span><br><span class="line">            <span class="keyword">if</span> field <span class="keyword">in</span> result.keys()<span class="symbol">:</span></span><br><span class="line">                item[field] = result.get(field)</span><br><span class="line">        <span class="keyword">yield</span> item</span><br><span class="line"></span><br><span class="line">        <span class="keyword">yield</span> Request(</span><br><span class="line">            <span class="keyword">self</span>.follows_url.format(user=result.get(<span class="string">'url_token'</span>), <span class="keyword">include</span>=<span class="keyword">self</span>.follows_query, limit=<span class="number">20</span>, offset=<span class="number">0</span>),</span><br><span class="line">            <span class="keyword">self</span>.parse_follows)</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">parse_follows</span><span class="params">(<span class="keyword">self</span>, response)</span></span><span class="symbol">:</span></span><br><span class="line">        results = json.loads(response.text)</span><br><span class="line"></span><br><span class="line">        <span class="keyword">if</span> <span class="string">'data'</span> <span class="keyword">in</span> results.keys()<span 
class="symbol">:</span></span><br><span class="line">            <span class="keyword">for</span> result <span class="keyword">in</span> results.get(<span class="string">'data'</span>)<span class="symbol">:</span></span><br><span class="line">                <span class="keyword">yield</span> Request(<span class="keyword">self</span>.user_url.format(user=result.get(<span class="string">'url_token'</span>), <span class="keyword">include</span>=<span class="keyword">self</span>.user_query),</span><br><span class="line">                              <span class="keyword">self</span>.parse_user)</span><br><span class="line"></span><br><span class="line">        <span class="keyword">if</span> <span class="string">'paging'</span> <span class="keyword">in</span> results.keys() <span class="keyword">and</span> results.get(<span class="string">'paging'</span>).get(<span class="string">'is_end'</span>) == <span class="symbol">False:</span></span><br><span class="line">            next_page = results.get(<span class="string">'paging'</span>).get(<span class="string">'next'</span>)</span><br><span class="line">            <span class="keyword">yield</span> Request(next_page,</span><br><span class="line">                          <span class="keyword">self</span>.parse_follows)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>这样我们就完成了获取用户基本信息，然后递归获取关注列表进一步请求了。 重新运行爬虫，可以发现当前已经可以实现循环递归爬取了。</p>
                  <h2 id="followers"><a href="#followers" class="headerlink" title="followers"></a>followers</h2>
                  <p>上面我们实现了通过获取关注列表实现爬取循环，那这里少不了的还有粉丝列表，经过分析后发现粉丝列表的api也类似，只不过把followee换成了follower，其他的完全相同，所以我们按照同样的逻辑添加followers相关信息，最终spider代码如下：</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span 
class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="comment"># -*- coding: utf-8 -*-</span></span><br><span class="line">import json</span><br><span class="line"></span><br><span class="line"><span class="keyword">from</span> scrapy import Spider, Request</span><br><span class="line"><span class="keyword">from</span> zhihuuser.items import UserItem</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">class ZhihuSpider(Spider):</span><br><span class="line">    name = <span class="string">"zhihu"</span></span><br><span class="line">    allowed_domains = [<span class="string">"www.zhihu.com"</span>]</span><br><span class="line">    user_url = <span class="string">'https://www.zhihu.com/api/v4/members/&#123;user&#125;?include=&#123;include&#125;'</span></span><br><span class="line">    follows_url = <span class="string">'https://www.zhihu.com/api/v4/members/&#123;user&#125;/followees?include=&#123;include&#125;&amp;offset=&#123;offset&#125;&amp;limit=&#123;limit&#125;'</span></span><br><span class="line">    followers_url = <span class="string">'https://www.zhihu.com/api/v4/members/&#123;user&#125;/followers?include=&#123;include&#125;&amp;offset=&#123;offset&#125;&amp;limit=&#123;limit&#125;'</span></span><br><span class="line">    start_user = <span class="string">'tianshansoft'</span></span><br><span class="line">    user_query = <span 
class="string">'locations,employments,gender,educations,business,voteup_count,thanked_Count,follower_count,following_count,cover_url,following_topic_count,following_question_count,following_favlists_count,following_columns_count,answer_count,articles_count,pins_count,question_count,commercial_question_count,favorite_count,favorited_count,logs_count,marked_answers_count,marked_answers_text,message_thread_token,account_status,is_active,is_force_renamed,is_bind_sina,sina_weibo_url,sina_weibo_name,show_sina_weibo,is_blocking,is_blocked,is_following,is_followed,mutual_followees_count,vote_to_count,vote_from_count,thank_to_count,thank_from_count,thanked_count,description,hosted_live_count,participated_live_count,allow_message,industry_category,org_name,org_homepage,badge[?(type=best_answerer)].topics'</span></span><br><span class="line">    follows_query = <span class="string">'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'</span></span><br><span class="line">    followers_query = <span class="string">'data[*].answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics'</span></span><br><span class="line"></span><br><span class="line">    def start_requests(self):</span><br><span class="line">        yield Request(self.user_url.format(<span class="attribute">user</span>=self.start_user, <span class="attribute">include</span>=self.user_query), self.parse_user)</span><br><span class="line">        yield Request(self.follows_url.format(<span class="attribute">user</span>=self.start_user, <span class="attribute">include</span>=self.follows_query, <span class="attribute">limit</span>=20, <span class="attribute">offset</span>=0),</span><br><span class="line">                      self.parse_follows)</span><br><span class="line">        yield Request(self.followers_url.format(<span class="attribute">user</span>=self.start_user, <span 
class="attribute">include</span>=self.followers_query, <span class="attribute">limit</span>=20, <span class="attribute">offset</span>=0),</span><br><span class="line">                      self.parse_followers)</span><br><span class="line"></span><br><span class="line">    def parse_user(self, response):</span><br><span class="line">        result = json.loads(response.text)</span><br><span class="line">        item = UserItem()</span><br><span class="line"></span><br><span class="line">        <span class="keyword">for</span> field <span class="keyword">in</span> item.fields:</span><br><span class="line">            <span class="keyword">if</span> field <span class="keyword">in</span> result.keys():</span><br><span class="line">                item[field] = result.<span class="builtin-name">get</span>(field)</span><br><span class="line">        yield item</span><br><span class="line"></span><br><span class="line">        yield Request(</span><br><span class="line">            self.follows_url.format(<span class="attribute">user</span>=result.get('url_token'), <span class="attribute">include</span>=self.follows_query, <span class="attribute">limit</span>=20, <span class="attribute">offset</span>=0),</span><br><span class="line">            self.parse_follows)</span><br><span class="line"></span><br><span class="line">        yield Request(</span><br><span class="line">            self.followers_url.format(<span class="attribute">user</span>=result.get('url_token'), <span class="attribute">include</span>=self.followers_query, <span class="attribute">limit</span>=20, <span class="attribute">offset</span>=0),</span><br><span class="line">            self.parse_followers)</span><br><span class="line"></span><br><span class="line">    def parse_follows(self, response):</span><br><span class="line">        results = json.loads(response.text)</span><br><span class="line"></span><br><span class="line">        <span class="keyword">if</span> <span 
class="string">'data'</span> <span class="keyword">in</span> results.keys():</span><br><span class="line">            <span class="keyword">for</span> result <span class="keyword">in</span> results.<span class="builtin-name">get</span>(<span class="string">'data'</span>):</span><br><span class="line">                yield Request(self.user_url.format(<span class="attribute">user</span>=result.get('url_token'), <span class="attribute">include</span>=self.user_query),</span><br><span class="line">                              self.parse_user)</span><br><span class="line"></span><br><span class="line">        <span class="keyword">if</span> <span class="string">'paging'</span> <span class="keyword">in</span> results.keys() <span class="keyword">and</span> results.<span class="builtin-name">get</span>(<span class="string">'paging'</span>).<span class="builtin-name">get</span>(<span class="string">'is_end'</span>) == <span class="literal">False</span>:</span><br><span class="line">            next_page = results.<span class="builtin-name">get</span>(<span class="string">'paging'</span>).<span class="builtin-name">get</span>(<span class="string">'next'</span>)</span><br><span class="line">            yield Request(next_page,</span><br><span class="line">                          self.parse_follows)</span><br><span class="line"></span><br><span class="line">    def parse_followers(self, response):</span><br><span class="line">        results = json.loads(response.text)</span><br><span class="line"></span><br><span class="line">        <span class="keyword">if</span> <span class="string">'data'</span> <span class="keyword">in</span> results.keys():</span><br><span class="line">            <span class="keyword">for</span> result <span class="keyword">in</span> results.<span class="builtin-name">get</span>(<span class="string">'data'</span>):</span><br><span class="line">                yield Request(self.user_url.format(<span 
class="attribute">user</span>=result.get('url_token'), <span class="attribute">include</span>=self.user_query),</span><br><span class="line">                              self.parse_user)</span><br><span class="line"></span><br><span class="line">        <span class="keyword">if</span> <span class="string">'paging'</span> <span class="keyword">in</span> results.keys() <span class="keyword">and</span> results.<span class="builtin-name">get</span>(<span class="string">'paging'</span>).<span class="builtin-name">get</span>(<span class="string">'is_end'</span>) == <span class="literal">False</span>:</span><br><span class="line">            next_page = results.<span class="builtin-name">get</span>(<span class="string">'paging'</span>).<span class="builtin-name">get</span>(<span class="string">'next'</span>)</span><br><span class="line">            yield Request(next_page,</span><br><span class="line">                          self.parse_followers)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>需要改变的位置有</p>
                  <ul>
                    <li>start_requests里面添加yield followers信息</li>
                    <li>parse_user里面添加yield followers信息</li>
                    <li>parse_followers做相应的抓取详情请求和翻页。</li>
                  </ul>
                  <p>如此一来，spider就完成了，这样我们就可以实现通过社交网络递归的爬取，把用户详情都爬下来。</p>
                  <h2 id="小结"><a href="#小结" class="headerlink" title="小结"></a>小结</h2>
                  <p>通过以上的spider，我们实现了如上逻辑：</p>
                  <ul>
                    <li>start_requests方法，实现了第一个大V用户的详细信息请求还有他的粉丝和关注列表请求。</li>
                    <li>parse_user方法，实现了详细信息的提取和粉丝关注列表的获取。</li>
                    <li>parse_follows，实现了通过关注列表重新请求用户并进行翻页的功能。</li>
                    <li>parse_followers，实现了通过粉丝列表重新请求用户并进行翻页的功能。</li>
                  </ul>
                  <h2 id="加入pipeline"><a href="#加入pipeline" class="headerlink" title="加入pipeline"></a>加入pipeline</h2>
                  <p>在这里数据库存储使用MongoDB，所以在这里我们需要借助于Item Pipeline，实现如下：</p>
                  <figure class="highlight ruby">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">MongoPipeline</span>(<span class="title">object</span>):</span></span><br><span class="line">    collection_name = <span class="string">'users'</span></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">__init__</span><span class="params">(<span class="keyword">self</span>, mongo_uri, mongo_db)</span></span><span class="symbol">:</span></span><br><span class="line">        <span class="keyword">self</span>.mongo_uri = mongo_uri</span><br><span class="line">        <span class="keyword">self</span>.mongo_db = mongo_db</span><br><span class="line"></span><br><span class="line">    @classmethod</span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">from_crawler</span><span class="params">(cls, crawler)</span></span><span class="symbol">:</span></span><br><span class="line">        <span class="keyword">return</span> cls(</span><br><span class="line">            mongo_uri=crawler.settings.get(<span class="string">'MONGO_URI'</span>),</span><br><span class="line">            mongo_db=crawler.settings.get(<span class="string">'MONGO_DATABASE'</span>)</span><br><span class="line">        )</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">open_spider</span><span class="params">(<span class="keyword">self</span>, spider)</span></span><span class="symbol">:</span></span><br><span class="line">        <span class="keyword">self</span>.client = pymongo.MongoClient(<span class="keyword">self</span>.mongo_uri)</span><br><span class="line">        <span class="keyword">self</span>.db = <span class="keyword">self</span>.client[<span class="keyword">self</span>.mongo_db]</span><br><span class="line"></span><br><span class="line">    
<span class="function"><span class="keyword">def</span> <span class="title">close_spider</span><span class="params">(<span class="keyword">self</span>, spider)</span></span><span class="symbol">:</span></span><br><span class="line">        <span class="keyword">self</span>.client.close()</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">process_item</span><span class="params">(<span class="keyword">self</span>, item, spider)</span></span><span class="symbol">:</span></span><br><span class="line">        <span class="keyword">self</span>.db[<span class="keyword">self</span>.collection_name].update(&#123;<span class="string">'url_token'</span>: item[<span class="string">'url_token'</span>]&#125;, &#123;<span class="string">'$set'</span>: dict(item)&#125;, True)</span><br><span class="line">        <span class="keyword">return</span> item</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>比较重要的一点就在于process_item，在这里使用了update方法，第一个参数传入查询条件，这里使用的是url_token，第二个参数传入字典类型的对象，就是我们的item，第三个参数传入True，这样就可以保证，如果查询数据存在的话就更新，不存在的话就插入。这样就可以保证去重了。 另外记得开启一下Item Pipeline</p>
                  <figure class="highlight ebnf">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attribute">ITEM_PIPELINES</span> = &#123;</span><br><span class="line">    <span class="string">'zhihuuser.pipelines.MongoPipeline'</span>: 300,</span><br><span class="line">&#125;</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>然后重新运行爬虫</p>
                  <figure class="highlight ebnf">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attribute">scrapy crawl zhihu</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>这样就可以发现正常的输出了，会一直不停地运行，用户也一个个被保存到数据库。 <img src="https://ww3.sinaimg.cn/large/006tKfTcly1femt2bisibj31kw0qa7fr.jpg" alt=""> 看下MongoDB，里面我们爬取的用户详情结果。 <img src="https://ww4.sinaimg.cn/large/006tKfTcgy1femtnv605cj31kw134guj.jpg" alt=""> 到现在为止，整个爬虫就基本完结了，我们主要通过递归的方式实现了这个逻辑。存储结果也通过适当的方法实现了去重。</p>
                  <h2 id="更高效率"><a href="#更高效率" class="headerlink" title="更高效率"></a>更高效率</h2>
                  <p>当然我们现在运行的是单机爬虫，只在一台电脑上运行速度是有限的，所以后面我们要想提高抓取效率，需要用到分布式爬虫，在这里需要用到Redis来维护一个公共的爬取队列。 更多的分布式爬虫的实现可以查看<a href="https://edu.hellobi.com/course/157" target="_blank" rel="noopener">自己动手，丰衣足食！Python3网络爬虫实战案例</a></p>
                  </p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/崔庆才" class="author" itemprop="url" rel="index">崔庆才</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2017-04-15 03:36:48" itemprop="dateCreated datePublished" datetime="2017-04-15T03:36:48+08:00">2017-04-15</time>
                </span>
                <span id="/4380.html" class="post-meta-item leancloud_visitors" data-flag-title="利用Scrapy爬取知乎用户详细信息并存至MongoDB" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>15k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>14 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/4352.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> Python <i class="label-arrow"></i>
                  </a>
                  <a href="/4352.html" class="post-title-link" itemprop="url">小白学爬虫系列教程</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <img itemprop="contentUrl" class="random">
                </div>
                <div class="excerpt">
                  <p>
                  <p><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161021225948.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161021225948.jpg" alt="QQ图片20161021225948"></a> 听大才哥说好像我的文章挺难找的，这整理一下。</p>
                  <h2 id="基础知识篇："><a href="#基础知识篇：" class="headerlink" title="基础知识篇："></a>基础知识篇：</h2>
                  <p>这玩意儿我没写，各位参考大才哥的： <a href="http://cuiqingcai.com/1052.html">Python爬虫学习系列教程</a> <a href="http://cuiqingcai.com/4320.html">Python3爬虫学习视频教程</a></p>
                  <h2 id="小白系列教程"><a href="#小白系列教程" class="headerlink" title="小白系列教程"></a>小白系列教程</h2>
                  <p><a href="http://cuiqingcai.com/3179.html">小白爬虫第一弹之抓取妹子图</a> <a href="http://cuiqingcai.com/3256.html">小白爬虫第二弹之健壮的小爬虫</a> <a href="http://cuiqingcai.com/3314.html">小白爬虫第三弹之去重去重</a> <a href="http://cuiqingcai.com/3363.html">小白爬虫第四弹之爬虫快跑（多进程+多线程）</a> <a href="http://cuiqingcai.com/3472.html">小白进阶之Scrapy第一篇</a> <a href="http://cuiqingcai.com/3952.html">小白进阶之Scrapy第二篇（登录篇）</a> <a href="http://cuiqingcai.com/4048.html">小白进阶之</a><a href="http://cuiqingcai.com/4020.html">Scrapy分布式的前篇—让redis和MongoDB安全点</a> <a href="http://cuiqingcai.com/4048.html">小白进阶之Scrapy第三篇（基于Scrapy-Redis的分布式以及cookies池）</a> <a href="http://cuiqingcai.com/4421.html">小白进阶之Scrapy第四篇（图片下载管道篇）</a> <a href="http://cuiqingcai.com/4725.html">小白进阶之Scrapy第五篇（Scrapy-Splash配合CrawlSpider；瞎几把整的）</a> <a href="http://cuiqingcai.com/4652.html">利用新接口抓取微信公众号的所有文章</a> <a href="https://cuiqingcai.com/6058.html">小白进阶之</a><a href="http://cuiqingcai.com/4725.html">Scrapy第六篇</a><a href="https://cuiqingcai.com/6058.html">Scrapy-Redis详解</a> <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161021225948.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161021225948.jpg" alt="QQ图片20161021225948"></a> 暂时就这些了、最近工作刚入职。上了个新项目，没时间更新文章了（主要是我懒、挤点时间都用来打LOL了···············尴尬脸） 等项目第一期结束了，我会把以前许诺的 ：JS异步加载 | 动态爬虫 更新出来。 感谢大才哥的平台（有兴趣的小伙伴一起来更新文章啊！ 才不会告诉你们：我扯着大才哥的大旗找了个不错的工作。手动笑哭······） <strong>如果以上网站有更改无法正常采集，请PM我一下，我尽量保证demo的可用性</strong></p>
                  </p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/哎哟卧槽" class="author" itemprop="url" rel="index">哎哟卧槽</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2017-04-11 20:31:03" itemprop="dateCreated datePublished" datetime="2017-04-11T20:31:03+08:00">2017-04-11</time>
                </span>
                <span id="/4352.html" class="post-meta-item leancloud_visitors" data-flag-title="小白学爬虫系列教程" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>569</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>1 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/4347.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> Other <i class="label-arrow"></i>
                  </a>
                  <a href="/4347.html" class="post-title-link" itemprop="url">本站投稿功能已关闭</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <img itemprop="contentUrl" class="random">
                </div>
                <div class="excerpt">
                  <p>
                  <h2 id="公告"><a href="#公告" class="headerlink" title="公告"></a>公告</h2>
                  <p>大家好，本站于今日（2017.4.11）关闭投稿功能。</p>
                  <h2 id="原因"><a href="#原因" class="headerlink" title="原因"></a>原因</h2>
                  <p>由于之前本站开放了投稿注册接口，该接口现在被人利用，每天都会发送垃圾邮件，经常导致邮箱发信过多而被冻结，而WordPress本身没有提供验证码验证，所以自己也不想再去修改，当然最主要的是能发优质文章的又是少之又少，经常会出现一些垃圾草稿，所以博主决定直接将投稿功能关闭，希望大家可以理解。</p>
                  <h2 id="投稿"><a href="#投稿" class="headerlink" title="投稿"></a>投稿</h2>
                  <p>如果您有在本站投稿意向，请直接联系我邮件cqc@cuiqingcai.com，我为您注册账号并开通写作权限。</p>
                  <h2 id="鸣谢"><a href="#鸣谢" class="headerlink" title="鸣谢"></a>鸣谢</h2>
                  <p>非常感谢在本站投稿的童鞋，尤其是卧槽哥，发表了很多篇高质量爬虫文章。另外还有戴笠兄也是，不过后来戴笠兄的文章因为开车过猛而下架了哈哈，不过还是非常感谢。另外也非常感谢其他在本站投稿的小伙伴，在这不一一点名啦！</p>
                  <h2 id="结语"><a href="#结语" class="headerlink" title="结语"></a>结语</h2>
                  <p>最后希望大家可以理解，也非常感谢大家的支持！前一段时间忙着在录制爬虫视频，今天刚刚收尾，现在已经更新完毕，后面我将学习一些数据分析、自然语言处理、Web安全方面的知识分享给大家，希望大家多多支持！感谢！</p>
                  </p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/崔庆才" class="author" itemprop="url" rel="index">崔庆才</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2017-04-11 20:03:03" itemprop="dateCreated datePublished" datetime="2017-04-11T20:03:03+08:00">2017-04-11</time>
                </span>
                <span id="/4347.html" class="post-meta-item leancloud_visitors" data-flag-title="本站投稿功能已关闭" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>440</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>1 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/4320.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> Python <i class="label-arrow"></i>
                  </a>
                  <a href="/4320.html" class="post-title-link" itemprop="url">Python3爬虫视频学习教程</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <img itemprop="contentUrl" class="random">
                </div>
                <div class="excerpt">
                  <p>
                  <h2 id="课程链接"><a href="#课程链接" class="headerlink" title="课程链接"></a>课程链接</h2>
                  <p><strong>天善智能：<a href="https://edu.hellobi.com/course/157" target="_blank" rel="noopener">自己动手，丰衣足食！Python3网络爬虫实战案例</a></strong> <strong>网易云课堂：<a href="http://study.163.com/course/courseMain.htm?courseId=1003827039&amp;utm_campaign=commission&amp;utm_source=cp-1018878377&amp;utm_medium=share" target="_blank" rel="noopener">自己动手，丰衣足食！Python3网络爬虫实战案例</a></strong></p>
                  <h2 id="课程简介"><a href="#课程简介" class="headerlink" title="课程简介"></a>课程简介</h2>
                  <p>大家好哈，现在呢静觅博客已经两年多啦，可能大家过来更多看到的是爬虫方面的博文，首先非常感谢大家的支持，希望我的博文对大家有帮助！ 之前我写了一些Python爬虫方面的文章，<a href="http://cuiqingcai.com/1052.html">Python爬虫学习系列教程</a>，涉及到了基础和进阶的一些内容，当时更多用到的是Urllib还有正则，后来又陆续增加了一些文章，在学习过程中慢慢积累慢慢成型了一套算不上教程的教程，后来有越来越多的小伙伴学习和支持我感到非常开心，再次感谢大家！ 不过其实这些教程总的来说有一些问题：</p>
                  <ol>
                    <li>当时用的Python2写的，刚写的时候Scrapy这个框架也没有支持Python3，一些Python3爬虫库也不怎么成熟，所以当时选择了Python2。但到现在，Python3发展迅速，爬虫库也越来越成熟，而且Python2在不久的将来就会停止维护了，所以慢慢地，我的语言重心也慢慢转向了Python3，我也相信Python3会成为主流。所以说之前的一套课程算是有点过时了，相信大家肯定还在寻找Python3的一些教程。</li>
                    <li>当时学习的时候主要用的urllib，正则，所以这些文章的较大篇幅也都是urllib和正则的一些东西，后来的一些高级库都是在后面慢慢加的，而且一些高级的框架用法也没有做深入讲解，所以感觉整个内容有点头重脚轻，安排不合理。而且现在分布式越来越火，那么分布式爬虫的应用相必也是越来越广泛，之前的课程也没有做系统讲解。</li>
                    <li>在介绍一些操作的时候可能介绍不全面，环境的配置也没有兼顾各个平台，所以可能有些小伙伴摸不着头脑，可能卡在某一步不知道接下来是怎么做的了。</li>
                  </ol>
                  <p>那么综合上面的问题呢，最近我花了前前后后将近一个月的时间录制了一套新的Python3爬虫视频教程，将我之前做爬虫的一些经验重新梳理和整合，利用Python3编写，从环境配置、基础库讲解到案例实战、框架使用，最后再到分布式爬虫进行了比较系统的讲解。 课程内容是这个样子的：</p>
                  <h3 id="一、环境篇"><a href="#一、环境篇" class="headerlink" title="一、环境篇"></a><strong>一、环境篇</strong></h3>
                  <ul>
                    <li>Python3+Pip环境配置</li>
                    <li>MongoDB环境配置</li>
                    <li>Redis环境配置</li>
                    <li>MySQL环境配置</li>
                    <li>Python多版本共存配置</li>
                    <li>Python爬虫常用库的安装</li>
                  </ul>
                  <h3 id="二、基础篇"><a href="#二、基础篇" class="headerlink" title="二、基础篇"></a><strong>二、基础篇</strong></h3>
                  <ul>
                    <li>爬虫基本原理</li>
                    <li>Urllib库基本使用</li>
                    <li>Requests库基本使用</li>
                    <li>正则表达式基础</li>
                    <li>BeautifulSoup详解</li>
                    <li>PyQuery详解</li>
                    <li>Selenium详解</li>
                  </ul>
                  <h3 id="三、实战篇"><a href="#三、实战篇" class="headerlink" title="三、实战篇"></a><strong>三、实战篇</strong></h3>
                  <ul>
                    <li>使用Requests+正则表达式爬取猫眼电影</li>
                    <li>分析Ajax请求并抓取今日头条街拍美图</li>
                    <li>使用Selenium模拟浏览器抓取淘宝商品美食信息</li>
                    <li>使用Redis+Flask维护动态代理池</li>
                    <li>使用代理处理反爬抓取微信文章</li>
                    <li>使用Redis+Flask维护动态Cookies池</li>
                  </ul>
                  <h3 id="四、框架篇"><a href="#四、框架篇" class="headerlink" title="四、框架篇"></a><strong>四、框架篇</strong></h3>
                  <ul>
                    <li>PySpider框架基本使用及抓取TripAdvisor实战</li>
                    <li>PySpider架构概述及用法详解</li>
                    <li>Scrapy框架的安装</li>
                    <li>Scrapy框架基本使用</li>
                    <li>Scrapy命令行详解</li>
                    <li>Scrapy中选择器的用法</li>
                    <li>Scrapy中Spiders的用法</li>
                    <li>Scrapy中Item Pipeline的用法</li>
                    <li>Scrapy中Download Middleware的用法</li>
                    <li>Scrapy爬取知乎用户信息实战</li>
                    <li>Scrapy+Cookies池抓取新浪微博</li>
                    <li>Scrapy+Tushare爬取微博股票数据</li>
                  </ul>
                  <h3 id="五、分布式篇"><a href="#五、分布式篇" class="headerlink" title="五、分布式篇"></a><strong>五、分布式篇</strong></h3>
                  <ul>
                    <li>Scrapy分布式原理及Scrapy-Redis源码解析</li>
                    <li>Scrapy分布式架构搭建抓取知乎</li>
                    <li>Scrapy分布式的部署详解</li>
                  </ul>
                  <p>整个课程是从小白起点的，从环境配置和基础开始讲起，环境安装部分三大平台都有介绍，实战的部分我是一边写一边讲解，还有一些分布式爬虫的搭建流程也做了介绍。 不过这个课程是收费的，其实里面也包含了我学习爬虫以来的经验和汗水，我在做讲解的时候也会把我学习爬虫的一些思路和想法讲解出来，避免大家走一些弯路，希望大家可以支持一下！ 不过在这里有免费的视频，是属于整个课程的一部分，大家可以直接观看 <strong><a href="https://edu.hellobi.com/course/156" target="_blank" rel="noopener">Python3爬虫三大案例实战分享</a></strong> 整套视频课程放在天善智能这边了，大家如果感兴趣的话可以直接在这里购买，499元。 课程链接如下： <strong>天善智能：<a href="https://edu.hellobi.com/course/157" target="_blank" rel="noopener">自己动手，丰衣足食！Python3网络爬虫实战案例</a></strong> <strong>网易云课堂：<a href="http://study.163.com/course/courseMain.htm?courseId=1003827039&amp;utm_campaign=commission&amp;utm_source=cp-1018878377&amp;utm_medium=share" target="_blank" rel="noopener">自己动手，丰衣足食！Python3网络爬虫实战案例</a></strong> <a href="https://edu.hellobi.com/course/157" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/04/WechatIMG257-1.jpeg" alt=""></a> 最后的最后希望大家可以多多支持！非常感谢！知识就是力量！也希望我的课程能为您创造更大的财富！</p>
                  </p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/崔庆才" class="author" itemprop="url" rel="index">崔庆才</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2017-04-11 00:38:31" itemprop="dateCreated datePublished" datetime="2017-04-11T00:38:31+08:00">2017-04-11</time>
                </span>
                <span id="/4320.html" class="post-meta-item leancloud_visitors" data-flag-title="Python3爬虫视频学习教程" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>1.8k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>2 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/4244.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> Python <i class="label-arrow"></i>
                  </a>
                  <a href="/4244.html" class="post-title-link" itemprop="url">Scrapy小技巧-MySQL存储</a>
                </h2>
              </header>
              <!-- Post body: JS-populated random thumbnail + article excerpt.
                   Fixed: removed the invalid <p> wrapper that enclosed the excerpt's
                   own <p> and <figure> children — a <p> cannot contain flow content,
                   so browsers auto-closed it and left a stray </p>. Rendering is
                   unchanged; the markup is now valid. Also added an empty alt to the
                   decorative thumbnail image (alt is required on <img>). -->
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <!-- src is assigned client-side (class="random"); empty alt marks it decorative -->
                  <img itemprop="contentUrl" class="random" alt="">
                </div>
                <div class="excerpt">
                  <p><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/吃惊表情1.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/吃惊表情1.jpg" alt="吃惊表情1"></a> 这两天上班接手，别人留下来的爬虫发现一个很好玩的 SQL脚本拼接。 只要你的Scrapy Field字段名字和 数据库字段的名字 一样。那么恭喜你你就可以拷贝这段SQL拼接脚本。进行MySQL入库处理。 具体拼接代码如下：</p>
                  <figure class="highlight livecodeserver">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">def process_item(self, <span class="keyword">item</span>, spider):</span><br><span class="line">    <span class="keyword">if</span> isinstance(<span class="keyword">item</span>, WhoscoredNewItem):</span><br><span class="line">        table_name = <span class="keyword">item</span>.pop(<span class="string">'table_name'</span>)</span><br><span class="line">        col_str = <span class="string">''</span></span><br><span class="line">        row_str = <span class="string">''</span></span><br><span class="line">        <span class="keyword">for</span> key <span class="keyword">in</span> <span class="keyword">item</span>.<span class="built_in">keys</span>():</span><br><span class="line">            col_str = col_str + <span class="string">" "</span> + key + <span class="string">","</span></span><br><span class="line">            row_str = <span class="string">"&#123;&#125;'&#123;&#125;',"</span>.<span class="built_in">format</span>(row_str, <span class="keyword">item</span>[key] <span class="keyword">if</span> <span class="string">"'"</span> <span class="keyword">not</span> <span class="keyword">in</span> <span class="keyword">item</span>[key] <span class="keyword">else</span> <span class="keyword">item</span>[key].<span class="built_in">replace</span>(<span class="string">"'"</span>, <span class="string">"\\'"</span>))</span><br><span class="line">            sql = <span class="string">"insert INTO &#123;&#125; (&#123;&#125;) VALUES (&#123;&#125;) ON DUPLICATE KEY UPDATE "</span>.<span class="built_in">format</span>(table_name, col_str[<span class="number">1</span>:<span class="number">-1</span>], row_str[:<span class="number">-1</span>])</span><br><span class="line">        <span class="keyword">for</span> (key, <span class="built_in">value</span>) <span class="keyword">in</span> <span class="literal">six</span>.iteritems(<span class="keyword">item</span>):</span><br><span class="line">            sql += <span 
class="string">"&#123;&#125; = '&#123;&#125;', "</span>.<span class="built_in">format</span>(key, <span class="built_in">value</span> <span class="keyword">if</span> <span class="string">"'"</span> <span class="keyword">not</span> <span class="keyword">in</span> <span class="built_in">value</span> <span class="keyword">else</span> <span class="built_in">value</span>.<span class="built_in">replace</span>(<span class="string">"'"</span>, <span class="string">"\\'"</span>))</span><br><span class="line">        sql = sql[:<span class="number">-2</span>]</span><br><span class="line">        self.cursor.execute(sql) <span class="comment">#执行SQL</span></span><br><span class="line">        self.cnx.commit()<span class="comment"># 写入操作</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>这个SQL拼接实现了，如果数据库存在相同数据则 更新，不存在则插入 的SQL语句 具体实现就是第一个for循环，获取key作为MySQL字段名字、VALUES做为SQL的VALUES（拼接成一个插入的SQL语句） 第二个for循环，实现了 字段名 = VALUES的拼接。 和第一个for循环的中的sql就组成了 insert into XXXXX on duplicate key update 这个。存在则更新 不存在则插入的SQL语句。 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161021225948.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161021225948.jpg" alt="QQ图片20161021225948"></a> 我只能所 6666666666 写这个拼接的小哥儿有想法。还挺通用。 不知道你们有没有想到这种方法 反正我是没想到。</p>
                </div>
              </div>
              <!-- Post metadata row: author, publish date, LeanCloud view counter,
                   word count, estimated reading time.
                   Fixed: decorative Font Awesome <i> icons now carry aria-hidden="true"
                   so screen readers skip them — each icon's meaning is already conveyed
                   by the adjacent visible text (作者 / 发表于 / 阅读次数 / …). -->
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/哎哟卧槽" class="author" itemprop="url" rel="index">哎哟卧槽</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2017-03-19 17:05:09" itemprop="dateCreated datePublished" datetime="2017-03-19T17:05:09+08:00">2017-03-19</time>
                </span>
                <!-- id is the page path: the LeanCloud visitor-counter script keys the
                     count on this id, so it must stay exactly "/4244.html" -->
                <span id="/4244.html" class="post-meta-item leancloud_visitors" data-flag-title="Scrapy小技巧-MySQL存储" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>989</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>1 分钟</span>
                </span>
              </div>
            </article>
            <!-- Article card for post /4197.html.
                 Fixed: (1) removed the invalid <p> wrapper that enclosed the excerpt's
                 <p>/<ol>/<figure> children (a <p> cannot contain flow content; browsers
                 auto-closed it, leaving a stray </p> — rendering unchanged, markup now
                 valid); (2) added empty alt to the JS-populated thumbnail <img>;
                 (3) marked decorative Font Awesome icons with aria-hidden="true". -->
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/4197.html">
              <!-- Hidden schema.org author/publisher microdata -->
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> PHP <i class="label-arrow"></i>
                  </a>
                  <a href="/4197.html" class="post-title-link" itemprop="url">WordPress 远程附件上传插件 For 又拍云【升级版】</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <!-- src is assigned client-side (class="random"); empty alt marks it decorative -->
                  <img itemprop="contentUrl" class="random" alt="">
                </div>
                <div class="excerpt">
                  <p>今天给大家介绍 WordPress Plugin for UPYUN 插件，专为<a href="https://www.upyun.com/index.html" target="_blank" rel="noopener">又拍云</a>和 WordPress 用户准备，主要功能如下：</p>
                  <ol>
                    <li>可以与 WordPress 无缝结合，通过 WordPress 上传图片和文件到又拍云, 支持大文件上传（需要开启表单 API) 和防盗链功能</li>
                    <li>支持同步删除（在 WordPress 后台媒体管理 “删除” 附件后，又拍云服务器中的文件也随之删除)</li>
                    <li>增加图片编辑功能</li>
                    <li>优化防盗链功能</li>
                    <li>增加与水印插件的兼容性，使上传到远程服务器的图片同样可以加上水印等</li>
                  </ol>
                  <p>PS：修复了很多之前版本存在的 bug，具体可访问：<a href="https://github.com/ihacklog/hacklog-remote-attachment-upyun" target="_blank" rel="noopener">github</a> 又拍云是以 CDN 为核心业务，另外提供云存储、云处理、云安全、流量营销等的云服务商，有开放且可扩展的API，以及开放的SDK和第三方插件，还针对开发者启动了 <a href="https://www.upyun.com/league.html" target="_blank" rel="noopener">又拍云联盟</a> 活动，可以每月获取免费空间和流量。更多介绍，请访问<a href="https://www.upyun.com/index.html" target="_blank" rel="noopener">又拍云</a>。 <strong>安装插件：</strong> 进入到你的 WordPress 的 wp-content/plugins 目录下</p>
                  <figure class="highlight vim">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">` # <span class="keyword">pwd</span>/home/wwwroot/blog.v5linux.<span class="keyword">com</span>/<span class="keyword">wp</span>-content/plugins`</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>克隆插件</p>
                  <figure class="highlight angelscript">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">` # git clone https:<span class="comment">//github.com/ihacklog/hacklog-remote-attachment-upyun.</span></span><br><span class="line">gitInitialized empty Git repository <span class="keyword">in</span> /home/wwwroot/blog.v5linux.com/wp-</span><br><span class="line">content/plugins/hacklog-remote-attachment-upyun/.git/remote: Counting </span><br><span class="line">objects: <span class="number">387</span>, done.remote: Compressing objects: <span class="number">100</span>% (<span class="number">31</span>/<span class="number">31</span>), done.</span><br><span class="line">remote: Total <span class="number">387</span> (delta <span class="number">16</span>), reused <span class="number">0</span> (delta <span class="number">0</span>), pack-reused <span class="number">356</span>Receiving </span><br><span class="line">objects: <span class="number">100</span>% (<span class="number">387</span>/<span class="number">387</span>), <span class="number">399.17</span> KiB | <span class="number">106</span> KiB/s, done.Resolving deltas:</span><br><span class="line"> <span class="number">100</span>% (<span class="number">223</span>/<span class="number">223</span>), done.`</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>设置权限</p>
                  <figure class="highlight angelscript">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">` # ll总用量 <span class="number">16</span>drwxr-xr-x <span class="number">4</span> www  www  <span class="number">4096</span> <span class="number">1</span>月  <span class="number">12</span> <span class="number">13</span>:<span class="number">20</span> akismetdrwxr-xr-x </span><br><span class="line"><span class="number">8</span> root root <span class="number">4096</span> <span class="number">1</span>月  <span class="number">16</span> <span class="number">11</span>:<span class="number">34</span> hacklog-remote-attachment-upyun-rw-r--r-- <span class="number">1</span> </span><br><span class="line">www  www  <span class="number">2255</span> <span class="number">5</span>月  <span class="number">23</span> <span class="number">2013</span> hello.php-rw-r--r-- <span class="number">1</span> www  www    <span class="number">28</span> <span class="number">6</span>月   </span><br><span class="line"><span class="number">5</span> <span class="number">2014</span> index.php# chown -R www:www hacklog-remote-attachment-upyun/`</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>注意，如果你是虚拟主机，请下载后打包成 zip 文件上传到 plugins 目录下插件配置 <strong>插件设置</strong></p>
                  <p><a href="https://img.wpdaxue.com/2017/03/screenshot-1.png" target="_blank" rel="noopener"><img src="https://static.oschina.net/uploads/img/201703/06153426_bm5y.png" alt=""></a> 主要配置 空间名：后台创建的存储类型服务的名称 操作员和操作员密码：后台获取 表单密钥：<a href="https://console.upyun.com/login/" target="_blank" rel="noopener">又拍云控制台</a> 找到对应的服务 — 高级选项 - 开启表单密钥远程基本 URL：填写你的绑定域名或默认域名（强烈建议使用绑定域名） REST 远程路径和 HTTP 路径：根据需求填写 插件启用和配置详情，请参考：<a href="http://support.upyun.com/hc/kb/article/1025121/" target="_blank" rel="noopener">WordPress 远程附件上传插件</a></p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/yvette_" class="author" itemprop="url" rel="index">yvette_</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2017-03-12 08:46:01" itemprop="dateCreated datePublished" datetime="2017-03-12T08:46:01+08:00">2017-03-12</time>
                </span>
                <!-- id is the page path: the LeanCloud visitor-counter script keys the
                     count on this id, so it must stay exactly "/4197.html" -->
                <span id="/4197.html" class="post-meta-item leancloud_visitors" data-flag-title="WordPress 远程附件上传插件 For 又拍云【升级版】" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>1.3k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>1 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/4048.html">
              <!-- Schema.org microdata (hidden from rendering): Article author and
                   publisher for search-engine rich results. -->
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <!-- Post header: category label + linked title.
                   NOTE(review): the href-less <a class="label"> is an inert placeholder
                   (valid HTML); presumably styled/wired by the theme — confirm it is not
                   meant to link to the category archive. -->
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> Python <i class="label-arrow"></i>
                  </a>
                  <a href="/4048.html" class="post-title-link" itemprop="url">小白进阶之Scrapy第三篇（基于Scrapy-Redis的分布式以及cookies池）</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <img itemprop="contentUrl" class="random">
                </div>
                <div class="excerpt">
                  <p>
                  <p>啥话都不说了、进入正题。 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ图片20170205084843.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ图片20170205084843.jpg" alt="QQ图片20170205084843"></a> 首先我们更新一下scrapy版本。最新版为1.3 再说一遍Windows的小伙伴儿 pip是装不上Scrapy的。推荐使用anaconda 、不然还是老老实实用Linux吧</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">conda install <span class="attribute">scrapy</span>==1.3</span><br><span class="line">或者</span><br><span class="line">pip install <span class="attribute">scrapy</span>==1.3</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>安装Scrapy-Redis</p>
                  <figure class="highlight mipsasm">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">conda <span class="keyword">install </span><span class="keyword">scrapy-redis</span></span><br><span class="line"><span class="keyword">或者</span></span><br><span class="line"><span class="keyword">pip </span><span class="keyword">install </span><span class="keyword">scrapy-redis</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>需要注意： Python 版本为 2.7，3.4 或者3.5 。个人使用3.6版本也没有问题 Redis&gt;=2.8 Scrapy&gt;=1.0 Redis-py&gt;=2.1 。 3.X版本的Python 都是自带Redis-py 其余小伙伴如果没有的话、自己 pip 安装一下。 开始搞事！ 开始之前我们得知道scrapy-redis的一些配置：PS 这些配置是写在Scrapy项目的settings.py中的！</p>
                  <figure class="highlight vala">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="meta">#启用Redis调度存储请求队列</span></span><br><span class="line">SCHEDULER = <span class="string">"scrapy_redis.scheduler.Scheduler"</span></span><br><span class="line"></span><br><span class="line"><span class="meta">#确保所有的爬虫通过Redis去重</span></span><br><span class="line">DUPEFILTER_CLASS = <span class="string">"scrapy_redis.dupefilter.RFPDupeFilter"</span></span><br><span class="line"></span><br><span class="line"><span class="meta">#默认请求序列化使用的是pickle 但是我们可以更改为其他类似的。PS：这玩意儿2.X的可以用。3.X的不能用</span></span><br><span class="line"><span class="meta">#SCHEDULER_SERIALIZER = "scrapy_redis.picklecompat"</span></span><br><span class="line"></span><br><span class="line"><span class="meta">#不清除Redis队列、这样可以暂停/恢复 爬取</span></span><br><span class="line"><span class="meta">#SCHEDULER_PERSIST = True</span></span><br><span class="line"></span><br><span class="line"><span class="meta">#使用优先级调度请求队列 （默认使用）</span></span><br><span class="line"><span class="meta">#SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'</span></span><br><span class="line"><span class="meta">#可选用的其它队列</span></span><br><span class="line"><span class="meta">#SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.FifoQueue'</span></span><br><span class="line"><span class="meta">#SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.LifoQueue'</span></span><br><span class="line"></span><br><span class="line"><span class="meta">#最大空闲时间防止分布式爬虫因为等待而关闭</span></span><br><span class="line"><span class="meta">#这只有当上面设置的队列类是SpiderQueue或SpiderStack时才有效</span></span><br><span class="line"><span class="meta">#并且当您的蜘蛛首次启动时，也可能会阻止同一时间启动（由于队列为空）</span></span><br><span class="line"><span class="meta">#SCHEDULER_IDLE_BEFORE_CLOSE = 10</span></span><br><span class="line"></span><br><span class="line"><span class="meta">#将清除的项目在redis进行处理</span></span><br><span class="line">ITEM_PIPELINES = &#123;</span><br><span class="line">    <span 
class="string">'scrapy_redis.pipelines.RedisPipeline'</span>: <span class="number">300</span></span><br><span class="line">&#125;</span><br><span class="line"></span><br><span class="line"><span class="meta">#序列化项目管道作为redis Key存储</span></span><br><span class="line"><span class="meta">#REDIS_ITEMS_KEY = '%(spider)s:items'</span></span><br><span class="line"></span><br><span class="line"><span class="meta">#默认使用ScrapyJSONEncoder进行项目序列化</span></span><br><span class="line"><span class="meta">#You can use any importable path to a callable object.</span></span><br><span class="line"><span class="meta">#REDIS_ITEMS_SERIALIZER = 'json.dumps'</span></span><br><span class="line"></span><br><span class="line"><span class="meta">#指定连接到redis时使用的端口和地址（可选）</span></span><br><span class="line"><span class="meta">#REDIS_HOST = 'localhost'</span></span><br><span class="line"><span class="meta">#REDIS_PORT = 6379</span></span><br><span class="line"></span><br><span class="line"><span class="meta">#指定用于连接redis的URL（可选）</span></span><br><span class="line"><span class="meta">#如果设置此项，则此项优先级高于设置的REDIS_HOST 和 REDIS_PORT</span></span><br><span class="line"><span class="meta">#REDIS_URL = 'redis://user:pass@hostname:9001'</span></span><br><span class="line"></span><br><span class="line"><span class="meta">#自定义的redis参数（连接超时之类的）</span></span><br><span class="line"><span class="meta">#REDIS_PARAMS  = &#123;&#125;</span></span><br><span class="line"></span><br><span class="line"><span class="meta">#自定义redis客户端类</span></span><br><span class="line"><span class="meta">#REDIS_PARAMS['redis_cls'] = 'myproject.RedisClient'</span></span><br><span class="line"></span><br><span class="line"><span class="meta">#如果为True，则使用redis的'spop'进行操作。</span></span><br><span class="line"><span class="meta">#如果需要避免起始网址列表出现重复，这个选项非常有用。开启此选项urls必须通过sadd添加，否则会出现类型错误。</span></span><br><span class="line"><span class="meta">#REDIS_START_URLS_AS_SET = False</span></span><br><span class="line"></span><br><span class="line"><span 
class="meta">#RedisSpider和RedisCrawlSpider默认 start_usls 键</span></span><br><span class="line"><span class="meta">#REDIS_START_URLS_KEY = '%(name)s:start_urls'</span></span><br><span class="line"></span><br><span class="line"><span class="meta">#设置redis使用utf-8之外的编码</span></span><br><span class="line"><span class="meta">#REDIS_ENCODING = 'latin1'</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>请各位小伙伴儿自行挑选需要的配置写到项目的settings.py文件中 英语渣靠Google、看不下去的小伙伴儿看这儿：<a href="http://scrapy-redis.readthedocs.io/en/stable/readme.html" target="_blank" rel="noopener">http://scrapy-redis.readthedocs.io/en/stable/readme.html</a> 继续在我们上一篇博文中的爬虫程序修改： 首先把我们需要的redis配置文件写入settings.py中： 如果你的redis数据库按照前一片博文配置过则需要以下至少三项</p>
                  <figure class="highlight ini">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attr">SCHEDULER</span> = <span class="string">"scrapy_redis.scheduler.Scheduler"</span></span><br><span class="line"></span><br><span class="line"><span class="attr">DUPEFILTER_CLASS</span> = <span class="string">"scrapy_redis.dupefilter.RFPDupeFilter"</span></span><br><span class="line"></span><br><span class="line"><span class="attr">REDIS_URL</span> = <span class="string">'redis://root:密码@主机ＩＰ:端口'</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>第三项请按照你的实际情况配置。 Nice配置文件写到这儿。我们来做一些基本的反爬虫设置 最基本的一个切换UserAgent！ 首先在项目文件中新建一个useragent.py用来写一堆 User-Agent（可以去网上找更多，也可以用下面这些现成的）</p>
                  <figure class="highlight smalltalk">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span 
class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">agents = [</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 2.3.6; en-us; Nexus S Build/GRK39F) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"</span>,</span><br><span class="line">    <span class="comment">"Avant Browser/1.2.789rel1 (http://www.avantbrowser.com)"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 
Safari/535.2"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0 
)"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90)"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/2.02E (Win95; U)"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/3.01Gold (Win95; I)"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/4.8 [en] (Windows NT 5.1; U)"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)"</span>,</span><br><span class="line">    <span class="comment">"HTC_Dream Mozilla/5.0 (Linux; U; Android 1.5; en-ca; Build/CUPCAKE) AppleWebKit/528.5  (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.2; U; de-DE) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/234.40.1 Safari/534.6 TouchPad/1.0"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 1.5; en-us; sdk Build/CUPCAKE) AppleWebkit/528.5  (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 2.1; en-us; Nexus One Build/ERD62) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 1.5; en-us; htc_bahamas Build/CRB17) AppleWebKit/528.5  (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 
2.1-update1; de-de; HTC Desire 1.19.161.5 Build/ERE27) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Sprint APA9292KT Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 1.5; de-ch; HTC Hero Build/CUPCAKE) AppleWebKit/528.5  (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 2.2; en-us; ADR6300 Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 2.1; en-us; HTC Legend Build/cupcake) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 1.5; de-de; HTC Magic Build/PLAT-RC33) AppleWebKit/528.5  (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1 FirePHP/0.3"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 1.6; en-us; HTC_TATTOO_A3288 Build/DRC79) AppleWebKit/528.5  (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 1.0; en-us; dream) AppleWebKit/525.10  (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 1.5; en-us; T-Mobile G1 Build/CRB43) AppleWebKit/528.5  (KHTML, like Gecko) Version/3.1.2 Mobile Safari 525.20.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 1.5; en-gb; T-Mobile_G2_Touch Build/CUPCAKE) AppleWebKit/528.5  (KHTML, like Gecko) Version/3.1.2 Mobile 
Safari/525.20.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Droid Build/ESD20) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Droid Build/FRG22D) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Milestone Build/ SHOLS_U2_01.03.1) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 2.0.1; de-de; Milestone Build/SHOLS_U2_01.14.0) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/525.10  (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 0.5; en-us) AppleWebKit/522  (KHTML, like Gecko) Safari/419.3"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 1.1; en-gb; dream) AppleWebKit/525.10  (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Droid Build/ESD20) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 2.1; en-us; Nexus One Build/ERD62) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Sprint APA9292KT Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile 
Safari/533.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 2.2; en-us; ADR6300 Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 2.2; en-ca; GT-P1000M Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 3.0.1; fr-fr; A500 Build/HRI66) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/525.10  (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 1.6; es-es; SonyEricssonX10i Build/R1FA016) AppleWebKit/528.5  (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1"</span>,</span><br><span class="line">    <span class="comment">"Mozilla/5.0 (Linux; U; Android 1.6; en-us; SonyEricssonX10i Build/R1AA056) AppleWebKit/528.5  (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1"</span>,</span><br><span class="line">]</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>现在我们来重写一下Scrapy的下载中间件（哇靠！！重写中间件 好高端啊！！会不会好难!!!放心！！！So Easy！！跟我做！包教包会，毕竟不会你也不能顺着网线来打我啊）： 关于重写中间件的详细情况 请参考 官方文档：<a href="http://scrapy-chs.readthedocs.io/zh_CN/latest/topics/downloader-middleware.html#scrapy.contrib.downloadermiddleware.DownloaderMiddleware" target="_blank" rel="noopener">http://scrapy-chs.readthedocs.io/zh_CN/latest/topics/downloader-middleware.html#scrapy.contrib.downloadermiddleware.DownloaderMiddleware</a> 在项目中新建一个middlewares.py的文件（如果你使用的新版本的Scrapy，在新建的时候会有这么一个文件，直接用就好了） 首先导入UserAgentMiddleware毕竟我们要重写它啊！</p>
                  <figure class="highlight clean">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">import</span> json ##处理json的包</span><br><span class="line"><span class="keyword">import</span> redis #Python操作redis的包</span><br><span class="line"><span class="keyword">import</span> random #随机选择</span><br><span class="line"><span class="keyword">from</span> .useragent <span class="keyword">import</span> agents #导入前面的</span><br><span class="line"><span class="keyword">from</span> scrapy.downloadermiddlewares.useragent <span class="keyword">import</span> UserAgentMiddleware #UserAegent中间件</span><br><span class="line"><span class="keyword">from</span> scrapy.downloadermiddlewares.retry <span class="keyword">import</span> RetryMiddleware #重试中间件</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p> 开写：</p>
                  <figure class="highlight ruby">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">UserAgentmiddleware</span>(<span class="title">UserAgentMiddleware</span>):</span></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">process_request</span><span class="params">(<span class="keyword">self</span>, request, spider)</span></span><span class="symbol">:</span></span><br><span class="line">        agent = random.choice(agents)</span><br><span class="line">        request.headers[<span class="string">"User-Agent"</span>] = agent</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>第一行：定义了一个类UserAgentmiddleware继承自UserAgentMiddleware 第二行：定义了函数<code>process_request</code>(<em>request</em>, <em>spider</em>)为什么定义这个函数，因为Scrapy每一个request通过中间 件都会调用这个方法。 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ20170206-223156.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ20170206-223156.png" alt="QQ20170206-223156"></a> 第三行：随机选择一个User-Agent 第四行：设置request的User-Agent为我们随机的User-Agent ^_^Y(^o^)Y一个中间件写完了！哈哈 是不是So easy！ 下面就需要登陆了。这次我们不用上一篇博文的FromRequest来实现登陆了。我们来使用Cookie登陆。这样的话我们需要重写Cookie中间件！分布式爬虫啊！你不能手动的给每个Spider写一个Cookie吧。而且你还不会知道这个Cookie到底有没有失效。所以我们需要维护一个Cookie池(这个cookie池用redis)。 好！来理一理思路，维护一个Cookie池最基本需要具备些什么功能呢？</p>
                  <ol>
                    <li>获取Cookie</li>
                    <li>更新Cookie</li>
                    <li>删除Cookie</li>
                    <li>判断Cookie是否可用进行相对应的操作（比如重试）</li>
                  </ol>
                  <p>好，我们先做前三个对Cookie进行操作。 首先我们在项目中新建一个cookies.py的文件用来写我们需要对Cookie进行的操作。 haoduofuli/haoduofuli/cookies.py: 首先日常导入我们需要的文件：</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">import requests</span><br><span class="line">import json</span><br><span class="line">import redis</span><br><span class="line">import logging</span><br><span class="line"><span class="keyword">from</span> .settings import REDIS_URL ##获取settings.py中的REDIS_URL</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>首先我们把登陆用的账号密码 以Key:value的形式存入redis数据库。不推荐使用db0（这是Scrapy-redis默认使用的，账号密码单独使用一个db进行存储。） <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ20170207-221128@2x.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ20170207-221128@2x.png" alt="QQ20170207-221128@2x"></a> 就像这个样子。 解决第一个问题：获取Cookie：</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">import requests</span><br><span class="line">import json</span><br><span class="line">import redis</span><br><span class="line">import logging</span><br><span class="line"><span class="keyword">from</span> .settings import REDIS_URL</span><br><span class="line"></span><br><span class="line">logger = logging.getLogger(__name__)</span><br><span class="line"><span class="comment">##使用REDIS_URL链接Redis数据库, decode_responses=True这个参数必须要，否则数据会变成byte形式 完全没法用</span></span><br><span class="line">reds = redis.Redis.from_url(REDIS_URL, <span class="attribute">db</span>=2, <span class="attribute">decode_responses</span>=<span class="literal">True</span>)</span><br><span class="line">login_url = <span class="string">'http://haoduofuli.pw/wp-login.php'</span></span><br><span class="line"></span><br><span class="line"><span class="comment">##获取Cookie</span></span><br><span class="line">def get_cookie(account, password):</span><br><span class="line">    s = requests.Session()</span><br><span class="line">    payload = &#123;</span><br><span class="line">        <span class="string">'log'</span>: account,</span><br><span class="line">        <span class="string">'pwd'</span>: password,</span><br><span class="line">        <span class="string">'rememberme'</span>: <span class="string">"forever"</span>,</span><br><span class="line">        <span class="string">'wp-submit'</span>: <span class="string">"登录"</span>,</span><br><span class="line">        <span class="string">'redirect_to'</span>: <span class="string">"http://www.haoduofuli.pw/wp-admin/"</span>,</span><br><span class="line">        <span class="string">'testcookie'</span>: <span class="string">"1"</span></span><br><span class="line">    &#125;</span><br><span class="line">    response = s.post(login_url, <span class="attribute">data</span>=payload)</span><br><span class="line">    cookies = response.cookies.get_dict()</span><br><span class="line">    logger.<span class="builtin-name">warning</span>(<span class="string">"获取Cookie成功！（账号为:%s）"</span> % account)</span><br><span class="line">    return json.dumps(cookies)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>这段很好懂吧。 使用requests模块提交表单登陆获得Cookie，返回一个通过Json序列化后的Cookie（如果不序列化，存入Redis后会变成Plain Text格式的，后面取出来Cookie就没法用啦。） 第二个问题：将Cookie写入Redis数据库（分布式呀，当然得要其它Spider也能使用这个Cookie了）</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">def init_cookie(red, spidername):</span><br><span class="line">    redkeys = reds.keys()</span><br><span class="line">    <span class="keyword">for</span><span class="built_in"> user </span><span class="keyword">in</span> redkeys:</span><br><span class="line">        password = reds.<span class="builtin-name">get</span>(user)</span><br><span class="line">        <span class="keyword">if</span> red.<span class="builtin-name">get</span>(<span class="string">"%s:Cookies:%s--%s"</span> % (spidername, user, password)) is None:</span><br><span class="line">            cookie = get_cookie(user, password)</span><br><span class="line">            red.<span class="builtin-name">set</span>(<span class="string">"%s:Cookies:%s--%s"</span>% (spidername, user, password), cookie)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>使用我们上面建立的redis链接获取redis db2中的所有Key(我们设置为账号的哦！)，再从redis中获取所有的Value(我设成了密码哦！) 判断这个spider和账号的Cookie是否存在，不存在 则调用get_cookie函数传入从redis中获取到的账号密码的cookie； 保存进redis，Key为spider名字和账号密码，value为cookie。 这儿操作redis的不是上面建立的那个reds链接哦！而是red;后面会传进来的(因为要操作两个不同的db,我在文档中没有看到切换db的方法，只好这么用了，知道的小伙伴儿留言一下)。 spidername获取方式后面也会说的。 还有剩余的更新Cookie 删除无法使用的账号等，大家伙可以自己试着写写（写不出来也没关系 不影响正常使用） 好啦！搞定！简直So Easy!!!! 现在开始大业了！重写cookie中间件；估摸着吧！聪明的小伙儿看了上面重写User-Agent的方法，十之八九也知道怎么重写Cookie中间件了。 好啦，现在继续写middlewares.py啦！</p>
                  <figure class="highlight reasonml">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">class</span> <span class="constructor">CookieMiddleware(RetryMiddleware)</span>:</span><br><span class="line"></span><br><span class="line">    def <span class="constructor">__init__(<span class="params">self</span>, <span class="params">settings</span>, <span class="params">crawler</span>)</span>:</span><br><span class="line">        <span class="module-access"><span class="module"><span class="identifier">RetryMiddleware</span>.</span><span class="module"><span class="identifier">__init__</span>(</span></span>self, settings)</span><br><span class="line">        self.rconn = redis.from<span class="constructor">_url(<span class="params">settings</span>['REDIS_URL'], <span class="params">db</span>=1, <span class="params">decode_responses</span>=True)</span>##decode_responses设置取出的编码为str</span><br><span class="line">        init<span class="constructor">_cookie(<span class="params">self</span>.<span class="params">rconn</span>, <span class="params">crawler</span>.<span class="params">spider</span>.<span class="params">name</span>)</span></span><br><span class="line"></span><br><span class="line">    @classmethod</span><br><span class="line">    def from<span class="constructor">_crawler(<span class="params">cls</span>, <span class="params">crawler</span>)</span>:</span><br><span class="line">        return cls(crawler.settings, crawler)</span><br><span class="line"></span><br><span class="line">    def process<span class="constructor">_request(<span class="params">self</span>, <span class="params">request</span>, <span class="params">spider</span>)</span>:</span><br><span class="line">        redisKeys = self.rconn.keys<span class="literal">()</span></span><br><span class="line">        <span class="keyword">while</span> len(redisKeys) &gt; <span class="number">0</span>:</span><br><span class="line">            elem = random.choice(redisKeys)</span><br><span class="line">            <span 
class="keyword">if</span> spider.name + ':Cookies' <span class="keyword">in</span> elem:</span><br><span class="line">                cookie = json.loads(self.rconn.get(elem))</span><br><span class="line">                request.cookies = cookie</span><br><span class="line">                request.meta<span class="literal">["<span class="identifier">accountText</span>"]</span> = elem.split(<span class="string">"Cookies:"</span>)<span class="literal">[-<span class="number">1</span>]</span></span><br><span class="line">                break</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>第一行：不说 第二行第三行得说一下 这玩意儿叫重载（我想了大半天都没想起来叫啥，还是问了大才。尴尬）有啥用呢： 也不扯啥子高深问题了，小伙伴儿可能发现，当你继承父类之后；子类是不能用 def <code>__init__</code>()方法的，不过重载父类之后就能用啦！ 第四行：settings[‘REDIS_URL’]是个什么鬼？这是访问scrapy的settings。怎么访问的？下面说 第五行：往redis中添加cookie。第二个参数就是spidername的获取方法（其实就是字典啦！）</p>
                  <figure class="highlight ruby">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">@classmethod</span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">from_crawler</span><span class="params">(cls, crawler)</span></span><span class="symbol">:</span></span><br><span class="line">    <span class="keyword">return</span> cls(crawler.settings, crawler)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>这个貌似不好理解，作用看下面： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/D9DF3655-F28A-482C-8B02-C53B152958A0.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/D9DF3655-F28A-482C-8B02-C53B152958A0.jpg" alt="D9DF3655-F28A-482C-8B02-C53B152958A0"></a> 这样是不是一下就知道了？? 至于访问settings的方法官方文档给出了详细的方法： <a href="http://scrapy-chs.readthedocs.io/zh_CN/latest/topics/settings.html#how-to-access-settings" target="_blank" rel="noopener">http://scrapy-chs.readthedocs.io/zh_CN/latest/topics/settings.html#how-to-access-settings</a> <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ20170207-233701@2x.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ20170207-233701@2x.png" alt="QQ20170207-233701@2x"></a> 下面就是完整的middlewares.py文件：</p>
                  <figure class="highlight python">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span 
class="line">62</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="comment"># -*- coding: utf-8 -*-</span></span><br><span class="line"></span><br><span class="line"><span class="comment"># Define here the models for your spider middleware</span></span><br><span class="line"><span class="comment">#</span></span><br><span class="line"><span class="comment"># See documentation in:</span></span><br><span class="line"><span class="comment"># http://doc.scrapy.org/en/latest/topics/spider-middleware.html</span></span><br><span class="line"></span><br><span class="line"><span class="keyword">from</span> scrapy <span class="keyword">import</span> signals</span><br><span class="line"><span class="keyword">import</span> json</span><br><span class="line"><span class="keyword">import</span> redis</span><br><span class="line"><span class="keyword">import</span> random</span><br><span class="line"><span class="keyword">from</span> .useragent <span class="keyword">import</span> agents</span><br><span class="line"><span class="keyword">from</span> .cookies <span class="keyword">import</span> init_cookie, remove_cookie, update_cookie</span><br><span class="line"><span class="keyword">from</span> scrapy.downloadermiddlewares.useragent <span class="keyword">import</span> UserAgentMiddleware</span><br><span class="line"><span class="keyword">from</span> scrapy.downloadermiddlewares.retry <span class="keyword">import</span> RetryMiddleware</span><br><span class="line"><span class="keyword">import</span> logging</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">logger = logging.getLogger(__name__)</span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">UserAgentmiddleware</span><span class="params">(UserAgentMiddleware)</span>:</span></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span 
class="title">process_request</span><span class="params">(self, request, spider)</span>:</span></span><br><span class="line">        agent = random.choice(agents)</span><br><span class="line">        request.headers[<span class="string">"User-Agent"</span>] = agent</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">CookieMiddleware</span><span class="params">(RetryMiddleware)</span>:</span></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">__init__</span><span class="params">(self, settings, crawler)</span>:</span></span><br><span class="line">        RetryMiddleware.__init__(self, settings)</span><br><span class="line">        self.rconn = redis.from_url(settings[<span class="string">'REDIS_URL'</span>], db=<span class="number">1</span>, decode_responses=<span class="literal">True</span>)<span class="comment">##decode_responses设置取出的编码为str</span></span><br><span class="line">        init_cookie(self.rconn, crawler.spider.name)</span><br><span class="line"></span><br><span class="line"><span class="meta">    @classmethod</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">from_crawler</span><span class="params">(cls, crawler)</span>:</span></span><br><span class="line">        <span class="keyword">return</span> cls(crawler.settings, crawler)</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">process_request</span><span class="params">(self, request, spider)</span>:</span></span><br><span class="line">        redisKeys = self.rconn.keys()</span><br><span class="line">        <span class="keyword">while</span> len(redisKeys) &gt; <span class="number">0</span>:</span><br><span class="line">            elem = 
random.choice(redisKeys)</span><br><span class="line">            <span class="keyword">if</span> spider.name + <span class="string">':Cookies'</span> <span class="keyword">in</span> elem:</span><br><span class="line">                cookie = json.loads(self.rconn.get(elem))</span><br><span class="line">                request.cookies = cookie</span><br><span class="line">                request.meta[<span class="string">"accountText"</span>] = elem.split(<span class="string">"Cookies:"</span>)[<span class="number">-1</span>]</span><br><span class="line">                <span class="keyword">break</span></span><br><span class="line">            <span class="comment">#else:</span></span><br><span class="line">                <span class="comment">#redisKeys.remove(elem)</span></span><br><span class="line"></span><br><span class="line">    <span class="comment">#def process_response(self, request, response, spider):</span></span><br><span class="line"></span><br><span class="line">         <span class="comment">#"""</span></span><br><span class="line">         <span class="comment">#下面的我删了，各位小伙伴可以尝试以下完成后面的工作</span></span><br><span class="line"></span><br><span class="line">         <span class="comment">#你需要在这个位置判断cookie是否失效</span></span><br><span class="line"></span><br><span class="line">         <span class="comment">#然后进行相应的操作，比如更新cookie  删除不能用的账号</span></span><br><span class="line"></span><br><span class="line">         <span class="comment">#写不出也没关系，不影响程序正常使用，</span></span><br><span class="line"></span><br><span class="line">         <span class="comment">#"""</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>存储我也不写啦！就是这么简单一个分布式的scrapy就这么完成啦！！！ 我试了下 三台机器 两个小时 就把整个站点全部爬完了。 弄好你的存储 放在不同的机器上就可以跑啦！ 完整的代码在GitHub上： GitHub：<a href="https://github.com/thsheep/haoduofuli" target="_blank" rel="noopener">https://github.com/thsheep/haoduofuli</a> Y(^o^)Y完工 下篇博文来对付爬虫的大敌：Ajax 以后的教程用微博做靶子，那些数据比较有用，可以玩玩分析什么的。</p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/哎哟卧槽" class="author" itemprop="url" rel="index">哎哟卧槽</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2017-02-07 23:53:44" itemprop="dateCreated datePublished" datetime="2017-02-07T23:53:44+08:00">2017-02-07</time>
                </span>
                <span id="/4048.html" class="post-meta-item leancloud_visitors" data-flag-title="小白进阶之Scrapy第三篇（基于Scrapy-Redis的分布式以及cookies池）" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>15k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>14 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/4020.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> 技术杂谈 <i class="label-arrow"></i>
                  </a>
                  <a href="/4020.html" class="post-title-link" itemprop="url">Scrapy分布式的前篇--让redis和MongoDB安全点</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <img itemprop="contentUrl" class="random">
                </div>
                <div class="excerpt">
                  <p>各位小伙伴 大家好啊！年假结束了··· 也该开始继续我的装逼之旅了。 年前博文的结尾说了 还有一个基于Scrapy的分布式版本、 今天这博文就先给大家做些前期工作，其实吧、最主要的是防止你的服务器因为这篇博文被轮········· 博文开始之前 我们先来看篇文章： <a href="http://www.youxia.org/daily-news-attack-extortion-does-not-delay-a-week-had-27000-mongodb-database.html" target="_blank" rel="noopener">http://www.youxia.org/daily-news-attack-extortion-does-not-delay-a-week-had-27000-mongodb-database.html</a> 关于年前MongoDB由于<strong>默认可匿名访问</strong> 而导致了一大堆的管理员掉坑里 预估中国有十万数据库被坑。 这是继Redis之后又一个小白式的错误······（Redis也是默认匿名访问） 所以在下一篇博文开始之前，先给一些新手小伙伴做一些准备工作。 因为篇幅较少 先写写Redis的一些安全设置： 安装Redis: 请参考这儿;<a href="https://redis.io/download" target="_blank" rel="noopener">https://redis.io/download</a></p>
                  <figure class="highlight gams">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="symbol">$</span> wget http:<span class="comment">//download.redis.io/releases/redis-3.2.7.tar.gz</span></span><br><span class="line"><span class="symbol">$</span> tar xzf redis<span class="number">-3.2</span><span class="number">.7</span>.tar.gz</span><br><span class="line"><span class="symbol">$</span> cd redis<span class="number">-3.2</span><span class="number">.7</span></span><br><span class="line"><span class="symbol">$</span> make</span><br><span class="line"></span><br><span class="line"><span class="symbol">$</span> src/redis-server</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>ps :如果以上有报错，可能是你的服务器没有安装依赖： CentOS7：</p>
                  <figure class="highlight brainfuck">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="comment">yum</span> <span class="comment">install</span> <span class="literal">-</span><span class="comment">y</span> <span class="comment">gcc</span><span class="literal">-</span><span class="comment">c</span>++ <span class="comment">tcl</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <pre><code>只写关于Linux的、Windows的很简单，配置文件通用： 安装完成后 在目录 redis-3.2.7中有一个redis.conf的配置文件，按照默认习惯我们将其复制到/etc目录下：
</code></pre>
                  <figure class="highlight angelscript">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">[<span class="symbol">root@</span>MyCloudServer ~]# cp redis<span class="number">-3.2</span><span class="number">.7</span>/redis.conf /etc</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>PS：请使用复制（cp）而不要使用移动（mv）；毕竟你要弄错了还可以再拷贝一份儿过去用不是？ 使用vim编辑刚刚拷贝的redis.conf</p>
                  <figure class="highlight vim">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">vim</span> /etc/redis.<span class="keyword">conf</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>PS:使用vim需要先安装： CentOS7：</p>
                  <figure class="highlight cmake">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">yum  <span class="keyword">install</span> vim</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>我们需要注意以下几项： 1、注释掉47行的bind 127.0.0.1（这个意思是限制为只能 127.0.0.1 也就是本机登录）PS：个人更建议 将你需要连接Redis数据库的IP地址填写在此处，而不是注释掉。这样做会比直接注释掉更加安全。 2、更改第84行port 6379 为你需要的端口号（这是Redis的默认监听端口）PS：个人建议务必更改 3、更改第128行 daemonize no 为 daemonize yes（这是让Redis后台运行） PS:个人建议更改 4、取消第 480 # requirepass foobared 的#注释符（这是redis的访问密码） 并更改foobared为你需要的密码 比如 我们需要密码为123456 则改为 requirepass 123456。PS：密码不可过长否则Python的redis客户端无法连接 以上配置文件更改完毕，需要在防火墙放行：</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">firewall-cmd <span class="attribute">--zone</span>=public <span class="attribute">--add-port</span>=xxxx/tcp --permanent</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>请将xxxx更改为你自己的redis端口。 重启防火墙生效：</p>
                  <figure class="highlight css">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="selector-tag">systemctl</span> <span class="selector-tag">restart</span> <span class="selector-tag">firewalld</span><span class="selector-class">.service</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>指定配置文件启动redis:</p>
                  <figure class="highlight angelscript">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">[<span class="symbol">root@</span>MyCloudServer ~]# redis<span class="number">-3.2</span><span class="number">.7</span>/src/redis-server /etc/redis.conf</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>加入到开机启动:</p>
                  <figure class="highlight jboss-cli">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">echo</span> <span class="string">"/root/redis-3.2.6/src/redis-server /etc/redis.conf"</span> &gt;&gt; <span class="string">/etc/rc.local</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>一个较为安全的redis配置完毕。 redis的桌面客户端我推荐：RedisDesktopManager 去下面这个地址下载就不需要捐助啦！ <a href="https://github.com/uglide/RedisDesktopManager/releases" target="_blank" rel="noopener">https://github.com/uglide/RedisDesktopManager/releases</a> 当然还有一些其他配置、我们用不到也就不写啦！ <strong>MongoDB：</strong> 这次MongoDB挺惨啊！由于默认匿名访问、下面给MongoDB配置一点安全措施： 安装MongoDB： 以CentOS7为例其余发行版请参考官方文档：<a href="https://docs.mongodb.com/manual/administration/install-on-linux/" target="_blank" rel="noopener">https://docs.mongodb.com/manual/administration/install-on-linux/</a> 1、建一个yum源：</p>
                  <figure class="highlight angelscript">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">[<span class="symbol">root@</span>MyCloudServer ~]# vim /etc/yum.repos.d/mongodb-org<span class="number">-3.4</span>.repo</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>写入以下内容：</p>
                  <figure class="highlight ini">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="section">[mongodb-org-3.4]</span></span><br><span class="line"><span class="attr">name</span>=MongoDB Repository</span><br><span class="line"><span class="attr">baseurl</span>=https://repo.mongodb.org/yum/redhat/<span class="variable">$releasever</span>/mongodb-org/<span class="number">3.4</span>/x<span class="number">86_64</span>/</span><br><span class="line"><span class="attr">gpgcheck</span>=<span class="number">1</span></span><br><span class="line"><span class="attr">enabled</span>=<span class="number">1</span></span><br><span class="line"><span class="attr">gpgkey</span>=https://www.mongodb.org/static/pgp/server-<span class="number">3.4</span>.asc</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>2、安装mongoDB以及相关工具：</p>
                  <figure class="highlight cmake">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">sudo yum <span class="keyword">install</span> -y mongodb-org</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>3、启动MongoDB：</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">sudo<span class="built_in"> service </span>mongod start</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>ＰＳ：如果你的服务器在使用SELinux的话，你需要配置SElinux允许MongoDB启动，当然更简单的方法是关掉SElinux。 关闭SElinux:</p>
                  <figure class="highlight autoit">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">[root<span class="symbol">@MyCloudServer</span> ~]<span class="meta"># vim /etc/selinux/config</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>将第7行设置为：SELINUX=disabled 4、停止MongoDB：</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">sudo<span class="built_in"> service </span>mongod stop</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>上面安装完成了MongoDB，下面要步入正题了： 1、备份和更改配置文件：</p>
                  <figure class="highlight vim">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">[root@MyCloudServer ~]# <span class="keyword">cp</span> /etc/mongod.<span class="keyword">conf</span>  /etc/mongod_backup.<span class="keyword">conf</span></span><br><span class="line">[root@MyCloudServer ~]# <span class="keyword">vim</span> /etc/mongod.<span class="keyword">conf</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>更改第28行 port 27017为你需要更改的端口（这是MongoDB默认的监听端口） 更改第29行 bindIp: 127.0.0.1为0.0.0.0（MongoDB默认只能本地访问）ＰＳ：个人建议此处添加你需要连接MongoDB服务器的IP地址、而不是改成0.0.0.0。这样做会更安全 启动MongoDB：</p>
                  <figure class="highlight jboss-cli">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">mongod <span class="params">--config</span> <span class="string">/etc/mongod.conf</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>意思是：指定/etc/mongod.conf为配置文件启动MongoDB 好了、配置文件更改完毕，现在可以外网访问我们的MongoDB了！不需要用户名！匿名的！现在我们进行下一步设置。 因为MongoDB默认是匿名访问的、我们需要开启用户认证。 我估摸着很多哥们儿和我一样没补全 啥都不会干、所以直接在服务器上改就不太现实了，需要借助于第三方客户端。我个人推荐：mongobooster 官方地址：<a href="https://mongobooster.com/" target="_blank" rel="noopener">https://mongobooster.com/</a> 收费版免费版功能一样 不用在意： 首先我们需要连上MongoDB服务器（别忘了防火墙放行你使用的端口啊！！！） <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/170203.gif" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/170203.gif" alt="170203"></a> 连上之后大概是这个样子： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/17020301.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/17020301.png" alt="17020301"></a> 按下Ctrl+T 打开shell界面输入以下内容：</p>
                  <figure class="highlight gherkin">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">use admin</span><br><span class="line">db.createUser(</span><br><span class="line">   &#123;</span><br><span class="line">     user: <span class="string">"你的用户名"</span>,</span><br><span class="line">     pwd: <span class="string">"你的密码"</span>,</span><br><span class="line">     roles: [ &#123;role:<span class="string">"userAdminAnyDatabase"</span>, db:<span class="string">"admin"</span>&#125; ]</span><br><span class="line">    /<span class="symbol">*</span> All build-in Roles </span><br><span class="line">    Database User Roles: read|<span class="string">readWrite</span></span><br><span class="line"><span class="string">    数据库用户角色：读</span>|<span class="string">读写</span></span><br><span class="line"><span class="string">    Database Admion Roles: dbAdmin</span>|<span class="string">dbOwner</span>|userAdmin</span><br><span class="line">    数据库管理角色：数据库管理员|<span class="string">数据库所有者</span>|<span class="string">用户管理</span></span><br><span class="line"><span class="string">    Cluster Admin Roles: clusterAdmin</span>|<span class="string">clusterManager</span>|<span class="string">clusterMonitor</span>|hostManager</span><br><span class="line">    集群管理角色：</span><br><span class="line">    Backup and Restoration Roles: backup|<span class="string">restore</span></span><br><span class="line"><span class="string">    All-Database Roles: readAnyDatabase</span>|<span class="string">readWriteAnyDatabase</span>|<span class="string">userAdminAnyDatabase</span>|dbAdminAnyDatabase</span><br><span class="line">    所有数据库角色：读所有数据库|<span class="string">读写所有数据库</span>|<span class="string">所有数据库的用户管理员</span>|<span class="string">所有数据库的管理员</span></span><br><span class="line"><span class="string">    Superuser Roles: root */</span></span><br><span class="line"><span class="string">   &#125;</span></span><br><span class="line"><span class="string">)</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>再点击run运行即可 会在信息栏中提示True 现在断开数据库连接、再打开会发现多出一个admin的数据库。 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ截图20170204001502.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ截图20170204001502.png" alt="QQ截图20170204001502"></a> 上面的都做了些什么呢？ 首先我们新建了一个admin的数据库（MongoDB的原则哦、有则切换没有就创建） 然后在admin数据中创建了一个用户 和 密码 赋予了这个用户管理admin数据库 所有数据库用户的权限。 至于有那些权限 在注释中都有写哦！常用的我估摸着写了个对应意思········· OK！搞定这一部分 就可以开启MongoDB的用户认证了！ 怎么开启呢？首先关闭正在运行的MongoDB：</p>
                  <figure class="highlight vim">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">ps</span> -<span class="keyword">e</span> | <span class="keyword">grep</span> mongod</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>上面的命令会找出MongoDB的进程号、然后运行kill 进程号即可！ 开启MongoDB：</p>
                  <figure class="highlight jboss-cli">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">mongod <span class="params">--auth</span> <span class="params">--config</span> <span class="string">/etc/mongod.conf</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>意思是：以认证模式 指定/etc/mongod.conf启动 MongoDB。 加入开机启动：</p>
                  <figure class="highlight jboss-cli">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">echo</span> <span class="string">"mongod --auth --config /etc/mongod.conf"</span> &gt;&gt; <span class="string">/etc/rc.local</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>好了！现在MongoDB也配置完成 啦！ 现在如果你需要新建一个用户让其使用数据库 你该怎么做呢？ 像下面这样；首先你需要连接到admin数据库！ 在选项Basic中照常配置： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ20170204-004332@2x.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ20170204-004332@2x.png" alt="QQ20170204-004332@2x"></a> 需要额外设置的是Authentication选项： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ20170204-004627@2x.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ20170204-004627@2x.png" alt="QQ20170204-004627@2x"></a> 连接成功后大概是这个样子： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ20170204-004930@2x.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/02/QQ20170204-004930@2x.png" alt="QQ20170204-004930@2x"></a> 需要注意的一点是：这个用户只能看到所有的数据库和用户、并不能看到数据！因为我们创建的时候只给了所有数据库用户管理的权限哦！ 然后打开shell界面按照创建admin的模板执行即可：</p>
                  <figure class="highlight gherkin">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">use 想要创建的数据库</span><br><span class="line">db.createUser(</span><br><span class="line">   &#123;</span><br><span class="line">     user: <span class="string">"想要使用的用户名"</span>,</span><br><span class="line">     pwd: <span class="string">"想要使用的密码"</span>,</span><br><span class="line">     roles: [ &#123;role:<span class="string">"赋予什么样的权限"</span>, db:<span class="string">"创建的数据库"</span>&#125; ]</span><br><span class="line">    /<span class="symbol">*</span> All build-in Roles </span><br><span class="line">    Database User Roles: read|<span class="string">readWrite</span></span><br><span class="line"><span class="string">    数据库用户角色：读</span>|<span class="string">读写</span></span><br><span class="line"><span class="string">    Database Admion Roles: dbAdmin</span>|<span class="string">dbOwner</span>|userAdmin</span><br><span class="line">    数据库管理角色：数据库管理员|<span class="string">数据库所有者</span>|<span class="string">用户管理</span></span><br><span class="line"><span class="string">    Cluster Admin Roles: clusterAdmin</span>|<span class="string">clusterManager</span>|<span class="string">clusterMonitor</span>|hostManager</span><br><span class="line">    集群管理角色：</span><br><span class="line">    Backup and Restoration Roles: backup|<span class="string">restore</span></span><br><span class="line"><span class="string">    All-Database Roles: readAnyDatabase</span>|<span class="string">readWriteAnyDatabase</span>|<span class="string">userAdminAnyDatabase</span>|dbAdminAnyDatabase</span><br><span class="line">    所有数据库角色：读所有数据库|<span class="string">读写所有数据库</span>|<span class="string">所有数据库的用户管理员</span>|<span class="string">所有数据库的管理员</span></span><br><span class="line"><span class="string">    Superuser Roles: root */</span></span><br><span class="line"><span class="string">   &#125;</span></span><br><span class="line"><span class="string">)</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>创建完成后、就可以用创建好的用户名和密码去连接有权限的数据库啦！！是不是So Easy！！！ 其实吧 还是 bindIp安全 哈哈哈！ 以上完毕！！ 下一篇就是基于Scrapy-Redis的分布式了、真的超级简单！简单得不要不要的</p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/哎哟卧槽" class="author" itemprop="url" rel="index">哎哟卧槽</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2017-02-04 00:59:05" itemprop="dateCreated datePublished" datetime="2017-02-04T00:59:05+08:00">2017-02-04</time>
                </span>
                <span id="/4020.html" class="post-meta-item leancloud_visitors" data-flag-title="Scrapy分布式的前篇--让redis和MongoDB安全点" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>4.8k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>4 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/3998.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> 个人日记 <i class="label-arrow"></i>
                  </a>
                  <a href="/3998.html" class="post-title-link" itemprop="url">回首我的二零一六</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <img itemprop="contentUrl" class="random">
                </div>
                <div class="excerpt">
                  <p>没有选择那个二零一六年尾，而是选择了这个二零一六年尾来总结。</p>
                  <p>毕竟元旦那时候真的被一堆考试烦透，说到考试，可以说我是极其反对这种形式，在我看来，因为有了考试，学一门课反倒成了任务，而不是真正踏实地去学，有了考试，学习的目的不再是单纯学习，而是为了最后的应考。所以很多科目，经验之谈，一旦它成了我的课程，我反倒没有那么多耐心去学它。而又有很多考试，理解性的东西真的不考察理解，你背过，就高分了，背不过，那就没分。做到原题了，就有分了，做不到原题，那就不一定有分。到头来，一门课程的结束伴随着你仅仅在短时间内记忆了一些概念和题目去应考。考试结束，抛掉了，你还记得什么？何况，某些课，你可能这辈子都用不到了。 然而就是这样，或许真的没有比这更合适的考察方式了吧。 果然一扯就停不下来，后面简单点扯。 嗯，就是这样，我来北航读研了，2016级的新生，刚刚渡过了研究生第一个学期。这个学期，基本上把研究生所有的课都上完。我能体会到自己还是偏重于实践性的东西而非理论，一个想法，纯理论都是空谈，实现出来才是最终目标。作为一名程序猿，平时我喜欢瞎捣腾些东西，逛GitHub，搜开源项目，找到有趣的组件来实现自己想要的功能。 二零一六年上半年，毕设的一段时间吧，由于自己对爬虫比较感兴趣，正好毕设也有个选题是关于爬虫的，所以干脆毕设就实现了一个分布式爬虫框架，虽然也是开源项目组合起来的，Scrapy，Redis，Mongo，Splash，Django等等吧，不过这个过程的探索也是受益匪浅。哦对了，也是上半年这个时候吧，换上了自己的第一台Mac，联想也终于寿终正寝了，我也算是真正踏上了程序员的行列。一年下来，不得不说，开发真的太便捷。 那时候正好是大四，也没多少事，期间也接着大大小小的外包，赚点外快，后来又入手了单反，然而到现在我发现自己没有那么狂爱摄影。 每年都有毕业季，今年轮到我们了。毕业行去了云南，还有些意犹未尽的感觉，也感谢一路同行的小伙伴给我拍的绝世美照哈哈。后来忙着毕业照啦，穿上学士服，辗转各大校区，各种奇怪的姿势拍拍拍。现在真的挺想念山大的，那里的人儿，那里的事儿。嗯，毕业快乐。 暑假，我又回到北京。一件重要的事那就是女朋友保研，虽然中间出了点小叉子，不过还是恭喜她能被中科院录取，随后在北京呆了近整个暑假。 随之而来的，便是北航研究生的新学期了。嗯，从山大到了北航。开学时我并没有那么欣喜，或许是已经过来太多次了习惯了。上学期课满满当当，然而你以为我会乖乖听课？我可不是那种学霸。我总是有着自己的学习和项目计划，学习一些我觉得有用的东西，比如Andrew Ng的机器学习、Web相关知识还有在做自己在忙的一些项目。前面说了我不喜欢上课，不喜欢考试，因为我觉得这些时间，可以去做更有意义的事情。最后几个星期突击一下就好了。其实我的大学就是这么过来的，上课都在学习别的和撸代码去了，成绩也还说得过去，不过感觉这样还是挺充实的。然而考前突击的时候是难了点儿，因为大部分我得预习。还好，这学期过去了，后面的时间我终于可以尽情做我想做的事情了，喜欢无拘无束自己探索的感觉。 期间其实还在和同学创业，演艺行业平台，自己负责技术这方面，好玩表演（hwby.com），一年来了吧，网站实现后投入运营，前期还是非常艰难，不过近期也还是有了起色，继续加油。写的过程中也抽离出了自己的一套CMS，以便后期开发应用的时候更加便捷，现在还不成熟，暂未公开。 说一件值得骄傲的事情吧，每天坚持记有道，把每天完成的事情，成功的事情，失败的事情每天做一下总结，这种感觉似乎是记录了自己路途的脚印，自己能感觉出自己走了多远，收获了多少，有一种自我激励的感觉。从14年开始记录到到今天了，希望自己能坚持下去。 哦又想到一个，之前博客上会有很多人加我，后来我想，干脆建一个交流群多好，于是乎在九月份左右，进击的Coder诞生了，三个多月的时间吧，几乎每天都有人加，刚才看了下已经788人啦，在群里跟大家探讨经验，交流技术，没事吐吐槽，扯扯淡，真的很愉快，爱你们。 然而现在还是觉得自己有时候懒癌发作之后就什么也不想干，执行力差，定了一些计划，今天拖明天，明天拖后天，最后就那么不了了之了。半年前定的学习鬼步舞呢，到现在跳的依然那么差。说好的练好腹肌呢，现在似乎没多大效果。 总结了这么多，似乎也没有多么值得骄傲的一件事，算是瞎忙了一整年吧哈哈。 新年计划： 1.写一本爬虫的书并出版，出套算不上教程的经验分享 2.完善好我的CMS，长期维护下去 3.学习数据挖掘和Web安全，向大牛进发 4.懒癌，不敢说改掉，但也能稍微缓解下吧 5.好玩表演，燥起来。 太多太多…. 觉得自己不会的还是太多，想学的也太多，好好提高自己的执行力和自制力吧，新的一年成为更好的自己。 凌晨三点了，安。</p>
                  <!-- stray </p> removed: the excerpt's wrapping <p> is auto-closed
                       by the nested block content, so this closer produced an empty <p> -->
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <!-- decorative icon: hidden from assistive technology -->
                    <i class="far fa-user" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/崔庆才" class="author" itemprop="url" rel="index">崔庆才</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2017-01-27 02:49:20" itemprop="dateCreated datePublished" datetime="2017-01-27T02:49:20+08:00">2017-01-27</time>
                </span>
                <!-- id/class kept verbatim: LeanCloud counter JS keys off them -->
                <span id="/3998.html" class="post-meta-item leancloud_visitors" data-flag-title="回首我的二零一六" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>2k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>2 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/3992.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <!-- href-less <a> is a valid inert placeholder (category label) -->
                  <a class="label"> PHP <i class="label-arrow" aria-hidden="true"></i>
                  </a>
                  <a href="/3992.html" class="post-title-link" itemprop="url">Mac下升级PHP版本至7.1</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <!-- src is injected by theme JS ("random"); empty alt marks it decorative -->
                  <img itemprop="contentUrl" class="random" alt="">
                </div>
                <div class="excerpt">
                  <!-- bare wrapping <p> removed: a <p> cannot contain the block-level
                       children that follow (p/h2/figure) and was auto-closed immediately -->
                  <p>博主在搞Web开发主要采用的是Laravel，然而发现其对PHP版本的要求是越来越高，PHP5.6已经越来受到限制，Laravel 5.5将正式弃用PHP5.6，所以博主决定直接升级到7.1版本。</p>
                  <h2 id="移除旧版本"><a href="#移除旧版本" class="headerlink" title="移除旧版本"></a>移除旧版本</h2>
                  <p>由于系统本身已经装了PHP5.6，所以需要先将其移除。 在这里列出目录以及移除需要的命令。</p>
                  <figure class="highlight groovy">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="regexp">/private/</span>etc/               sudo rm -rf php-fpm.conf.<span class="keyword">default</span> php.ini php.ini.<span class="keyword">default</span></span><br><span class="line"><span class="regexp">/usr/</span>bin/               sudo rm -rf php php-config phpdoc phpize</span><br><span class="line"><span class="regexp">/usr/</span>include                sudo rm -rf php</span><br><span class="line"><span class="regexp">/usr/</span>lib                sudo rm -rf php</span><br><span class="line"><span class="regexp">/usr/</span>sbin               sudo rm -rf php-fpm</span><br><span class="line"><span class="regexp">/usr/</span>share              sudo rm -rf php</span><br><span class="line"><span class="regexp">/usr/</span>share<span class="regexp">/man/</span>man1         sudo rm -rf php-config<span class="number">.1</span> php<span class="number">.1</span> phpize<span class="number">.1</span></span><br><span class="line"><span class="regexp">/usr/</span>share<span class="regexp">/man/</span>man8         sudo rm -rf php-fpm<span class="number">.8</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>顺次手动删除它们即可。</p>
                  <h2 id="搞清关系"><a href="#搞清关系" class="headerlink" title="搞清关系"></a>搞清关系</h2>
                  <p>在卸载过程中你会发现有PHP、FastCGI、php-fpm、spawn-fcgi等等的概念，所以在这里先梳理一下。</p>
                  <h3 id="CGI"><a href="#CGI" class="headerlink" title="CGI"></a>CGI</h3>
                  <p>CGI是为了保证web server传递过来的数据是标准格式的，方便CGI程序的编写者。 web server（比如说nginx）只是内容的分发者。比如，如果请求<code>/index.html</code>，那么web server会去文件系统中找到这个文件，发送给浏览器，这里分发的是静态数据。好了，如果现在请求的是<code>/index.php</code>，根据配置文件，nginx知道这个不是静态文件，需要去找PHP解析器来处理，那么他会把这个请求简单处理后交给PHP解析器。Nginx会传哪些数据给PHP解析器呢？url要有吧，查询字符串也得有吧，POST数据也要有，HTTP header不能少吧，好的，CGI就是规定要传哪些数据、以什么样的格式传递给后方处理这个请求的协议。仔细想想，你在PHP代码中使用的用户从哪里来的。 当web server收到<code>/index.php</code>这个请求后，会启动对应的CGI程序，这里就是PHP的解析器。接下来PHP解析器会解析php.ini文件，初始化执行环境，然后处理请求，再以规定CGI规定的格式返回处理后的结果，退出进程。web server再把结果返回给浏览器。</p>
                  <h3 id="FastCGI"><a href="#FastCGI" class="headerlink" title="FastCGI"></a>FastCGI</h3>
                  <p>Fastcgi是用来提高CGI程序性能的。 那么CGI程序的性能问题在哪呢？”PHP解析器会解析php.ini文件，初始化执行环境”，就是这里了。标准的CGI对每个请求都会执行这些步骤（不闲累啊！启动进程很累的说！），所以处理每个时间的时间会比较长。这明显不合理嘛！那么Fastcgi是怎么做的呢？首先，Fastcgi会先启一个master，解析配置文件，初始化执行环境，然后再启动多个worker。当请求过来时，master会传递给一个worker，然后立即可以接受下一个请求。这样就避免了重复的劳动，效率自然是高。而且当worker不够用时，master可以根据配置预先启动几个worker等着；当然空闲worker太多时，也会停掉一些，这样就提高了性能，也节约了资源。这就是fastcgi的对进程的管理。</p>
                  <h3 id="PHP-FPM"><a href="#PHP-FPM" class="headerlink" title="PHP-FPM"></a>PHP-FPM</h3>
                  <p>是一个实现了Fastcgi的程序，被PHP官方收了。 大家都知道，PHP的解释器是php-cgi。php-cgi只是个CGI程序，他自己本身只能解析请求，返回结果，不会进程管理（皇上，臣妾真的做不到啊！）所以就出现了一些能够调度php-cgi进程的程序，比如说由lighthttpd分离出来的spawn-fcgi。好了PHP-FPM也是这么个东东，在长时间的发展后，逐渐得到了大家的认可（要知道，前几年大家可是抱怨PHP-FPM稳定性太差的），也越来越流行。 php-fpm的管理对象是php-cgi。但不能说php-fpm是fastcgi进程的管理器，因为前面说了fastcgi是个协议，似乎没有这么个进程存在，就算存在php-fpm也管理不了他（至少目前是）。 有的说，php-fpm是php内核的一个补丁 以前是对的。因为最开始的时候php-fpm没有包含在PHP内核里面，要使用这个功能，需要找到与源码版本相同的php-fpm对内核打补丁，然后再编译。后来PHP内核集成了PHP-FPM之后就方便多了，使用<code>--enable-fpm</code>这个编译参数即可。</p>
                  <h2 id="安装PHP7-1"><a href="#安装PHP7-1" class="headerlink" title="安装PHP7.1"></a>安装PHP7.1</h2>
                  <p>用brew进行安装。</p>
                  <figure class="highlight armasm">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">brew </span>install homebrew/php/php71</span><br><span class="line"><span class="keyword">brew </span>install homebrew/php/php71-mcrypt</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>安装完了之后它会自带PHP-FPM。接下来启动PHP-FPM：</p>
                  <figure class="highlight ebnf">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attribute">sudo php-fpm</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <h3 id="配置文件目录"><a href="#配置文件目录" class="headerlink" title="配置文件目录"></a>配置文件目录</h3>
                  <p>php.ini</p>
                  <figure class="highlight awk">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="regexp">/usr/</span>local<span class="regexp">/etc/</span>php<span class="regexp">/7.1/</span>php.ini</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>php-fpm.conf</p>
                  <figure class="highlight awk">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="regexp">/usr/</span>local<span class="regexp">/etc/</span>php<span class="regexp">/7.1/</span>php-fpm.conf</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>php-fpm</p>
                  <figure class="highlight awk">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="regexp">/usr/</span>local<span class="regexp">/opt/</span>php71<span class="regexp">/sbin/</span>php-fpm</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>但是执行<code>php-fpm</code>发现没有反应，所以这里需要加一个symlink</p>
                  <figure class="highlight awk">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">ln -s <span class="regexp">/usr/</span>local<span class="regexp">/opt/</span>php71<span class="regexp">/sbin/</span>php-fpm <span class="regexp">/usr/</span>local<span class="regexp">/bin/</span>php-fpm</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>然后运行php-fpm</p>
                  <figure class="highlight ebnf">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attribute">sudo php-fpm</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>启动nginx</p>
                  <figure class="highlight ebnf">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attribute">sudo nginx</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>关于MySQL和其他的安装在这就不再赘述。 以上便完成了PHP的升级。</p>
                  <!-- stray </p> removed: its opener was auto-closed long before this point -->
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <!-- decorative icon: hidden from assistive technology -->
                    <i class="far fa-user" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/崔庆才" class="author" itemprop="url" rel="index">崔庆才</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2017-01-26 22:05:48" itemprop="dateCreated datePublished" datetime="2017-01-26T22:05:48+08:00">2017-01-26</time>
                </span>
                <!-- id/class kept verbatim: LeanCloud counter JS keys off them -->
                <span id="/3992.html" class="post-meta-item leancloud_visitors" data-flag-title="Mac下升级PHP版本至7.1" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>2.3k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock" aria-hidden="true"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>2 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/3952.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <!-- href-less <a> is a valid inert placeholder (category label) -->
                  <a class="label"> Python <i class="label-arrow" aria-hidden="true"></i>
                  </a>
                  <a href="/3952.html" class="post-title-link" itemprop="url">小白进阶之Scrapy第二篇（登录篇）</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <!-- src is injected by theme JS ("random"); empty alt marks it decorative -->
                  <img itemprop="contentUrl" class="random" alt="">
                </div>
                <div class="excerpt">
                  <p>
                  <p> <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161021225948.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161021225948.jpg" alt="QQ图片20161021225948"></a>其实拿这个网站当教程刚开始我是拒绝、换其他网站吧，又没什么动力···· 然后就··········· 上一篇Scrapy带大家玩了 Spider 今天带带大家玩的东西有两点、第一CrawlSpider、第二Scrapy登录。 目标站点：www.haoduofuli.wang <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/9555112.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/9555112.jpg" alt="9555112"></a> Go Go Go！开整！ 还记得第一步要干啥？ 创建项目文件啊！没有Scrapy环境的小伙伴们请参考第一篇安装一下环境哦！ 打开你的命令行界面（Windows是CMD）使用切换目录的命令到你需要的存放项目文件的磁盘目录</p>
                  <figure class="highlight properties">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attr">D</span>:<span class="string"></span></span><br><span class="line"><span class="attr">scrapy</span> <span class="string">startproject haoduofuli</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>好了 我在D盘创建了一个叫做haoduofuli的项目。 用Pycharm打开这个目录开始我们的爬取之路 Come on！ 下一步我们该做什么记得吧？当然是在items.py中声明字段了！方便我们在Spider中保存获取的内容并通过Pipline进行保存（items.py本质上是一个dict字典） 我在items.py中声明了以下类容：</p>
                  <figure class="highlight mipsasm">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="comment"># -*- coding: utf-8 -*-</span></span><br><span class="line"></span><br><span class="line"><span class="comment"># Define here the models for your scraped items</span></span><br><span class="line">#</span><br><span class="line"><span class="comment"># See documentation in:</span></span><br><span class="line"><span class="comment"># http://doc.scrapy.org/en/latest/topics/items.html</span></span><br><span class="line"></span><br><span class="line">import <span class="keyword">scrapy</span></span><br><span class="line"><span class="keyword"></span></span><br><span class="line"><span class="keyword"></span></span><br><span class="line"><span class="keyword">class </span>HaoduofuliItem(<span class="keyword">scrapy.Item):</span></span><br><span class="line"><span class="keyword"> </span>   <span class="comment"># define the fields for your item here like:</span></span><br><span class="line">    <span class="comment"># name = scrapy.Field()</span></span><br><span class="line"></span><br><span class="line">    category = <span class="keyword">scrapy.Field() </span><span class="comment">#类型</span></span><br><span class="line">    title = <span class="keyword">scrapy.Field() </span> <span class="comment">#标题</span></span><br><span class="line">    imgurl = <span class="keyword">scrapy.Field() </span><span class="comment">#图片的地址</span></span><br><span class="line">    yunlink = <span class="keyword">scrapy.Field() </span>   <span class="comment">#百度云盘的连接</span></span><br><span class="line">    password = <span class="keyword">scrapy.Field() </span>  <span class="comment">#百度云盘的密码</span></span><br><span class="line">    url = <span class="keyword">scrapy.Field() </span>   <span class="comment">#页面的地址</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>至于为啥声明的这些类容：各位自己去网站上观察一下、（主要是吧，贴在这儿的话 估计这博文就要被人道主义销毁了） 别忘记上一篇博文教大家的那种在IDE中运行Scrapy的方法哦！ 好上面的我们搞定、开始下一步编写Spider啦！ <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161021223818.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161021223818.jpg" alt="QQ图片20161021223818"></a> 在spiders文件夹中新建一个文件haoduofuli.py（还不清楚目录和作用的小哥儿快去看看Scrapy的第一篇） 首先导入以下包:</p>
                  <figure class="highlight clean">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> scrapy.spiders <span class="keyword">import</span> CrawlSpider, Rule, Request ##CrawlSpider与Rule配合使用可以骑到历遍全站的作用、Request干啥的我就不解释了</span><br><span class="line"><span class="keyword">from</span> scrapy.linkextractors <span class="keyword">import</span> LinkExtractor ##配合Rule进行URL规则匹配</span><br><span class="line"><span class="keyword">from</span> haoduofuli.items <span class="keyword">import</span> HaoduofuliItem ##不解释</span><br><span class="line"><span class="keyword">from</span> scrapy <span class="keyword">import</span> FormRequest ##Scrapy中用作登录使用的一个包</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>详细介绍请参考：<a href="http://scrapy-chs.readthedocs.io/zh_CN/latest/topics/spiders.html" target="_blank" rel="noopener">http://scrapy-chs.readthedocs.io/zh_CN/latest/topics/spiders.html</a> 中的：CrawlSpider、爬取规则(Crawling rules)、pare_start_url(response)|(此方法重写start_urls)、以及Spider中start_requests()方法的重写。 下面我带大家简单的玩玩儿顺便获取我们想要的东西。 前面提到了我们需要获取全站的资源、如果使用Spider的话就需要写大量的代码（当然只是相对而言的大量代码）！但是我们还有另一个选择那就是今天要说的CrawlSpider！ <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/吃惊表情1.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/吃惊表情1.jpg" alt="吃惊表情1"></a> 首先我们新建一个函数 继承CrawlSpider（上一篇博文是继承Spider哦！） 见证奇迹的时刻到了!</p>
                  <figure class="highlight python">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> scrapy.spiders <span class="keyword">import</span> CrawlSpider, Rule, Request <span class="comment">##CrawlSpider与Rule配合使用可以骑到历遍全站的作用、Request干啥的我就不解释了</span></span><br><span class="line"><span class="keyword">from</span> scrapy.linkextractors <span class="keyword">import</span> LinkExtractor <span class="comment">##配合Rule进行URL规则匹配</span></span><br><span class="line"><span class="keyword">from</span> haoduofuli.items <span class="keyword">import</span> HaoduofuliItem <span class="comment">##不解释</span></span><br><span class="line"><span class="keyword">from</span> scrapy <span class="keyword">import</span> FormRequest <span class="comment">##Scrapy中用作登录使用的一个包</span></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">myspider</span><span class="params">(CrawlSpider)</span>:</span></span><br><span class="line"></span><br><span class="line">    name = <span class="string">'haoduofuli'</span></span><br><span class="line">    allowed_domains = [<span class="string">'haoduofuli.wang'</span>]</span><br><span class="line">    start_urls = [<span class="string">'http://www.haoduofuli.wang'</span>]</span><br><span class="line"></span><br><span class="line">    rules = (</span><br><span class="line">        Rule(LinkExtractor(allow=(<span class="string">'\.html'</span>,)), callback=<span class="string">'parse_item'</span>, follow=<span class="literal">True</span>),</span><br><span class="line">    )</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">parse_item</span><span class="params">(self, response)</span>:</span></span><br><span class="line">        print(response.url)</span><br><span class="line">        <span class="keyword">pass</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>是不是很厉害！加上中间的空行也就不到二十行代码啊！就把整个网站历遍了！So Easy！！ 上面的几行代码的意思 很明了了啊！我只说说rules这一块儿 表示所有response都会通过这个规则进行过滤匹配、匹配啥？当然是后缀为.html的URL了、callback=’parse_item’表示将获取到的response交给parse_item函数处理（这儿要注意了、不要使用parse函数、因为CrawlSpider使用的parse来实现逻辑、如果你使用了parse函数、CrawlSpider会运行失败。）、follow=True表示跟进匹配到的URL（顺便说一句allow的参数支持正则表达式、虽然我也用得不熟、不过超级好使） 至于我这儿的allow的参数为啥是’.\html’；大伙儿自己观察一下我们需要获取想要信息的页面的URL是不是都是以.html结束的？明白了吧！ 然后rules的大概运作方式是下面这样： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ截图20170122164117.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ截图20170122164117.png" alt="QQ截图20170122164117"></a> 图很清晰明了了（本人也是初学、如有错误 还请各位及时留言 我好纠正。）中间的数据流向是靠引擎来完成的。 好了 我们来看看效果如何： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ20170122-011812.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ20170122-011812.png" alt="QQ20170122-011812"></a> 这是我们返回response的URL、一水儿的 URL啊！完美！下面就可以进行提取数据了（诶！不对啊怎么没有没什么提取工具啊！还记得上篇博文说的不？下载器返回的response是支持Xpath的哦！我们直接使用Xpath来提取数据就行啦！） <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/表情2.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/表情2.jpg" alt="表情2"></a> 那么问题来了！Xpath没用过啊！不会用啊！这可咋整啊！别怕！草鸡简单的！！来不着急！ 先大声跟我念：Google大法好啊！ 哈哈哈 没错、我们需要Chrome（至于为啥不用Firefox、因为不知道为啥Firefox的Xpath有时和Chrome的结构不一样 有些时候提取不到数据、Chrome则没什么问题） 来来！跟着我的节奏来！包你五分钟学会使用Xpath！学不会也没关系、毕竟你也不能顺着网线来打我啊！ 第一步：打开你的Chrome浏览器 挑选上面任意一个URL打开进入我们提取数据的页面（不贴图 容易被Say GoogBay）： 第二步：打开Chrome的调试模式找到我们需要提取的内容（如何快速找到呢？还不知道的小哥儿 我只能说你实在是太水了） 点击下面红圈的箭头 然后去网页上点击你需要的内容就 哔！的一下跳过去了！ <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ20170122-013435.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ20170122-013435.png" alt="QQ20170122-013435"></a> 第三步：在跳转的那一行就是你想要提取内容的一行（背景色完全区别于其它行！！）右键Copy ——Copy XPath: 就像下面我提取标题： <a 
href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ20170122-013823.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ20170122-013823.png" alt="QQ20170122-013823"></a> 你会得到这样的内容： //<em>[@id=”post_content”]/p[1] 意思是：在根节点下面的有一个id为post_content的标签里面的第一个p标签（p[1]） 如果你需要提取的是这个标签的文本你需要在后面加点东西变成下面这样： //</em>[@id=”post_content”]/p[1]/text() 后面加上text()标签就是提取文本 如果要提取标签里面的属性就把text()换成@属性比如： //*[@id=”post_content”]/p[1]/@src So Easy！XPath提取完毕！来看看怎么用的！那就更简单了！！！！ response.xpath(‘你Copy的XPath’).extract()[‘要取第几个值’] 注意XPath提取出来的默认是List。 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161021224219.gif" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161021224219.gif" alt="QQ图片20161021224219"></a> 看完上面这一段 估计还没有五分钟吧 ！好了XPath掌握了！我们来开始取我们想要的东西吧！现在我们的代码应该变成这样了：</p>
                  <figure class="highlight python">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> scrapy.spiders <span class="keyword">import</span> CrawlSpider, Rule, Request <span class="comment">##CrawlSpider与Rule配合使用可以骑到历遍全站的作用、Request干啥的我就不解释了</span></span><br><span class="line"><span class="keyword">from</span> scrapy.linkextractors <span class="keyword">import</span> LinkExtractor <span class="comment">##配合Rule进行URL规则匹配</span></span><br><span class="line"><span class="keyword">from</span> haoduofuli.items <span class="keyword">import</span> HaoduofuliItem <span class="comment">##不解释</span></span><br><span class="line"><span class="keyword">from</span> scrapy <span class="keyword">import</span> FormRequest <span class="comment">##Scrapy中用作登录使用的一个包</span></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">myspider</span><span class="params">(CrawlSpider)</span>:</span></span><br><span class="line"></span><br><span class="line">    name = <span class="string">'haoduofuli'</span></span><br><span class="line">    allowed_domains = [<span class="string">'haoduofuli.wang'</span>]</span><br><span class="line">    start_urls = [<span class="string">'http://www.haoduofuli.wang'</span>]</span><br><span class="line"></span><br><span class="line">    rules = (</span><br><span class="line">        Rule(LinkExtractor(allow=(<span class="string">'\.html'</span>,)), callback=<span class="string">'parse_item'</span>, follow=<span class="literal">True</span>),</span><br><span class="line">    )</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">parse_item</span><span class="params">(self, response)</span>:</span></span><br><span class="line">        item = HaoduofuliItem()</span><br><span class="line">        item[<span class="string">'url'</span>] = response.url</span><br><span class="line">        item[<span 
class="string">'category'</span>] = response.xpath(<span class="string">'//*[@id="content"]/div[1]/div[1]/span[2]/a/text()'</span>).extract()[<span class="number">0</span>]</span><br><span class="line">        item[<span class="string">'title'</span>] = response.xpath(<span class="string">'//*[@id="content"]/div[1]/h1/text()'</span>).extract()[<span class="number">0</span>]</span><br><span class="line">        item[<span class="string">'imgurl'</span>] = response.xpath(<span class="string">'//*[@id="post_content"]/p/img/@src'</span>).extract()</span><br><span class="line">        <span class="keyword">return</span> item</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>我们来跑一下！简直完美！ <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ20170122-020745.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ20170122-020745.png" alt="QQ20170122-020745"></a> 关于imgurl那个XPath： 你先随便找一找图片的地址Copy XPath类似得到这样的： //<em>[@id=”post_content”]/p[2]/img 你瞅瞅网页会发现每一个有几张图片 每张地址都在一个p标签下的img标签的src属性中 把这个2去掉变成： //</em>[@id=”post_content”]/p/img 就变成了所有p标签下的img标签了！加上 /@src 后所有图片就获取到啦！（不加[0]是因为我们要所有的地址、加了 就只能获取一个了！） 关于XPath更多的用法与功能详解，建议大家去看看w3cschool (^o^)/ 第一部分完工、开始第二部分的工作吧!登！录! <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161022193315.gif" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161022193315.gif" alt="QQ图片20161022193315"></a> 毕竟这些都不是我们要的重点！我们要的是资源 资源啊！能下载东西的地方！如果不是为了资源 那么爬虫将毫无意义（给工钱的另算）。 但是下载资源是隐藏的，需要登录才能看见（别找我要帐号、我也是借的别人的。） 我们先来看看这个网站是怎么登录的，使用Firefox打开www.haoduofuli.wang/login.php（为啥是Firefox、因为个人感觉Firefox的表单界面看起来很爽啊！哈哈哈） 打开页面之后开启调试模式（怎么开不说了）—开启持续日志（不然跳转之后没了） <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ截图20170122101749.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ截图20170122101749.png" alt="QQ截图20170122101749"></a> 然后选择网络—选中html和XHR（这样页面类容就会少很多、又不会缺少我们需要的东西） <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ截图20170122103140.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ截图20170122103140.png" alt="QQ截图20170122103140"></a> 现在开始登录（顺手把记住登录也勾上）！调试窗口不要关啊！！！！登录完毕之后你会发现出现一些内容 我们找到其中方法为post的请求、然后选择 参数 就能看到我们需要的登录表单啦！ <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ截图20170122104241.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ截图20170122104241.png" alt="QQ截图20170122104241"></a> 我划掉的是帐号密码、这个位置应该显示你的帐号密码（这是很简单的一个登录表单、不通用但是思路是一样的。）找到了我们想要的东西我们开始登录吧 首先要知道Scrapy登录是如何实现的？ 
借助于FormRequest这个包实现的（前面已经导入过了），下面开整。不需要太大的改动，只需增加一些函数，就可以轻而易举地实现登录。 将我们的start_urls中的地址换掉，换成我们的登录地址www.haoduofuli.wang/login.php，变成这样：</p>
                  <figure class="highlight clean">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> scrapy.spiders <span class="keyword">import</span> CrawlSpider, Rule, Request ##CrawlSpider与Rule配合使用可以骑到历遍全站的作用、Request干啥的我就不解释了</span><br><span class="line"><span class="keyword">from</span> scrapy.linkextractors <span class="keyword">import</span> LinkExtractor ##配合Rule进行URL规则匹配</span><br><span class="line"><span class="keyword">from</span> haoduofuli.items <span class="keyword">import</span> HaoduofuliItem ##不解释</span><br><span class="line"><span class="keyword">from</span> scrapy <span class="keyword">import</span> FormRequest ##Scrapy中用作登录使用的一个包</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"></span><br><span class="line">account = <span class="string">'你的账号'</span></span><br><span class="line">password = <span class="string">'你的密码'</span></span><br><span class="line"></span><br><span class="line"><span class="keyword">class</span> myspider(CrawlSpider):</span><br><span class="line"></span><br><span class="line">    name = <span class="string">'haoduofuli'</span></span><br><span class="line">    allowed_domains = [<span class="string">'haoduofuli.wang'</span>]</span><br><span class="line">    start_urls = [<span class="string">'http://www.haoduofuli.wang/wp-login.php'</span>]</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>那么问题来了！参考上面的流程图你会发现、这丫的没法登录表单没法写啊！start_urls返回的responses就直接给rules进行处理了诶！我们需要一个什么方法来截断start_urls返回的responses 方便我们把登录的表单提交上去！那么问题来了 ！该用啥？ 答案是：parse_start_url(response)这方法；此方法作用是当start_url返回responses时调用这个方法。官方解释如下： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ截图20170122105258.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ截图20170122105258.png" alt="QQ截图20170122105258"></a> 然后呢？当然是构造表单并通过FormRequests提交了！所以我们的程序现在就应该变成这样子了:</p>
                  <figure class="highlight clean">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> scrapy.spiders <span class="keyword">import</span> CrawlSpider, Rule, Request ##CrawlSpider与Rule配合使用可以骑到历遍全站的作用、Request干啥的我就不解释了</span><br><span class="line"><span class="keyword">from</span> scrapy.linkextractors <span class="keyword">import</span> LinkExtractor ##配合Rule进行URL规则匹配</span><br><span class="line"><span class="keyword">from</span> haoduofuli.items <span class="keyword">import</span> HaoduofuliItem ##不解释</span><br><span class="line"><span class="keyword">from</span> scrapy <span class="keyword">import</span> FormRequest ##Scrapy中用作登录使用的一个包</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"></span><br><span class="line">account = <span class="string">'你的帐号'</span></span><br><span class="line">password = <span class="string">'你的密码'</span></span><br><span class="line"></span><br><span class="line"><span class="keyword">class</span> myspider(CrawlSpider):</span><br><span class="line"></span><br><span class="line">    name = <span class="string">'haoduofuli'</span></span><br><span class="line">    allowed_domains = [<span class="string">'haoduofuli.wang'</span>]</span><br><span class="line">    start_urls = [<span class="string">'http://www.haoduofuli.wang/wp-login.php'</span>]</span><br><span class="line"></span><br><span class="line">    def parse_start_url(self, response):</span><br><span class="line">        ###</span><br><span class="line">        如果你登录的有验证码之类的，你就可以在此处加入各种处理方法；</span><br><span class="line">        比如提交给打码平台，或者自己手动输入、再或者pil处理之类的</span><br><span class="line">        ###</span><br><span class="line">        formdate = &#123;</span><br><span class="line">                <span class="string">'log'</span>: account,</span><br><span class="line">                <span class="string">'pwd'</span>: password,</span><br><span class="line">                <span class="string">'rememberme'</span>: <span 
class="string">"forever"</span>,</span><br><span class="line">                <span class="string">'wp-submit'</span>: <span class="string">"登录"</span>,</span><br><span class="line">                <span class="string">'redirect_to'</span>: <span class="string">"http://www.haoduofuli.wang/wp-admin/"</span>,</span><br><span class="line">                <span class="string">'testcookie'</span>: <span class="string">"1"</span></span><br><span class="line">         &#125;</span><br><span class="line">        return [FormRequest.from_response(response, formdata=formdate, callback=self.after_login)]</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>最后一句的意思是：提交表单formdate，并回调after_login函数处理后续内容（一般用来判断是否登录成功）， 然后开始请求我们需要爬取的页面， 现在就变成这样了！</p>
                  <figure class="highlight scala">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">from scrapy.spiders <span class="keyword">import</span> <span class="type">CrawlSpider</span>, <span class="type">Rule</span>, <span class="type">Request</span> ##<span class="type">CrawlSpider</span>与<span class="type">Rule</span>配合使用可以骑到历遍全站的作用、<span class="type">Request</span>干啥的我就不解释了</span><br><span class="line">from scrapy.linkextractors <span class="keyword">import</span> <span class="type">LinkExtractor</span> ##配合<span class="type">Rule</span>进行<span class="type">URL</span>规则匹配</span><br><span class="line">from haoduofuli.items <span class="keyword">import</span> <span class="type">HaoduofuliItem</span> ##不解释</span><br><span class="line">from scrapy <span class="keyword">import</span> <span class="type">FormRequest</span> ##<span class="type">Scrapy</span>中用作登录使用的一个包</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"></span><br><span class="line">account = '你的帐号'</span><br><span class="line">password = '你的密码'</span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">myspider</span>(<span class="params"><span class="type">CrawlSpider</span></span>)</span>:</span><br><span class="line"></span><br><span class="line">    name = <span class="symbol">'haoduoful</span>i'</span><br><span class="line">    allowed_domains = [<span class="symbol">'haoduofuli</span>.wang']</span><br><span class="line">    start_urls = [<span class="symbol">'http</span>:<span class="comment">//www.haoduofuli.wang/wp-login.php']</span></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">parse_start_url</span></span>(self, response):</span><br><span class="line">        ###</span><br><span class="line">        如果你登录的有验证码之类的，你就可以在此处加入各种处理方法；</span><br><span class="line">        比如提交给打码平台，或者自己手动输入、再或者pil处理之类的</span><br><span class="line">      
  ###</span><br><span class="line">        formdate = &#123;</span><br><span class="line">                <span class="symbol">'lo</span>g': account,</span><br><span class="line">                <span class="symbol">'pw</span>d': password,</span><br><span class="line">                <span class="symbol">'rememberm</span>e': <span class="string">"forever"</span>,</span><br><span class="line">                <span class="symbol">'wp</span>-submit': <span class="string">"登录"</span>,</span><br><span class="line">                <span class="symbol">'redirect_t</span>o': <span class="string">"http://www.haoduofuli.wang/wp-admin/"</span>,</span><br><span class="line">                <span class="symbol">'testcooki</span>e': <span class="string">"1"</span></span><br><span class="line">         &#125;</span><br><span class="line">        <span class="keyword">return</span> [<span class="type">FormRequest</span>.from_response(response, formdata=formdate, callback=self.after_login)]</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">after_login</span></span>(self, response):</span><br><span class="line">        ###</span><br><span class="line">        可以在此处加上判断来确认是否登录成功、进行其他动作。</span><br><span class="line">        ###</span><br><span class="line">        lnk = <span class="symbol">'http</span>:<span class="comment">//www.haoduofuli.wang'</span></span><br><span class="line">        <span class="keyword">return</span> <span class="type">Request</span>(lnk)</span><br><span class="line"></span><br><span class="line">    rules = (</span><br><span class="line">        <span class="type">Rule</span>(<span class="type">LinkExtractor</span>(allow=('\.html',)), callback=<span class="symbol">'parse_ite</span>m', follow=<span class="type">True</span>),</span><br><span class="line">    )</span><br><span class="line"></span><br><span class="line">    <span 
class="function"><span class="keyword">def</span> <span class="title">parse_item</span></span>(self, response):</span><br><span class="line">        item = <span class="type">HaoduofuliItem</span>()</span><br><span class="line">        <span class="keyword">try</span>:</span><br><span class="line">            item[<span class="symbol">'categor</span>y'] = response.xpath('<span class="comment">//*[@id="content"]/div[1]/div[1]/span[2]/a/text()').extract()[0]</span></span><br><span class="line">            item[<span class="symbol">'titl</span>e'] = response.xpath('<span class="comment">//*[@id="content"]/div[1]/h1/text()').extract()[0]</span></span><br><span class="line">            item[<span class="symbol">'imgur</span>l'] = response.xpath('<span class="comment">//*[@id="post_content"]/p/img/@src').extract()</span></span><br><span class="line">            item[<span class="symbol">'yunlin</span>k'] = response.xpath('<span class="comment">//*[@id="post_content"]/blockquote/a/@href').extract()[0]</span></span><br><span class="line">            item[<span class="symbol">'passwor</span>d'] = response.xpath('<span class="comment">//*[@id="post_content"]/blockquote/font/text()').extract()[0]</span></span><br><span class="line">            <span class="keyword">return</span> item</span><br><span class="line">        except:</span><br><span class="line">            item[<span class="symbol">'categor</span>y'] = response.xpath('<span class="comment">//*[@id="content"]/div[1]/div[1]/span[2]/a/text()').extract()[0]</span></span><br><span class="line">            item[<span class="symbol">'titl</span>e'] = response.xpath('<span class="comment">//*[@id="content"]/div[1]/h1/text()').extract()[0]</span></span><br><span class="line">            item[<span class="symbol">'imgur</span>l'] = response.xpath('<span class="comment">//*[@id="post_content"]/p/img/@src').extract()</span></span><br><span class="line">            item[<span class="symbol">'yunlin</span>k'] = 
response.xpath('<span class="comment">//*[@id="post_content"]/blockquote/p/a/@href).extract()[0] </span></span><br><span class="line">            item[<span class="symbol">'passwor</span>d'] = response.xpath('<span class="comment">//*[@id="post_content"]/blockquote/p/span/text()').extract()[0] </span></span><br><span class="line">            <span class="keyword">return</span> item</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>return Request（lnk）就表示我们的开始页面了 至于为啥多了一个try判断；完全是因为 这站长不守规矩啊！有些页面不一样·····我能怎么办 我也很无奈啊！ 都是被逼的。囧 好了！Spider写完啦！但是我们的工作还没完！！！网站是靠什么知道这个request是否是登录用户发出的？答案是 Cookie！ 所以我们需要 下载器 在下载网页之前在request中加入Cookie来向网站证明我们是登录用户身份；才能获取到需要登录才能查看的信息！ 这个该怎么做？现在Scrapy的中间件派上用场了！ 关于Cookie中间件参考：<a href="http://scrapy-chs.readthedocs.io/zh_CN/latest/topics/downloader-middleware.html#module-scrapy.contrib.downloadermiddleware.cookies" target="_blank" rel="noopener">http://scrapy-chs.readthedocs.io/zh_CN/latest/topics/downloader-middleware.html#module-scrapy.contrib.downloadermiddleware.cookies</a> 我们需要做的就是在settings.py中的 DOWNLOADER_MIDDLEWARES 开启这个中间件：scrapy.downloadermiddlewares.cookies.CookiesMiddleware 请注意！！！！！！ 每一个中间件会对request进行操作、你所做的操作可能会依赖于前一个中间件、所以每个中间件的顺序就异常的重要。具体该设置多少请参考： <a href="http://scrapy-chs.readthedocs.io/zh_CN/latest/topics/settings.html#std:setting-DOWNLOADER_MIDDLEWARES_BASE" target="_blank" rel="noopener">http://scrapy-chs.readthedocs.io/zh_CN/latest/topics/settings.html#std:setting-DOWNLOADER_MIDDLEWARES_BASE</a> <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ截图20170122165743.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ截图20170122165743.png" alt="QQ截图20170122165743"></a> 中的值设置！！这点务必注意···如果不清楚依赖关系 请按照上图的值设置。 从上面可以看出Cookie中间件的值为700 、我们在settings.py设置也应该为700 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ截图20170122170041.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ截图20170122170041.png" alt="QQ截图20170122170041"></a> 我注释掉的请无视掉！！！ 做好这些以后Scrapy运作的整个流程大概就变成了下面这样： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ20170122-232839.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ20170122-232839.png" alt="QQ20170122-232839"></a></p>
                  <figure class="highlight kotlin">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">return</span> Request(lnk) 这一个请求也算作 初始URL 只不过 不是start_urls的返回response 所以不会调用parse_start_url函数哦！</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ20170122-230207.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2017/01/QQ20170122-230207.png" alt="QQ20170122-230207"></a> 跑一下！效果杠杠滴！！！至于后面的数据持久化（如何保存数据、大家请自行解决哦！比毕竟上一篇博文讲过了、） 这种更适合使用MongoDB存储 超级简单好使。 至此本篇博文结束。 这个还有一个分布式的版本、现在不想写了··· 等年后再写吧。 另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。另外我真的一个资源都没看。</p>
                  </p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/哎哟卧槽" class="author" itemprop="url" rel="index">哎哟卧槽</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2017-01-22 23:34:39" itemprop="dateCreated datePublished" datetime="2017-01-22T23:34:39+08:00">2017-01-22</time>
                </span>
                <span id="/3952.html" class="post-meta-item leancloud_visitors" data-flag-title="小白进阶之Scrapy第二篇（登录篇）" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>10k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>9 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/3912.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> 职位推荐 <i class="label-arrow"></i>
                  </a>
                  <a href="/3912.html" class="post-title-link" itemprop="url">[北京][14k-25k][PHP + 前端][两年经验] Laravel/Vue/Slack 灵析研发团队，找对 “世界” 有理解的工程师</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <img itemprop="contentUrl" class="random">
                </div>
                <div class="excerpt">
                  <p>
                  <h2 id="置顶"><a href="#置顶" class="headerlink" title="置顶"></a>置顶</h2>
                  <p>博主实习过的一家公司，工作环境非常好，薪资丰厚，各种福利请往下看！和一群可爱的人一起工作，生活真的很充实，我不止在一篇博客里安利过了，现在团队又需要新能量啦，大家快看过来~</p>
                  <h2 id="灵析团队在做什么"><a href="#灵析团队在做什么" class="headerlink" title="灵析团队在做什么?"></a>灵析团队在做什么?</h2>
                  <p>灵析致力于<strong>让人人皆可参与公益，且受益</strong>。目前核心产品为：<strong><a href="http://www.lingxi360.com/" target="_blank" rel="noopener">灵析</a></strong> 基于非营利组织的筹款传播、与数据管理。从国际知名的“TNC”、到国内知名的“希望工程”到“清华大学教育基金会”到“瓷娃娃罕见病关爱中心”……基本上你所了解的知名非营利组织都在用我们的产品和服务。 创立4年，稳健前行，靠谱，不浮躁。<strong>去年融资，能挣钱，不差钱。唯缺更多认同我们理念和业务的工程师和我们一起更快满足社会需求，高速健康发展。</strong></p>
                  <hr>
                  <h2 id="为什么灵析研发团队值得你考虑"><a href="#为什么灵析研发团队值得你考虑" class="headerlink" title="为什么灵析研发团队值得你考虑?"></a>为什么灵析研发团队值得你考虑?</h2>
                  <ul>
                    <li><strong>研发</strong>
                      <ul>
                        <li>我们使用 Laravel/Vue 作为主力框架，使用 Slack/Gitlab/Worktile 进行团队协作</li>
                        <li>我们实行新人导师制，定期外部/内部培训、辅导、分享，供应各类技术峰会门票</li>
                        <li>我们的技术架构和团队架构都在高速扩展期，有大量技术、架构、成长实践机会</li>
                      </ul>
                    </li>
                    <li><strong>基础</strong>：六险一金、弹性工作、年终奖、内推奖</li>
                    <li><strong>福利</strong>：旅游、硬件补贴、节假日福利（现金、京东卡…）、完善的加班调休制度（我们不鼓励加班！）、各种团建（以吃为主，以玩为辅）、零食</li>
                    <li><strong>健康</strong>：补充医保，体检，健身卡，每周体育活动（羽毛球），每日体育活动（乒乓球、桌上足球、拳击、XBOX 360…）</li>
                    <li><strong>成长</strong>：阅读基金、周四 Pizza Time</li>
                    <li><strong>团队</strong>：姑娘占据半壁江山的灵析团队，是一群有着不同背景、专业有趣的小伙伴，在 技术、创意营销、社会企业、游戏多媒体等领域都各有专长</li>
                  </ul>
                  <hr>
                  <h2 id="招聘-PHP-工程师（Ⅰ-级）"><a href="#招聘-PHP-工程师（Ⅰ-级）" class="headerlink" title="招聘 - PHP 工程师（Ⅰ 级）"></a>招聘 - PHP 工程师（Ⅰ 级）</h2>
                  <h3 id="岗位职责"><a href="#岗位职责" class="headerlink" title="岗位职责"></a>岗位职责</h3>
                  <ol>
                    <li>在小组 Leader 和同事的支持和配合下，根据产品或研发需求，按时、保质完成以后端为主的开发、测试、 文档编写任务</li>
                    <li>参与构建系统原型及关键技术问题的攻关活动</li>
                    <li>在开发过程中发现并解决存在的问题，帮助团队持续改进开发效率</li>
                    <li>改进框架、基础架构，持续优化服务</li>
                  </ol>
                  <h3 id="岗位要求"><a href="#岗位要求" class="headerlink" title="岗位要求"></a>岗位要求</h3>
                  <ol>
                    <li>熟练掌握 LNMP + 缓存架构，一到两年中小型网站系统的开发、维护调优经验</li>
                    <li>熟练掌握 MVC/REST 架构，对构建可伸缩、可扩展、高可用系统有一定的了解</li>
                    <li>熟悉常用设计模式，熟悉 S.O.L.I.D 原则</li>
                    <li>熟练掌握基础的前端技术（HTML+CSS+JS），了解（熟悉可以加分）现代前端技术（ES6、Vue/React 、前端工作流等）</li>
                  </ol>
                  <h3 id="加分项"><a href="#加分项" class="headerlink" title="加分项"></a>加分项</h3>
                  <ol>
                    <li>熟练使用 Linux</li>
                    <li>熟悉数据库、缓存、应用各层性能调优</li>
                  </ol>
                  <hr>
                  <h2 id="招聘-前端工程师（Ⅰ-级）"><a href="#招聘-前端工程师（Ⅰ-级）" class="headerlink" title="招聘 - 前端工程师（Ⅰ 级）"></a>招聘 - 前端工程师（Ⅰ 级）</h2>
                  <h3 id="岗位职责-1"><a href="#岗位职责-1" class="headerlink" title="岗位职责"></a>岗位职责</h3>
                  <ol>
                    <li>在小组 Leader 和同事的支持和配合下，根据产品或研发需求，按时、保质完成以前端为主的开发、测试、 文档编写任务</li>
                    <li>参与构建系统原型及关键技术问题的攻关活动</li>
                    <li>在开发过程中发现并解决存在的问题，帮助团队持续改进开发效率</li>
                    <li>改进框架、基础架构，持续优化服务</li>
                  </ol>
                  <h3 id="岗位要求-1"><a href="#岗位要求-1" class="headerlink" title="岗位要求"></a>岗位要求</h3>
                  <ol>
                    <li>1 年以上前端实际开发经验，有多个中小型网站系统前端的开发、维护调优经验<ul>
                        <li>基础：HTML+CSS3+JS，熟练掌握 jQuery、less、lodash 等常用辅助库</li>
                        <li>JS 基础：扎实的 JS 基础，最新的 ES 标准</li>
                        <li>MVVM 框架：如 Vue/React 等，开发过中小型 SPA（真实项目！不要只是做个几个组件练习）</li>
                      </ul>
                    </li>
                    <li>熟悉基于 MVC/REST 架构前后端接口设计、对接流程</li>
                    <li>熟悉前端工程化工作流程，熟练使用 webpack/gulp 等前端工具链</li>
                    <li>熟悉常用设计模式，熟悉 S.O.L.I.D 原则</li>
                    <li>熟悉常见的浏览器的特点和限制，熟悉常用性能优化手段</li>
                  </ol>
                  <h3 id="加分项-1"><a href="#加分项-1" class="headerlink" title="加分项"></a>加分项</h3>
                  <ol>
                    <li>熟悉 PHP 或 Node.js，能开发后端接口</li>
                    <li>熟练使用 Linux</li>
                  </ol>
                  <hr>
                  <h2 id="补充-加分项！，除了各岗位的要求，还有哪些特征可以让我们更契合？"><a href="#补充-加分项！，除了各岗位的要求，还有哪些特征可以让我们更契合？" class="headerlink" title="补充 加分项！，除了各岗位的要求，还有哪些特征可以让我们更契合？"></a>补充 <code>加分项！</code>，除了各岗位的要求，还有哪些特征可以让我们更契合？</h2>
                  <ul>
                    <li><strong>认同我们的理念</strong>：Do Good、 Be Proud、Make History，期待用技术推动公益行业发展</li>
                    <li><strong>有经验</strong>
                      <ul>
                        <li>专业：一到两年工作经验，或（校内）项目开发经验</li>
                        <li>开源：有开源项目开发维护经验</li>
                        <li>全栈：其它语言、场景的开发经验，如算法/Shell/Cpp/Java/Go/Python 等</li>
                        <li>协作：熟练使用 Slack、Git、Webpack 等团队协作、工程化开发工具</li>
                      </ul>
                    </li>
                    <li><strong>有代码洁癖和工程思想</strong>：我们期望你是创造价值的工程师，不是售卖劳力的程序员</li>
                    <li><strong>有 Geek 范</strong>： 认同 UNIX 设计哲学，熟练使用各种 Geek 工具<ul>
                        <li><strong>有设计思维</strong>：一定的产品思维及审美品位</li>
                      </ul>
                    </li>
                    <li><strong>善于协作沟通</strong>：有团队沟通协作、项目进度及时间管理实践基础</li>
                    <li><strong>是一个终身学习者</strong>：保持学习，热爱分享，技术视野开阔，对业界新技术敏感</li>
                  </ul>
                  <hr>
                  <h2 id="简历投递"><a href="#简历投递" class="headerlink" title="简历投递"></a>简历投递</h2>
                  <p>简历可以提交到这个表单： <a href="http://lxi.me/9yh7t" target="_blank" rel="noopener">http://lxi.me/9yh7t</a> HR 邮箱： hr@lingxi360.com 或者，加我微信给我： <a href="https://s.lingxi360.com/hr/img/mywechat.jpeg" target="_blank" rel="noopener">https://s.lingxi360.com/hr/img/mywechat.jpeg</a> 请带上你的 简历、 github 、作品、博客或其它能表现、证明你能力的东西！ 补充说明： 以上前后端的 JD 是对应的一到两年实际开发经验 /能力（我们内部的 Ⅰ 级），如果你的能力超过上面的描述，我们同时也需要（Ⅱ 级）的伙伴来担当更具挑战和更高回报的工作，请联系我！（其实是二级的 JD 还没写好…） <img src="https://dn-phphub.qbox.me/uploads/images/201605/30/4552/4PCYsz8b7T.png" alt=""> P.S 博主就是后面那个二笔伸手抢镜的（捂脸哭）</p>
                  </p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/崔庆才" class="author" itemprop="url" rel="index">崔庆才</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2017-01-08 12:19:54" itemprop="dateCreated datePublished" datetime="2017-01-08T12:19:54+08:00">2017-01-08</time>
                </span>
                <span id="/3912.html" class="post-meta-item leancloud_visitors" data-flag-title="[北京][14k-25k][PHP + 前端][两年经验] Laravel/Vue/Slack 灵析研发团队，找对 “世界” 有理解的工程师" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>2k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>2 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/3801.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> Python <i class="label-arrow"></i>
                  </a>
                  <a href="/3801.html" class="post-title-link" itemprop="url">使用Python收集获取Linux系统主机信息</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <img itemprop="contentUrl" class="random">
                </div>
                <div class="excerpt">
                  <p>
                  <p>使用 python 代码收集主机的系统信息，主要：主机名称、IP、系统版本、服务器厂商、型号、序列号、CPU信息、内存等系统信息。</p>
                  <figure class="highlight python">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span 
class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br><span class="line">68</span><br><span class="line">69</span><br><span class="line">70</span><br><span class="line">71</span><br><span class="line">72</span><br><span class="line">73</span><br><span class="line">74</span><br><span class="line">75</span><br><span class="line">76</span><br><span class="line">77</span><br><span class="line">78</span><br><span class="line">79</span><br><span class="line">80</span><br><span class="line">81</span><br><span class="line">82</span><br><span class="line">83</span><br><span class="line">84</span><br><span class="line">85</span><br><span class="line">86</span><br><span class="line">87</span><br><span class="line">88</span><br><span class="line">89</span><br><span class="line">90</span><br><span class="line">91</span><br><span class="line">92</span><br><span class="line">93</span><br><span class="line">94</span><br><span class="line">95</span><br><span class="line">96</span><br><span class="line">97</span><br><span class="line">98</span><br><span class="line">99</span><br><span class="line">100</span><br><span class="line">101</span><br><span class="line">102</span><br><span class="line">103</span><br><span class="line">104</span><br><span class="line">105</span><br><span class="line">106</span><br><span class="line">107</span><br><span class="line">108</span><br><span class="line">109</span><br><span class="line">110</span><br><span class="line">111</span><br><span class="line">112</span><br><span class="line">113</span><br><span class="line">114</span><br><span class="line">115</span><br><span class="line">116</span><br><span class="line">117</span><br><span class="line">118</span><br><span class="line">119</span><br><span class="line">120</span><br><span class="line">121</span><br><span class="line">122</span><br><span 
class="line">123</span><br><span class="line">124</span><br><span class="line">125</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="comment">#!/usr/bin/env python</span></span><br><span class="line"><span class="comment">#encoding: utf-8</span></span><br><span class="line"></span><br><span class="line"><span class="string">'''</span></span><br><span class="line"><span class="string">收集主机的信息：</span></span><br><span class="line"><span class="string">主机名称、IP、系统版本、服务器厂商、型号、序列号、CPU信息、内存信息</span></span><br><span class="line"><span class="string">'''</span></span><br><span class="line"></span><br><span class="line"><span class="keyword">from</span> subprocess <span class="keyword">import</span> Popen, PIPE</span><br><span class="line"><span class="keyword">import</span> os,sys</span><br><span class="line"></span><br><span class="line"><span class="string">''' 获取 ifconfig 命令的输出 '''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">getIfconfig</span><span class="params">()</span>:</span></span><br><span class="line">    p = Popen([<span class="string">'ifconfig'</span>], stdout = PIPE)</span><br><span class="line">    data = p.stdout.read()</span><br><span class="line">    <span class="keyword">return</span> data</span><br><span class="line"></span><br><span class="line"><span class="string">''' 获取 dmidecode 命令的输出 '''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">getDmi</span><span class="params">()</span>:</span></span><br><span class="line">    p = Popen([<span class="string">'dmidecode'</span>], stdout = PIPE)</span><br><span class="line">    data = p.stdout.read()</span><br><span class="line">    <span class="keyword">return</span> data</span><br><span class="line"></span><br><span class="line"><span class="string">''' 根据空行分段落 返回段落列表'''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">parseData</span><span 
class="params">(data)</span>:</span></span><br><span class="line">    parsed_data = []</span><br><span class="line">    new_line = <span class="string">''</span></span><br><span class="line">    data = [i <span class="keyword">for</span> i <span class="keyword">in</span> data.split(<span class="string">'\n'</span>) <span class="keyword">if</span> i]</span><br><span class="line">    <span class="keyword">for</span> line <span class="keyword">in</span> data:</span><br><span class="line">        <span class="keyword">if</span> line[<span class="number">0</span>].strip():</span><br><span class="line">            parsed_data.append(new_line)</span><br><span class="line">            new_line = line + <span class="string">'\n'</span></span><br><span class="line">        <span class="keyword">else</span>:</span><br><span class="line">            new_line += line + <span class="string">'\n'</span></span><br><span class="line">    parsed_data.append(new_line)</span><br><span class="line">    <span class="keyword">return</span> [i <span class="keyword">for</span> i <span class="keyword">in</span> parsed_data <span class="keyword">if</span> i]</span><br><span class="line"></span><br><span class="line"><span class="string">''' 根据输入的段落数据分析出ifconfig的每个网卡ip信息 '''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">parseIfconfig</span><span class="params">(parsed_data)</span>:</span></span><br><span class="line">    dic = &#123;&#125;</span><br><span class="line">    parsed_data = [i <span class="keyword">for</span> i <span class="keyword">in</span> parsed_data <span class="keyword">if</span> <span class="keyword">not</span> i.startswith(<span class="string">'lo'</span>)]</span><br><span class="line">    <span class="keyword">for</span> lines <span class="keyword">in</span> parsed_data:</span><br><span class="line">        line_list = lines.split(<span class="string">'\n'</span>)</span><br><span class="line">        
devname = line_list[<span class="number">0</span>].split()[<span class="number">0</span>]</span><br><span class="line">        macaddr = line_list[<span class="number">0</span>].split()[<span class="number">-1</span>]</span><br><span class="line">        ipaddr  = line_list[<span class="number">1</span>].split()[<span class="number">1</span>].split(<span class="string">':'</span>)[<span class="number">1</span>]</span><br><span class="line">        <span class="keyword">break</span></span><br><span class="line">    dic[<span class="string">'ip'</span>] = ipaddr</span><br><span class="line">    <span class="keyword">return</span> dic</span><br><span class="line"></span><br><span class="line"><span class="string">''' 根据输入的dmi段落数据 分析出指定参数 '''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">parseDmi</span><span class="params">(parsed_data)</span>:</span></span><br><span class="line">    dic = &#123;&#125;</span><br><span class="line">    parsed_data = [i <span class="keyword">for</span> i <span class="keyword">in</span> parsed_data <span class="keyword">if</span> i.startswith(<span class="string">'System Information'</span>)]</span><br><span class="line">    parsed_data = [i <span class="keyword">for</span> i <span class="keyword">in</span> parsed_data[<span class="number">0</span>].split(<span class="string">'\n'</span>)[<span class="number">1</span>:] <span class="keyword">if</span> i]</span><br><span class="line">    dmi_dic = dict([i.strip().split(<span class="string">':'</span>) <span class="keyword">for</span> i <span class="keyword">in</span> parsed_data])</span><br><span class="line">    dic[<span class="string">'vender'</span>] = dmi_dic[<span class="string">'Manufacturer'</span>].strip()</span><br><span class="line">    dic[<span class="string">'product'</span>] = dmi_dic[<span class="string">'Product Name'</span>].strip()</span><br><span class="line">    dic[<span class="string">'sn'</span>] = 
dmi_dic[<span class="string">'Serial Number'</span>].strip()</span><br><span class="line">    <span class="keyword">return</span> dic</span><br><span class="line"></span><br><span class="line"><span class="string">''' 获取Linux系统主机名称 '''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">getHostname</span><span class="params">()</span>:</span></span><br><span class="line">    <span class="keyword">with</span> open(<span class="string">'/etc/sysconfig/network'</span>) <span class="keyword">as</span> fd:</span><br><span class="line">        <span class="keyword">for</span> line <span class="keyword">in</span> fd:</span><br><span class="line">            <span class="keyword">if</span> line.startswith(<span class="string">'HOSTNAME'</span>):</span><br><span class="line">                hostname = line.split(<span class="string">'='</span>)[<span class="number">1</span>].strip()</span><br><span class="line">                <span class="keyword">break</span></span><br><span class="line">    <span class="keyword">return</span> &#123;<span class="string">'hostname'</span>:hostname&#125;</span><br><span class="line"></span><br><span class="line"><span class="string">''' 获取Linux系统的版本信息 '''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">getOsVersion</span><span class="params">()</span>:</span></span><br><span class="line">    <span class="keyword">with</span> open(<span class="string">'/etc/issue'</span>) <span class="keyword">as</span> fd:</span><br><span class="line">        <span class="keyword">for</span> line <span class="keyword">in</span> fd:</span><br><span class="line">            osver = line.strip()</span><br><span class="line">            <span class="keyword">break</span></span><br><span class="line">    <span class="keyword">return</span> &#123;<span class="string">'osver'</span>:osver&#125;</span><br><span class="line"></span><br><span 
class="line"><span class="string">''' 获取CPU的型号和CPU的核心数 '''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">getCpu</span><span class="params">()</span>:</span></span><br><span class="line">    num = <span class="number">0</span></span><br><span class="line">    <span class="keyword">with</span> open(<span class="string">'/proc/cpuinfo'</span>) <span class="keyword">as</span> fd:</span><br><span class="line">        <span class="keyword">for</span> line <span class="keyword">in</span> fd:</span><br><span class="line">            <span class="keyword">if</span> line.startswith(<span class="string">'processor'</span>):</span><br><span class="line">                num += <span class="number">1</span></span><br><span class="line">            <span class="keyword">if</span> line.startswith(<span class="string">'model name'</span>):</span><br><span class="line">                cpu_model = line.split(<span class="string">':'</span>)[<span class="number">1</span>].strip().split()</span><br><span class="line">                cpu_model = cpu_model[<span class="number">0</span>] + <span class="string">' '</span> + cpu_model[<span class="number">2</span>]  + <span class="string">' '</span> + cpu_model[<span class="number">-1</span>]</span><br><span class="line">    <span class="keyword">return</span> &#123;<span class="string">'cpu_num'</span>:num, <span class="string">'cpu_model'</span>:cpu_model&#125;</span><br><span class="line"></span><br><span class="line"><span class="string">''' 获取Linux系统的总物理内存 '''</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">getMemory</span><span class="params">()</span>:</span></span><br><span class="line">    <span class="keyword">with</span> open(<span class="string">'/proc/meminfo'</span>) <span class="keyword">as</span> fd:</span><br><span class="line">        <span class="keyword">for</span> line <span 
class="keyword">in</span> fd:</span><br><span class="line">            <span class="keyword">if</span> line.startswith(<span class="string">'MemTotal'</span>):</span><br><span class="line">                mem = int(line.split()[<span class="number">1</span>].strip())</span><br><span class="line">                <span class="keyword">break</span></span><br><span class="line">    mem = <span class="string">'%.f'</span> % (mem / <span class="number">1024.0</span>) + <span class="string">' MB'</span></span><br><span class="line">    <span class="keyword">return</span> &#123;<span class="string">'Memory'</span>:mem&#125;</span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">'__main__'</span>:</span><br><span class="line">    dic = &#123;&#125;</span><br><span class="line">    data_ip = getIfconfig()</span><br><span class="line">    parsed_data_ip = parseData(data_ip)</span><br><span class="line">    ip = parseIfconfig(parsed_data_ip)</span><br><span class="line">    </span><br><span class="line">    data_dmi = getDmi()</span><br><span class="line">    parsed_data_dmi = parseData(data_dmi)</span><br><span class="line">    dmi = parseDmi(parsed_data_dmi)</span><br><span class="line"></span><br><span class="line">    hostname = getHostname()</span><br><span class="line">    osver = getOsVersion()</span><br><span class="line">    cpu = getCpu()</span><br><span class="line">    mem = getMemory()</span><br><span class="line">    </span><br><span class="line">    dic.update(ip)</span><br><span class="line">    dic.update(dmi)</span><br><span class="line">    dic.update(hostname)</span><br><span class="line">    dic.update(osver)</span><br><span class="line">    dic.update(cpu)</span><br><span class="line">    dic.update(mem)</span><br><span class="line"></span><br><span class="line">    <span class="string">''' 将获取到的所有数据信息并按简单格式对齐显示 '''</span></span><br><span class="line">    <span class="keyword">for</span> 
k,v <span class="keyword">in</span> dic.items():</span><br><span class="line">        <span class="keyword">print</span> <span class="string">'%-10s:%s'</span> % (k, v)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>实验测试结果：</p>
                  <figure class="highlight angelscript">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">product   :VMware Virtual Platform</span><br><span class="line">osver     :CentOS release <span class="number">6.4</span> (Final)</span><br><span class="line">sn        :VMware<span class="number">-56</span> <span class="number">4</span>d b4 <span class="number">6</span>c <span class="number">05</span> e5 <span class="number">20</span> dc-c6 <span class="number">49</span> <span class="number">0</span>c e1 e0 <span class="number">18</span> <span class="number">1</span>c <span class="number">75</span></span><br><span class="line">Memory    :<span class="number">1870</span> MB</span><br><span class="line">cpu_num   :<span class="number">2</span></span><br><span class="line">ip        :<span class="number">192.168</span><span class="number">.0</span><span class="number">.8</span></span><br><span class="line">vender    :VMware, Inc.</span><br><span class="line">hostname  :vip</span><br><span class="line">cpu_model :Intel(R) i7<span class="number">-4710</span>MQ <span class="number">2.50</span>GHz</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/李伟" class="author" itemprop="url" rel="index">李伟</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2016-12-19 21:22:20" itemprop="dateCreated datePublished" datetime="2016-12-19T21:22:20+08:00">2016-12-19</time>
                </span>
                <span id="/3801.html" class="post-meta-item leancloud_visitors" data-flag-title="使用Python收集获取Linux系统主机信息" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>3.1k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>3 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/3472.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> Python <i class="label-arrow"></i>
                  </a>
                  <a href="/3472.html" class="post-title-link" itemprop="url">小白进阶之Scrapy第一篇</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <img itemprop="contentUrl" class="random" alt="">
                </div>
                <div class="excerpt">
                  <p>这博文写得我懒癌犯了，最后的那个章节内容排序，我没有实验是否是正确的，不过这只是个教大家用Scrapy的教程，正确与否并不重要··· 如果不正确，记得留言；等我懒癌过了，我再改改······ 还有其它的问题也是一样··· ，把问题留言下； 等我懒癌过了·· 我改回来！嗯！是等我懒癌结束了，再改。 前面几篇博文，给大家从头到尾做了一个比较高效的爬虫，从这篇起来说说Python的爬虫框架Scrapy； 至于为什么要说框架呢？因为啊，框架可以帮我们处理一部分事情，比如下载模块不用我们自己写了，我们只需专注于提取数据就好了； 最重要的一点啊！框架使用了异步的模式;可以加快我们的下载速度，而不用自己去实现异步框架；毕竟实现异步爬虫是一件比较麻烦的事情。 不过啊！反爬虫这个坎还是要我们自己迈过去啊！这是后话，以后再说。我们先来让Scrapy能跑起来，并提取出我们需要的数据，再解决其它问题。 官方文档在这儿：点我 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/9555112.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/9555112.jpg" alt="9555112"></a> <strong>环境搭建：</strong> 关于这一点，对在Windows环境下使用的小伙伴来说，请务必使用我之前提到的 Anaconda 这个Python的发行版本，不然光环境的各种报错就能消磨掉你所有的学习兴趣！ <strong>下载地址在这儿：<a href="http://pan.baidu.com/s/1pLgySav" target="_blank" rel="noopener">http://pan.baidu.com/s/1pLgySav</a></strong> 安装完成之后，在cmd中执行：conda install Scrapy (如果需要使用特定版本，请在Scrapy后面加上 ==XXXX XXXX代表你需要的版本号) 下面是安装示意图： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/安装Scrapy.gif" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/安装Scrapy.gif" alt="安装Scrapy"></a> So Easy@@！环境搭建完成！是不是超简单？全程无痛啊！ 下面开始踏上新的征程！Go Go Go！！ 使用Scrapy第一步：创建项目；CMD进入你需要放置项目的目录 输入：</p>
                  <figure class="highlight mipsasm">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">scrapy </span>startproject XXXXX             XXXXX代表你项目的名字</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p><a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/创建项目.gif" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/创建项目.gif" alt="创建项目"></a> OK项目创建完成。现在可以开始我们的爬取之旅了！ 下面是目录中各个文件的作用 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/各个文件的作用.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/各个文件的作用.png" alt="各个文件的作用"></a> 好了，目录我们认识完了，在开始之前给大家一个小技巧，Scrapy默认是不能在IDE中调试的，我们在根目录中新建一个py文件叫：entrypoint.py；在里面写入以下内容：</p>
                  <figure class="highlight smali">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">from scrapy.cmdline import execute</span><br><span class="line">execute(['scrapy', 'crawl', 'dingdian'])</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p><strong>注意！第二行中代码中的前两个参数是不变的，第三个参数请使用自己的spider的名字。稍后我会讲到！！</strong> 现在整个目录看起来是这样： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/快捷启动.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/快捷启动.png" alt="快捷启动"></a> 基础工作准备完毕！我们来说说基本思路。 上面的准备工作完成之后，我们先不要着急开始工作，毕竟作为一个框架，还是很复杂的；贸然上手 开整，很容易陷入懵逼状态啊！一团浆糊，理不清思路，后面的事情做起来很很麻烦啦！ 我们来看看下面这张图： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/scrapy_architecture.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/scrapy_architecture.png" alt="scrapy_architecture"></a> 这就是整个Scrapy的架构图了； <strong>Scrapy Engine: 这是引擎，负责Spiders、ItemPipeline、Downloader、Scheduler中间的通讯，信号、数据传递等等！（像不像人的身体？）</strong> <strong>Scheduler(调度器): 它负责接受引擎发送过来的requests请求，并按照一定的方式进行整理排列，入队、并等待Scrapy Engine(引擎)来请求时，交给引擎。</strong> <strong>Downloader（下载器）：负责下载Scrapy Engine(引擎)发送的所有Requests请求，并将其获取到的Responses交还给Scrapy Engine(引擎)，由引擎交给Spiders来处理，</strong> <strong>Spiders：它负责处理所有Responses,从中分析提取数据，获取Item字段需要的数据，并将需要跟进的URL提交给引擎，再次进入Scheduler(调度器)，</strong> <strong>Item Pipeline：它负责处理Spiders中获取到的Item，并进行处理，比如去重，持久化存储（存数据库，写入文件，总之就是保存数据用的）</strong> <strong>Downloader Middlewares（下载中间件）：你可以当作是一个可以自定义扩展下载功能的组件</strong> <strong>Spider Middlewares（Spider中间件）：你可以理解为是一个可以自定扩展和操作引擎和Spiders中间‘通信‘的功能组件（比如进入Spiders的Responses;和从Spiders出去的Requests）</strong> <strong>数据在整个Scrapy的流向：</strong> 程序运行的时候， <strong>引擎：</strong>Hi！Spider, 你要处理哪一个网站？ <strong>Spiders：</strong>我要处理23wx.com <strong>引擎：</strong>你把第一个需要的处理的URL给我吧。 <strong>Spiders：</strong>给你第一个URL是XXXXXXX.com <strong>引擎：</strong>Hi！调度器，我这有request你帮我排序入队一下。 <strong>调度器：</strong>好的，正在处理你等一下。 <strong>引擎：</strong>Hi！调度器，把你处理好的request给我， <strong>调度器：</strong>给你，这是我处理好的request <strong>引擎：</strong>Hi！下载器，你按照下载中间件的设置帮我下载一下这个request <strong>下载器：</strong>好的！给你，这是下载好的东西。（如果失败：不好意思，这个request下载失败，然后<strong>引擎</strong>告诉<strong>调度器</strong>，这个request下载失败了，你记录一下，我们待会儿再下载。） 
<strong>引擎：</strong>Hi！Spiders，这是下载好的东西，并且已经按照Spider中间件处理过了，你处理一下（<strong>注意！这儿responses默认是交给def parse这个函数处理的</strong>） <strong>Spiders：（处理完毕数据之后对于需要跟进的URL）</strong>，Hi！<strong>引擎</strong>，这是我需要跟进的URL，将它的responses交给函数 def xxxx(self, responses)处理。还有这是我获取到的Item。 <strong>引擎</strong>：Hi ！<strong>Item Pipeline</strong> 我这儿有个item你帮我处理一下！<strong>调度器！</strong>这是我需要的URL你帮我处理下。然后从第四步开始循环，直到获取到你需要的信息， 注意！只有当调度器中不存在任何request了，整个程序才会停止，（也就是说，对于下载失败的ＵＲＬ，Scrapy会重新下载。） 以上就是Scrapy整个流程了。 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161022193315.gif" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161022193315.gif" alt="QQ图片20161022193315"></a> 大家将就着看看。 建立一个项目之后： <strong>第一件事情</strong>是在items.py文件中定义一些字段，这些字段用来临时存储你需要保存的数据。方便后面保存数据到其他地方，比如数据库 或者 本地文本之类的。 <strong>第二件事情</strong>在spiders文件夹中编写自己的爬虫 <strong>第三件事情</strong>在pipelines.py中存储自己的数据 <strong>还有一件事情</strong>，不是非做不可的，就settings.py文件 并不是一定要编辑的，只有有需要的时候才会编辑。 <strong>建议一点：在大家调试的时候建议大家在settings.py中取消下面几行的注释：</strong> <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/设置setting01.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/设置setting01.png" alt="设置setting01"></a> 这几行注释的作用是，Scrapy会缓存你有的Requests!当你再次请求时，如果存在缓存文档则返回缓存文档，而不是去网站请求，这样既加快了本地调试速度，也减轻了 网站的压力。一举多得 <strong>第一步定义字段：</strong> 好了，我们来做 第一步 定义一些字段；那具体我们要定义那些字段呢？ 这个根据自己需要的提取的内容来定义。 比如：我们爬取小说站点都需要提取些什么数据啊？ 小说名字、作者、小说地址、连载状态、连载字数、文章类别 就像下面这样： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy01.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy01.png" alt="Scrapy01"></a> 这样我们第一步就完成啦！是不是So Easy？ヾ(<em>´▽‘</em>)ﾉ ； 下面开始重点了哦！编写spider（就是我们用来提取数据的爬虫了） <strong>第二步编写Spider：</strong> 在spiders文件中新建一个dingdian.py文件 并导入我们需用的模块 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy02.png" target="_blank" rel="noopener"><img 
src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy02.png" alt="Scrapy02"></a> <strong>PS：Scrapy中Response可以直接使用Xpath来解析数据；不过大家也可以使用自己习惯的包，比如我导入的BS4 、re ；当然也可以使其他比如pyquery之类的。这个并没有什么限制</strong> <strong>另外或许个别小伙伴会遇到 from dingdian.items import DingdianItem这个导入失败的情况；可以试试把项目文件移动到根目录。</strong> <strong>Request这个模块可以用来重写单独请求一个URL，用于我们后面跟进URL。</strong> 好了开整；首先我们需要什么？ 我们需要从一个地址入手开始爬取，我在顶点小说上没有发现有全站小说地址，但是我找到每个分类地址全部小说： 玄幻魔幻：<a href="http://www.23wx.com/class/1_1.html" target="_blank" rel="noopener">http://www.23wx.com/class/1_1.html</a> 武侠修真：<a href="http://www.23wx.com/class/2_1.html" target="_blank" rel="noopener">http://www.23wx.com/class/2_1.html</a> 都市言情：<a href="http://www.23wx.com/class/3_1.html" target="_blank" rel="noopener">http://www.23wx.com/class/3_1.html</a> 历史军事：<a href="http://www.23wx.com/class/4_1.html" target="_blank" rel="noopener">http://www.23wx.com/class/4_1.html</a> 侦探推理：<a href="http://www.23wx.com/class/5_1.html" target="_blank" rel="noopener">http://www.23wx.com/class/5_1.html</a> 网游动漫：<a href="http://www.23wx.com/class/6_1.html" target="_blank" rel="noopener">http://www.23wx.com/class/6_1.html</a> 科幻小说：<a href="http://www.23wx.com/class/7_1.html" target="_blank" rel="noopener">http://www.23wx.com/class/7_1.html</a> 恐怖灵异：<a href="http://www.23wx.com/class/8_1.html" target="_blank" rel="noopener">http://www.23wx.com/class/8_1.html</a> 散文诗词：<a href="http://www.23wx.com/class/9_1.html" target="_blank" rel="noopener">http://www.23wx.com/class/9_1.html</a> 其他：<a href="http://www.23wx.com/class/10_1.html" target="_blank" rel="noopener">http://www.23wx.com/class/10_1.html</a> 全本：<a href="http://www.23wx.com/quanben/1" target="_blank" rel="noopener">http://www.23wx.com/quanben/1</a> 好啦！入口地址我们找到了，现在开始写第一部分代码： 当然对于上面的地址，我们是可以直接全使用Start_urls这种列表全部请求，不过并不太美观，我需要把其中，有规律的部分，单独其他方式实现，比如字典之类的： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy22.png" target="_blank" rel="noopener"><img 
src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy22.png" alt="Scrapy22"></a> 第十行：首先我们创建一个类 Myspider；这个类继承自scrapy.Spider（当然还有一些其他父类，继承各个父类后能实现的功能不一样）； 第十二行：我们定义name：dingdian （请注意，这name就是我们在entrypoint.py文件中的第三个参数！）！！！！<strong>请务必注意：此Name的！名字！在整个项目中有且只能有一个、名字不可重复！！！！</strong> 第十一行：我们定义了一个allowed_domains；这个不是必须的；但是在某写情况下需要用得到，比如使用爬取规则的时候就需要了；它的作用是只会跟进存在于allowed_domains中的URL。不存在的URL会被忽略。 第十七行到第十九行：我们使用字符串拼接的方式实现了我们上面发现的全部URL。 第二十行和二十一行：我们使用了导入的Request包，来跟进我们的URL（并将返回的response作为参数传递给self.parse, 嗯！这个叫<strong>回调函数</strong>！） 第二十三行：使用parse函数接受上面request获取到的response。（请务必注意：不要轻易改写parse函数（意思就是不要把parse函数用作它用）；因为这样request的回调函数被你用了，就没谁接受request返回的response啦！如果你非要用作它用，则需要自己给request一个回调函数哦！）</p>
                  <figure class="highlight python">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">import</span> re</span><br><span class="line"><span class="keyword">import</span> scrapy <span class="comment">#导入scrapy包</span></span><br><span class="line"><span class="keyword">from</span> bs4 <span class="keyword">import</span> BeautifulSoup</span><br><span class="line"><span class="keyword">from</span> scrapy.http <span class="keyword">import</span> Request <span class="comment">##一个单独的request的模块，需要跟进URL的时候，需要用它</span></span><br><span class="line"><span class="keyword">from</span> dingdian.items <span class="keyword">import</span> DingdianItem <span class="comment">##这是我定义的需要保存的字段，（导入dingdian项目中，items文件中的DingdianItem类）</span></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">Myspider</span><span class="params">(scrapy.Spider)</span>:</span></span><br><span class="line"></span><br><span class="line">    name = <span class="string">'dingdian'</span></span><br><span class="line">    allowed_domains = [<span class="string">'23wx.com'</span>]</span><br><span class="line">    bash_url = <span class="string">'http://www.23wx.com/class/'</span></span><br><span class="line">    bashurl = <span class="string">'.html'</span></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">start_requests</span><span class="params">(self)</span>:</span></span><br><span class="line">        <span class="keyword">for</span> i <span class="keyword">in</span> range(<span class="number">1</span>, <span class="number">11</span>):</span><br><span class="line">            url = self.bash_url + str(i) + <span class="string">'_1'</span> + self.bashurl</span><br><span class="line">            <span class="keyword">yield</span> Request(url, self.parse)</span><br><span class="line">        <span class="keyword">yield</span> Request(<span 
class="string">'http://www.23wx.com/quanben/1'</span>, self.parse)</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">parse</span><span class="params">(self, response)</span>:</span></span><br><span class="line">        print(response.text)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>我们测试一下是否正常工作：在IDE中运行我们之前创建的entrypoint.py文件（如果没有这个文件是不能在IDE中运行的哦！ヽ(=^･ω･^=)丿） 然后会像这样： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/Spider编写03.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/Spider编写03.png" alt="Spider编写03"></a> 你会发现在红色状态报告之后，所有页面几乎是一瞬间出现的；那是因为Scrapy使用了异步啦！ヽ(°◇° )ノ 另外因为Scrapy遵循了robots规则，如果你想要获取的页面在robots中被禁止了，Scrapy是会忽略掉的哦！！ヾ(。￣□￣)ﾂ゜゜゜ 请求就这么轻而易举的实现了啊！简直So Easy！ 继续 继续！ 我们需要历遍所有页面才能取得所有的小说页面连接： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/分析网页2.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/分析网页2.png" alt="分析网页2"></a> <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/分析网页01.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/分析网页01.png" alt="分析网页01"></a> 每个页面的这个位置都是最后一个页面，我们提取出它，历遍就可以拼接出一个完整的URL了ヾ§ ￣▽)ゞ2333333 Go Go <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy20.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy20.png" alt="Scrapy20"></a> 第二十三行：def parse(self, response)这个函数接受来在二十一行返回的response，并处理。 第二十四行：我们使用BS4从response中获取到了最大页码。 第二十五行至二十七行：我们照例拼接了一个完整的URL（response.url:就是这个response的URL地址） 第二十八行：功能和第二十行一样，callback= 是指定回调函数，不过不写callback=也没有什么影响！ 注意我只是说的callback=这个几个；不是后面的self.get_name. 
看清楚了response是怎么用的没？ヾ§ ￣▽)ゞ2333333是不是So Easy？ 如果不清楚那个拼接URL的小伙伴可以打印出来，看看哦··· 再去观察一下网页，就很明白啦 上面两个函数就彻底的把整个网站的所有小说的页面URL的提取出来了，并将每个页面的response交给了get_name函数处理哦！ 现在我们的爬虫就开始处理具体的小说了哦： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy07.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy07.png" alt="Scrapy07"></a> 瞅见没 我们需要的东西，快用F12工具看一下吧，在什么位置有什么标签，可以方便我们提取数据。还不知道怎么看的小伙伴，去看看妹子图那个教程，有教哦；实在不行百度一下也行！ 过程忽略了，直接上代码（主要是懒癌来了）： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy09.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy09.png" alt="Scrapy09"></a> 前面三行不说了， 第三十七和三十八行：是我们的小说名字和URL 第三十九行和第四十行；大伙儿可能会发现，多了个一个meta这么一个字典，这是Scrapy中传递额外数据的方法。因我们还有一些其他内容需要在下一个页面中才能获取到。 下面我的爬虫进入了这个页面： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy10.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy10.png" alt="Scrapy10"></a> 这个页面就有很多我们需要的信息了：废话不说了代码上来： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy11.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy11.png" alt="Scrapy11"></a> 第四十行：将我们导入的item文件进行实例化，用来存储我们的数据。 后面全部：将需要的数据，复制给item[key] (注意这儿的Key就是我们前面在item文件中定义的那些字段。) 注意！response.meta[key]：这个是提取从上一个函数传递下来的值。 return item 就是返回我们的字典了，然后Pipelines就可以开始对这些数据进行处理了。比如 存储之类的。 好啦，Spiders我们先编写到这个地方。（是不是有小伙伴发现我还有几个字段没有取值？当然留着你们自己试试了，哈哈哈ヽ(=^･ω･^=)丿）后面再继续。 我现在教教大家怎么处理这些数据：对头就是说说Pipeline了： 对于基本的Pipeline存储方式，网上有很多教程了，今天我们做一个自定义的MySQL的Pipeline： 首先为了能好区分框架自带的Pipeline，我们把MySQL的Pipeline单独放到一个目录里面。 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy12.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy12.png" alt="Scrapy12"></a> 我们在项目中新建了一个mysqlpipelines的文件夹，我们所有的MySQL文件都放在这个目录。 <strong>init</strong>.py 这个文件不需要我说了吧，不知道做啥的小哥儿自己百度。 
pipelines.py 这个是我们写存放数据的文件 sql.py 看名字就知道，需要的sql语句。 首先是需要的MySQL表，（MySQL都没有的小哥儿 自己百度装一个啊，我就不教了）</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">DROP TABLE <span class="keyword">IF</span> EXISTS `dd_name`;</span><br><span class="line">CREATE TABLE `dd_name` (</span><br><span class="line">  `id` int(11) <span class="keyword">NOT</span> <span class="literal">NULL</span> AUTO_INCREMENT,</span><br><span class="line">  `xs_name` varchar(255)<span class="built_in"> DEFAULT </span><span class="literal">NULL</span>,</span><br><span class="line">  `xs_author` varchar(255)<span class="built_in"> DEFAULT </span><span class="literal">NULL</span>,</span><br><span class="line">  `category` varchar(255)<span class="built_in"> DEFAULT </span><span class="literal">NULL</span>,</span><br><span class="line">  `name_id` varchar(255)<span class="built_in"> DEFAULT </span><span class="literal">NULL</span>,</span><br><span class="line">  PRIMARY KEY (`id`)</span><br><span class="line">) <span class="attribute">ENGINE</span>=InnoDB <span class="attribute">AUTO_INCREMENT</span>=38<span class="built_in"> DEFAULT </span><span class="attribute">CHARSET</span>=utf8mb4;</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>首先我们再settings.py文件中定义好MySQL的配置文件（当然你也可以直接定义在sql.py文件中）： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/MySQL-setting.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/MySQL-setting.png" alt="MySQL setting"></a> <strong>PS :注意MySQL的默认端口是 3306；我自己的MySQL改成了3389。这儿各位酌情自己更改。</strong> 在开始写sql.py之前，我们需要安装一个Python操作MySQL的包，来自MySQL官方的一个包：<a href="http://cdn.mysql.com//Downloads/Connector-Python/mysql-connector-python-2.1.4.zip" target="_blank" rel="noopener">点我下载</a> 下载完成后解压出来，从CMD进入该目录的绝对路径，然后 Python setup.py install ；即可完成安装 下面是我们的sql.py文件： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/sql01.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/sql01.png" alt="sql01"></a> 第一行至第二行分别导入了：我的MySQL操作包，settings配置文件 第四行至第八行 ： 从settings配置文件中获取到了，我们的MySQL配置文件 第十行至第十一行： 初始化了一个MySQL的操作游标 第十三行： 定义了一个Sql的类 第十六行至第二十五行：定义了一个函数，将函数中的四个变量写入数据库（这四个变量就是我们后面传进来的需要存储的数据。） <strong>关于@classmethod这个是一个修饰符；作用是我们不需要初始化类就可以直接调用类中的函数使用（具体说起来麻烦，知道作用就好啦）</strong> 好了第一部分写完了，我们还需要一个能够去重的： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/sql01-1.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/sql01-1.png" alt="sql01"></a> 这一段代码会查找name_id这个字段，如果存在则会返回 1 不存在则会返回0 Nice！sqi.py这一部分我们完成，来开始写pipeline吧： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/pipeline02.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/pipeline02.png" alt="pipeline02"></a> 第一行至第二行：我们导入了之前编写的sql.py中的Sql类，和我们建立的item 第六行：建立了一个DingdianPipeline的类（别忘了一定要继承object这个类啊，这是做啥的不用多了解，说多了你们头晕，我也懒） 第八行：我们定义了一个process_item函数并有，item和spider这两个参数（请注意啊！这两玩意儿 务必！！！要加上！！千万不能少！！！！务必！！！务必！！！） 第十行：你这样理解如果在 item中存在DingdianItem；就执行下面的。 第十一行：从item中取出 name_id的值。 第十二行：调用Sql中的select_name函数获得返回值 第十三行：判断ret是否等于1 ，是的话证明已经存了 第二十行：调用Sql中的 insert_dd_name函数，存储上面几个值。 
搞完！下面我们启用这个Pipeline在settings中作如下设置： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/setting02.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/setting02.png" alt="setting02"></a> <strong>PS: dingdian（项目目录）.mysqlpipelines（自己建立的MySQL目录）.pipelines（自己建立的pipelines文件）.DingdianPipeline（其中定义的类） 后面的 1 是优先级程度（1-1000随意设置，数值越低，组件的优先级越高）</strong> 好！我们来运行一下试试！！Go Go Go！ <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/scrapy15.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/scrapy15.png" alt="scrapy15"></a> Nice!!完美！！我之前运行过了 所以提示已经存在。 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/scrapy17.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/scrapy17.png" alt="scrapy17"></a> 下面我们开始还剩下的一些内容获取：小说章节 和章节内容 首先我们在item中新定义一些需要获取内容的字段： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/scrapy16.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/scrapy16.png" alt="scrapy16"></a> 代码不解释了哦！（懒癌来了，写不下去了） 继续编写Spider文件： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/scrapy18.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/scrapy18.png" alt="scrapy18"></a> 请注意我图中画红框的的地方，这个地方返回item是不能用return的哦！用了就结束了，程序就不会继续下去了，得用yield（你知道就行，这玩意儿说起来麻烦。） 第五十八行： num这个变量的作用是 因为Scrapy是异步的方式运作，你采集到的章节顺序都是混乱的，需要给它有序的序列，我们按照这个排序就能得到正确的章节顺序啦 请注意在顶部导入定义的第二个item类！ 下面我们来写存储这部分spider的Pipeline： 数据表：</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">DROP TABLE <span class="keyword">IF</span> EXISTS `dd_chaptername`;</span><br><span class="line">CREATE TABLE `dd_chaptername` (</span><br><span class="line">  `id` int(11) <span class="keyword">NOT</span> <span class="literal">NULL</span> AUTO_INCREMENT,</span><br><span class="line">  `xs_chaptername` varchar(255)<span class="built_in"> DEFAULT </span><span class="literal">NULL</span>,</span><br><span class="line">  `xs_content` text,</span><br><span class="line">  `id_name` int(11)<span class="built_in"> DEFAULT </span><span class="literal">NULL</span>,</span><br><span class="line">  `num_id` int(11)<span class="built_in"> DEFAULT </span><span class="literal">NULL</span>,</span><br><span class="line">  `url` varchar(255)<span class="built_in"> DEFAULT </span><span class="literal">NULL</span>,</span><br><span class="line">  PRIMARY KEY (`id`)</span><br><span class="line">) <span class="attribute">ENGINE</span>=InnoDB <span class="attribute">AUTO_INCREMENT</span>=2726<span class="built_in"> DEFAULT </span><span class="attribute">CHARSET</span>=gb18030;</span><br><span class="line"><span class="builtin-name">SET</span> <span class="attribute">FOREIGN_KEY_CHECKS</span>=1;</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p> Sql.py: <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy13.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy13.png" alt="Scrapy13"></a> <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy14.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy14.png" alt="Scrapy14"></a> 不解释了哦！ 下面是Pipeline： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/scrapy21.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/scrapy21.png" alt="scrapy21"></a> 有小伙伴注意，这儿比上面一个Pipeline少一个判断，因为我把判断移动到Spider中去了，这样就可以减少一次Request，减轻服务器压力。 改变后的Spider长这样： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy16.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/12/Scrapy16.png" alt="Scrapy16"></a> 别忘了在spider中导入Sql哦！ヾ(。￣□￣)ﾂ゜゜゜ 到此收工！！！！ 至于小说图片，因为Scrapy的图片下载管道，是自动以md5命名，而且感觉不爽··· 后面单独写一个异步下载的脚本··· <a href="https://github.com/thsheep/dingdian" target="_blank" rel="noopener">https://github.com/thsheep/dingdian</a></p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/哎哟卧槽" class="author" itemprop="url" rel="index">哎哟卧槽</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2016-12-07 16:44:48" itemprop="dateCreated datePublished" datetime="2016-12-07T16:44:48+08:00">2016-12-07</time>
                </span>
                <span id="/3472.html" class="post-meta-item leancloud_visitors" data-flag-title="小白进阶之Scrapy第一篇" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>8.7k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>8 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/3494.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> PHP <i class="label-arrow"></i>
                  </a>
                  <a href="/3494.html" class="post-title-link" itemprop="url">Composer进阶使用之常用命令和版本约束</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <img itemprop="contentUrl" class="random">
                </div>
                <div class="excerpt">
                  <p>这篇文章主要介绍一些常用的包管理命令以及包的版本如何进行约束。</p>
                  <h2 id="常用命令"><a href="#常用命令" class="headerlink" title="常用命令"></a>常用命令</h2>
                  <h3 id="require命令"><a href="#require命令" class="headerlink" title="require命令"></a>require命令</h3>
                  <p>在《Composer快速入门》中已经简单介绍过使用<code>install</code>命令安装依赖的方式。除了<code>install</code>命令，我们还可以使用<code>require</code>命令快速的安装一个依赖而不需要手动在<code>composer.json</code>里添加依赖信息：</p>
                  <figure class="highlight sql">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">$ composer require monolog/monolog</span><br><span class="line">Using version ^1.19 for monolog/monolog</span><br><span class="line">./composer.json has been updated</span><br><span class="line">Loading composer repositories <span class="keyword">with</span> <span class="keyword">package</span> information</span><br><span class="line">Updating dependencies (<span class="keyword">including</span> require-dev)</span><br><span class="line">  - Installing psr/<span class="keyword">log</span> (<span class="number">1.0</span><span class="number">.0</span>)</span><br><span class="line">    Downloading: <span class="number">100</span>%         </span><br><span class="line"></span><br><span class="line">  - Installing monolog/monolog (<span class="number">1.19</span><span class="number">.0</span>)</span><br><span class="line">    Downloading: <span class="number">100</span>%         </span><br><span class="line"></span><br><span class="line">monolog/monolog suggests installing graylog2/gelf-php (<span class="keyword">Allow</span> sending <span class="keyword">log</span> messages <span class="keyword">to</span> a GrayLog2 <span class="keyword">server</span>)</span><br><span class="line">......</span><br><span class="line">monolog/monolog suggests installing php-console/php-console (<span class="keyword">Allow</span> sending <span class="keyword">log</span> messages <span class="keyword">to</span> Google Chrome)</span><br><span class="line">Writing <span class="keyword">lock</span> <span class="keyword">file</span></span><br><span class="line">Generating autoload files</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>Composer会先找到合适的版本，然后更新<code>composer.json</code>文件，在<code>require</code>那添加<code>monolog/monolog</code>包的相关信息，再把相关的依赖下载下来进行安装，最后更新<code>composer.lock</code>文件并生成php的自动加载文件。</p>
                  <h3 id="update命令"><a href="#update命令" class="headerlink" title="update命令"></a>update命令</h3>
                  <p>通过<code>update</code>命令，可以更新项目里所有的包，或者指定的某些包。</p>
                  <figure class="highlight elixir">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="comment"># 更新所有依赖</span></span><br><span class="line"><span class="variable">$ </span>composer update</span><br><span class="line"></span><br><span class="line"><span class="comment"># 更新指定的包</span></span><br><span class="line"><span class="variable">$ </span>composer update monolog/monolog</span><br><span class="line"></span><br><span class="line"><span class="comment"># 更新指定的多个包</span></span><br><span class="line"><span class="variable">$ </span>composer update monolog/monolog symfony/dependency-injection</span><br><span class="line"></span><br><span class="line"><span class="comment"># 还可以通过通配符匹配包</span></span><br><span class="line"><span class="variable">$ </span>composer update monolog/monolog symfony/*</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>需要注意的是，包能升级的版本会受到版本约束的约束，包不会升级到超出约束的版本的范围。例如如果<code>composer.json</code>里包的版本约束为<code>^1.10</code>，而最新版本为2.0。那么<code>update</code>命令是不能把包升级到2.0版本的，只能最高升级到1.x版本。关于版本约束请看后面的介绍。</p>
                  <h3 id="remove命令"><a href="#remove命令" class="headerlink" title="remove命令"></a>remove命令</h3>
                  <p>使用remove命令可以移除一个包及其依赖（在依赖没有被其他包使用的情况下）：</p>
                  <figure class="highlight sql">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">$ composer remove monolog/monolog</span><br><span class="line">Loading composer repositories <span class="keyword">with</span> <span class="keyword">package</span> information</span><br><span class="line">Updating dependencies (<span class="keyword">including</span> require-dev)</span><br><span class="line">  - Removing monolog/monolog (<span class="number">1.19</span><span class="number">.0</span>)</span><br><span class="line">  - Removing psr/<span class="keyword">log</span> (<span class="number">1.0</span><span class="number">.0</span>)</span><br><span class="line">Writing <span class="keyword">lock</span> <span class="keyword">file</span></span><br><span class="line">Generating autoload files</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <h3 id="search命令"><a href="#search命令" class="headerlink" title="search命令"></a>search命令</h3>
                  <p>使用<code>search</code>命令可以进行包的搜索：</p>
                  <figure class="highlight vim">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">$ composer <span class="built_in">search</span> monolog</span><br><span class="line">monolog/monolog Sends your logs <span class="keyword">to</span> <span class="keyword">files</span>, sockets, inboxes, databases <span class="built_in">and</span> various web services</span><br><span class="line"></span><br><span class="line"># 如果只是想匹配名称可以使用--<span class="keyword">only</span>-name选项</span><br><span class="line">$ composer <span class="built_in">search</span> --<span class="keyword">only</span>-name monolog</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <h3 id="show命令"><a href="#show命令" class="headerlink" title="show命令"></a>show命令</h3>
                  <p>使用<code>show</code>命令可以列出项目目前所安装的包的信息：</p>
                  <figure class="highlight elixir">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="comment"># 列出所有已经安装的包</span></span><br><span class="line"><span class="variable">$ </span>composer show</span><br><span class="line"></span><br><span class="line"><span class="comment"># 可以通过通配符进行筛选</span></span><br><span class="line"><span class="variable">$ </span>composer show monolog/*</span><br><span class="line"></span><br><span class="line"><span class="comment"># 显示具体某个包的信息</span></span><br><span class="line"><span class="variable">$ </span>composer show monolog/monolog</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>以上是常用命令的介绍。</p>
                  <h2 id="版本约束"><a href="#版本约束" class="headerlink" title="版本约束"></a>版本约束</h2>
                  <p>前面说到，我们可以指定要下载的包的版本。例如我们想要下载版本1.19的monolog。我们可以通过<code>composer.json</code>文件：</p>
                  <figure class="highlight json">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">&#123;</span><br><span class="line">    <span class="attr">"require"</span>: &#123;</span><br><span class="line">        <span class="attr">"monolog/monolog"</span>: <span class="string">"1.19"</span></span><br><span class="line">    &#125;</span><br><span class="line">&#125;</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>然后运行<code>install</code>命令，或者通过<code>require</code>命令达到目的：</p>
                  <figure class="highlight elixir">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="variable">$ </span>composer <span class="keyword">require</span> monolog/<span class="symbol">monolog:</span><span class="number">1.19</span></span><br><span class="line"></span><br><span class="line"><span class="comment"># 或者</span></span><br><span class="line"><span class="variable">$ </span>composer <span class="keyword">require</span> monolog/monolog=<span class="number">1.19</span></span><br><span class="line"></span><br><span class="line"><span class="comment"># 或者</span></span><br><span class="line"><span class="variable">$composer</span> <span class="keyword">require</span> monolog/monolog <span class="number">1.19</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>除了像上面那样指定具体的版本，我们还可以通过不同的约束方式去指定版本。</p>
                  <h3 id="基本约束"><a href="#基本约束" class="headerlink" title="基本约束"></a>基本约束</h3>
                  <h4 id="精确版本"><a href="#精确版本" class="headerlink" title="精确版本"></a>精确版本</h4>
                  <p>可以指定具体的版本，告诉Composer只能安装这个版本。但是如果其他的依赖需要用到其他的版本，则包的安装或者更新最后会失败并终止。 例子：<code>1.0.2</code></p>
                  <h4 id="范围"><a href="#范围" class="headerlink" title="范围"></a>范围</h4>
                  <p>使用比较操作符你可以指定包的范围。这些操作符包括：<code>&gt;</code>，<code>&gt;=</code>，<code>&lt;</code>，<code>&lt;=</code>，<code>!=</code>。 你可以定义多个范围，使用空格 或者逗号<code>,</code>表示逻辑上的与，使用双竖线<code>||</code>表示逻辑上的或。其中与的优先级会大于或。</p>
                  <blockquote>
                    <p>需要注意的是，使用没有边界的范围有可能会导致安装不可预知的版本，并破坏向下的兼容性。建议使用折音号操作符。</p>
                  </blockquote>
                  <p>例子：</p>
                  <ul>
                    <li><code>&gt;=1.0</code></li>
                    <li><code>&gt;=1.0 &lt;2.0</code></li>
                    <li><code>&gt;=1.0 &lt;1.1 || &gt;=1.2</code></li>
                  </ul>
                  <h4 id="范围（使用连字符）"><a href="#范围（使用连字符）" class="headerlink" title="范围（使用连字符）"></a>范围（使用连字符）</h4>
                  <p>带连字符的范围表明了包含的版本范围，意味着肯定是有边界的。其中连字符的左边表明了<code>&gt;=</code>的版本，而连字符的右边情况则稍微有点复杂。如果右边的版本不是完整的版本号，则会被使用通配符进行补全。例如<code>1.0 - 2.0</code>等同于<code>&gt;=1.0.0 &lt;2.1</code>（<code>2.0</code>相当于<code>2.0.*</code>），而<code>1.0.0 - 2.1.0</code>则等同于<code>&gt;=1.0.0 &lt;=2.1.0</code>。 例子：<code>1.0 - 2.0</code></p>
                  <h4 id="通配符"><a href="#通配符" class="headerlink" title="通配符"></a>通配符</h4>
                  <p>可以使用通配符去定义版本。<code>1.0.*</code>相当于<code>&gt;=1.0 &lt;1.1</code>。 例子：<code>1.0.*</code></p>
                  <h3 id="下一个重要版本操作符"><a href="#下一个重要版本操作符" class="headerlink" title="下一个重要版本操作符"></a>下一个重要版本操作符</h3>
                  <h4 id="波浪号"><a href="#波浪号" class="headerlink" title="波浪号~"></a>波浪号<code>~</code></h4>
                  <p>我们先通过后面这个例子去解释<code>~</code>操作符的用法：<code>~1.2</code>相当于<code>&gt;=1.2 &lt;2.0.0</code>，而<code>~1.2.3</code>相当于<code>&gt;=1.2.3 &lt;1.3.0</code>。对于使用<a href="http://semver.org/" target="_blank" rel="noopener">Semantic Versioning</a>作为版本号标准的项目来说，这种版本约束方式很实用。例如<code>~1.2</code>定义了最小的小版本号，然后你可以升级2.0以下的任何版本而不会出问题，因为按照<a href="http://semver.org/" target="_blank" rel="noopener">Semantic Versioning</a>的版本定义，小版本的升级不应该有兼容性的问题。简单来说，<code>~</code>定义了最小的版本，并且允许版本的最后一位版本号进行升级（没懂的话，请再看一遍前面的例子）。 例子：<code>~1.2</code></p>
                  <blockquote>
                    <p>需要注意的是，如果<code>~</code>作用在主版本号上，例如<code>~1</code>，按照上面的说法，Composer可以安装版本1以后的主版本，但是事实上是<code>~1</code>会被当作<code>~1.0</code>对待，只能增加小版本，不能增加主版本。</p>
                  </blockquote>
                  <h4 id="折音号"><a href="#折音号" class="headerlink" title="折音号^"></a>折音号<code>^</code></h4>
                  <p><code>^</code>操作符的行为跟<a href="http://semver.org/" target="_blank" rel="noopener">Semantic Versioning</a>有比较大的关联，它允许升级版本到安全的版本。例如，<code>^1.2.3</code>相当于<code>&gt;=1.2.3 &lt;2.0.0</code>，因为在2.0版本前的版本应该都没有兼容性的问题。而对于1.0之前的版本，这种约束方式也考虑到了安全问题，例如<code>^0.3</code>会被当作<code>&gt;=0.3.0 &lt;0.4.0</code>对待。 例子：<code>^1.2.3</code></p>
                  <h3 id="版本稳定性"><a href="#版本稳定性" class="headerlink" title="版本稳定性"></a>版本稳定性</h3>
                  <p>如果你没有显式地指定版本的稳定性，Composer会根据使用的操作符，默认在内部指定为<code>-dev</code>或者<code>-stable</code>。例如：</p>
                  <table>
                    <thead>
                      <tr><th>约束</th><th>内部约束</th></tr>
                    </thead>
                    <tbody>
                      <tr><td><code>1.2.3</code></td><td><code>=1.2.3.0-stable</code></td></tr>
                      <tr><td><code>&gt;1.2</code></td><td><code>&gt;1.2.0.0-stable</code></td></tr>
                      <tr><td><code>&gt;=1.2</code></td><td><code>&gt;=1.2.0.0-dev</code></td></tr>
                      <tr><td><code>&gt;=1.2-stable</code></td><td><code>&gt;=1.2.0.0-stable</code></td></tr>
                      <tr><td><code>&lt;1.3</code></td><td><code>&lt;1.3.0.0-dev</code></td></tr>
                      <tr><td><code>&lt;=1.3</code></td><td><code>&lt;=1.3.0.0-stable</code></td></tr>
                      <tr><td><code>1 - 2</code></td><td><code>&gt;=1.0.0.0-dev &lt;3.0.0.0-dev</code></td></tr>
                      <tr><td><code>~1.3</code></td><td><code>&gt;=1.3.0.0-dev &lt;2.0.0.0-dev</code></td></tr>
                      <tr><td><code>1.4.*</code></td><td><code>&gt;=1.4.0.0-dev &lt;1.5.0.0-dev</code></td></tr>
                    </tbody>
                  </table>
                  <p>如果你想指定只使用稳定版本，你可以在版本后面添加后缀<code>-stable</code>。 <code>minimum-stability</code> 配置项定义了包在选择版本时对稳定性的选择的默认行为。默认是<code>stable</code>。它的值如下（按照稳定性排序）：<code>dev</code>，<code>alpha</code>，<code>beta</code>，<code>RC</code>和<code>stable</code>。除了修改这个配置去修改这个默认行为，我们还可以通过<a href="https://getcomposer.org/doc/04-schema.md#package-links" target="_blank" rel="noopener">稳定性标识</a>（例如<code>@stable</code>和<code>@dev</code>）来安装一个相比于默认配置不同稳定性的版本。例如：</p>
                  <figure class="highlight json">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">&#123;</span><br><span class="line">    <span class="attr">"require"</span>: &#123;</span><br><span class="line">        <span class="attr">"monolog/monolog"</span>: <span class="string">"1.0.*@beta"</span>,</span><br><span class="line">        <span class="attr">"acme/foo"</span>: <span class="string">"@dev"</span></span><br><span class="line">    &#125;</span><br><span class="line">&#125;</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>以上是版本约束的介绍</p>
                  <h2 id="参考"><a href="#参考" class="headerlink" title="参考"></a>参考</h2>
                  <ul>
                    <li><a href="https://segmentfault.com/a/1190000005898222" target="_blank" rel="noopener">https://segmentfault.com/a/1190000005898222</a></li>
                    <li><a href="https://getcomposer.org/doc/03-cli.md%5B2%5D" target="_blank" rel="noopener">https://getcomposer.org/doc/03-cli.md[2]</a></li>
                    <li><a href="https://getcomposer.org/doc/articles/versions.md%5B3%5D" target="_blank" rel="noopener">https://getcomposer.org/doc/articles/versions.md[3]</a></li>
                    <li><a href="http://semver.org/%5B1%5D" target="_blank" rel="noopener">http://semver.org/[1]</a></li>
                  </ul>
                  </p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/崔庆才" class="author" itemprop="url" rel="index">崔庆才</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2016-11-26 18:00:10" itemprop="dateCreated datePublished" datetime="2016-11-26T18:00:10+08:00">2016-11-26</time>
                </span>
                <span id="/3494.html" class="post-meta-item leancloud_visitors" data-flag-title="Composer进阶使用之常用命令和版本约束" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>4k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>4 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/3443.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> Python <i class="label-arrow"></i>
                  </a>
                  <a href="/3443.html" class="post-title-link" itemprop="url">Python爬虫进阶七之设置ADSL拨号服务器代理</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <img itemprop="contentUrl" class="random">
                </div>
                <div class="excerpt">
                  <p>
                  <h2 id="提示"><a href="#提示" class="headerlink" title="提示"></a>提示</h2>
                  <p>本教程方法已不是最优，最新解决方案请移步 <a href="http://cuiqingcai.com/4596.html">http://cuiqingcai.com/4596.html</a></p>
                  <h2 id="那夜"><a href="#那夜" class="headerlink" title="那夜"></a>那夜</h2>
                  <p>那是一个寂静的深夜，科比还没起床练球，虽然他真的可能不练了。 我废了好大劲，爬虫终于写好了！BUG也全部调通了！心想，终于可以坐享其成了！ 泡杯茶，安静地坐在椅子上看着屏幕上一行行文字在控制台跳出，一条条数据嗖嗖进入我的数据库，一张张图片悄悄存入我的硬盘。人生没有几个比这更惬意的事情了。 我端起茶杯，抿了一口，静静地回味着茶香。 这时，什么情况！屏幕爆红了！爆红了！一口茶的功夫啊喂！ 怎么回事！咋爬不动了，不动了！我用浏览器点开那一个个报错的链接，浏览器显示</p>
                  <blockquote>
                    <p>您的请求过于频繁，IP已经被暂时封禁，请稍后再试！</p>
                  </blockquote>
                  <p>沃日，我IP被封了？此时此刻，空气凝固了，茶也不再香了，请给我一个爱的抱抱啊。 时候不早了，还是洗洗睡吧。</p>
                  <h2 id="次日"><a href="#次日" class="headerlink" title="次日"></a>次日</h2>
                  <p>那一晚，辗转反侧难以入睡。 怎么办？怎么办？如果是你你该怎么办？ 手动换个IP？得了吧，一会又要封了，还能不能安心睡觉啊？ 找免费代理？可行，不过我之前测过不少免费代理IP，一大半都不好用，而且慢。不过可以一直维护一个代理池，定时更新。 买代理？可以可以，不过优质的代理服务商价格可是不菲的，我买过一些廉价的，比如几块钱套餐一次提取几百IP的，算了还是不说了都是泪。 然而最行之有效的方法是什么？那当然是ADSL拨号！ 这是个啥？且听我慢慢道来。</p>
                  <h2 id="什么是ADSL"><a href="#什么是ADSL" class="headerlink" title="什么是ADSL"></a>什么是ADSL</h2>
                  <p>ADSL （Asymmetric Digital Subscriber Line ，非对称数字用户环路）是一种新的数据传输方式。它因为上行和下行带宽不对称，因此称为非对称数字用户线环路。它采用频分复用技术把普通的电话线分成了电话、上行和下行三个相对独立的信道，从而避免了相互之间的干扰。 他有个独有的特点，每拨一次号，就获取一个新的IP。也就是它的IP是不固定的，不过既然是拨号上网嘛，速度也是有保障的，用它搭建一个代理，那既能保证可用，又能自由控制拨号切换。 如果你是用的ADSL上网方式，那就不用过多设置了，直接自己电脑调用一个拨号命令就好了，自动换IP，分分钟解决封IP的事。 然而，你可能说？我家宽带啊，我连得公司无线啊，我蹭的网上的啊！那咋办？ 这时，你就需要一台VPS拨号主机。</p>
                  <h2 id="购买服务器"><a href="#购买服务器" class="headerlink" title="购买服务器"></a>购买服务器</h2>
                  <p>某度广告做的那么好是吧？一搜一片，这点谷歌可是远远比不上啊。 于是乎，我就搜了搜，键入：拨号服务器，有什么骑士互联啊、无极网络啊、挂机宝啊等等的。我选了个价钱还凑合的，选了个无极网络（这里不是在打广告），80一个月的配置，一天两块钱多点。 2核、512M内存，10M带宽。 <a href="http://www.yunlifang.cn/" target="_blank" rel="noopener">云立方</a> 大家觉得有更便宜的更好用请告诉我呀！ 接下来开始装操作系统，进入后台，有一个自助装系统的页面。 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/QQ20161121-0.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/QQ20161121-0-1024x399.png" alt="QQ20161121-0"></a> 我装的CentOS的，在后面设置代理啊，定时任务啊，远程SSH管理啊之类的比较方便。如果你想用Windows，能配置好代理那也没问题。 有的小伙伴可能会问了，既然它的IP是拨号变化的，你咋用SSH连？其实服务商提供了一个域名，做了动态解析和端口映射，映射到这台主机的22端口就好了，所以不用担心IP变化导致SSH断开的问题。 好了装好了服务器之后，服务商提供了一个ADSL的拨号操作过程，用pppoe命令都可以完成，如果你的是Linux的主机一般都是用这个。然后服务商还会给给你一个拨号账号和密码。 那么接下来就是试下拨号了。 服务商会提供详细的拨号流程说明。 比如无极的是这样的： <a href="http://cloud.871020.com/vpsadm/pppoe.html" target="_blank" rel="noopener">拨号流程</a> 设置好了之后，就有几个关键命令：</p>
                  <figure class="highlight sql">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">pppoe-<span class="keyword">start</span> 拨号</span><br><span class="line">pppoe-<span class="keyword">stop</span>  断开拨号</span><br><span class="line">pppoe-<span class="keyword">status</span> 拨号连接状态</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>如果想重新拨号，那就执行stop、start就可以了。 反复执行，然后查看下ip地址，你会发现拨号一次换一个IP，是不是爽翻了！ 好，那接下来就设置代理吧。</p>
                  <h2 id="设置代理服务器"><a href="#设置代理服务器" class="headerlink" title="设置代理服务器"></a>设置代理服务器</h2>
                  <p>之前总是用别人的代理，没自己设置过吧？那么接下来我们就来亲自搭建HTTP代理。 Linux下搭建HTTP代理，推荐Squid和TinyProxy。都非常好配置，你想用哪个都行，且听我慢慢道来。 我的系统是CentOS，以它为例进行说明。</p>
                  <h3 id="Squid"><a href="#Squid" class="headerlink" title="Squid"></a>Squid</h3>
                  <p>首先利用yum安装squid</p>
                  <figure class="highlight cmake">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">yum -y <span class="keyword">install</span> squid</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>设置开机启动</p>
                  <figure class="highlight nginx">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attribute">chkconfig</span> --level <span class="number">35</span> squid <span class="literal">on</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>修改配置文件</p>
                  <figure class="highlight vim">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">vi</span> /etc/squid/squid.<span class="keyword">conf</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>修改如下几个部分：</p>
                  <figure class="highlight yaml">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="string">http_access</span> <span class="string">allow</span> <span class="type">!Safe_ports</span>    <span class="comment">#deny改成allow</span></span><br><span class="line"><span class="string">http_access</span> <span class="string">allow</span> <span class="string">CONNECT</span> <span class="type">!SSL_ports</span>  <span class="comment">#deny改成allow</span></span><br><span class="line"><span class="string">http_access</span> <span class="string">allow</span> <span class="string">all</span>  <span class="comment">#deny改成allow</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>其他的不需要过多配置。 启动squid</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">sudo<span class="built_in"> service </span>squid start</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>如此一来配置就完成了。 代理使用的端口是3128</p>
                  <h3 id="TinyProxy"><a href="#TinyProxy" class="headerlink" title="TinyProxy"></a>TinyProxy</h3>
                  <p>首先添加一下镜像源，然后安装</p>
                  <figure class="highlight properties">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attr">rpm</span> <span class="string">-Uvh http://dl.fedoraproject.org/pub/epel/5/i386/epel-release-5-4.noarch.rpm</span></span><br><span class="line"><span class="attr">yum</span> <span class="string">update</span></span><br><span class="line"><span class="attr">yum</span> <span class="string">install tinyproxy</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>修改配置</p>
                  <figure class="highlight vim">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">vi</span> /etc/tinyproxy/tinyproxy.<span class="keyword">conf</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>可以修改端口和允许的IP，如果想任意主机都连接那就把Allow这一行注释掉。</p>
                  <figure class="highlight angelscript">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">Port <span class="number">8888</span> #预设是<span class="number">8888</span> Port,你可以更改</span><br><span class="line">Allow <span class="number">127.0</span><span class="number">.0</span><span class="number">.1</span> #将<span class="number">127.0</span><span class="number">.0</span><span class="number">.1</span>改成你自己的IP</span><br><span class="line">#例如你的IP 是<span class="number">1.2</span><span class="number">.3</span><span class="number">.4</span>,你改成Allow <span class="number">1.2</span><span class="number">.3</span><span class="number">.4</span>,那只有你才可以连上这个Proxy</span><br><span class="line">#若你想任何IP都可以连到Proxy在Allow前面打#注释</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>启动TinyProxy</p>
                  <figure class="highlight crmsh">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">service tinyproxy <span class="literal">start</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>好了，两个代理都配置好了。 你想用哪个都可以！ 不过你以为这样就完了吗？太天真了，我被困扰了好几天，怎么都连不上，我还在怀疑是不是我哪里设置得不对？各种搜，一直以为是哪里配置有遗漏，后来发现是iptables的锅，万恶的防火墙。踩过的坑，那就不要让大家踩了，用下面的命令设置下iptables，放行3128和8888端口就好了。</p>
                  <figure class="highlight properties">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attr">service</span> <span class="string">iptables save</span></span><br><span class="line"><span class="attr">systemctl</span> <span class="string">stop firewalld</span></span><br><span class="line"><span class="attr">systemctl</span> <span class="string">disable  firewalld</span></span><br><span class="line"><span class="attr">systemctl</span> <span class="string">start iptables</span></span><br><span class="line"><span class="attr">systemctl</span> <span class="string">status iptables</span></span><br><span class="line"><span class="attr">systemctl</span> <span class="string">enable iptables</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>修改iptables配置</p>
                  <figure class="highlight awk">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">vi <span class="regexp">/etc/</span>sysconfig<span class="regexp">/iptables</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>在</p>
                  <figure class="highlight brainfuck">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="literal">-</span><span class="comment">A</span> <span class="comment">IN_public_allow</span> <span class="literal">-</span><span class="comment">p</span> <span class="comment">tcp</span> <span class="literal">-</span><span class="comment">m</span> <span class="comment">tcp</span> --<span class="comment">dport</span> <span class="comment">22</span> <span class="literal">-</span><span class="comment">m</span> <span class="comment">conntrack</span> --<span class="comment">ctstate</span> <span class="comment">NEW</span> <span class="literal">-</span><span class="comment">j</span> <span class="comment">ACCEPT</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>的下面添加两条规则</p>
                  <figure class="highlight brainfuck">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="literal">-</span><span class="comment">A</span> <span class="comment">IN_public_allow</span> <span class="literal">-</span><span class="comment">p</span> <span class="comment">tcp</span> <span class="literal">-</span><span class="comment">m</span> <span class="comment">tcp</span> --<span class="comment">dport</span> <span class="comment">3128</span> <span class="literal">-</span><span class="comment">m</span> <span class="comment">conntrack</span> --<span class="comment">ctstate</span> <span class="comment">NEW</span> <span class="literal">-</span><span class="comment">j</span> <span class="comment">ACCEPT</span></span><br><span class="line"><span class="comment"></span><span class="literal">-</span><span class="comment">A</span> <span class="comment">IN_public_allow</span> <span class="literal">-</span><span class="comment">p</span> <span class="comment">tcp</span> <span class="literal">-</span><span class="comment">m</span> <span class="comment">tcp</span> --<span class="comment">dport</span> <span class="comment">8888</span> <span class="literal">-</span><span class="comment">m</span> <span class="comment">conntrack</span> --<span class="comment">ctstate</span> <span class="comment">NEW</span> <span class="literal">-</span><span class="comment">j</span> <span class="comment">ACCEPT</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>如图所示 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/QQ20161121-0@2x.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/QQ20161121-0@2x-1024x688.png" alt="QQ20161121-0@2x"></a> 保存，然后重启iptables</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">sudo<span class="built_in"> service </span>iptables restart</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>输入 ifconfig得到IP地址，在其他的主机上输入</p>
                  <figure class="highlight css">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="selector-tag">curl</span> <span class="selector-tag">-x</span> <span class="selector-tag">IP</span><span class="selector-pseudo">:8888</span> <span class="selector-tag">www</span><span class="selector-class">.baidu</span><span class="selector-class">.com</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>测试一下，如果能出现结果，那就说明没问题。 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/QQ20161121-1@2x.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/QQ20161121-1@2x-1024x688.png" alt="QQ20161121-1@2x"></a> 如果怎么配都连不上，那干脆关了你的防火墙吧。虽然不推荐。</p>
                  <h2 id="连接代理"><a href="#连接代理" class="headerlink" title="连接代理"></a>连接代理</h2>
                  <p>接下来才是重头戏，你咋知道你的服务器IP现在到底是多少啊？拨一次号IP就换一次，那这还了得？ 如果服务商提供了端口映射！那一切都解决了！直接用端口映射过去就好了。然而，我的并没有。 自力更生，艰苦创业！ 首先我研究了一下DDNS服务，也就是动态域名解析。即使你的IP在变化，那也可以通过一个域名来映射过来。 原理简单而统一：当前拨号主机定时向一个固定的服务器发请求，服务器获取remote_addr就好了，可以做到定时更新和解析。 那么我找了一下，国内做的比较好的就是花生壳了，然后又找到了DNSPOD的接口解析。 下面简单说下我的折腾过程，大家可以先不用试，后面有更有效的方法。</p>
                  <h3 id="花生壳"><a href="#花生壳" class="headerlink" title="花生壳"></a>花生壳</h3>
                  <p>现在花生壳出到3.0版本了，有免费版和付费版之分，我就试用了一下免费版的。这里是花生壳的一些配置和下载： <a href="http://service.oray.com/question/4287.html" target="_blank" rel="noopener">花生壳配置</a> 下载花生壳客户端之后，会生成SN码，用这个在花生壳的官网登录后，会分配给你一个免费的域名。 接下来这个域名就能解析到你的主机了。</p>
                  <h3 id="DNSPOD"><a href="#DNSPOD" class="headerlink" title="DNSPOD"></a>DNSPOD</h3>
                  <p>DNSPOD原理也是一样，不过好处是你可以配置自己的域名。 在GitHub上有脚本可以使用。 <a href="https://github.com/xdtianyu/scripts/tree/master/ddns" target="_blank" rel="noopener">脚本链接</a> 具体的细节我就不说了，实际上就是定时请求，利用remote_addr更新DNSPOD记录，做到动态解析。 <a href="https://www.dnspod.cn/docs/records.html#dns" target="_blank" rel="noopener">解析接口</a> 不过！这两个有个通病！慢！ 什么慢？解析慢！但这不是他们的锅，因为DNS修改后完全生效就是需要一定的时间，这一秒你拨号了，然后更新了IP，但是域名可能还是解析着原来的IP，需要过几分钟才能变过来。这能忍吗？ 我可是在跑爬虫啊，这还能忍？</p>
                  <h2 id="自力更生"><a href="#自力更生" class="headerlink" title="自力更生"></a>自力更生</h2>
                  <p>嗯，V2EX果然是个好地方，逛了一下，收获不小。 <a href="https://www.v2ex.com/t/249694" target="_blank" rel="noopener">链接在此</a> 参考了 abelyao 的思路，自己写了脚本来获取IP，保证秒级更新！ 此时，你还需要另一台固定IP的主机或者某个云服务器，只要是地址固定的就好。在这里我用了另一台有固定IP的阿里云主机，当然你如果有什么新浪云啊之类的也可以。 那么现在的思路就是，拨号VPS定时拨号换IP，然后请求阿里云主机，阿里云主机获取VPS的IP地址即可。 拨号VPS做的事情： 定时拨号，定时请求服务器。使用bash脚本，然后crontab定时执行。 远程服务器： 接收请求，获取remote_addr，保存起来。使用Flask搭建服务器，接收请求。 废话少说，上代码 <a href="https://github.com/Germey/AutoProxy" target="_blank" rel="noopener">AutoProxy</a></p>
                  <h3 id="功能"><a href="#功能" class="headerlink" title="功能"></a>功能</h3>
                  <p>由于DDNS生效时间过长，对于爬虫等一些时间要求比较紧迫的项目就不太适用，为此本项目根据DDNS基本原理来实现实时获取ADSL拨号主机IP。</p>
                  <h3 id="基本原理"><a href="#基本原理" class="headerlink" title="基本原理"></a>基本原理</h3>
                  <p>client文件夹由ADSL拨号客户机运行。它会定时执行拨号操作，然后请求某个固定地址的服务器，以便让服务器获取ADSL拨号客户机的IP，主要是定时bash脚本运行。 server文件夹是服务器端运行，利用Python的Flask搭建服务器，然后接收ADSL拨号客户机的请求，得到remote_addr，获取客户机拨号后的IP。</p>
                  <h3 id="项目结构"><a href="#项目结构" class="headerlink" title="项目结构"></a>项目结构</h3>
                  <h4 id="server"><a href="#server" class="headerlink" title="server"></a>server</h4>
                  <ul>
                    <li>config.py 配置文件。</li>
                    <li>ip 客户端请求后获取的客户端IP，文本保存。</li>
                    <li>main.py Flask主程序，提供两个接口，一个是接收客户端请求，然后将IP保存，另外一个是获取当前保存的IP。</li>
                  </ul>
                  <h4 id="client"><a href="#client" class="headerlink" title="client"></a>client</h4>
                  <ul>
                    <li>crontab 定时任务命令示例。</li>
                    <li>pppoe.sh 拨号脚本，主要是实现重新拨号的几个命令。</li>
                    <li>request.sh 请求服务器的脚本，主要是实现拨号后请求服务器的操作。</li>
                    <li>request.conf 配置文件。</li>
                  </ul>
                  <h3 id="使用"><a href="#使用" class="headerlink" title="使用"></a>使用</h3>
                  <h4 id="服务器"><a href="#服务器" class="headerlink" title="服务器"></a>服务器</h4>
                  <p>服务器提供两个功能，record方法是客户机定时请求，然后获取客户机IP并保存。proxy方法是供我们自己用，返回保存的客户机IP，提取代理。</p>
                  <h5 id="克隆项目"><a href="#克隆项目" class="headerlink" title="克隆项目"></a>克隆项目</h5>
                  <figure class="highlight crmsh">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">git <span class="keyword">clone</span> <span class="title">https</span>://github.com/Germey/AutoProxy.git</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <h5 id="修改配置"><a href="#修改配置" class="headerlink" title="修改配置"></a>修改配置</h5>
                  <p>修改config.py文件</p>
                  <ul>
                    <li>KEY 是客户端请求服务器时的凭证，在client的request.conf也有相同的配置，二者保持一致即可。</li>
                    <li>NEED_AUTH 在获取当前保存的IP（即代理的IP）的时候，为防止自己的主机代理被滥用，在获取IP的时候，需要加权限验证。</li>
                    <li>AUTH_USER和AUTH_PASSWORD分别是认证用户名密码。</li>
                    <li>PORT默认端口，返回保存的结果中会自动添加这个端口，组成一个IP:PORT的代理形式。</li>
                  </ul>
                  <h4 id="运行"><a href="#运行" class="headerlink" title="运行"></a>运行</h4>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">cd server</span><br><span class="line">nohup python main.py</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <h4 id="ADSL客户机"><a href="#ADSL客户机" class="headerlink" title="ADSL客户机"></a>ADSL客户机</h4>
                  <h5 id="克隆项目-1"><a href="#克隆项目-1" class="headerlink" title="克隆项目"></a>克隆项目</h5>
                  <figure class="highlight crmsh">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">git <span class="keyword">clone</span> <span class="title">https</span>://github.com/Germey/AutoProxy.git</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <h5 id="修改配置-1"><a href="#修改配置-1" class="headerlink" title="修改配置"></a>修改配置</h5>
                  <p>修改request.conf文件</p>
                  <ul>
                    <li>KEY 是客户端请求服务器时的凭证，在server的config.py也有相同的配置，二者保持一致即可。</li>
                    <li>SERVER是服务器项目运行后的地址，一般为http://&lt;服务器IP&gt;:&lt;服务器端口&gt;/record。如<code>http://120.27.14.24:5000/record</code>。</li>
                  </ul>
                  <p>修改pppoe.sh文件 这里面写上重新拨号的几条命令，记得在前两行配置一下环境变量，配置上拨号命令所在的目录，以防出现脚本无法运行的问题。</p>
                  <h4 id="运行-1"><a href="#运行-1" class="headerlink" title="运行"></a>运行</h4>
                  <p>设置定时任务</p>
                  <figure class="highlight ebnf">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attribute">crontab -e</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>输入crontab的实例命令</p>
                  <figure class="highlight awk">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">*<span class="regexp">/5 * * * * /</span>var<span class="regexp">/py/</span>AutoProxy<span class="regexp">/client/</span>request.sh <span class="regexp">/var/</span>py<span class="regexp">/AutoProxy/</span>client<span class="regexp">/request.conf &gt;&gt; /</span>var<span class="regexp">/py/</span>AutoProxy<span class="regexp">/client/</span>request.log</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>注意修改路径，你的项目在哪里，都统一修改成自己项目的路径。 最前面的*/5是5分钟执行一次。 好了，保存之后，定时任务就会开启。</p>
                  <h3 id="验证结果"><a href="#验证结果" class="headerlink" title="验证结果"></a>验证结果</h3>
                  <p>这样一来，访问服务器地址，就可以得到ADSL拨号客户机的IP了。</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">import requests</span><br><span class="line"></span><br><span class="line">url = <span class="string">'http://120.27.14.24:5000'</span></span><br><span class="line">proxy = requests.<span class="builtin-name">get</span>(url, auth=(<span class="string">'admin'</span>, <span class="string">'123'</span>)).text</span><br><span class="line"><span class="builtin-name">print</span>(proxy)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>实例结果：</p>
                  <figure class="highlight accesslog">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="number">116.208.97.22:8888</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <h3 id="扩展"><a href="#扩展" class="headerlink" title="扩展"></a>扩展</h3>
                  <p>如果你有域名，可以自己解析一个域名，这样就可以直接请求自己的域名，拿到实时好用的代理了，而且定时更新。 <img src="http://opencdn.cuiqingcai.com/proxy.png" alt=""></p>
                  <h3 id="代理设置"><a href="#代理设置" class="headerlink" title="代理设置"></a>代理设置</h3>
                  <h4 id="urllib2"><a href="#urllib2" class="headerlink" title="urllib2"></a>urllib2</h4>
                  <figure class="highlight reasonml">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">import urllib2</span><br><span class="line">proxy_handler = urllib2.<span class="constructor">ProxyHandler(&#123;<span class="string">"http"</span>: '<span class="params">http</span>:<span class="operator">/</span><span class="operator">/</span>' + <span class="params">proxy</span>&#125;)</span></span><br><span class="line">opener = urllib2.build<span class="constructor">_opener(<span class="params">proxy_handler</span>)</span></span><br><span class="line">urllib2.install<span class="constructor">_opener(<span class="params">opener</span>)</span></span><br><span class="line">response = urllib2.urlopen('http:<span class="comment">//httpbin.org/get')</span></span><br><span class="line">print response.read<span class="literal">()</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <h3 id="requests"><a href="#requests" class="headerlink" title="requests"></a>requests</h3>
                  <figure class="highlight processing">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">import</span> requests</span><br><span class="line">proxies = &#123;</span><br><span class="line"><span class="string">'http'</span>: <span class="string">'http://'</span> + proxy,</span><br><span class="line">&#125;</span><br><span class="line">r = requests.<span class="built_in">get</span>(<span class="string">'http://httpbin.org/get'</span>, proxies=proxies)</span><br><span class="line"><span class="built_in">print</span>(r.<span class="built_in">text</span>)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>以上便秒级解决了动态IP解析，自己实现了一遍DDNS，爽！ 那这样一来，以后就可以直接请求你的主机获取一个最新可用的代理IP了，稳定可用，定时变化！ 以上便是ADSL拨号服务器配置的全过程，希望对大家有帮助！</p>
                  </p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/崔庆才" class="author" itemprop="url" rel="index">崔庆才</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2016-11-22 00:18:06" itemprop="dateCreated datePublished" datetime="2016-11-22T00:18:06+08:00">2016-11-22</time>
                </span>
                <span id="/3443.html" class="post-meta-item leancloud_visitors" data-flag-title="Python爬虫进阶七之设置ADSL拨号服务器代理" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>6k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>5 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/3363.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> Python <i class="label-arrow"></i>
                  </a>
                  <a href="/3363.html" class="post-title-link" itemprop="url">小白爬虫第四弹之爬虫快跑（多进程+多线程）</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <img itemprop="contentUrl" class="random">
                </div>
                <div class="excerpt">
                  <p>
                  <p>ＰＳ：使用多线程时好像在目录切换的问题上存在问题，可以给线程加个锁试试 Hello 大家好！我又来了。 <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/QQ图片20161102215153.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/QQ图片20161102215153.jpg" alt="QQ图片20161102215153"></a> 你是不是发现下载图片速度特别慢、难以忍受啊！对于这种问题 一般解决办法就是多进程了！一个进程速度慢！我就用十个进程，相当于十个人一起干。速度就会快很多啦！（为什么不说多线程？懂点Python的小伙伴都知道、GIL的存在 导致Python的多线程有点坑啊！）今天就教大家来做一个多进程的爬虫（其实吧、可以用来做一个超简化版的分布式爬虫） 其实吧！还有一种加速的方法叫做“异步”！不过这玩意儿我没怎么整明白就不出来误人子弟了！（因为爬虫大部分时间都是在等待response中！‘异步’则能让程序在等待response的时间去做其他事情。） <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161022193315.gif" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/10/QQ图片20161022193315.gif" alt="QQ图片20161022193315"></a> 学过Python基础的同学都知道、在多进程中，进程之间是不能相互通信的，这就有一个很坑爹的问题出现了！多个进程怎么知道哪些需要爬取、哪些已经被爬取了！ 这就涉及到一个东西！这玩意儿叫做队列！！队列！！队列！！其实吧正常来说应该给大家用队列来完成这个教程的， 比如 Tornado 的queue模块。（如果需要更为稳定健壮的队列，则请考虑使用Celery这一类的专用消息传递工具） 不过为了简化技术种类啊！（才不会告诉你们是我懒，嫌麻烦呢！）这次我们继续使用MongoDB。 好了！先来理一下思路： 每个进程需要知道哪些URL爬取过了、哪些URL需要爬取！我们来给每个URL设置两种状态： outstanding:等待爬取的URL complete:爬取完成的URL 诶！等等我们好像忘了啥？ 失败的URL怎么办啊？我们再增加一种状态： processing:正在进行的URL。 嗯！当所有初始的URL状态都为outstanding；当开始爬取的时候状态改为：processing；爬取完成状态改为：complete；失败的URL重置状态为：outstanding。为了能够处理URL进程被终止的情况、我们设置一个计时参数，当超过这个值时；我们则将状态重置为outstanding。 下面开整Go Go Go！ 首先我们需要一个模块：datetime(这个模块比内置time模块要好使一点)不会装？？不是吧！ pip install datetime 还有上一篇博文我们已经使用过的pymongo 下面是队列的代码：</p>
                  <figure class="highlight python">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span 
class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br><span class="line">68</span><br><span class="line">69</span><br><span class="line">70</span><br><span class="line">71</span><br><span class="line">72</span><br><span class="line">73</span><br><span class="line">74</span><br><span class="line">75</span><br><span class="line">76</span><br><span class="line">77</span><br><span class="line">78</span><br><span class="line">79</span><br><span class="line">80</span><br><span class="line">81</span><br><span class="line">82</span><br><span class="line">83</span><br><span class="line">84</span><br><span class="line">85</span><br><span class="line">86</span><br><span class="line">87</span><br><span class="line">88</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> datetime <span class="keyword">import</span> datetime, timedelta</span><br><span class="line"><span class="keyword">from</span> pymongo <span class="keyword">import</span> MongoClient, errors</span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">MogoQueue</span><span class="params">()</span>:</span></span><br><span class="line"></span><br><span class="line">    OUTSTANDING = <span class="number">1</span> <span class="comment">##初始状态</span></span><br><span class="line">    PROCESSING = <span class="number">2</span> <span class="comment">##正在下载状态</span></span><br><span class="line">    COMPLETE = <span class="number">3</span> <span class="comment">##下载完成状态</span></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">__init__</span><span class="params">(self, db, collection, timeout=<span class="number">300</span>)</span>:</span><span class="comment">##初始mongodb连接</span></span><br><span class="line">        self.client = MongoClient()</span><br><span class="line">        self.Client = self.client[db]</span><br><span class="line">        self.db = self.Client[collection]</span><br><span class="line">        self.timeout = timeout</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">__bool__</span><span class="params">(self)</span>:</span></span><br><span class="line">        <span class="string">"""</span></span><br><span class="line"><span class="string">        这个函数，我的理解是如果下面的表达为真，则整个类为真</span></span><br><span class="line"><span class="string">        至于有什么用，后面我会注明的（如果我的理解有误，请指点出来谢谢，我也是Python新手）</span></span><br><span class="line"><span class="string">        $ne的意思是不匹配</span></span><br><span class="line"><span class="string">        
"""</span></span><br><span class="line">        record = self.db.find_one(</span><br><span class="line">            &#123;<span class="string">'status'</span>: &#123;<span class="string">'$ne'</span>: self.COMPLETE&#125;&#125;</span><br><span class="line">        )</span><br><span class="line">        <span class="keyword">return</span> <span class="literal">True</span> <span class="keyword">if</span> record <span class="keyword">else</span> <span class="literal">False</span></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">push</span><span class="params">(self, url, title)</span>:</span> <span class="comment">##这个函数用来添加新的URL进队列</span></span><br><span class="line">        <span class="keyword">try</span>:</span><br><span class="line">            self.db.insert(&#123;<span class="string">'_id'</span>: url, <span class="string">'status'</span>: self.OUTSTANDING, <span class="string">'主题'</span>: title&#125;)</span><br><span class="line">            print(url, <span class="string">'插入队列成功'</span>)</span><br><span class="line">        <span class="keyword">except</span> errors.DuplicateKeyError <span class="keyword">as</span> e:  <span class="comment">##报错则代表已经存在于队列之中了</span></span><br><span class="line">            print(url, <span class="string">'已经存在于队列中了'</span>)</span><br><span class="line">            <span class="keyword">pass</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">push_imgurl</span><span class="params">(self, title, url)</span>:</span></span><br><span class="line">        <span class="keyword">try</span>:</span><br><span class="line">            self.db.insert(&#123;<span class="string">'_id'</span>: title, <span class="string">'statue'</span>: self.OUTSTANDING, <span class="string">'url'</span>: url&#125;)</span><br><span class="line">            print(<span 
class="string">'图片地址插入成功'</span>)</span><br><span class="line">        <span class="keyword">except</span> errors.DuplicateKeyError <span class="keyword">as</span> e:</span><br><span class="line">            print(<span class="string">'地址已经存在了'</span>)</span><br><span class="line">            <span class="keyword">pass</span></span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">pop</span><span class="params">(self)</span>:</span></span><br><span class="line">        <span class="string">"""</span></span><br><span class="line"><span class="string">        这个函数会查询队列中的所有状态为OUTSTANDING的值，</span></span><br><span class="line"><span class="string">        更改状态，（query后面是查询）（update后面是更新）</span></span><br><span class="line"><span class="string">        并返回_id（就是我们的ＵＲＬ），MongDB好使吧，^_^</span></span><br><span class="line"><span class="string">        如果没有OUTSTANDING的值则调用repair()函数重置所有超时的状态为OUTSTANDING，</span></span><br><span class="line"><span class="string">        $set是设置的意思，和MySQL的set语法一个意思</span></span><br><span class="line"><span class="string">        """</span></span><br><span class="line">        record = self.db.find_and_modify(</span><br><span class="line">            query=&#123;<span class="string">'status'</span>: self.OUTSTANDING&#125;,</span><br><span class="line">            update=&#123;<span class="string">'$set'</span>: &#123;<span class="string">'status'</span>: self.PROCESSING, <span class="string">'timestamp'</span>: datetime.now()&#125;&#125;</span><br><span class="line">        )</span><br><span class="line">        <span class="keyword">if</span> record:</span><br><span class="line">            <span class="keyword">return</span> record[<span class="string">'_id'</span>]</span><br><span class="line">        <span class="keyword">else</span>:</span><br><span class="line">            self.repair()</span><br><span class="line">            <span class="keyword">raise</span> 
KeyError</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">pop_title</span><span class="params">(self, url)</span>:</span></span><br><span class="line">        record = self.db.find_one(&#123;<span class="string">'_id'</span>: url&#125;)</span><br><span class="line">        <span class="keyword">return</span> record[<span class="string">'主题'</span>]</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">peek</span><span class="params">(self)</span>:</span></span><br><span class="line">        <span class="string">"""这个函数是取出状态为 OUTSTANDING的文档并返回_id(URL)"""</span></span><br><span class="line">        record = self.db.find_one(&#123;<span class="string">'status'</span>: self.OUTSTANDING&#125;)</span><br><span class="line">        <span class="keyword">if</span> record:</span><br><span class="line">            <span class="keyword">return</span> record[<span class="string">'_id'</span>]</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">complete</span><span class="params">(self, url)</span>:</span></span><br><span class="line">        <span class="string">"""这个函数是更新已完成的URL完成"""</span></span><br><span class="line">        self.db.update(&#123;<span class="string">'_id'</span>: url&#125;, &#123;<span class="string">'$set'</span>: &#123;<span class="string">'status'</span>: self.COMPLETE&#125;&#125;)</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">repair</span><span class="params">(self)</span>:</span></span><br><span class="line">        <span class="string">"""这个函数是重置状态$lt是比较"""</span></span><br><span class="line">        record = self.db.find_and_modify(</span><br><span class="line">           query=&#123;</span><br><span 
class="line">               <span class="string">'timestamp'</span>: &#123;<span class="string">'$lt'</span>: datetime.now() - timedelta(seconds=self.timeout)&#125;,</span><br><span class="line">               <span class="string">'status'</span>: &#123;<span class="string">'$ne'</span>: self.COMPLETE&#125;</span><br><span class="line">           &#125;,</span><br><span class="line">            update=&#123;<span class="string">'$set'</span>: &#123;<span class="string">'status'</span>: self.OUTSTANDING&#125;&#125;</span><br><span class="line">        )</span><br><span class="line">        <span class="keyword">if</span> record:</span><br><span class="line">            print(<span class="string">'重置URL状态'</span>, record[<span class="string">'_id'</span>])</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">clear</span><span class="params">(self)</span>:</span></span><br><span class="line">        <span class="string">"""这个函数只有第一次才调用、后续不要调用、因为这是删库啊！"""</span></span><br><span class="line">        self.db.drop()</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>好了，队列我们做好了，下面是获取所有页面的代码：</p>
                  <figure class="highlight python">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> Download <span class="keyword">import</span> request</span><br><span class="line"><span class="keyword">from</span> mongodb_queue <span class="keyword">import</span> MogoQueue</span><br><span class="line"><span class="keyword">from</span> bs4 <span class="keyword">import</span> BeautifulSoup</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">spider_queue = MogoQueue(<span class="string">'meinvxiezhenji'</span>, <span class="string">'crawl_queue'</span>)</span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">start</span><span class="params">(url)</span>:</span></span><br><span class="line">    response = request.get(url, <span class="number">3</span>)</span><br><span class="line">    Soup = BeautifulSoup(response.text, <span class="string">'lxml'</span>)</span><br><span class="line">    all_a = Soup.find(<span class="string">'div'</span>, class_=<span class="string">'all'</span>).find_all(<span class="string">'a'</span>)</span><br><span class="line">    <span class="keyword">for</span> a <span class="keyword">in</span> all_a:</span><br><span class="line">        title = a.get_text()</span><br><span class="line">        url = a[<span class="string">'href'</span>]</span><br><span class="line">        spider_queue.push(url, title)</span><br><span class="line">    <span class="string">"""上面这个调用就是把URL写入MongoDB的队列了"""</span></span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">"__main__"</span>:</span><br><span class="line">    start(<span class="string">'http://www.mzitu.com/all'</span>)</span><br><span class="line"></span><br><span class="line"><span class="string">"""这一段儿就不解释了哦！超级简单的"""</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>下面就是多进程+多线程的下载代码了：</p>
                  <figure class="highlight python">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span 
class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br><span class="line">68</span><br><span class="line">69</span><br><span class="line">70</span><br><span class="line">71</span><br><span class="line">72</span><br><span class="line">73</span><br><span class="line">74</span><br><span class="line">75</span><br><span class="line">76</span><br><span class="line">77</span><br><span class="line">78</span><br><span class="line">79</span><br><span class="line">80</span><br><span class="line">81</span><br><span class="line">82</span><br><span class="line">83</span><br><span class="line">84</span><br><span class="line">85</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">import</span> os</span><br><span class="line"><span class="keyword">import</span> time</span><br><span class="line"><span class="keyword">import</span> threading</span><br><span class="line"><span class="keyword">import</span> multiprocessing</span><br><span class="line"><span class="keyword">from</span> mongodb_queue <span class="keyword">import</span> MogoQueue</span><br><span class="line"><span class="keyword">from</span> Download <span class="keyword">import</span> request</span><br><span class="line"><span class="keyword">from</span> bs4 <span class="keyword">import</span> BeautifulSoup</span><br><span class="line"></span><br><span class="line">SLEEP_TIME = <span class="number">1</span></span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">mzitu_crawler</span><span class="params">(max_threads=<span class="number">10</span>)</span>:</span></span><br><span class="line">    crawl_queue = MogoQueue(<span class="string">'meinvxiezhenji'</span>, <span class="string">'crawl_queue'</span>) <span class="comment">##这个是我们获取URL的队列</span></span><br><span class="line">    <span class="comment">##img_queue = MogoQueue('meinvxiezhenji', 'img_queue')</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">pageurl_crawler</span><span class="params">()</span>:</span></span><br><span class="line">        <span class="keyword">while</span> <span class="literal">True</span>:</span><br><span class="line">            <span class="keyword">try</span>:</span><br><span class="line">                url = crawl_queue.pop()</span><br><span class="line">                print(url)</span><br><span class="line">            <span class="keyword">except</span> KeyError:</span><br><span class="line">                print(<span class="string">'队列没有数据'</span>)</span><br><span class="line"> 
               <span class="keyword">break</span></span><br><span class="line">            <span class="keyword">else</span>:</span><br><span class="line">                img_urls = []</span><br><span class="line">                req = request.get(url, <span class="number">3</span>).text</span><br><span class="line">                title = crawl_queue.pop_title(url)</span><br><span class="line">                mkdir(title)</span><br><span class="line">                os.chdir(<span class="string">'D:\mzitu\\'</span> + title)</span><br><span class="line">                max_span = BeautifulSoup(req, <span class="string">'lxml'</span>).find(<span class="string">'div'</span>, class_=<span class="string">'pagenavi'</span>).find_all(<span class="string">'span'</span>)[<span class="number">-2</span>].get_text()</span><br><span class="line">                <span class="keyword">for</span> page <span class="keyword">in</span> range(<span class="number">1</span>, int(max_span) + <span class="number">1</span>):</span><br><span class="line">                    page_url = url + <span class="string">'/'</span> + str(page)</span><br><span class="line">                    img_url = BeautifulSoup(request.get(page_url, <span class="number">3</span>).text, <span class="string">'lxml'</span>).find(<span class="string">'div'</span>, class_=<span class="string">'main-image'</span>).find(<span class="string">'img'</span>)[<span class="string">'src'</span>]</span><br><span class="line">                    img_urls.append(img_url)</span><br><span class="line">                    save(img_url)</span><br><span class="line">                crawl_queue.complete(url) <span class="comment">##设置为完成状态</span></span><br><span class="line">                <span class="comment">##img_queue.push_imgurl(title, img_urls)</span></span><br><span class="line">                <span class="comment">##print('插入数据库成功')</span></span><br><span class="line"></span><br><span class="line">    <span 
class="function"><span class="keyword">def</span> <span class="title">save</span><span class="params">(img_url)</span>:</span></span><br><span class="line">        name = img_url[<span class="number">-9</span>:<span class="number">-4</span>]</span><br><span class="line">        print(<span class="string">u'开始保存：'</span>, img_url)</span><br><span class="line">        img = request.get(img_url, <span class="number">3</span>)</span><br><span class="line">        f = open(name + <span class="string">'.jpg'</span>, <span class="string">'ab'</span>)</span><br><span class="line">        f.write(img.content)</span><br><span class="line">        f.close()</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">mkdir</span><span class="params">(path)</span>:</span></span><br><span class="line">        path = path.strip()</span><br><span class="line">        isExists = os.path.exists(os.path.join(<span class="string">"D:\mzitu"</span>, path))</span><br><span class="line">        <span class="keyword">if</span> <span class="keyword">not</span> isExists:</span><br><span class="line">            print(<span class="string">u'建了一个名字叫做'</span>, path, <span class="string">u'的文件夹！'</span>)</span><br><span class="line">            os.makedirs(os.path.join(<span class="string">"D:\mzitu"</span>, path))</span><br><span class="line">            <span class="keyword">return</span> <span class="literal">True</span></span><br><span class="line">        <span class="keyword">else</span>:</span><br><span class="line">            print(<span class="string">u'名字叫做'</span>, path, <span class="string">u'的文件夹已经存在了！'</span>)</span><br><span class="line">            <span class="keyword">return</span> <span class="literal">False</span></span><br><span class="line"></span><br><span class="line">    threads = []</span><br><span class="line">    <span class="keyword">while</span> threads <span class="keyword">or</span> 
crawl_queue:</span><br><span class="line">        <span class="string">"""</span></span><br><span class="line"><span class="string">        这儿crawl_queue用上了，就是我们__bool__函数的作用，为真则代表我们MongoDB队列里面还有数据</span></span><br><span class="line"><span class="string">        threads 或者 crawl_queue为真都代表我们还没下载完成，程序就会继续执行</span></span><br><span class="line"><span class="string">        """</span></span><br><span class="line">        <span class="keyword">for</span> thread <span class="keyword">in</span> threads:</span><br><span class="line">            <span class="keyword">if</span> <span class="keyword">not</span> thread.is_alive(): <span class="comment">##is_alive是判断是否为空,不是空则在队列中删掉</span></span><br><span class="line">                threads.remove(thread)</span><br><span class="line">        <span class="keyword">while</span> len(threads) &lt; max_threads <span class="keyword">or</span> crawl_queue.peek(): <span class="comment">##线程池中的线程少于max_threads 或者 crawl_qeue时</span></span><br><span class="line">            thread = threading.Thread(target=pageurl_crawler) <span class="comment">##创建线程</span></span><br><span class="line">            thread.setDaemon(<span class="literal">True</span>) <span class="comment">##设置守护线程</span></span><br><span class="line">            thread.start() <span class="comment">##启动线程</span></span><br><span class="line">            threads.append(thread) <span class="comment">##添加进线程队列</span></span><br><span class="line">        time.sleep(SLEEP_TIME)</span><br><span class="line"></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">process_crawler</span><span class="params">()</span>:</span></span><br><span class="line">    process = []</span><br><span class="line">    num_cpus = multiprocessing.cpu_count()</span><br><span class="line">    print(<span class="string">'将会启动进程数为：'</span>, num_cpus)</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> 
range(num_cpus):</span><br><span class="line">        p = multiprocessing.Process(target=mzitu_crawler) <span class="comment">##创建进程</span></span><br><span class="line">        p.start() <span class="comment">##启动进程</span></span><br><span class="line">        process.append(p) <span class="comment">##添加进进程队列</span></span><br><span class="line">    <span class="keyword">for</span> p <span class="keyword">in</span> process:</span><br><span class="line">        p.join() <span class="comment">##等待进程队列里面的进程结束</span></span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">"__main__"</span>:</span><br><span class="line">    process_crawler()</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>好啦！一个多进程多线程的爬虫就完成了，（其实你可以设置一下MongoDB，然后调整一下连接配置，在多台机器上跑哦！！嗯，就是超级简化版的分布式爬虫了，虽然很是简陋。） 本来还想下载图片那一块儿加上异步（毕竟下载图片是I/O等待最久的时间了，），可惜异步我也没怎么整明白，就不拿出来贻笑大方了。 另外，各位小哥儿可以参考上面代码，单独处理图片地址试试（就是多个进程直接下载图片）？ 我测试了一下八分钟下载100套图 <em><strong>PS：请务必使用 第二篇博文中的下载模块，或者自己写一个自动更换代理的下载模块！！！不然寸步难行，分分钟被服务器BAN掉！</strong></em> <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/QQ图片20161102215153.jpg" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/QQ图片20161102215153.jpg" alt="QQ图片20161102215153"></a>小白教程就到此结束了，后面我教大家玩玩Scrapy；目标 顶点小说网， 爬完全站的小说。 再后面带大家玩玩 抓新浪 汤不热、模拟登录 之类的。或许维护一个公共代理IP池之类的。 这个所有代码我放在这个位置了：<a href="https://github.com/thsheep/mzitu/" target="_blank" rel="noopener">https://github.com/thsheep/mzitu/</a></p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/哎哟卧槽" class="author" itemprop="url" rel="index">哎哟卧槽</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2016-11-20 18:33:20" itemprop="dateCreated datePublished" datetime="2016-11-20T18:33:20+08:00">2016-11-20</time>
                </span>
                <span id="/3363.html" class="post-meta-item leancloud_visitors" data-flag-title="小白爬虫第四弹之爬虫快跑（多进程+多线程）" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>6.3k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>6 分钟</span>
                </span>
              </div>
            </article>
            <article itemscope itemtype="http://schema.org/Article" class="post-block index" lang="zh-CN">
              <link itemprop="mainEntityOfPage" href="https://cuiqingcai.com/3335.html">
              <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
                <meta itemprop="image" content="/images/avatar.png">
                <meta itemprop="name" content="崔庆才">
                <meta itemprop="description" content="崔庆才的个人站点，记录生活的瞬间，分享学习的心得。">
              </span>
              <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
                <meta itemprop="name" content="静觅">
              </span>
              <header class="post-header">
                <h2 class="post-title" itemprop="name headline">
                  <a class="label"> Python <i class="label-arrow"></i>
                  </a>
                  <a href="/3335.html" class="post-title-link" itemprop="url">Python爬虫进阶六之多进程的用法</a>
                </h2>
              </header>
              <div class="post-body" itemprop="articleBody">
                <div class="thumb">
                  <img itemprop="contentUrl" class="random">
                </div>
                <div class="excerpt">
                  <p>
                  <h2 id="前言"><a href="#前言" class="headerlink" title="前言"></a>前言</h2>
                  <p>在上一节中介绍了thread多线程库。python中的多线程其实并不是真正的多线程，并不能做到充分利用多核CPU资源。 如果想要充分利用，在python中大部分情况需要使用多进程，那么这个包就叫做 multiprocessing。 借助它，可以轻松完成从单进程到并发执行的转换。multiprocessing支持子进程、通信和共享数据、执行不同形式的同步，提供了Process、Queue、Pipe、Lock等组件。 那么本节要介绍的内容有：</p>
                  <ul>
                    <li>Process</li>
                    <li>Lock</li>
                    <li>Semaphore</li>
                    <li>Queue</li>
                    <li>Pipe</li>
                    <li>Pool</li>
                  </ul>
                  <h2 id="Process"><a href="#Process" class="headerlink" title="Process"></a>Process</h2>
                  <h3 id="基本使用"><a href="#基本使用" class="headerlink" title="基本使用"></a>基本使用</h3>
                  <p>在multiprocessing中，每一个进程都用一个Process类来表示。首先看下它的API</p>
                  <figure class="highlight reasonml">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="constructor">Process([<span class="params">group</span> [, <span class="params">target</span> [, <span class="params">name</span> [, <span class="params">args</span> [, <span class="params">kwargs</span>]]]]])</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <ul>
                    <li>target表示调用对象，你可以传入方法的名字</li>
                    <li>args表示被调用对象的位置参数元组，比如target是函数a，他有两个参数m，n，那么args就传入(m, n)即可</li>
                    <li>kwargs表示调用对象的字典</li>
                    <li>name是别名，相当于给这个进程取一个名字</li>
                    <li>group分组，实际上不使用</li>
                  </ul>
                  <p>我们先用一个实例来感受一下：</p>
                  <figure class="highlight livecodeserver">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">import multiprocessing</span><br><span class="line"></span><br><span class="line">def <span class="built_in">process</span>(<span class="built_in">num</span>):</span><br><span class="line">    print <span class="string">'Process:'</span>, <span class="built_in">num</span></span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">'__main__'</span>:</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> range(<span class="number">5</span>):</span><br><span class="line">        p = multiprocessing.Process(target=<span class="built_in">process</span>, args=(i,))</span><br><span class="line">        p.<span class="built_in">start</span>()</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>最简单的创建Process的过程如上所示，target传入函数名，args是函数的参数，是元组的形式，如果只有一个参数，那就是长度为1的元组。 然后调用start()方法即可启动多个进程了。 另外你还可以通过 cpu_count() 方法还有 active_children() 方法获取当前机器的 CPU 核心数量以及得到目前所有的运行的进程。 通过一个实例来感受一下：</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">import multiprocessing</span><br><span class="line">import time</span><br><span class="line"></span><br><span class="line">def process(num):</span><br><span class="line">    time.sleep(num)</span><br><span class="line">    <span class="builtin-name">print</span> <span class="string">'Process:'</span>, num</span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">'__main__'</span>:</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> range(5):</span><br><span class="line">        p = multiprocessing.Process(<span class="attribute">target</span>=process, args=(i,))</span><br><span class="line">        p.start()</span><br><span class="line"></span><br><span class="line">    <span class="builtin-name">print</span>(<span class="string">'CPU number:'</span> + str(multiprocessing.cpu_count()))</span><br><span class="line">    <span class="keyword">for</span> p <span class="keyword">in</span> multiprocessing.active_children():</span><br><span class="line">        <span class="builtin-name">print</span>(<span class="string">'Child process name: '</span> + p.name + <span class="string">' id: '</span> + str(p.pid))</span><br><span class="line"></span><br><span class="line">    <span class="builtin-name">print</span>(<span class="string">'Process Ended'</span>)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>运行结果：</p>
                  <figure class="highlight yaml">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attr">Process:</span> <span class="number">0</span></span><br><span class="line"><span class="string">CPU</span> <span class="string">number:8</span></span><br><span class="line"><span class="attr">Child process name: Process-2 id:</span> <span class="number">9641</span></span><br><span class="line"><span class="attr">Child process name: Process-4 id:</span> <span class="number">9643</span></span><br><span class="line"><span class="attr">Child process name: Process-5 id:</span> <span class="number">9644</span></span><br><span class="line"><span class="attr">Child process name: Process-3 id:</span> <span class="number">9642</span></span><br><span class="line"><span class="string">Process</span> <span class="string">Ended</span></span><br><span class="line"><span class="attr">Process:</span> <span class="number">1</span></span><br><span class="line"><span class="attr">Process:</span> <span class="number">2</span></span><br><span class="line"><span class="attr">Process:</span> <span class="number">3</span></span><br><span class="line"><span class="attr">Process:</span> <span class="number">4</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <h3 id="自定义类"><a href="#自定义类" class="headerlink" title="自定义类"></a>自定义类</h3>
                  <p>另外你还可以继承Process类，自定义进程类，实现run方法即可。 用一个实例来感受一下：</p>
                  <figure class="highlight haskell">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="title">from</span> multiprocessing <span class="keyword">import</span> Process</span><br><span class="line"><span class="keyword">import</span> time</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="type">MyProcess</span>(<span class="type">Process</span>):</span></span><br><span class="line"><span class="class">    def __init__(<span class="title">self</span>, <span class="title">loop</span>):</span></span><br><span class="line"><span class="class">        <span class="type">Process</span>.__init__(<span class="title">self</span>)</span></span><br><span class="line"><span class="class">        self.loop = loop</span></span><br><span class="line"><span class="class"></span></span><br><span class="line"><span class="class">    def run(<span class="title">self</span>):</span></span><br><span class="line"><span class="class">        for count in range(<span class="title">self</span>.<span class="title">loop</span>):</span></span><br><span class="line"><span class="class">            time.sleep(1)</span></span><br><span class="line"><span class="class">            print('<span class="type">Pid</span>: ' + <span class="title">str</span>(<span class="title">self</span>.<span class="title">pid</span>) + ' <span class="type">LoopCount</span>: ' + str(<span class="title">count</span>))</span></span><br><span class="line"><span class="class"></span></span><br><span class="line"><span class="class"></span></span><br><span class="line"><span class="class">if __name__ == '__main__':</span></span><br><span class="line"><span class="class">    for i in range(2, 5):</span></span><br><span class="line"><span class="class">        p = <span class="type">MyProcess</span>(<span class="title">i</span>)</span></span><br><span class="line"><span class="class">        p.start()</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>在上面的例子中，我们继承了 Process 这个类，然后实现了run方法。打印出来了进程号和参数。 运行结果：</p>
                  <figure class="highlight angelscript">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">Pid: <span class="number">28116</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">28117</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">28118</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">28116</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">28117</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">28118</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">28117</span> LoopCount: <span class="number">2</span></span><br><span class="line">Pid: <span class="number">28118</span> LoopCount: <span class="number">2</span></span><br><span class="line">Pid: <span class="number">28118</span> LoopCount: <span class="number">3</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>可以看到，三个进程分别打印出了2、3、4条结果。 我们可以把一些方法独立的写在每个类里封装好，等用的时候直接初始化一个类运行即可。</p>
                  <h3 id="deamon"><a href="#deamon" class="headerlink" title="daemon"></a>daemon</h3>
                  <p>在这里介绍一个属性，叫做daemon。每个进程都可以单独设置这个属性，如果设置为True，当父进程结束后，子进程会自动被终止。 用一个实例来感受一下，还是原来的例子，增加了daemon属性：</p>
                  <figure class="highlight python">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> multiprocessing <span class="keyword">import</span> Process</span><br><span class="line"><span class="keyword">import</span> time</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">MyProcess</span><span class="params">(Process)</span>:</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">__init__</span><span class="params">(self, loop)</span>:</span></span><br><span class="line">        Process.__init__(self)</span><br><span class="line">        self.loop = loop</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">run</span><span class="params">(self)</span>:</span></span><br><span class="line">        <span class="keyword">for</span> count <span class="keyword">in</span> range(self.loop):</span><br><span class="line">            time.sleep(<span class="number">1</span>)</span><br><span class="line">            print(<span class="string">'Pid: '</span> + str(self.pid) + <span class="string">' LoopCount: '</span> + str(count))</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">'__main__'</span>:</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> range(<span class="number">2</span>, <span class="number">5</span>):</span><br><span class="line">        p = MyProcess(i)</span><br><span class="line">        p.daemon = <span class="literal">True</span></span><br><span class="line">        p.start()</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">    <span class="keyword">print</span> <span class="string">'Main process 
Ended!'</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>在这里，调用的时候增加了设置daemon，最后的主进程（即父进程）打印输出了一句话。 运行结果：</p>
                  <figure class="highlight arduino">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">Main <span class="built_in">process</span> Ended!</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>结果很简单，因为主进程没有做任何事情，直接输出一句话结束，所以在这时也直接终止了子进程的运行。 这样可以有效防止无控制地生成子进程。如果这样写了，你在关闭这个主程序运行时，就无需额外担心子进程有没有被关闭了。 不过这样并不是我们想要达到的效果呀，能不能让所有子进程都执行完了然后再结束呢？那当然是可以的，只需要加入join()方法即可。</p>
                  <figure class="highlight haskell">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="title">from</span> multiprocessing <span class="keyword">import</span> Process</span><br><span class="line"><span class="keyword">import</span> time</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="type">MyProcess</span>(<span class="type">Process</span>):</span></span><br><span class="line"><span class="class">    def __init__(<span class="title">self</span>, <span class="title">loop</span>):</span></span><br><span class="line"><span class="class">        <span class="type">Process</span>.__init__(<span class="title">self</span>)</span></span><br><span class="line"><span class="class">        self.loop = loop</span></span><br><span class="line"><span class="class"></span></span><br><span class="line"><span class="class">    def run(<span class="title">self</span>):</span></span><br><span class="line"><span class="class">        for count in range(<span class="title">self</span>.<span class="title">loop</span>):</span></span><br><span class="line"><span class="class">            time.sleep(1)</span></span><br><span class="line"><span class="class">            print('<span class="type">Pid</span>: ' + <span class="title">str</span>(<span class="title">self</span>.<span class="title">pid</span>) + ' <span class="type">LoopCount</span>: ' + str(<span class="title">count</span>))</span></span><br><span class="line"><span class="class"></span></span><br><span class="line"><span class="class"></span></span><br><span class="line"><span class="class">if __name__ == '__main__':</span></span><br><span class="line"><span class="class">    for i in range(2, 5):</span></span><br><span class="line"><span class="class">        p = <span class="type">MyProcess</span>(<span class="title">i</span>)</span></span><br><span class="line"><span class="class">        p.daemon = <span 
class="type">True</span></span></span><br><span class="line"><span class="class">        p.start()</span></span><br><span class="line"><span class="class">        p.join()</span></span><br><span class="line"><span class="class"></span></span><br><span class="line"><span class="class"></span></span><br><span class="line"><span class="class">    print '<span class="type">Main</span> process <span class="type">Ended</span>!'</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>在这里，每个子进程都调用了join()方法，这样父进程（主进程）就会等待子进程执行完毕。 运行结果：</p>
                  <figure class="highlight angelscript">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">Pid: <span class="number">29902</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">29902</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">29905</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">29905</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">29905</span> LoopCount: <span class="number">2</span></span><br><span class="line">Pid: <span class="number">29912</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">29912</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">29912</span> LoopCount: <span class="number">2</span></span><br><span class="line">Pid: <span class="number">29912</span> LoopCount: <span class="number">3</span></span><br><span class="line">Main process Ended!</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>发现所有子进程都执行完毕之后，父进程最后打印出了结束的结果。</p>
                  <h2 id="Lock"><a href="#Lock" class="headerlink" title="Lock"></a>Lock</h2>
                  <p>在上面的一些小实例中，你可能会遇到如下的运行结果： <a href="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/QQ20161113-0@2x.png" target="_blank" rel="noopener"><img src="http://qiniu.cuiqingcai.com/wp-content/uploads/2016/11/QQ20161113-0@2x-300x126.png" alt=""></a> 什么问题？有的输出错位了。这是由于并行导致的，两个进程同时进行了输出，结果第一个进程的换行没有来得及输出，第二个进程就输出了结果。所以导致这种排版的问题。 那这归根结底是因为多个进程同时使用资源（输出操作）而导致的。 那怎么来避免这种问题？那自然是在某一时间，只能一个进程输出，其他进程等待。等刚才那个进程输出完毕之后，另一个进程再进行输出。这种现象就叫做“互斥”。 我们可以通过 Lock 来实现，在一个进程输出时，加锁，其他进程等待。等此进程执行结束后，释放锁，其他进程可以进行输出。 我们先用一个实例来感受一下：</p>
                  <figure class="highlight sql">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">from multiprocessing import Process, <span class="keyword">Lock</span></span><br><span class="line"><span class="keyword">import</span> <span class="built_in">time</span></span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="keyword">class</span> MyProcess(Process):</span><br><span class="line">    <span class="keyword">def</span> __init__(<span class="keyword">self</span>, <span class="keyword">loop</span>, <span class="keyword">lock</span>):</span><br><span class="line">        Process.__init__(<span class="keyword">self</span>)</span><br><span class="line">        self.loop = <span class="keyword">loop</span></span><br><span class="line">        self.lock = <span class="keyword">lock</span></span><br><span class="line"></span><br><span class="line">    <span class="keyword">def</span> run(<span class="keyword">self</span>):</span><br><span class="line">        <span class="keyword">for</span> <span class="keyword">count</span> <span class="keyword">in</span> <span class="keyword">range</span>(self.loop):</span><br><span class="line">            time.sleep(<span class="number">0.1</span>)</span><br><span class="line">            <span class="comment">#self.lock.acquire()</span></span><br><span class="line">            print(<span class="string">'Pid: '</span> + <span class="keyword">str</span>(self.pid) + <span class="string">' LoopCount: '</span> + <span class="keyword">str</span>(<span class="keyword">count</span>))</span><br><span class="line">            <span class="comment">#self.lock.release()</span></span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">'__main__'</span>:</span><br><span class="line">    <span class="keyword">lock</span> = <span class="keyword">Lock</span>()</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> <span 
class="keyword">range</span>(<span class="number">10</span>, <span class="number">15</span>):</span><br><span class="line">        p = MyProcess(i, <span class="keyword">lock</span>)</span><br><span class="line">        p.start()</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>首先看一下不加锁的输出结果：</p>
                  <figure class="highlight angelscript">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">Pid: <span class="number">45755</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">45756</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">45757</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">45758</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">45759</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">45755</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">45756</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">45757</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">45758</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">45759</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">45755</span> LoopCount: <span class="number">2</span>Pid: <span class="number">45756</span> LoopCount: <span class="number">2</span></span><br><span class="line"></span><br><span class="line">Pid: <span class="number">45757</span> LoopCount: <span class="number">2</span></span><br><span class="line">Pid: <span class="number">45758</span> LoopCount: <span class="number">2</span></span><br><span class="line">Pid: <span class="number">45759</span> LoopCount: <span class="number">2</span></span><br><span class="line">Pid: <span class="number">45756</span> LoopCount: <span class="number">3</span></span><br><span class="line">Pid: <span class="number">45755</span> LoopCount: <span class="number">3</span></span><br><span class="line">Pid: <span class="number">45757</span> LoopCount: <span class="number">3</span></span><br><span 
class="line">Pid: <span class="number">45758</span> LoopCount: <span class="number">3</span></span><br><span class="line">Pid: <span class="number">45759</span> LoopCount: <span class="number">3</span></span><br><span class="line">Pid: <span class="number">45755</span> LoopCount: <span class="number">4</span></span><br><span class="line">Pid: <span class="number">45756</span> LoopCount: <span class="number">4</span></span><br><span class="line">Pid: <span class="number">45757</span> LoopCount: <span class="number">4</span></span><br><span class="line">Pid: <span class="number">45759</span> LoopCount: <span class="number">4</span></span><br><span class="line">Pid: <span class="number">45758</span> LoopCount: <span class="number">4</span></span><br><span class="line">Pid: <span class="number">45756</span> LoopCount: <span class="number">5</span></span><br><span class="line">Pid: <span class="number">45755</span> LoopCount: <span class="number">5</span></span><br><span class="line">Pid: <span class="number">45757</span> LoopCount: <span class="number">5</span></span><br><span class="line">Pid: <span class="number">45759</span> LoopCount: <span class="number">5</span></span><br><span class="line">Pid: <span class="number">45758</span> LoopCount: <span class="number">5</span></span><br><span class="line">Pid: <span class="number">45756</span> LoopCount: <span class="number">6</span>Pid: <span class="number">45755</span> LoopCount: <span class="number">6</span></span><br><span class="line"></span><br><span class="line">Pid: <span class="number">45757</span> LoopCount: <span class="number">6</span></span><br><span class="line">Pid: <span class="number">45759</span> LoopCount: <span class="number">6</span></span><br><span class="line">Pid: <span class="number">45758</span> LoopCount: <span class="number">6</span></span><br><span class="line">Pid: <span class="number">45755</span> LoopCount: <span class="number">7</span>Pid: <span class="number">45756</span> LoopCount: 
<span class="number">7</span></span><br><span class="line"></span><br><span class="line">Pid: <span class="number">45757</span> LoopCount: <span class="number">7</span></span><br><span class="line">Pid: <span class="number">45758</span> LoopCount: <span class="number">7</span></span><br><span class="line">Pid: <span class="number">45759</span> LoopCount: <span class="number">7</span></span><br><span class="line">Pid: <span class="number">45756</span> LoopCount: <span class="number">8</span>Pid: <span class="number">45755</span> LoopCount: <span class="number">8</span></span><br><span class="line"></span><br><span class="line">Pid: <span class="number">45757</span> LoopCount: <span class="number">8</span></span><br><span class="line">Pid: <span class="number">45758</span> LoopCount: <span class="number">8</span>Pid: <span class="number">45759</span> LoopCount: <span class="number">8</span></span><br><span class="line"></span><br><span class="line">Pid: <span class="number">45755</span> LoopCount: <span class="number">9</span></span><br><span class="line">Pid: <span class="number">45756</span> LoopCount: <span class="number">9</span></span><br><span class="line">Pid: <span class="number">45757</span> LoopCount: <span class="number">9</span></span><br><span class="line">Pid: <span class="number">45758</span> LoopCount: <span class="number">9</span></span><br><span class="line">Pid: <span class="number">45759</span> LoopCount: <span class="number">9</span></span><br><span class="line">Pid: <span class="number">45756</span> LoopCount: <span class="number">10</span></span><br><span class="line">Pid: <span class="number">45757</span> LoopCount: <span class="number">10</span></span><br><span class="line">Pid: <span class="number">45758</span> LoopCount: <span class="number">10</span></span><br><span class="line">Pid: <span class="number">45759</span> LoopCount: <span class="number">10</span></span><br><span class="line">Pid: <span class="number">45757</span> LoopCount: 
<span class="number">11</span></span><br><span class="line">Pid: <span class="number">45758</span> LoopCount: <span class="number">11</span></span><br><span class="line">Pid: <span class="number">45759</span> LoopCount: <span class="number">11</span></span><br><span class="line">Pid: <span class="number">45758</span> LoopCount: <span class="number">12</span></span><br><span class="line">Pid: <span class="number">45759</span> LoopCount: <span class="number">12</span></span><br><span class="line">Pid: <span class="number">45759</span> LoopCount: <span class="number">13</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>可以看到有些输出已经错乱：由于没有加锁，多个进程的打印交叉挤在了同一行。 然后我们对其加锁：</p>
                  <figure class="highlight pgsql">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> multiprocessing <span class="keyword">import</span> Process, <span class="keyword">Lock</span></span><br><span class="line"><span class="keyword">import</span> <span class="type">time</span></span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="keyword">class</span> MyProcess(Process):</span><br><span class="line">    def __init__(self, <span class="keyword">loop</span>, <span class="keyword">lock</span>):</span><br><span class="line">        Process.__init__(self)</span><br><span class="line">        self.<span class="keyword">loop</span> = <span class="keyword">loop</span></span><br><span class="line">        self.<span class="keyword">lock</span> = <span class="keyword">lock</span></span><br><span class="line"></span><br><span class="line">    def run(self):</span><br><span class="line">        <span class="keyword">for</span> count <span class="keyword">in</span> range(self.<span class="keyword">loop</span>):</span><br><span class="line">            <span class="type">time</span>.sleep(<span class="number">0.1</span>)</span><br><span class="line">            self.<span class="keyword">lock</span>.acquire()</span><br><span class="line">            print(<span class="string">'Pid: '</span> + str(self.pid) + <span class="string">' LoopCount: '</span> + str(count))</span><br><span class="line">            self.<span class="keyword">lock</span>.<span class="keyword">release</span>()</span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">'__main__'</span>:</span><br><span class="line">    <span class="keyword">lock</span> = <span class="keyword">Lock</span>()</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> range(<span class="number">10</span>, <span class="number">15</span>):</span><br><span class="line">        p = 
MyProcess(i, <span class="keyword">lock</span>)</span><br><span class="line">        p.<span class="keyword">start</span>()</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>我们在print方法的前后分别添加了获得锁和释放锁的操作。这样就能保证在同一时间只有一个print操作。 看一下运行结果：</p>
                  <figure class="highlight angelscript">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">Pid: <span class="number">45889</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">45890</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">45891</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">45892</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">45893</span> LoopCount: <span class="number">0</span></span><br><span class="line">Pid: <span class="number">45889</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">45890</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">45891</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">45892</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">45893</span> LoopCount: <span class="number">1</span></span><br><span class="line">Pid: <span class="number">45889</span> LoopCount: <span class="number">2</span></span><br><span class="line">Pid: <span class="number">45890</span> LoopCount: <span class="number">2</span></span><br><span class="line">Pid: <span class="number">45891</span> LoopCount: <span class="number">2</span></span><br><span class="line">Pid: <span class="number">45892</span> LoopCount: <span class="number">2</span></span><br><span class="line">Pid: <span class="number">45893</span> LoopCount: <span class="number">2</span></span><br><span class="line">Pid: <span class="number">45889</span> LoopCount: <span class="number">3</span></span><br><span class="line">Pid: <span class="number">45890</span> LoopCount: <span class="number">3</span></span><br><span class="line">Pid: <span class="number">45891</span> LoopCount: <span class="number">3</span></span><br><span 
class="line">Pid: <span class="number">45892</span> LoopCount: <span class="number">3</span></span><br><span class="line">Pid: <span class="number">45893</span> LoopCount: <span class="number">3</span></span><br><span class="line">Pid: <span class="number">45889</span> LoopCount: <span class="number">4</span></span><br><span class="line">Pid: <span class="number">45890</span> LoopCount: <span class="number">4</span></span><br><span class="line">Pid: <span class="number">45891</span> LoopCount: <span class="number">4</span></span><br><span class="line">Pid: <span class="number">45892</span> LoopCount: <span class="number">4</span></span><br><span class="line">Pid: <span class="number">45893</span> LoopCount: <span class="number">4</span></span><br><span class="line">Pid: <span class="number">45889</span> LoopCount: <span class="number">5</span></span><br><span class="line">Pid: <span class="number">45890</span> LoopCount: <span class="number">5</span></span><br><span class="line">Pid: <span class="number">45891</span> LoopCount: <span class="number">5</span></span><br><span class="line">Pid: <span class="number">45892</span> LoopCount: <span class="number">5</span></span><br><span class="line">Pid: <span class="number">45893</span> LoopCount: <span class="number">5</span></span><br><span class="line">Pid: <span class="number">45889</span> LoopCount: <span class="number">6</span></span><br><span class="line">Pid: <span class="number">45890</span> LoopCount: <span class="number">6</span></span><br><span class="line">Pid: <span class="number">45891</span> LoopCount: <span class="number">6</span></span><br><span class="line">Pid: <span class="number">45893</span> LoopCount: <span class="number">6</span></span><br><span class="line">Pid: <span class="number">45892</span> LoopCount: <span class="number">6</span></span><br><span class="line">Pid: <span class="number">45889</span> LoopCount: <span class="number">7</span></span><br><span class="line">Pid: <span 
class="number">45890</span> LoopCount: <span class="number">7</span></span><br><span class="line">Pid: <span class="number">45891</span> LoopCount: <span class="number">7</span></span><br><span class="line">Pid: <span class="number">45892</span> LoopCount: <span class="number">7</span></span><br><span class="line">Pid: <span class="number">45893</span> LoopCount: <span class="number">7</span></span><br><span class="line">Pid: <span class="number">45889</span> LoopCount: <span class="number">8</span></span><br><span class="line">Pid: <span class="number">45890</span> LoopCount: <span class="number">8</span></span><br><span class="line">Pid: <span class="number">45891</span> LoopCount: <span class="number">8</span></span><br><span class="line">Pid: <span class="number">45892</span> LoopCount: <span class="number">8</span></span><br><span class="line">Pid: <span class="number">45893</span> LoopCount: <span class="number">8</span></span><br><span class="line">Pid: <span class="number">45889</span> LoopCount: <span class="number">9</span></span><br><span class="line">Pid: <span class="number">45890</span> LoopCount: <span class="number">9</span></span><br><span class="line">Pid: <span class="number">45891</span> LoopCount: <span class="number">9</span></span><br><span class="line">Pid: <span class="number">45892</span> LoopCount: <span class="number">9</span></span><br><span class="line">Pid: <span class="number">45893</span> LoopCount: <span class="number">9</span></span><br><span class="line">Pid: <span class="number">45890</span> LoopCount: <span class="number">10</span></span><br><span class="line">Pid: <span class="number">45891</span> LoopCount: <span class="number">10</span></span><br><span class="line">Pid: <span class="number">45892</span> LoopCount: <span class="number">10</span></span><br><span class="line">Pid: <span class="number">45893</span> LoopCount: <span class="number">10</span></span><br><span class="line">Pid: <span class="number">45891</span> 
LoopCount: <span class="number">11</span></span><br><span class="line">Pid: <span class="number">45892</span> LoopCount: <span class="number">11</span></span><br><span class="line">Pid: <span class="number">45893</span> LoopCount: <span class="number">11</span></span><br><span class="line">Pid: <span class="number">45893</span> LoopCount: <span class="number">12</span></span><br><span class="line">Pid: <span class="number">45892</span> LoopCount: <span class="number">12</span></span><br><span class="line">Pid: <span class="number">45893</span> LoopCount: <span class="number">13</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>嗯，一切都没问题了。 所以在访问临界资源时，使用Lock就可以避免进程同时占用资源而导致的一些问题。</p>
                  <h2 id="Semaphore"><a href="#Semaphore" class="headerlink" title="Semaphore"></a>Semaphore</h2>
                  <p>信号量，是在进程同步过程中一个比较重要的角色。可以控制临界资源的数量，保证各个进程之间的互斥和同步。 如果你学过操作系统，那么一定对这方面非常了解，如果你还不了解信号量是什么，可以参考 <a href="http://blog.csdn.net/qinxiongxu/article/details/7830537" target="_blank" rel="noopener">信号量解析</a> 来了解一下它是做什么的。 那么接下来我们就用一个实例来演示一下进程之间利用Semaphore做到同步和互斥，以及控制临界资源数量。</p>
                  <figure class="highlight haskell">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="title">from</span> multiprocessing <span class="keyword">import</span> Process, Semaphore, Lock, Queue</span><br><span class="line"><span class="keyword">import</span> time</span><br><span class="line"></span><br><span class="line"><span class="title">buffer</span> = <span class="type">Queue</span>(<span class="number">10</span>)</span><br><span class="line"><span class="title">empty</span> = <span class="type">Semaphore</span>(<span class="number">2</span>)</span><br><span class="line"><span class="title">full</span> = <span class="type">Semaphore</span>(<span class="number">0</span>)</span><br><span class="line"><span class="title">lock</span> = <span class="type">Lock</span>()</span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="type">Consumer</span>(<span class="type">Process</span>):</span></span><br><span class="line"><span class="class"></span></span><br><span class="line"><span class="class">    def run(<span class="title">self</span>):</span></span><br><span class="line"><span class="class">        global buffer, empty, full, lock</span></span><br><span class="line"><span class="class">        while <span class="type">True</span>:</span></span><br><span class="line"><span class="class">            full.acquire()</span></span><br><span class="line"><span class="class">            lock.acquire()</span></span><br><span class="line"><span class="class">            buffer.get()</span></span><br><span class="line"><span class="class">            print('<span class="type">Consumer</span> <span class="title">pop</span> <span class="title">an</span> <span class="title">element'</span>)</span></span><br><span class="line"><span class="class">            time.sleep(1)</span></span><br><span class="line"><span class="class">            lock.release()</span></span><br><span class="line"><span class="class">            
empty.release()</span></span><br><span class="line"><span class="class"></span></span><br><span class="line"><span class="class"></span></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="type">Producer</span>(<span class="type">Process</span>):</span></span><br><span class="line"><span class="class">    def run(<span class="title">self</span>):</span></span><br><span class="line"><span class="class">        global buffer, empty, full, lock</span></span><br><span class="line"><span class="class">        while <span class="type">True</span>:</span></span><br><span class="line"><span class="class">            empty.acquire()</span></span><br><span class="line"><span class="class">            lock.acquire()</span></span><br><span class="line"><span class="class">            buffer.put(1)</span></span><br><span class="line"><span class="class">            print('<span class="type">Producer</span> <span class="title">append</span> <span class="title">an</span> <span class="title">element'</span>)</span></span><br><span class="line"><span class="class">            time.sleep(1)</span></span><br><span class="line"><span class="class">            lock.release()</span></span><br><span class="line"><span class="class">            full.release()</span></span><br><span class="line"><span class="class"></span></span><br><span class="line"><span class="class"></span></span><br><span class="line"><span class="class">if __name__ == '__main__':</span></span><br><span class="line"><span class="class">    p = <span class="type">Producer</span>()</span></span><br><span class="line"><span class="class">    c = <span class="type">Consumer</span>()</span></span><br><span class="line"><span class="class">    p.daemon = c.daemon = <span class="type">True</span></span></span><br><span class="line"><span class="class">    p.start()</span></span><br><span class="line"><span class="class">    c.start()</span></span><br><span class="line"><span 
class="class">    p.join()</span></span><br><span class="line"><span class="class">    c.join()</span></span><br><span class="line"><span class="class">    print('<span class="type">Ended</span>!')</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>如上代码实现了著名的生产者和消费者问题，定义了两个进程类，一个是消费者，一个是生产者。 定义了一个共享队列，利用了Queue数据结构，然后定义了两个信号量，一个代表缓冲区空余数，一个表示缓冲区占用数。 生产者Producer使用empty.acquire()方法来占用一个缓冲区位置，然后缓冲区空闲区大小减小1，接下来进行加锁，对缓冲区进行操作。然后释放锁，然后让代表占用的缓冲区位置数量+1，消费者则相反。 运行结果如下：</p>
                  <figure class="highlight livecodeserver">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">Producer append <span class="keyword">an</span> <span class="keyword">element</span></span><br><span class="line">Producer append <span class="keyword">an</span> <span class="keyword">element</span></span><br><span class="line">Consumer pop <span class="keyword">an</span> <span class="keyword">element</span></span><br><span class="line">Consumer pop <span class="keyword">an</span> <span class="keyword">element</span></span><br><span class="line">Producer append <span class="keyword">an</span> <span class="keyword">element</span></span><br><span class="line">Producer append <span class="keyword">an</span> <span class="keyword">element</span></span><br><span class="line">Consumer pop <span class="keyword">an</span> <span class="keyword">element</span></span><br><span class="line">Consumer pop <span class="keyword">an</span> <span class="keyword">element</span></span><br><span class="line">Producer append <span class="keyword">an</span> <span class="keyword">element</span></span><br><span class="line">Producer append <span class="keyword">an</span> <span class="keyword">element</span></span><br><span class="line">Consumer pop <span class="keyword">an</span> <span class="keyword">element</span></span><br><span class="line">Consumer pop <span class="keyword">an</span> <span class="keyword">element</span></span><br><span class="line">Producer append <span class="keyword">an</span> <span class="keyword">element</span></span><br><span class="line">Producer append <span class="keyword">an</span> <span class="keyword">element</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>可以发现两个进程在交替运行，生产者先放入缓冲区物品，然后消费者取出，不停地进行循环。 通过上面的例子来体会一下信号量的用法。</p>
                  <h2 id="Queue"><a href="#Queue" class="headerlink" title="Queue"></a>Queue</h2>
                  <p>在上面的例子中我们使用了Queue，可以作为进程通信的共享队列使用。 在上面的程序中，如果你把Queue换成普通的list，是完全起不到效果的。即使在一个进程中改变了这个list，在另一个进程也不能获取到它的状态。 因此进程间的通信，队列需要用Queue。当然这里的队列指的是 multiprocessing.Queue。依然是用上面那个例子，我们一个进程向队列中放入数据，然后另一个进程取出数据。</p>
                  <figure class="highlight sql">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">from multiprocessing import Process, Semaphore, <span class="keyword">Lock</span>, Queue</span><br><span class="line"><span class="keyword">import</span> <span class="built_in">time</span></span><br><span class="line"><span class="keyword">from</span> random <span class="keyword">import</span> random</span><br><span class="line"></span><br><span class="line">buffer = Queue(<span class="number">10</span>)</span><br><span class="line"><span class="keyword">empty</span> = Semaphore(<span class="number">2</span>)</span><br><span class="line"><span class="keyword">full</span> = Semaphore(<span class="number">0</span>)</span><br><span class="line"><span class="keyword">lock</span> = <span class="keyword">Lock</span>()</span><br><span class="line"></span><br><span class="line"><span class="keyword">class</span> Consumer(Process):</span><br><span class="line"></span><br><span class="line">    <span class="keyword">def</span> run(<span class="keyword">self</span>):</span><br><span class="line">        <span class="keyword">global</span> buffer, <span class="keyword">empty</span>, <span class="keyword">full</span>, <span class="keyword">lock</span></span><br><span class="line">        <span class="keyword">while</span> <span class="literal">True</span>:</span><br><span class="line">            full.acquire()</span><br><span class="line">            lock.acquire()</span><br><span class="line">            print <span class="string">'Consumer get'</span>, buffer.get()</span><br><span class="line">            time.sleep(<span class="number">1</span>)</span><br><span class="line">            lock.release()</span><br><span class="line">            empty.release()</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="keyword">class</span> Producer(Process):</span><br><span class="line">    <span class="keyword">def</span> run(<span class="keyword">self</span>):</span><br><span 
class="line">        <span class="keyword">global</span> buffer, <span class="keyword">empty</span>, <span class="keyword">full</span>, <span class="keyword">lock</span></span><br><span class="line">        <span class="keyword">while</span> <span class="literal">True</span>:</span><br><span class="line">            empty.acquire()</span><br><span class="line">            lock.acquire()</span><br><span class="line">            <span class="keyword">num</span> = random()</span><br><span class="line">            print <span class="string">'Producer put '</span>, <span class="keyword">num</span></span><br><span class="line">            buffer.put(<span class="keyword">num</span>)</span><br><span class="line">            time.sleep(<span class="number">1</span>)</span><br><span class="line">            lock.release()</span><br><span class="line">            full.release()</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">'__main__'</span>:</span><br><span class="line">    p = Producer()</span><br><span class="line">    c = Consumer()</span><br><span class="line">    p.daemon = c.daemon = <span class="literal">True</span></span><br><span class="line">    p.start()</span><br><span class="line">    c.start()</span><br><span class="line">    p.join()</span><br><span class="line">    c.join()</span><br><span class="line">    print <span class="string">'Ended!'</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>运行结果：</p>
                  <figure class="highlight angelscript">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">Producer put  <span class="number">0.719213647437</span></span><br><span class="line">Producer put  <span class="number">0.44287326683</span></span><br><span class="line">Consumer <span class="keyword">get</span> <span class="number">0.719213647437</span></span><br><span class="line">Consumer <span class="keyword">get</span> <span class="number">0.44287326683</span></span><br><span class="line">Producer put  <span class="number">0.722859424381</span></span><br><span class="line">Producer put  <span class="number">0.525321338921</span></span><br><span class="line">Consumer <span class="keyword">get</span> <span class="number">0.722859424381</span></span><br><span class="line">Consumer <span class="keyword">get</span> <span class="number">0.525321338921</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>可以看到生产者放入队列中数据，然后消费者将数据取出来。 get方法有两个参数，blocked和timeout，意思为阻塞和超时时间。默认blocked是true，即阻塞式。 当一个队列为空的时候如果再用get取则会阻塞，所以这时候就需要把blocked设置为false，即非阻塞式，实际上它就会调用get_nowait()方法，此时还需要设置一个超时时间，在这么长的时间内还没有取到队列元素，那就抛出Queue.Empty异常。 当一个队列为满的时候如果再用put放则会阻塞，所以这时候就需要把blocked设置为false，即非阻塞式，实际上它就会调用put_nowait()方法，此时还需要设置一个超时时间，在这么长的时间内还没有放进去元素，那就抛出Queue.Full异常。 另外队列中常用的方法 Queue.qsize() 返回队列的大小 ，不过在 Mac OS 上没法运行。 原因：</p>
                  <blockquote>
                    <p>def qsize(self): # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() return self._maxsize - self._sem._semlock._get_value()</p>
                  </blockquote>
                  <p>Queue.empty() 如果队列为空，返回True, 反之False Queue.full() 如果队列满了，返回True,反之False Queue.get([block[, timeout]]) 获取队列，timeout等待时间 Queue.get_nowait() 相当Queue.get(False) Queue.put(item) 阻塞式写入队列，timeout等待时间 Queue.put_nowait(item) 相当Queue.put(item, False)</p>
                  <h2 id="Pipe"><a href="#Pipe" class="headerlink" title="Pipe"></a>Pipe</h2>
                  <p>管道，顾名思义，一端发一端收。 Pipe可以是单向(half-duplex)，也可以是双向(duplex)。我们通过mutiprocessing.Pipe(duplex=False)创建单向管道 (默认为双向)。一个进程从PIPE一端输入对象，然后被PIPE另一端的进程接收，单向管道只允许管道一端的进程输入，而双向管道则允许从两端输入。 用一个实例来感受一下：</p>
                  <figure class="highlight ruby">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">from multiprocessing import Process, Pipe</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">Consumer</span>(<span class="title">Process</span>):</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">__init__</span><span class="params">(<span class="keyword">self</span>, pipe)</span></span><span class="symbol">:</span></span><br><span class="line">        Process.__init_<span class="number">_</span>(<span class="keyword">self</span>)</span><br><span class="line">        <span class="keyword">self</span>.pipe = pipe</span><br><span class="line"></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">run</span><span class="params">(<span class="keyword">self</span>)</span></span><span class="symbol">:</span></span><br><span class="line">        <span class="keyword">self</span>.pipe.send(<span class="string">'Consumer Words'</span>)</span><br><span class="line">        print <span class="string">'Consumer Received:'</span>, <span class="keyword">self</span>.pipe.recv()</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="class"><span class="keyword">class</span> <span class="title">Producer</span>(<span class="title">Process</span>):</span></span><br><span class="line">    <span class="function"><span class="keyword">def</span> <span class="title">__init__</span><span class="params">(<span class="keyword">self</span>, pipe)</span></span><span class="symbol">:</span></span><br><span class="line">        Process.__init_<span class="number">_</span>(<span class="keyword">self</span>)</span><br><span class="line">        <span class="keyword">self</span>.pipe = pipe</span><br><span class="line"></span><br><span class="line">    <span 
class="function"><span class="keyword">def</span> <span class="title">run</span><span class="params">(<span class="keyword">self</span>)</span></span><span class="symbol">:</span></span><br><span class="line">        print <span class="string">'Producer Received:'</span>, <span class="keyword">self</span>.pipe.recv()</span><br><span class="line">        <span class="keyword">self</span>.pipe.send(<span class="string">'Producer Words'</span>)</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name_<span class="number">_</span> == <span class="string">'__main__'</span><span class="symbol">:</span></span><br><span class="line">    pipe = Pipe()</span><br><span class="line">    p = Producer(pipe[<span class="number">0</span>])</span><br><span class="line">    c = Consumer(pipe[<span class="number">1</span>])</span><br><span class="line">    p.daemon = c.daemon = True</span><br><span class="line">    p.start()</span><br><span class="line">    c.start()</span><br><span class="line">    p.join()</span><br><span class="line">    c.join()</span><br><span class="line">    print <span class="string">'Ended!'</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>在这里声明了一个默认为双向的管道，然后将管道的两端分别传给两个进程。两个进程互相收发。观察一下结果：</p>
                  <figure class="highlight groovy">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">Producer <span class="string">Received:</span> Consumer Words</span><br><span class="line">Consumer <span class="string">Received:</span> Producer Words</span><br><span class="line">Ended!</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>以上是对pipe的简单介绍。</p>
                  <h2 id="Pool"><a href="#Pool" class="headerlink" title="Pool"></a>Pool</h2>
                  <p>在利用Python进行系统管理的时候，特别是同时操作多个文件目录，或者远程控制多台主机，并行操作可以节约大量的时间。当被操作对象数目不大时，可以直接利用multiprocessing中的Process动态生成多个进程，十几个还好，但如果是上百个，上千个目标，手动的去限制进程数量却又太过繁琐，此时可以发挥进程池的功效。 Pool可以提供指定数量的进程，供用户调用，当有新的请求提交到pool中时，如果池还没有满，那么就会创建一个新的进程用来执行该请求；但如果池中的进程数已经达到规定最大值，那么该请求就会等待，直到池中有进程结束，才会创建新的进程来执行它。 在这里需要了解阻塞和非阻塞的概念。 阻塞和非阻塞关注的是程序在等待调用结果（消息，返回值）时的状态。 阻塞即要等到回调结果出来，在有结果之前，当前进程会被挂起。 Pool的用法有阻塞和非阻塞两种方式。非阻塞即为添加进程后，不一定非要等到该进程执行完就添加其他进程运行，阻塞则相反。 现用一个实例感受一下非阻塞的用法：</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> multiprocessing import Lock, Pool</span><br><span class="line">import time</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">def function(index):</span><br><span class="line">    <span class="builtin-name">print</span> <span class="string">'Start process: '</span>, index</span><br><span class="line">    time.sleep(3)</span><br><span class="line">    <span class="builtin-name">print</span> <span class="string">'End process'</span>, index</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">'__main__'</span>:</span><br><span class="line">   <span class="built_in"> pool </span>= Pool(<span class="attribute">processes</span>=3)</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> xrange(4):</span><br><span class="line">        pool.apply_async(function, (i,))</span><br><span class="line"></span><br><span class="line">    <span class="builtin-name">print</span> <span class="string">"Started processes"</span></span><br><span class="line">    pool.close()</span><br><span class="line">    pool.join()</span><br><span class="line">    <span class="builtin-name">print</span> <span class="string">"Subprocess done."</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>在这里利用了apply_async方法，即非阻塞。 运行结果：</p>
                  <figure class="highlight yaml">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="string">Started</span> <span class="string">processes</span></span><br><span class="line"><span class="attr">Start process: Start process:</span>  <span class="number">0</span> </span><br><span class="line"><span class="number">1</span></span><br><span class="line"><span class="attr">Start process:</span>  <span class="number">2</span></span><br><span class="line"><span class="string">End</span> <span class="string">processEnd</span> <span class="string">process</span> <span class="number">0</span> </span><br><span class="line"><span class="number">1</span></span><br><span class="line"><span class="attr">Start process:</span>  <span class="number">3</span></span><br><span class="line"><span class="string">End</span> <span class="string">process</span> <span class="number">2</span></span><br><span class="line"><span class="string">End</span> <span class="string">process</span> <span class="number">3</span></span><br><span class="line"><span class="string">Subprocess</span> <span class="string">done.</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>可以发现在这里添加三个进程进去后，立马就开始执行，不用非要等到某个进程结束后再添加新的进程进去。 下面再看看阻塞的用法：</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> multiprocessing import Lock, Pool</span><br><span class="line">import time</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">def function(index):</span><br><span class="line">    <span class="builtin-name">print</span> <span class="string">'Start process: '</span>, index</span><br><span class="line">    time.sleep(3)</span><br><span class="line">    <span class="builtin-name">print</span> <span class="string">'End process'</span>, index</span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">'__main__'</span>:</span><br><span class="line">   <span class="built_in"> pool </span>= Pool(<span class="attribute">processes</span>=3)</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> xrange(4):</span><br><span class="line">        pool.apply(function, (i,))</span><br><span class="line"></span><br><span class="line">    <span class="builtin-name">print</span> <span class="string">"Started processes"</span></span><br><span class="line">    pool.close()</span><br><span class="line">    pool.join()</span><br><span class="line">    <span class="builtin-name">print</span> <span class="string">"Subprocess done."</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>在这里只需要把apply_async改成apply即可。 运行结果如下：</p>
                  <figure class="highlight powershell">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">Start <span class="keyword">process</span>:  <span class="number">0</span></span><br><span class="line"><span class="keyword">End</span> <span class="keyword">process</span> <span class="number">0</span></span><br><span class="line">Start <span class="keyword">process</span>:  <span class="number">1</span></span><br><span class="line"><span class="keyword">End</span> <span class="keyword">process</span> <span class="number">1</span></span><br><span class="line">Start <span class="keyword">process</span>:  <span class="number">2</span></span><br><span class="line"><span class="keyword">End</span> <span class="keyword">process</span> <span class="number">2</span></span><br><span class="line">Start <span class="keyword">process</span>:  <span class="number">3</span></span><br><span class="line"><span class="keyword">End</span> <span class="keyword">process</span> <span class="number">3</span></span><br><span class="line">Started processes</span><br><span class="line">Subprocess done.</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>这样一来就好理解了吧？ 下面对函数进行解释： apply_async(func[, args[, kwds[, callback]]]) 它是非阻塞，apply(func[, args[, kwds]])是阻塞的。 close() 关闭pool，使其不再接受新的任务。 terminate() 结束工作进程，不再处理未完成的任务。 join() 主进程阻塞，等待子进程的退出， join方法要在close或terminate之后使用。 当然每个进程可以在各自的方法返回一个结果。apply或apply_async方法可以拿到这个结果并进一步进行处理。</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> multiprocessing import Lock, Pool</span><br><span class="line">import time</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">def function(index):</span><br><span class="line">    <span class="builtin-name">print</span> <span class="string">'Start process: '</span>, index</span><br><span class="line">    time.sleep(3)</span><br><span class="line">    <span class="builtin-name">print</span> <span class="string">'End process'</span>, index</span><br><span class="line">    return index</span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">'__main__'</span>:</span><br><span class="line">   <span class="built_in"> pool </span>= Pool(<span class="attribute">processes</span>=3)</span><br><span class="line">    <span class="keyword">for</span> i <span class="keyword">in</span> xrange(4):</span><br><span class="line">        result = pool.apply_async(function, (i,))</span><br><span class="line">        <span class="builtin-name">print</span> result.<span class="builtin-name">get</span>()</span><br><span class="line">    <span class="builtin-name">print</span> <span class="string">"Started processes"</span></span><br><span class="line">    pool.close()</span><br><span class="line">    pool.join()</span><br><span class="line">    <span class="builtin-name">print</span> <span class="string">"Subprocess done."</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>运行结果：</p>
                  <figure class="highlight yaml">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="attr">Start process:</span>  <span class="number">0</span></span><br><span class="line"><span class="string">End</span> <span class="string">process</span> <span class="number">0</span></span><br><span class="line"><span class="number">0</span></span><br><span class="line"><span class="attr">Start process:</span>  <span class="number">1</span></span><br><span class="line"><span class="string">End</span> <span class="string">process</span> <span class="number">1</span></span><br><span class="line"><span class="number">1</span></span><br><span class="line"><span class="attr">Start process:</span>  <span class="number">2</span></span><br><span class="line"><span class="string">End</span> <span class="string">process</span> <span class="number">2</span></span><br><span class="line"><span class="number">2</span></span><br><span class="line"><span class="attr">Start process:</span>  <span class="number">3</span></span><br><span class="line"><span class="string">End</span> <span class="string">process</span> <span class="number">3</span></span><br><span class="line"><span class="number">3</span></span><br><span class="line"><span class="string">Started</span> <span class="string">processes</span></span><br><span class="line"><span class="string">Subprocess</span> <span class="string">done.</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>另外还有一个非常好用的map方法。 如果你现在有一堆数据要处理，每一项都需要经过一个方法来处理，那么map非常适合。 比如现在你有一个数组，包含了所有的URL，而现在已经有了一个方法用来抓取每个URL内容并解析，那么可以直接在map的第一个参数传入方法名，第二个参数传入URL数组。 现在我们用一个实例来感受一下：</p>
                  <figure class="highlight routeros">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line"><span class="keyword">from</span> multiprocessing import Pool</span><br><span class="line">import requests</span><br><span class="line"><span class="keyword">from</span> requests.exceptions import ConnectionError</span><br><span class="line"></span><br><span class="line"></span><br><span class="line">def scrape(url):</span><br><span class="line">    try:</span><br><span class="line">        <span class="builtin-name">print</span> requests.<span class="builtin-name">get</span>(url)</span><br><span class="line">    except ConnectionError:</span><br><span class="line">        <span class="builtin-name">print</span> <span class="string">'Error Occured '</span>, url</span><br><span class="line">    finally:</span><br><span class="line">        <span class="builtin-name">print</span> <span class="string">'URL '</span>, url, <span class="string">' Scraped'</span></span><br><span class="line"></span><br><span class="line"></span><br><span class="line"><span class="keyword">if</span> __name__ == <span class="string">'__main__'</span>:</span><br><span class="line">   <span class="built_in"> pool </span>= Pool(<span class="attribute">processes</span>=3)</span><br><span class="line">    urls = [</span><br><span class="line">        <span class="string">'https://www.baidu.com'</span>,</span><br><span class="line">        <span class="string">'http://www.meituan.com/'</span>,</span><br><span class="line">        <span class="string">'http://blog.csdn.net/'</span>,</span><br><span class="line">        <span class="string">'http://xxxyxxx.net'</span></span><br><span class="line">    ]</span><br><span class="line">    pool.map(scrape, urls)</span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>在这里初始化一个Pool，指定进程数为3，如果不指定，那么会自动根据CPU内核来分配进程数。 然后有一个链接列表，map函数可以遍历每个URL，然后对其分别执行scrape方法。 运行结果：</p>
                  <figure class="highlight groovy">
                    <table>
                      <tr>
                        <td class="gutter">
                          <pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre>
                        </td>
                        <td class="code">
                          <pre><span class="line">&lt;Response [<span class="number">403</span>]&gt;</span><br><span class="line">URL  <span class="string">http:</span><span class="comment">//blog.csdn.net/  Scraped</span></span><br><span class="line">&lt;Response [<span class="number">200</span>]&gt;</span><br><span class="line">URL  <span class="string">https:</span><span class="comment">//www.baidu.com  Scraped</span></span><br><span class="line">Error Occured  <span class="string">http:</span><span class="comment">//xxxyxxx.net</span></span><br><span class="line">URL  <span class="string">http:</span><span class="comment">//xxxyxxx.net  Scraped</span></span><br><span class="line">&lt;Response [<span class="number">200</span>]&gt;</span><br><span class="line">URL  <span class="string">http:</span><span class="comment">//www.meituan.com/  Scraped</span></span><br></pre>
                        </td>
                      </tr>
                    </table>
                  </figure>
                  <p>可以看到遍历就这么轻松地实现了。</p>
                  <h2 id="结语"><a href="#结语" class="headerlink" title="结语"></a>结语</h2>
                  <p>多进程multiprocessing相比多线程功能强大太多，而且使用范围更广，希望本文对大家有帮助！</p>
                  <h2 id="本文参考"><a href="#本文参考" class="headerlink" title="本文参考"></a>本文参考</h2>
                  <p><a href="https://docs.python.org/2/library/multiprocessing.html" target="_blank" rel="noopener">https://docs.python.org/2/library/multiprocessing.html</a> <a href="http://www.cnblogs.com/vamei/archive/2012/10/12/2721484.html" target="_blank" rel="noopener">http://www.cnblogs.com/vamei/archive/2012/10/12/2721484.html</a> <a href="http://www.cnblogs.com/kaituorensheng/p/4445418.html" target="_blank" rel="noopener">http://www.cnblogs.com/kaituorensheng/p/4445418.html</a> <a href="https://my.oschina.net/yangyanxing/blog/296052" target="_blank" rel="noopener">https://my.oschina.net/yangyanxing/blog/296052</a></p>
                </div>
              </div>
              <div class="post-meta">
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-user"></i>
                  </span>
                  <span class="post-meta-item-text">作者</span>
                  <span><a href="/authors/崔庆才" class="author" itemprop="url" rel="index">崔庆才</a></span>
                </span>
                <span class="post-meta-item">
                  <span class="post-meta-item-icon">
                    <i class="far fa-calendar"></i>
                  </span>
                  <span class="post-meta-item-text">发表于</span>
                  <time title="创建时间：2016-11-13 20:25:32" itemprop="dateCreated datePublished" datetime="2016-11-13T20:25:32+08:00">2016-11-13</time>
                </span>
                <span id="/3335.html" class="post-meta-item leancloud_visitors" data-flag-title="Python爬虫进阶六之多进程的用法" title="阅读次数">
                  <span class="post-meta-item-icon">
                    <i class="fa fa-eye"></i>
                  </span>
                  <span class="post-meta-item-text">阅读次数：</span>
                  <span class="leancloud-visitors-count"></span>
                </span>
                <span class="post-meta-item" title="本文字数">
                  <span class="post-meta-item-icon">
                    <i class="far fa-file-word"></i>
                  </span>
                  <span class="post-meta-item-text">本文字数：</span>
                  <span>15k</span>
                </span>
                <span class="post-meta-item" title="阅读时长">
                  <span class="post-meta-item-icon">
                    <i class="far fa-clock"></i>
                  </span>
                  <span class="post-meta-item-text">阅读时长 &asymp;</span>
                  <span>14 分钟</span>
                </span>
              </div>
            </article>
            <script>
              // Assign every .random image a random picsum.photos photo (ids 0-299, 200x133).
              // Original wrapped the constant 300 in a redundant Math.floor(); removed.
              document.querySelectorAll('.random').forEach(item => item.src = "https://picsum.photos/id/" + Math.floor(Math.random() * 300) + "/200/133")
            </script>
            <nav class="pagination">
              <a class="extend prev" rel="prev" href="/page/21/"><i class="fa fa-angle-left" aria-label="上一页"></i></a><a class="page-number" href="/">1</a><span class="space">&hellip;</span><a class="page-number" href="/page/21/">21</a><span class="page-number current">22</span><a class="page-number" href="/page/23/">23</a><span class="space">&hellip;</span><a class="page-number" href="/page/31/">31</a><a class="extend next" rel="next" href="/page/23/"><i class="fa fa-angle-right" aria-label="下一页"></i></a>
            </nav>
          </div>
          <script>
            // On tab registration, re-activate the comment tab the visitor last used.
            window.addEventListener('tabs:register', () =>
            {
              const commentsConfig = CONFIG.comments;
              let active = commentsConfig.activeClass;
              if (commentsConfig.storage)
              {
                // Stored choice wins over the configured default, when present.
                active = localStorage.getItem('comments_active') || active;
              }
              if (!active) return;
              const tab = document.querySelector(`a[href="#comment-${active}"]`);
              if (tab) tab.click();
            });
            // Persist the clicked comment tab so it can be restored on the next visit.
            if (CONFIG.comments.storage)
            {
              window.addEventListener('tabs:click', event =>
              {
                if (event.target.matches('.tabs-comment .tab-content .tab-pane'))
                {
                  localStorage.setItem('comments_active', event.target.classList[1]);
                }
              });
            }

          </script>
        </div>
        <div class="toggle sidebar-toggle">
          <span class="toggle-line toggle-line-first"></span>
          <span class="toggle-line toggle-line-middle"></span>
          <span class="toggle-line toggle-line-last"></span>
        </div>
        <aside class="sidebar">
          <div class="sidebar-inner">
            <ul class="sidebar-nav motion-element">
              <li class="sidebar-nav-toc"> 文章目录 </li>
              <li class="sidebar-nav-overview"> 站点概览 </li>
            </ul>
            <!--noindex-->
            <div class="post-toc-wrap sidebar-panel">
            </div>
            <!--/noindex-->
            <div class="site-overview-wrap sidebar-panel">
              <div class="site-author motion-element" itemprop="author" itemscope itemtype="http://schema.org/Person">
                <img class="site-author-image" itemprop="image" alt="崔庆才" src="/images/avatar.png">
                <p class="site-author-name" itemprop="name">崔庆才</p>
                <div class="site-description" itemprop="description">崔庆才的个人站点，记录生活的瞬间，分享学习的心得。</div>
              </div>
              <div class="site-state-wrap motion-element">
                <nav class="site-state">
                  <div class="site-state-item site-state-posts">
                    <a href="/archives/">
                      <span class="site-state-item-count">608</span>
                      <span class="site-state-item-name">日志</span>
                    </a>
                  </div>
                  <div class="site-state-item site-state-categories">
                    <a href="/categories/">
                      <span class="site-state-item-count">24</span>
                      <span class="site-state-item-name">分类</span></a>
                  </div>
                  <div class="site-state-item site-state-tags">
                    <a href="/tags/">
                      <span class="site-state-item-count">156</span>
                      <span class="site-state-item-name">标签</span></a>
                  </div>
                </nav>
              </div>
              <div class="links-of-author motion-element">
                <span class="links-of-author-item">
                  <a href="https://github.com/Germey" title="GitHub → https:&#x2F;&#x2F;github.com&#x2F;Germey" rel="noopener" target="_blank"><i class="fab fa-github fa-fw"></i>GitHub</a>
                </span>
                <span class="links-of-author-item">
                  <a href="mailto:cqc@cuiqingcai.com" title="邮件 → mailto:cqc@cuiqingcai.com" rel="noopener" target="_blank"><i class="fa fa-envelope fa-fw"></i>邮件</a>
                </span>
                <span class="links-of-author-item">
                  <a href="https://weibo.com/cuiqingcai" title="微博 → https:&#x2F;&#x2F;weibo.com&#x2F;cuiqingcai" rel="noopener" target="_blank"><i class="fab fa-weibo fa-fw"></i>微博</a>
                </span>
                <span class="links-of-author-item">
                  <a href="https://www.zhihu.com/people/Germey" title="知乎 → https:&#x2F;&#x2F;www.zhihu.com&#x2F;people&#x2F;Germey" rel="noopener" target="_blank"><i class="fa fa-magic fa-fw"></i>知乎</a>
                </span>
              </div>
            </div>
            <div style=" width: 100%;" class="sidebar-panel sidebar-panel-image sidebar-panel-active">
              <a href="https://tutorial.lengyue.video/?coupon=12ef4b1a-a3db-11ea-bb37-0242ac130002_cqx_850" target="_blank" rel="noopener">
                <img src="https://qiniu.cuiqingcai.com/bco2a.png" alt="天验" style=" width: 100%;">
              </a>
            </div>
            <div style=" width: 100%;" class="sidebar-panel sidebar-panel-image sidebar-panel-active">
              <a href="http://www.ipidea.net/?utm-source=cqc&amp;utm-keyword=?cqc" target="_blank" rel="noopener">
                <img src="https://qiniu.cuiqingcai.com/0ywun.png" alt="ipidea" style=" width: 100%;">
</a>
              </a>
            </div>
            <div class="sidebar-panel sidebar-panel-tags sidebar-panel-active">
              <h4 class="name"> 标签云 </h4>
              <div class="content">
                <a href="/tags/2048/" style="font-size: 10px;">2048</a> <a href="/tags/API/" style="font-size: 10px;">API</a> <a href="/tags/Bootstrap/" style="font-size: 11.25px;">Bootstrap</a> <a href="/tags/CDN/" style="font-size: 10px;">CDN</a> <a href="/tags/CQC/" style="font-size: 10px;">CQC</a> <a href="/tags/CSS/" style="font-size: 10px;">CSS</a> <a href="/tags/CSS-%E5%8F%8D%E7%88%AC%E8%99%AB/" style="font-size: 10px;">CSS 反爬虫</a> <a href="/tags/CV/" style="font-size: 10px;">CV</a> <a href="/tags/Django/" style="font-size: 10px;">Django</a> <a href="/tags/Eclipse/" style="font-size: 11.25px;">Eclipse</a> <a href="/tags/FTP/" style="font-size: 10px;">FTP</a> <a href="/tags/Git/" style="font-size: 10px;">Git</a> <a href="/tags/GitHub/" style="font-size: 13.75px;">GitHub</a> <a href="/tags/HTML5/" style="font-size: 10px;">HTML5</a> <a href="/tags/Hexo/" style="font-size: 10px;">Hexo</a> <a href="/tags/IT/" style="font-size: 10px;">IT</a> <a href="/tags/JSP/" style="font-size: 10px;">JSP</a> <a href="/tags/JavaScript/" style="font-size: 10px;">JavaScript</a> <a href="/tags/K8s/" style="font-size: 10px;">K8s</a> <a href="/tags/LOGO/" style="font-size: 10px;">LOGO</a> <a href="/tags/Linux/" style="font-size: 10px;">Linux</a> <a href="/tags/MIUI/" style="font-size: 10px;">MIUI</a> <a href="/tags/MongoDB/" style="font-size: 10px;">MongoDB</a> <a href="/tags/Mysql/" style="font-size: 10px;">Mysql</a> <a href="/tags/NBA/" style="font-size: 10px;">NBA</a> <a href="/tags/PHP/" style="font-size: 11.25px;">PHP</a> <a href="/tags/PS/" style="font-size: 10px;">PS</a> <a href="/tags/Pathlib/" style="font-size: 10px;">Pathlib</a> <a href="/tags/PhantomJS/" style="font-size: 10px;">PhantomJS</a> <a href="/tags/Python/" style="font-size: 15px;">Python</a> <a href="/tags/Python3/" style="font-size: 12.5px;">Python3</a> <a href="/tags/Pythonic/" style="font-size: 10px;">Pythonic</a> <a href="/tags/QQ/" style="font-size: 10px;">QQ</a> <a href="/tags/Redis/" style="font-size: 
10px;">Redis</a> <a href="/tags/SAE/" style="font-size: 10px;">SAE</a> <a href="/tags/SSH/" style="font-size: 10px;">SSH</a> <a href="/tags/SVG/" style="font-size: 10px;">SVG</a> <a href="/tags/Scrapy/" style="font-size: 10px;">Scrapy</a> <a href="/tags/Scrapy-redis/" style="font-size: 10px;">Scrapy-redis</a> <a href="/tags/Scrapy%E5%88%86%E5%B8%83%E5%BC%8F/" style="font-size: 10px;">Scrapy分布式</a> <a href="/tags/Selenium/" style="font-size: 10px;">Selenium</a> <a href="/tags/TKE/" style="font-size: 10px;">TKE</a> <a href="/tags/Ubuntu/" style="font-size: 11.25px;">Ubuntu</a> <a href="/tags/VS-Code/" style="font-size: 10px;">VS Code</a> <a href="/tags/Vs-Code/" style="font-size: 10px;">Vs Code</a> <a href="/tags/Vue/" style="font-size: 11.25px;">Vue</a> <a href="/tags/Webpack/" style="font-size: 10px;">Webpack</a> <a href="/tags/Windows/" style="font-size: 10px;">Windows</a> <a href="/tags/Winpcap/" style="font-size: 10px;">Winpcap</a> <a href="/tags/WordPress/" style="font-size: 13.75px;">WordPress</a> <a href="/tags/Youtube/" style="font-size: 11.25px;">Youtube</a> <a href="/tags/android/" style="font-size: 10px;">android</a> <a href="/tags/ansible/" style="font-size: 10px;">ansible</a> <a href="/tags/cocos2d-x/" style="font-size: 10px;">cocos2d-x</a> <a href="/tags/e6/" style="font-size: 10px;">e6</a> <a href="/tags/fitvids/" style="font-size: 10px;">fitvids</a> <a href="/tags/git/" style="font-size: 11.25px;">git</a> <a href="/tags/json/" style="font-size: 10px;">json</a> <a href="/tags/js%E9%80%86%E5%90%91/" style="font-size: 10px;">js逆向</a> <a href="/tags/kubernetes/" style="font-size: 10px;">kubernetes</a> <a href="/tags/log/" style="font-size: 10px;">log</a> <a href="/tags/logging/" style="font-size: 10px;">logging</a> <a href="/tags/matlab/" style="font-size: 11.25px;">matlab</a> <a href="/tags/python/" style="font-size: 20px;">python</a> <a href="/tags/pytube/" style="font-size: 11.25px;">pytube</a> <a href="/tags/pywin32/" style="font-size: 
10px;">pywin32</a> <a href="/tags/style/" style="font-size: 10px;">style</a> <a href="/tags/tomcat/" style="font-size: 10px;">tomcat</a> <a href="/tags/ubuntu/" style="font-size: 10px;">ubuntu</a> <a href="/tags/uwsgi/" style="font-size: 10px;">uwsgi</a> <a href="/tags/vsftpd/" style="font-size: 10px;">vsftpd</a> <a href="/tags/wamp/" style="font-size: 10px;">wamp</a> <a href="/tags/wineQQ/" style="font-size: 10px;">wineQQ</a> <a href="/tags/%E4%B8%83%E7%89%9B/" style="font-size: 11.25px;">七牛</a> <a href="/tags/%E4%B8%8A%E6%B5%B7/" style="font-size: 10px;">上海</a> <a href="/tags/%E4%B8%AA%E4%BA%BA%E7%BD%91%E7%AB%99/" style="font-size: 10px;">个人网站</a> <a href="/tags/%E4%B8%BB%E9%A2%98/" style="font-size: 10px;">主题</a> <a href="/tags/%E4%BA%91%E4%BA%A7%E5%93%81/" style="font-size: 10px;">云产品</a> <a href="/tags/%E4%BA%91%E5%AD%98%E5%82%A8/" style="font-size: 10px;">云存储</a> <a href="/tags/%E4%BA%AC%E4%B8%9C%E4%BA%91/" style="font-size: 10px;">京东云</a> <a href="/tags/%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD/" style="font-size: 12.5px;">人工智能</a> <a href="/tags/%E4%BB%A3%E7%90%86/" style="font-size: 10px;">代理</a> <a href="/tags/%E4%BB%A3%E7%A0%81/" style="font-size: 10px;">代码</a> <a href="/tags/%E4%BB%A3%E7%A0%81%E5%88%86%E4%BA%AB%E5%9B%BE/" style="font-size: 10px;">代码分享图</a> <a href="/tags/%E4%BC%98%E5%8C%96/" style="font-size: 10px;">优化</a> <a href="/tags/%E4%BD%8D%E8%BF%90%E7%AE%97/" style="font-size: 10px;">位运算</a> <a href="/tags/%E5%85%AC%E4%BC%97%E5%8F%B7/" style="font-size: 10px;">公众号</a> <a href="/tags/%E5%88%86%E4%BA%AB/" style="font-size: 10px;">分享</a> <a href="/tags/%E5%88%86%E5%B8%83%E5%BC%8F/" style="font-size: 10px;">分布式</a> <a href="/tags/%E5%88%9B%E4%B8%9A/" style="font-size: 10px;">创业</a> <a href="/tags/%E5%89%8D%E7%AB%AF/" style="font-size: 12.5px;">前端</a> <a href="/tags/%E5%8D%9A%E5%AE%A2/" style="font-size: 10px;">博客</a> <a href="/tags/%E5%8E%9F%E7%94%9FAPP/" style="font-size: 10px;">原生APP</a> <a href="/tags/%E5%8F%8D%E7%88%AC%E8%99%AB/" style="font-size: 
12.5px;">反爬虫</a> <a href="/tags/%E5%91%BD%E4%BB%A4/" style="font-size: 10px;">命令</a> <a href="/tags/%E5%93%8D%E5%BA%94%E5%BC%8F%E5%B8%83%E5%B1%80/" style="font-size: 10px;">响应式布局</a> <a href="/tags/%E5%9E%83%E5%9C%BE%E9%82%AE%E4%BB%B6/" style="font-size: 10px;">垃圾邮件</a> <a href="/tags/%E5%9F%9F%E5%90%8D%E7%BB%91%E5%AE%9A/" style="font-size: 10px;">域名绑定</a> <a href="/tags/%E5%A4%8D%E7%9B%98/" style="font-size: 10px;">复盘</a> <a href="/tags/%E5%A4%A7%E4%BC%97%E7%82%B9%E8%AF%84/" style="font-size: 10px;">大众点评</a> <a href="/tags/%E5%AD%97%E4%BD%93%E5%8F%8D%E7%88%AC%E8%99%AB/" style="font-size: 10px;">字体反爬虫</a> <a href="/tags/%E5%AD%97%E7%AC%A6%E9%97%AE%E9%A2%98/" style="font-size: 10px;">字符问题</a> <a href="/tags/%E5%AD%A6%E4%B9%A0%E6%96%B9%E6%B3%95/" style="font-size: 10px;">学习方法</a> <a href="/tags/%E5%AE%89%E5%8D%93/" style="font-size: 10px;">安卓</a> <a href="/tags/%E5%AE%9E%E7%94%A8/" style="font-size: 10px;">实用</a> <a href="/tags/%E5%B0%81%E9%9D%A2/" style="font-size: 10px;">封面</a> <a href="/tags/%E5%B4%94%E5%BA%86%E6%89%8D/" style="font-size: 18.75px;">崔庆才</a> <a href="/tags/%E5%B7%A5%E5%85%B7/" style="font-size: 12.5px;">工具</a> <a href="/tags/%E5%BC%80%E5%8F%91%E5%B7%A5%E5%85%B7/" style="font-size: 10px;">开发工具</a> <a href="/tags/%E5%BE%AE%E8%BD%AF/" style="font-size: 10px;">微软</a> <a href="/tags/%E6%80%9D%E8%80%83/" style="font-size: 10px;">思考</a> <a href="/tags/%E6%89%8B%E6%9C%BA%E8%AE%BF%E9%97%AE/" style="font-size: 10px;">手机访问</a> <a href="/tags/%E6%95%99%E7%A8%8B/" style="font-size: 10px;">教程</a> <a href="/tags/%E6%95%99%E8%82%B2/" style="font-size: 10px;">教育</a> <a href="/tags/%E6%96%B0%E4%B9%A6/" style="font-size: 12.5px;">新书</a> <a href="/tags/%E6%96%B9%E6%B3%95%E8%AE%BA/" style="font-size: 10px;">方法论</a> <a href="/tags/%E6%97%85%E6%B8%B8/" style="font-size: 10px;">旅游</a> <a href="/tags/%E6%97%A5%E5%BF%97/" style="font-size: 10px;">日志</a> <a href="/tags/%E6%9A%97%E6%97%B6%E9%97%B4/" style="font-size: 10px;">暗时间</a> <a href="/tags/%E6%9D%9C%E5%85%B0%E7%89%B9/" 
style="font-size: 11.25px;">杜兰特</a> <a href="/tags/%E6%A1%8C%E9%9D%A2/" style="font-size: 10px;">桌面</a> <a href="/tags/%E6%AD%8C%E5%8D%95/" style="font-size: 10px;">歌单</a> <a href="/tags/%E6%B1%9F%E5%8D%97/" style="font-size: 10px;">江南</a> <a href="/tags/%E6%B8%B8%E6%88%8F/" style="font-size: 10px;">游戏</a> <a href="/tags/%E7%84%A6%E8%99%91/" style="font-size: 10px;">焦虑</a> <a href="/tags/%E7%88%AC%E8%99%AB/" style="font-size: 16.25px;">爬虫</a> <a href="/tags/%E7%88%AC%E8%99%AB%E4%B9%A6%E7%B1%8D/" style="font-size: 11.25px;">爬虫书籍</a> <a href="/tags/%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F/" style="font-size: 10px;">环境变量</a> <a href="/tags/%E7%94%9F%E6%B4%BB%E7%AC%94%E8%AE%B0/" style="font-size: 10px;">生活笔记</a> <a href="/tags/%E7%99%BB%E5%BD%95/" style="font-size: 10px;">登录</a> <a href="/tags/%E7%9F%A5%E4%B9%8E/" style="font-size: 10px;">知乎</a> <a href="/tags/%E7%9F%AD%E4%BF%A1/" style="font-size: 10px;">短信</a> <a href="/tags/%E7%9F%AD%E4%BF%A1%E9%AA%8C%E8%AF%81%E7%A0%81/" style="font-size: 10px;">短信验证码</a> <a href="/tags/%E7%AC%94%E8%AE%B0%E8%BD%AF%E4%BB%B6/" style="font-size: 10px;">笔记软件</a> <a href="/tags/%E7%AF%AE%E7%BD%91/" style="font-size: 10px;">篮网</a> <a href="/tags/%E7%BA%B8%E5%BC%A0/" style="font-size: 10px;">纸张</a> <a href="/tags/%E7%BB%84%E4%BB%B6/" style="font-size: 10px;">组件</a> <a href="/tags/%E7%BD%91%E7%AB%99/" style="font-size: 10px;">网站</a> <a href="/tags/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB/" style="font-size: 11.25px;">网络爬虫</a> <a href="/tags/%E7%BE%8E%E5%AD%A6/" style="font-size: 10px;">美学</a> <a href="/tags/%E8%82%89%E5%A4%B9%E9%A6%8D/" style="font-size: 10px;">肉夹馍</a> <a href="/tags/%E8%85%BE%E8%AE%AF%E4%BA%91/" style="font-size: 10px;">腾讯云</a> <a href="/tags/%E8%87%AA%E5%BE%8B/" style="font-size: 10px;">自律</a> <a href="/tags/%E8%A5%BF%E5%B0%91%E7%88%B7/" style="font-size: 10px;">西少爷</a> <a href="/tags/%E8%A7%86%E9%A2%91/" style="font-size: 10px;">视频</a> <a href="/tags/%E8%B0%B7%E6%AD%8C%E9%AA%8C%E8%AF%81%E7%A0%81/" style="font-size: 
10px;">谷歌验证码</a> <a href="/tags/%E8%BF%90%E8%90%A5/" style="font-size: 10px;">运营</a> <a href="/tags/%E8%BF%9C%E7%A8%8B/" style="font-size: 10px;">远程</a> <a href="/tags/%E9%80%86%E5%90%91/" style="font-size: 10px;">逆向</a> <a href="/tags/%E9%85%8D%E7%BD%AE/" style="font-size: 10px;">配置</a> <a href="/tags/%E9%87%8D%E8%A3%85/" style="font-size: 10px;">重装</a> <a href="/tags/%E9%98%BF%E6%9D%9C/" style="font-size: 10px;">阿杜</a> <a href="/tags/%E9%9D%99%E8%A7%85/" style="font-size: 17.5px;">静觅</a> <a href="/tags/%E9%A2%A0%E8%A6%86/" style="font-size: 10px;">颠覆</a> <a href="/tags/%E9%A3%9E%E4%BF%A1/" style="font-size: 10px;">飞信</a> <a href="/tags/%E9%B8%BF%E8%92%99/" style="font-size: 10px;">鸿蒙</a>
              </div>
              <script>
                // Paint each tag-cloud link with a randomly picked background color.
                const palette = ['#00a67c', '#5cb85c', '#d9534f', '#567e95', '#b37333', '#f4843d', '#15a287']
                document.querySelectorAll('.sidebar-panel-tags .content a').forEach((link) =>
                {
                  const pick = Math.floor(Math.random() * palette.length)
                  link.style.backgroundColor = palette[pick]
                })

              </script>
            </div>
            <div class="sidebar-panel sidebar-panel-categories sidebar-panel-active">
              <h4 class="name"> 分类 </h4>
              <div class="content">
                <ul class="category-list">
                  <li class="category-list-item"><a class="category-list-link" href="/categories/C-C/">C/C++</a><span class="category-list-count">23</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/HTML/">HTML</a><span class="category-list-count">14</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/Java/">Java</a><span class="category-list-count">5</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/JavaScript/">JavaScript</a><span class="category-list-count">26</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/Linux/">Linux</a><span class="category-list-count">15</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/Markdown/">Markdown</a><span class="category-list-count">1</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/Net/">Net</a><span class="category-list-count">4</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/Other/">Other</a><span class="category-list-count">39</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/PHP/">PHP</a><span class="category-list-count">27</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/Paper/">Paper</a><span class="category-list-count">2</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/Python/">Python</a><span class="category-list-count">261</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/TypeScript/">TypeScript</a><span class="category-list-count">2</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/%E4%B8%AA%E4%BA%BA%E5%B1%95%E7%A4%BA/">个人展示</a><span class="category-list-count">1</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/%E4%B8%AA%E4%BA%BA%E6%97%A5%E8%AE%B0/">个人日记</a><span class="category-list-count">9</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/%E4%B8%AA%E4%BA%BA%E8%AE%B0%E5%BD%95/">个人记录</a><span class="category-list-count">4</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/%E4%B8%AA%E4%BA%BA%E9%9A%8F%E7%AC%94/">个人随笔</a><span class="category-list-count">15</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/%E5%AE%89%E8%A3%85%E9%85%8D%E7%BD%AE/">安装配置</a><span class="category-list-count">59</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/%E6%8A%80%E6%9C%AF%E6%9D%82%E8%B0%88/">技术杂谈</a><span class="category-list-count">88</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/%E6%9C%AA%E5%88%86%E7%B1%BB/">未分类</a><span class="category-list-count">1</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/%E7%94%9F%E6%B4%BB%E7%AC%94%E8%AE%B0/">生活笔记</a><span class="category-list-count">1</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/%E7%A6%8F%E5%88%A9%E4%B8%93%E5%8C%BA/">福利专区</a><span class="category-list-count">6</span></li>
                  <li class="category-list-item"><a class="category-list-link" href="/categories/%E8%81%8C%E4%BD%8D%E6%8E%A8%E8%8D%90/">职位推荐</a><span class="category-list-count">2</span></li>
                </ul>
              </div>
            </div>
            <div class="sidebar-panel sidebar-panel-friends sidebar-panel-active">
              <h4 class="name"> 友情链接 </h4>
              <ul class="friends">
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/j2dub.jpg">
                  </span>
                  <span class="link">
                    <a href="https://www.findhao.net/" target="_blank" rel="noopener">FindHao</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/ou6mm.jpg">
                  </span>
                  <span class="link">
                    <a href="https://diygod.me/" target="_blank" rel="noopener">DIYgod</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/6apxu.jpg">
                  </span>
                  <span class="link">
                    <a href="https://www.51dev.com/" target="_blank" rel="noopener">IT技术社区</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://www.jankl.com/img/titleshu.jpg">
                  </span>
                  <span class="link">
                    <a href="https://www.jankl.com/" target="_blank" rel="noopener">liberalist</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/bqlbs.png">
                  </span>
                  <span class="link">
                    <a href="http://www.urselect.com/" target="_blank" rel="noopener">优社电商</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/8s88c.jpg">
                  </span>
                  <span class="link">
                    <a href="https://www.yuanrenxue.com/" target="_blank" rel="noopener">猿人学</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/2wgg5.jpg">
                  </span>
                  <span class="link">
                    <a href="https://www.yunlifang.cn/" target="_blank" rel="noopener">云立方</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/shwr6.png">
                  </span>
                  <span class="link">
                    <a href="http://lanbing510.info/" target="_blank" rel="noopener">冰蓝</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/blvoh.jpg">
                  </span>
                  <span class="link">
                    <a href="https://lengyue.me/" target="_blank" rel="noopener">冷月</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="http://qianxunclub.com/favicon.png">
                  </span>
                  <span class="link">
                    <a href="http://qianxunclub.com/" target="_blank" rel="noopener">千寻啊千寻</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/0044u.jpg">
                  </span>
                  <span class="link">
                    <a href="http://kodcloud.com/" target="_blank" rel="noopener">可道云</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/ygnpn.jpg">
                  </span>
                  <span class="link">
                    <a href="http://www.kunkundashen.cn/" target="_blank" rel="noopener">坤坤大神</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/22uv1.png">
                  </span>
                  <span class="link">
                    <a href="http://www.cenchong.com/" target="_blank" rel="noopener">岑冲博客</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/ev9kl.png">
                  </span>
                  <span class="link">
                    <a href="http://www.zxiaoji.com/" target="_blank" rel="noopener">张小鸡</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://www.503error.com/favicon.ico">
                  </span>
                  <span class="link">
                    <a href="https://www.503error.com/" target="_blank" rel="noopener">张志明个人博客</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/x714o.jpg">
                  </span>
                  <span class="link">
                    <a href="http://www.hubwiz.com/" target="_blank" rel="noopener">汇智网</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/129d8.png">
                  </span>
                  <span class="link">
                    <a href="https://www.bysocket.com/" target="_blank" rel="noopener">泥瓦匠BYSocket</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://www.xiongge.club/favicon.ico">
                  </span>
                  <span class="link">
                    <a href="https://www.xiongge.club/" target="_blank" rel="noopener">熊哥club</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/3w4fe.png">
                  </span>
                  <span class="link">
                    <a href="https://zerlong.com/" target="_blank" rel="noopener">知语</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/44hxf.png">
                  </span>
                  <span class="link">
                    <a href="http://redstonewill.com/" target="_blank" rel="noopener">红色石头</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/8g1fk.jpg">
                  </span>
                  <span class="link">
                    <a href="http://www.laodong.me/" target="_blank" rel="noopener">老董博客</a>
                  </span>
                </li>
                <!-- Friend-link entries: each logo now carries a descriptive alt
                     (was missing — WCAG failure) and lazy loading, since these
                     sidebar images are below the fold. -->
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/wkaus.jpg" alt="碎念" loading="lazy">
                  </span>
                  <span class="link">
                    <a href="https://zhaoshuai.me/" target="_blank" rel="noopener">碎念</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/pgo0r.jpg" alt="陈文管的博客" loading="lazy">
                  </span>
                  <span class="link">
                    <a href="https://www.chenwenguan.com/" target="_blank" rel="noopener">陈文管的博客</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/kk82a.jpg" alt="良许Linux教程网" loading="lazy">
                  </span>
                  <span class="link">
                    <a href="https://www.lxlinux.net/" target="_blank" rel="noopener">良许Linux教程网</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/lj0t2.jpg" alt="IT码农" loading="lazy">
                  </span>
                  <span class="link">
                    <a href="https://tanqingbo.cn/" target="_blank" rel="noopener">IT码农</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/i8cdr.png" alt="均益个人博客" loading="lazy">
                  </span>
                  <span class="link">
                    <a href="https://junyiseo.com/" target="_blank" rel="noopener">均益个人博客</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/chwv2.png" alt="大鱼的鱼塘" loading="lazy">
                  </span>
                  <span class="link">
                    <a href="https://brucedone.com/" target="_blank" rel="noopener">大鱼的鱼塘</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/2y43o.png" alt="夜幕爬虫安全论坛" loading="lazy">
                  </span>
                  <span class="link">
                    <a href="http://bbs.nightteam.cn/" target="_blank" rel="noopener">夜幕爬虫安全论坛</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/zvc3w.jpg" alt="韦世东的技术专栏" loading="lazy">
                  </span>
                  <span class="link">
                    <a href="https://www.weishidong.com/" target="_blank" rel="noopener">韦世东的技术专栏</a>
                  </span>
                </li>
                <li class="friend">
                  <span class="logo">
                    <img src="https://qiniu.cuiqingcai.com/ebudy.jpg" alt="穿甲兵技术社区" loading="lazy">
                  </span>
                  <span class="link">
                    <a href="https://chuanjiabing.com/" target="_blank" rel="noopener">穿甲兵技术社区</a>
                  </span>
                </li>
              </ul>
            </div>
          </div>
        </aside>
        <div id="sidebar-dimmer"></div>
      </div>
    </main>
    <footer class="footer">
      <div class="footer-inner">
        <!-- Copyright line: year, author, then site-wide word count ("2.6m")
             and estimated total reading time ("39:54"), separated by "|" -->
        <div class="copyright"> &copy; <span itemprop="copyrightYear">2021</span>
          <span class="with-love">
            <i class="fa fa-heart"></i>
          </span>
          <span class="author" itemprop="copyrightHolder">崔庆才丨静觅</span>
          <span class="post-meta-divider">|</span>
          <span class="post-meta-item-icon">
            <i class="fa fa-chart-area"></i>
          </span>
          <span title="站点总字数">2.6m</span>
          <span class="post-meta-divider">|</span>
          <span class="post-meta-item-icon">
            <i class="fa fa-coffee"></i>
          </span>
          <span title="站点阅读时长">39:54</span>
        </div>
        <!-- Generator credits: Hexo engine + NexT.Pisces theme -->
        <div class="powered-by">由 <a href="https://hexo.io/" class="theme-link" rel="noopener" target="_blank">Hexo</a> & <a href="https://pisces.theme-next.org/" class="theme-link" rel="noopener" target="_blank">NexT.Pisces</a> 强力驱动 </div>
        <!-- ICP filing number, required for sites served from mainland China -->
        <div class="beian"><a href="https://beian.miit.gov.cn/" rel="noopener" target="_blank">京ICP备18015597号-1 </a>
        </div>
        <script>
          // LeanCloud visitor counter (NexT theme): stores per-page view
          // counts in a LeanCloud "Counter" class and renders them into
          // .leancloud-visitors-count elements.
          (function ()
          {
            // Return the counter display element for a page, located by the
            // page's URL-encoded id on its .leancloud_visitors container.
            function leancloudSelector(url)
            {
              url = encodeURI(url);
              return document.getElementById(url).querySelector('.leancloud-visitors-count');
            }

            // Post pages: increment (or create) the Counter record for the
            // current page and render the new total.
            function addCount(Counter)
            {
              var visitors = document.querySelector('.leancloud_visitors');
              var url = decodeURI(visitors.id);
              var title = visitors.dataset.flagTitle;
              Counter('get', '/classes/Counter?where=' + encodeURIComponent(JSON.stringify(
              {
                url
              }))).then(response => response.json()).then((
              {
                results
              }) =>
              {
                if (results.length > 0)
                {
                  // Record exists: display time+1 optimistically, then persist
                  // an atomic Increment so concurrent visits are not lost.
                  var counter = results[0];
                  leancloudSelector(url).innerText = counter.time + 1;
                  Counter('put', '/classes/Counter/' + counter.objectId,
                  {
                    time:
                    {
                      '__op': 'Increment',
                      'amount': 1
                    }
                  }).catch(error =>
                  {
                    console.error('Failed to save visitor count', error);
                  });
                }
                else
                {
                  // First visit ever: create the record with time = 1.
                  Counter('post', '/classes/Counter',
                  {
                    title,
                    url,
                    time: 1
                  }).then(response => response.json()).then(() =>
                  {
                    leancloudSelector(url).innerText = 1;
                  }).catch(error =>
                  {
                    console.error('Failed to create', error);
                  });
                }
              }).catch(error =>
              {
                console.error('LeanCloud Counter Error', error);
              });
            }

            // Listing pages: fetch counts for every visible post in a single
            // query ($in over all page ids) and render each, defaulting to 0
            // for pages with no record yet.
            function showTime(Counter)
            {
              var visitors = document.querySelectorAll('.leancloud_visitors');
              var entries = [...visitors].map(element =>
              {
                return decodeURI(element.id);
              });
              Counter('get', '/classes/Counter?where=' + encodeURIComponent(JSON.stringify(
              {
                url:
                {
                  '$in': entries
                }
              }))).then(response => response.json()).then((
              {
                results
              }) =>
              {
                for (let url of entries)
                {
                  let target = results.find(item => item.url === url);
                  leancloudSelector(url).innerText = target ? target.time : 0;
                }
              }).catch(error =>
              {
                console.error('LeanCloud Counter Error', error);
              });
            }
            // App credentials injected by the theme config. NOTE(review): the
            // app_key is a LeanCloud *client* key and is public by design, but
            // write access should be locked down via class permissions on the
            // LeanCloud console — confirm that is configured.
            let
            {
              app_id,
              app_key,
              server_url
            } = {
              "enable": true,
              "app_id": "6X5dRQ0pnPWJgYy8SXOg0uID-gzGzoHsz",
              "app_key": "ziLDVEy73ne5HtFTiGstzHMS",
              "server_url": "https://6x5drq0p.lc-cn-n1-shared.com",
              "security": false
            };

            // Build a minimal REST client bound to api_server, then dispatch:
            // post pages increment their counter, listing pages only read.
            function fetchData(api_server)
            {
              var Counter = (method, url, data) =>
              {
                return fetch(`${api_server}/1.1${url}`,
                {
                  method,
                  headers:
                  {
                    'X-LC-Id': app_id,
                    'X-LC-Key': app_key,
                    'Content-Type': 'application/json',
                  },
                  body: JSON.stringify(data)
                });
              };
              if (CONFIG.page.isPost)
              {
                // Only count visits served from the canonical hostname so that
                // previews and mirrors do not inflate the numbers.
                if (CONFIG.hostname !== location.hostname) return;
                addCount(Counter);
              }
              else if (document.querySelectorAll('.post-title-link').length >= 1)
              {
                showTime(Counter);
              }
            }
            // International apps (id suffix "-MdYXbMMI") derive their API host
            // from the app id; China-region apps use the configured server_url.
            let api_server = app_id.slice(-9) !== '-MdYXbMMI' ? server_url : `https://${app_id.slice(0, 8).toLowerCase()}.api.lncldglobal.com`;
            if (api_server)
            {
              fetchData(api_server);
            }
            else
            {
              // No server configured: resolve the API host through the
              // LeanCloud app router before fetching.
              fetch('https://app-router.leancloud.cn/2/route?appId=' + app_id).then(response => response.json()).then((
              {
                api_server
              }) =>
              {
                fetchData('https://' + api_server);
              });
            }
          })();

        </script>
      </div>
      <div class="footer-stat">
        <span id="cnzz_stat_icon_1279355174"></span>
        <script>
          // Load the CNZZ analytics script without document.write. The original
          // snippet wrote a SECOND <span> with the same id as the one above
          // (duplicate-id bug), and document.write is deprecated and
          // parser-blocking. The existing span remains the render target.
          (function () {
            var stat = document.createElement('script');
            stat.src = 'https://v1.cnzz.com/z_stat.php?id=1279355174&online=1&show=line';
            var anchor = document.getElementById('cnzz_stat_icon_1279355174');
            anchor.parentNode.insertBefore(stat, anchor.nextSibling);
          })();
        </script>
      </div>
    </footer>
  </div>
  <!-- Theme scripts. Changes: explicit https: instead of protocol-relative
       URLs (avoids http fetches if the page is ever served over http), and
       removal of the empty <script src="/.js"> tags — a theme-config artifact
       that 404s on every page load. -->
  <script src="https://cdn.jsdelivr.net/npm/animejs@3.2.1/lib/anime.min.js"></script>
  <script src="https://cdn.jsdelivr.net/npm/pangu@4/dist/browser/pangu.min.js"></script>
  <script src="/js/utils.js"></script>
  <script src="/js/schemes/pisces.js"></script>
  <script src="/js/next-boot.js"></script>
  <script>
    // Baidu auto-push beacon: reports this page's canonical URL (or, failing
    // that, the current URL) to Baidu's share service so the search engine
    // discovers new pages faster. Skipped for pages already on baidu.com.
    (function ()
    {
      var canonicalURL, curProtocol;
      // Scan all <link> tags and keep the last rel="canonical" href.
      // FIX: the loop index was an implicit global (`for (i = 0; ...)`);
      // it is now properly declared.
      var links = document.getElementsByTagName("link");
      for (var i = 0; i < links.length; i++)
      {
        if (links[i].rel.toLowerCase() == 'canonical' && links[i].href)
        {
          canonicalURL = links[i].href;
        }
      }
      // Derive the protocol from the canonical URL, falling back to the
      // current page's protocol when no canonical link exists.
      if (!canonicalURL)
      {
        curProtocol = window.location.protocol.split(':')[0];
      }
      else
      {
        curProtocol = canonicalURL.split(':')[0];
      }
      // Use the current URL when no canonical URL was found.
      if (!canonicalURL) canonicalURL = window.location.href;
      ! function ()
      {
        // FIX: the original pattern /([http|https]:\/\/...)/gi used a
        // character class, which matches any single character of "http|s"
        // rather than the alternation "http or https". Match the scheme and
        // a baidu.com host properly, anchored at the start of the URL.
        var isBaidu = /^https?:\/\/[a-zA-Z0-9_.]+\.baidu\.com/i,
          r = canonicalURL,
          t = document.referrer;
        if (!isBaidu.test(r))
        {
          // sp0 endpoint for https pages, share endpoint otherwise.
          var n = (String(curProtocol).toLowerCase() === 'https') ? "https://sp0.baidu.com/9_Q4simg2RQJ8t7jm9iCKT-xh_/s.gif" : "//api.share.baidu.com/s.gif";
          // Attach referrer (r=) and landing URL (l=) as query parameters.
          t ? (n += "?r=" + encodeURIComponent(document.referrer), r && (n += "&l=" + r)) : r && (n += "?l=" + r);
          // Fire-and-forget beacon via an Image request.
          var img = new Image;
          img.src = n;
        }
      }(window);
    })();

  </script>
  <script src="/js/local-search.js"></script>
  <!-- NOTE(review): removed the empty <script src="/.js"> tag that followed —
       a theme-config artifact that 404s on every page load. -->
</body>

</html>
