{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "#PRD1.数据价值宣言\n",
    "#随着互联网逐渐渗透着人们的生活，大家开始把越来越多的事情搬到电脑或者手机上来做，手机就是其中之一。\n",
    "#手机由于方便，越来越受到大家欢迎。现在市场上售卖手机的网站很多，今天对“当当网手机销量”进行一些简单的分析。\n",
    "#当当网作为在手机市场的巨头，积累了大量的手机市场渠道资源以及用户资源，现在当当正不断加大对手机售卖市场的投入，\n",
    "#以巩固其在手机市场上的地位。通过对当当网手机售卖情况的分析，我们可以很好的查找出如今市面上哪款手机的销量更加好，\n",
    "#研究销量好的手机有什么鲜明的特点，可以供手机开发商进行研究分析，帮助手机开发商公司研究手机的新型用户定位。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#PRD2.MVP的数据加值\n",
    "#随着移动互联网渗透的加速及运营商5G推广的深入，智能5G手机在用户中所占的比例也越来越大。同时，具有GPS功能，\n",
    "#高像素拍照的手机用户关注度也将更加集中。\n",
    "#如今，手机产品已经进入“大屏时代”，大屏，触摸成为主流，2019年第一季度上市的新品种，大屏，触摸的产品数量剧增，用户关注比例也水涨船高。\n",
    "#因此，对于手机开发商来说，抓住用户喜好主流，研究主要的发展方向“高像素，多内存，大屏幕”为主要的发展主流以及当下的手机市场定位，\n",
    "#该数据将为手机研发公司创造无限的商业价值。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## 数据如何解决问题\n",
    "#参数设计：一共设计了2个参数。pg参数和srsort_sale_amt_desc参数\n",
    "#通过对链接分析发现pg参数可以令所挖页面进行翻页处理，通过循环遍历一共爬了17页数据。\n",
    "#一共有4类数据。分别是店铺名称，价钱，评论人数以及链接。\n",
    "#数据处理加值\n",
    "#通过对链接分析发现srsort_sale_amt_desc参数可以令数据关于销量进行从大到小进行排列，从而得到我所需要的数据。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 基本模块\n",
    "import pandas as pd\n",
    "from requests_html import HTMLSession"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "New Scrapy project 'autopjt', using template directory 'c:\\users\\administrator\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\scrapy\\templates\\project', created in:\n",
      "    C:\\TBB\\autopjt\n",
      "\n",
      "You can start your first spider with:\n",
      "    cd autopjt\n",
      "    scrapy genspider example example.com\n"
     ]
    }
   ],
   "source": [
    "#  scrapy startproject 项目\n",
    "! scrapy startproject autopjt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "文件夹 PATH 列表\n",
      "卷序列号为 629E-86BB\n",
      "C:.\n",
      "│  20鏄�_Web鏁版嵁鎸栨帢_week15_xinshidai_鍒涘缓鐖�铏�.ipynb\n",
      "│  AutospdSpider.py\n",
      "│  guangzhouta.jpg\n",
      "│  LiePinWang.py\n",
      "│  Twisted-20.3.0-cp38-cp38-win_amd64.whl\n",
      "│  Untitled.ipynb\n",
      "│  Untitled1.ipynb\n",
      "│  Untitled2.ipynb\n",
      "│  当当手机销量排行榜.xlsx\n",
      "│  \n",
      "├─.ipynb_checkpoints\n",
      "│      20鏄�_Web鏁版嵁鎸栨帢_week15_xinshidai_鍒涘缓鐖�铏�-checkpoint.ipynb\n",
      "│      Untitled-checkpoint.ipynb\n",
      "│      Untitled1-checkpoint.ipynb\n",
      "│      Untitled2-checkpoint.ipynb\n",
      "│      \n",
      "├─20鏄�_Web_Mining_week15\n",
      "│  │  20鏄�_Web鏁版嵁鎸栨帢_week15_xinshidai_鍒涘缓椤圭洰.ipynb\n",
      "│  │  \n",
      "│  ├─.ipynb_checkpoints\n",
      "│  │      20鏄�_Web鏁版嵁鎸栨帢_week15_xinshidai_鍒涘缓椤圭洰-checkpoint.ipynb\n",
      "│  │      \n",
      "│  ├─autopjt\n",
      "│  │  │  scrapy.cfg\n",
      "│  │  │  \n",
      "│  │  └─autopjt\n",
      "│  │      │  items.py\n",
      "│  │      │  middlewares.py\n",
      "│  │      │  pipelines.py\n",
      "│  │      │  settings.py\n",
      "│  │      │  __init__.py\n",
      "│  │      │  当当手机销量排行榜.xlsx\n",
      "│  │      │  \n",
      "│  │      ├─spiders\n",
      "│  │      │  │  AutospdSpider.py\n",
      "│  │      │  │  __init__.py\n",
      "│  │      │  │  \n",
      "│  │      │  └─__pycache__\n",
      "│  │      │          autopjt.cpython-38.pyc\n",
      "│  │      │          autospd.cpython-38.pyc\n",
      "│  │      │          AutospdSpider.cpython-38.pyc\n",
      "│  │      │          doubanmovie.cpython-38.pyc\n",
      "│  │      │          run.cpython-38.pyc\n",
      "│  │      │          spider.cpython-38.pyc\n",
      "│  │      │          __init__.cpython-38.pyc\n",
      "│  │      │          \n",
      "│  │      └─__pycache__\n",
      "│  │              items.cpython-38.pyc\n",
      "│  │              pipelines.cpython-38.pyc\n",
      "│  │              settings.cpython-38.pyc\n",
      "│  │              __init__.cpython-38.pyc\n",
      "│  │              \n",
      "│  ├─wxapp\n",
      "│  │  │  20鏄�_Web_Mining_week15_Scrapinghub.ipynb\n",
      "│  │  │  item.png\n",
      "│  │  │  scrapinghub.png\n",
      "│  │  │  scrapinghub.yml\n",
      "│  │  │  scrapy.cfg\n",
      "│  │  │  scrapy_cloud.png\n",
      "│  │  │  Scrapy妗嗘灦鍥_png\n",
      "│  │  │  setup.py\n",
      "│  │  │  start.py\n",
      "│  │  │  wxapp.json\n",
      "│  │  │  \n",
      "│  │  ├─.ipynb_checkpoints\n",
      "│  │  │      20鏄�_Web_Mining_week15_Scrapinghub-checkpoint.ipynb\n",
      "│  │  │      \n",
      "│  │  ├─build\n",
      "│  │  │  ├─bdist.win-amd64\n",
      "│  │  │  └─lib\n",
      "│  │  │      └─wxapp\n",
      "│  │  │          │  items.py\n",
      "│  │  │          │  middlewares.py\n",
      "│  │  │          │  pipelines.py\n",
      "│  │  │          │  settings.py\n",
      "│  │  │          │  __init__.py\n",
      "│  │  │          │  \n",
      "│  │  │          └─spiders\n",
      "│  │  │                  wxapp_spider.py\n",
      "│  │  │                  __init__.py\n",
      "│  │  │                  \n",
      "│  │  ├─project.egg-info\n",
      "│  │  │      dependency_links.txt\n",
      "│  │  │      entry_points.txt\n",
      "│  │  │      PKG-INFO\n",
      "│  │  │      SOURCES.txt\n",
      "│  │  │      top_level.txt\n",
      "│  │  │      \n",
      "│  │  └─wxapp\n",
      "│  │      │  items.py\n",
      "│  │      │  middlewares.py\n",
      "│  │      │  pipelines.py\n",
      "│  │      │  settings.py\n",
      "│  │      │  __init__.py\n",
      "│  │      │  \n",
      "│  │      ├─spiders\n",
      "│  │      │  │  wxapp_spider.py\n",
      "│  │      │  │  __init__.py\n",
      "│  │      │  │  \n",
      "│  │      │  └─__pycache__\n",
      "│  │      │          wxapp_spider.cpython-37.pyc\n",
      "│  │      │          wxapp_spider.cpython-38.pyc\n",
      "│  │      │          __init__.cpython-37.pyc\n",
      "│  │      │          __init__.cpython-38.pyc\n",
      "│  │      │          \n",
      "│  │      └─__pycache__\n",
      "│  │              items.cpython-37.pyc\n",
      "│  │              items.cpython-38.pyc\n",
      "│  │              pipelines.cpython-37.pyc\n",
      "│  │              pipelines.cpython-38.pyc\n",
      "│  │              settings.cpython-37.pyc\n",
      "│  │              settings.cpython-38.pyc\n",
      "│  │              __init__.cpython-37.pyc\n",
      "│  │              __init__.cpython-38.pyc\n",
      "│  │              \n",
      "│  └─xinshidai\n",
      "│      │  20鏄�_Web鏁版嵁鎸栨帢_week15_xinshidai_鍒涘缓鐖�铏�.ipynb\n",
      "│      │  article.json\n",
      "│      │  scrapy.cfg\n",
      "│      │  \n",
      "│      ├─.ipynb_checkpoints\n",
      "│      │      20鏄�_Web鏁版嵁鎸栨帢_week15_xinshidai_鍒涘缓鐖�铏�-checkpoint.ipynb\n",
      "│      │      \n",
      "│      └─xinshidai\n",
      "│          │  article.json\n",
      "│          │  items.py\n",
      "│          │  middlewares.py\n",
      "│          │  pipelines.py\n",
      "│          │  settings.py\n",
      "│          │  __init__.py\n",
      "│          │  \n",
      "│          ├─spiders\n",
      "│          │  │  TheoryPeopleSpider.py\n",
      "│          │  │  __init__.py\n",
      "│          │  │  \n",
      "│          │  └─__pycache__\n",
      "│          │          BingImageSpider.cpython-37.pyc\n",
      "│          │          TheoryPeopleSpider.cpython-37.pyc\n",
      "│          │          TheoryPeopleSpider.cpython-38.pyc\n",
      "│          │          __init__.cpython-37.pyc\n",
      "│          │          __init__.cpython-38.pyc\n",
      "│          │          \n",
      "│          └─__pycache__\n",
      "│                  items.cpython-37.pyc\n",
      "│                  items.cpython-38.pyc\n",
      "│                  pipelines.cpython-37.pyc\n",
      "│                  pipelines.cpython-38.pyc\n",
      "│                  settings.cpython-37.pyc\n",
      "│                  settings.cpython-38.pyc\n",
      "│                  __init__.cpython-37.pyc\n",
      "│                  __init__.cpython-38.pyc\n",
      "│                  \n",
      "├─autopjt\n",
      "│  │  scrapy.cfg\n",
      "│  │  \n",
      "│  └─autopjt\n",
      "│      │  items.py\n",
      "│      │  middlewares.py\n",
      "│      │  pipelines.py\n",
      "│      │  settings.py\n",
      "│      │  __init__.py\n",
      "│      │  \n",
      "│      ├─spiders\n",
      "│      │  │  __init__.py\n",
      "│      │  │  \n",
      "│      │  └─__pycache__\n",
      "│      └─__pycache__\n",
      "├─LiePinWang\n",
      "│  │  scrapy.cfg\n",
      "│  │  Untitled1.ipynb\n",
      "│  │  新能源就业研究黄嘉雯(2).ipynb\n",
      "│  │  \n",
      "│  ├─.ipynb_checkpoints\n",
      "│  │      新能源就业研究黄嘉雯(2)-checkpoint.ipynb\n",
      "│  │      \n",
      "│  ├─20春_Web_Mining_week14\n",
      "│  │  ├─.ipynb_checkpoints\n",
      "│  │  └─bing_img\n",
      "│  │      │  20200609各个模块+Scrapy图.rar\n",
      "│  │      │  20春_Web_Mining_week14_Scrapy.ipynb\n",
      "│  │      │  scrapy.cfg\n",
      "│  │      │  Scrapy框架图.png\n",
      "│  │      │  各个模块的协作流程.png\n",
      "│  │      │  \n",
      "│  │      ├─.ipynb_checkpoints\n",
      "│  │      │      20春_Web_Mining_week13_Scrapy-checkpoint.ipynb\n",
      "│  │      │      20春_Web_Mining_week14_Scrapy-checkpoint.ipynb\n",
      "│  │      │      20鏄�_Web鏁版嵁鎸栨帢_week13_Scrapy-checkpoint.ipynb\n",
      "│  │      │      \n",
      "│  │      ├─20200609各个模块+Scrapy图\n",
      "│  │      │  ├─Scrapy框架图\n",
      "│  │      │  │      Scrapy框架图.jpg\n",
      "│  │      │  │      Scrapy框架图.png\n",
      "│  │      │  │      Scrapy框架图.pos\n",
      "│  │      │  │      \n",
      "│  │      │  └─各个模块的协作流程\n",
      "│  │      │          各个模块的协作流程.jpg\n",
      "│  │      │          各个模块的协作流程.png\n",
      "│  │      │          各个模块的协作流程.pos\n",
      "│  │      │          \n",
      "│  │      ├─bing_img\n",
      "│  │      │  │  items.py\n",
      "│  │      │  │  middlewares.py\n",
      "│  │      │  │  pipelines.py\n",
      "│  │      │  │  settings.py\n",
      "│  │      │  │  __init__.py\n",
      "│  │      │  │  \n",
      "│  │      │  ├─spiders\n",
      "│  │      │  │  │  BingImageSpider.py\n",
      "│  │      │  │  │  __init__.py\n",
      "│  │      │  │  │  \n",
      "│  │      │  │  ├─.ipynb_checkpoints\n",
      "│  │      │  │  └─__pycache__\n",
      "│  │      │  │          BingImageSpider.cpython-37.pyc\n",
      "│  │      │  │          BingWallpaper.cpython-37.pyc\n",
      "│  │      │  │          __init__.cpython-37.pyc\n",
      "│  │      │  │          \n",
      "│  │      │  └─__pycache__\n",
      "│  │      │          items.cpython-37.pyc\n",
      "│  │      │          pipelines.cpython-37.pyc\n",
      "│  │      │          settings.cpython-37.pyc\n",
      "│  │      │          __init__.cpython-37.pyc\n",
      "│  │      │          \n",
      "│  │      └─dld\n",
      "│  │          └─full\n",
      "│  │                  1b88779e44fefb86da815efe37446176b0057924.jpg\n",
      "│  │                  39969f3a591aa446b2ef1c6ef54ef19a2467fd77.jpg\n",
      "│  │                  6cf13d61b0559d36e65e2afed16b4854f257def7.jpg\n",
      "│  │                  71c7bc7fa15751f9e12100b49c6f85961cd716fe.jpg\n",
      "│  │                  b2904b0a506d1e89ea0b6bf96b6bc691582429f9.jpg\n",
      "│  │                  d5e03c58ad847a2e4e0fc76450be575543694eaa.jpg\n",
      "│  │                  e7975208c36829369b436bde04fbd18a4b570653.jpg\n",
      "│  │                  f25601f490973c4e728a81ff83c4e6464de1580d.jpg\n",
      "│  │                  \n",
      "│  └─LiePinWang\n",
      "│      │  items.py\n",
      "│      │  middlewares.py\n",
      "│      │  pipelines.py\n",
      "│      │  settings.py\n",
      "│      │  __init__.py\n",
      "│      │  \n",
      "│      ├─spiders\n",
      "│      │  │  Liepin.py\n",
      "│      │  │  __init__.py\n",
      "│      │  │  \n",
      "│      │  └─__pycache__\n",
      "│      │          Liepin.cpython-38.pyc\n",
      "│      │          __init__.cpython-38.pyc\n",
      "│      │          \n",
      "│      └─__pycache__\n",
      "│              items.cpython-38.pyc\n",
      "│              pipelines.cpython-38.pyc\n",
      "│              settings.cpython-38.pyc\n",
      "│              __init__.cpython-38.pyc\n",
      "│              \n",
      "└─__pycache__\n",
      "        LiePinWang.cpython-38.pyc\n",
      "        \n"
     ]
    }
   ],
   "source": [
    "#   tree/f 查看项目目录结构\n",
    "! tree/f"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 驱动器 C 中的卷没有标签。\n",
      " 卷的序列号是 629E-86BB\n",
      "\n",
      " C:\\TBB 的目录\n",
      "\n",
      "2020/07/19  16:36    <DIR>          .\n",
      "2020/07/19  16:36    <DIR>          ..\n",
      "2020/07/19  16:05    <DIR>          .ipynb_checkpoints\n",
      "2020/07/19  16:36    <DIR>          20鏄�_Web_Mining_week15\n",
      "2020/07/08  22:11           596,860 20鏄�_Web鏁版嵁鎸栨帢_week15_xinshidai_鍒涘缓鐖�铏�.ipynb\n",
      "2020/07/19  16:36    <DIR>          autopjt\n",
      "2020/07/19  16:27               211 AutospdSpider.py\n",
      "2020/07/15  15:53            68,113 guangzhouta.jpg\n",
      "2020/07/19  16:02    <DIR>          LiePinWang\n",
      "2020/07/07  12:20               261 LiePinWang.py\n",
      "2020/07/06  22:55         3,098,601 Twisted-20.3.0-cp38-cp38-win_amd64.whl\n",
      "2020/07/08  21:59            14,598 Untitled.ipynb\n",
      "2020/07/17  23:04            21,464 Untitled1.ipynb\n",
      "2020/07/19  16:35            15,276 Untitled2.ipynb\n",
      "2020/07/19  15:34    <DIR>          __pycache__\n",
      "2020/07/10  17:10            60,192 当当手机销量排行榜.xlsx\n",
      "               9 个文件      3,875,576 字节\n",
      "               7 个目录 61,406,519,296 可用字节\n"
     ]
    }
   ],
   "source": [
    "#   dir 查看当前目录的文件列表\n",
    "! dir"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Created spider 'AutospdSpider' using template 'basic' \n"
     ]
    }
   ],
   "source": [
    "! scrapy genspider AutospdSpider \"dangdang.com\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Move AutospdSpider.py into autopjt/autopjt/spiders, then enter the project.\n",
    "# BUG FIX: \"!cd\" runs in a throw-away subshell and never changes the\n",
    "# notebook kernel's working directory (which is why `scrapy crawl` later\n",
    "# reported \"no active project\"); the %cd line magic changes it for the\n",
    "# kernel itself, so subsequent shell commands run inside the project.\n",
    "%cd autopjt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Scrapy 2.2.0 - no active project\n",
      "\n",
      "Usage:\n",
      "  scrapy <command> [options] [args]\n",
      "\n",
      "Available commands:\n",
      "  bench         Run quick benchmark test\n",
      "  commands      \n",
      "  fetch         Fetch a URL using the Scrapy downloader\n",
      "  genspider     Generate new spider using pre-defined templates\n",
      "  runspider     Run a self-contained spider (without creating a project)\n",
      "  settings      Get settings values\n",
      "  shell         Interactive scraping console\n",
      "  startproject  Create new project\n",
      "  version       Print Scrapy version\n",
      "  view          Open URL in browser, as seen by Scrapy\n",
      "\n",
      "  [ more ]      More commands available when run from project directory\n",
      "\n",
      "Use \"scrapy <command> -h\" to see more info about a command\n"
     ]
    }
   ],
   "source": [
    "! scrapy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "! cd autopjt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Scrapy 2.2.0 - no active project\n",
      "\n",
      "Unknown command: crawl\n",
      "\n",
      "Use \"scrapy\" to see available commands\n"
     ]
    }
   ],
   "source": [
    "# E1 scrapy crawl 爬虫\n",
    "! scrapy crawl autospd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# E2 Define items.py — container for one page of scraped product data.\n",
    "import scrapy\n",
    "\n",
    "\n",
    "class AutopjtItem(scrapy.Item):\n",
    "    # name: list of product titles extracted from the page\n",
    "    name = scrapy.Field()\n",
    "    # price: list of product prices\n",
    "    price = scrapy.Field()\n",
    "    # comnum: list of review counts\n",
    "    comnum = scrapy.Field()\n",
    "    # link: list of product URLs\n",
    "    link = scrapy.Field()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# E3 Define pipelines.py — write each scraped page into an xlsx workbook.\n",
    "from openpyxl import Workbook\n",
    "\n",
    "\n",
    "class AutopjtPipeline(object):\n",
    "    def __init__(self):\n",
    "        # Create the workbook and write the header row once.\n",
    "        self.wb = Workbook()\n",
    "        self.ws = self.wb.active\n",
    "        self.ws.append(['店铺名称','价钱','评论人数','链接',])\n",
    "\n",
    "    def process_item(self, item, spider):\n",
    "        # Each item carries the parallel lists for one result page;\n",
    "        # len(item[\"name\"]) is the number of products on that page.\n",
    "        for j in range(0, len(item[\"name\"])):\n",
    "            name = item[\"name\"][j]\n",
    "            price = item[\"price\"][j]\n",
    "            comnum = item[\"comnum\"][j]\n",
    "            link = item[\"link\"][j]\n",
    "            # One spreadsheet row per product.\n",
    "            self.ws.append([name, price, comnum, link])\n",
    "        # Save once per page instead of once per row — same output file,\n",
    "        # far fewer full-workbook writes.\n",
    "        self.wb.save('当当手机销量排行榜.xlsx')\n",
    "        return item\n",
    "\n",
    "    def close_spider(self, spider):\n",
    "        # BUG FIX: the original called self.file.close(), but no self.file\n",
    "        # attribute was ever created, so spider shutdown raised\n",
    "        # AttributeError. Persist the workbook one final time instead.\n",
    "        self.wb.save('当当手机销量排行榜.xlsx')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# E4 settings.py — register AutopjtPipeline for the autopjt project.\n",
    "BOT_NAME = 'autopjt'\n",
    "\n",
    "SPIDER_MODULES = ['autopjt.spiders']\n",
    "NEWSPIDER_MODULE = 'autopjt.spiders'\n",
    "\n",
    "# Obey robots.txt rules\n",
    "ROBOTSTXT_OBEY = True\n",
    "\n",
    "# Disable cookies (enabled by default)\n",
    "COOKIES_ENABLED = False\n",
    "\n",
    "# Configure item pipelines\n",
    "# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n",
    "ITEM_PIPELINES = {\n",
    "    # 300 is the pipeline's order value (lower values run earlier, 0-1000).\n",
    "    'autopjt.pipelines.AutopjtPipeline': 300,\n",
    "}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# E5 Define the spider — crawls dangdang phone listings sorted by sales.\n",
    "import scrapy\n",
    "from autopjt.items import AutopjtItem\n",
    "from scrapy.http import Request\n",
    "\n",
    "\n",
    "class AutospdSpider(scrapy.Spider):\n",
    "    name = \"autospd\"\n",
    "    allowed_domains = [\"dangdang.com\"]\n",
    "    start_urls = (\n",
    "        'http://category.dangdang.com/pg1-cid4004279-srsort_sale_amt_desc.html',\n",
    "    )\n",
    "\n",
    "    def parse(self, response):\n",
    "        item = AutopjtItem()\n",
    "        # XPath expressions pull the parallel lists of names, prices,\n",
    "        # links and review counts for every product on the page.\n",
    "        item[\"name\"] = response.xpath(\"//a[@class='pic']/@title\").extract()\n",
    "        item[\"price\"] = response.xpath(\"//span[@class='price_n']/text()\").extract()\n",
    "        item[\"link\"] = response.xpath(\"//a[@class='pic']/@href\").extract()\n",
    "        item[\"comnum\"] = response.xpath(\"//a[@name='itemlist-review']/text()\").extract()\n",
    "        yield item\n",
    "        # BUG FIX: range(2, 17) stopped at page 16, so only 16 of the\n",
    "        # intended 17 pages were crawled; range(2, 18) covers pages 2-17\n",
    "        # (page 1 is already handled via start_urls).\n",
    "        for i in range(2, 18):\n",
    "            # Build the page URL following the pattern noted above.\n",
    "            url = \"http://category.dangdang.com/pg\" + str(i) + \"-cid4004279-srsort_sale_amt_desc.html\"\n",
    "            # Scheduling the request with parse as its callback keeps the\n",
    "            # crawl going automatically.\n",
    "            yield Request(url, callback=self.parse)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# E6 scrapy crawl 爬虫\n",
    "! scrapy crawl autospd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>店铺名称</th>\n",
       "      <th>价钱</th>\n",
       "      <th>评论人数</th>\n",
       "      <th>链接</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>【当当自营】Apple 苹果 iPhone 11 苹果2019年新品 全网通手机【可用当当礼卡】</td>\n",
       "      <td>¥4548</td>\n",
       "      <td>413条评论</td>\n",
       "      <td>http://product.dangdang.com/1509704171.html</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>全店支持当当卡【当当自营】Huawei/华为 Mate30 (5G)麒麟990超感光徕卡三...</td>\n",
       "      <td>¥3899</td>\n",
       "      <td>531条评论</td>\n",
       "      <td>http://product.dangdang.com/1557914810.html</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>全店支持当当卡【当当自营】Huawei/华为 Mate30 Pro (5G)麒麟990徕卡...</td>\n",
       "      <td>¥5099</td>\n",
       "      <td>446条评论</td>\n",
       "      <td>http://product.dangdang.com/1557886030.html</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>全店支持当当卡【当当自营】Huawei/华为nova 5 Pro超级夜景4800万AI四摄...</td>\n",
       "      <td>¥2199</td>\n",
       "      <td>351条评论</td>\n",
       "      <td>http://product.dangdang.com/1507241300.html</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>【当当自营】Apple 苹果 iPhone 11 Pro Max 苹果2019年新品 全网...</td>\n",
       "      <td>¥7488</td>\n",
       "      <td>162条评论</td>\n",
       "      <td>http://product.dangdang.com/1509704441.html</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>763</th>\n",
       "      <td>【当当自营】华为 畅享10 Plus 全网通8GB+128GB 天空之境 移动联通电信4G...</td>\n",
       "      <td>¥2099</td>\n",
       "      <td>0条评论</td>\n",
       "      <td>http://product.dangdang.com/1574623914.html</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>764</th>\n",
       "      <td>【当当自营】华为P30 天空之境 6GB+128GB 徕卡三摄 未来影像 移动联通电信4G...</td>\n",
       "      <td>¥3998</td>\n",
       "      <td>0条评论</td>\n",
       "      <td>http://product.dangdang.com/1510433395.html</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>765</th>\n",
       "      <td>【当当自营】Apple iPhone 11 (A2223) 128GB 绿色 移动联通电信...</td>\n",
       "      <td>¥5399</td>\n",
       "      <td>11条评论</td>\n",
       "      <td>http://product.dangdang.com/61887264.html</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>766</th>\n",
       "      <td>小米 Redmi 红米K30 Pro 5G 手机</td>\n",
       "      <td>¥2456</td>\n",
       "      <td>25条评论</td>\n",
       "      <td>http://product.dangdang.com/1611905955.html</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>767</th>\n",
       "      <td>华为（HUAWEI） mate20pro手机</td>\n",
       "      <td>¥3088</td>\n",
       "      <td>0条评论</td>\n",
       "      <td>http://product.dangdang.com/1611893915.html</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>768 rows × 4 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "                                                  店铺名称     价钱    评论人数  \\\n",
       "0     【当当自营】Apple 苹果 iPhone 11 苹果2019年新品 全网通手机【可用当当礼卡】  ¥4548  413条评论   \n",
       "1     全店支持当当卡【当当自营】Huawei/华为 Mate30 (5G)麒麟990超感光徕卡三...  ¥3899  531条评论   \n",
       "2     全店支持当当卡【当当自营】Huawei/华为 Mate30 Pro (5G)麒麟990徕卡...  ¥5099  446条评论   \n",
       "3     全店支持当当卡【当当自营】Huawei/华为nova 5 Pro超级夜景4800万AI四摄...  ¥2199  351条评论   \n",
       "4     【当当自营】Apple 苹果 iPhone 11 Pro Max 苹果2019年新品 全网...  ¥7488  162条评论   \n",
       "..                                                 ...    ...     ...   \n",
       "763   【当当自营】华为 畅享10 Plus 全网通8GB+128GB 天空之境 移动联通电信4G...  ¥2099    0条评论   \n",
       "764   【当当自营】华为P30 天空之境 6GB+128GB 徕卡三摄 未来影像 移动联通电信4G...  ¥3998    0条评论   \n",
       "765   【当当自营】Apple iPhone 11 (A2223) 128GB 绿色 移动联通电信...  ¥5399   11条评论   \n",
       "766                           小米 Redmi 红米K30 Pro 5G 手机  ¥2456   25条评论   \n",
       "767                             华为（HUAWEI） mate20pro手机  ¥3088    0条评论   \n",
       "\n",
       "                                              链接  \n",
       "0    http://product.dangdang.com/1509704171.html  \n",
       "1    http://product.dangdang.com/1557914810.html  \n",
       "2    http://product.dangdang.com/1557886030.html  \n",
       "3    http://product.dangdang.com/1507241300.html  \n",
       "4    http://product.dangdang.com/1509704441.html  \n",
       "..                                           ...  \n",
       "763  http://product.dangdang.com/1574623914.html  \n",
       "764  http://product.dangdang.com/1510433395.html  \n",
       "765    http://product.dangdang.com/61887264.html  \n",
       "766  http://product.dangdang.com/1611905955.html  \n",
       "767  http://product.dangdang.com/1611893915.html  \n",
       "\n",
       "[768 rows x 4 columns]"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df = pd.read_excel('当当手机销量排行榜.xlsx')\n",
    "df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
