{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "###协程\n",
    "#同步[速度慢]：爬虫每发起一个请求，都要等服务器返回响应后，才会执行下一步。\n",
    "#多协程：一个任务在执行过程中，如果遇到等待，就先去执行其他的任务，当等待结束，再回来继续之前的那个任务。\n",
    "# 注：用三个#表示该章节的每个分节"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 同步运行\n",
    "import time,requests\n",
    "\n",
    "start = time.time()\n",
    "#记录程序开始时间。\n",
    "\n",
    "url_list = ['https://www.baidu.com/',\n",
    "'https://www.sina.com.cn/',\n",
    "'http://www.sohu.com/',\n",
    "'https://www.qq.com/',\n",
    "'https://www.163.com/',\n",
    "'http://www.iqiyi.com/',\n",
    "'https://www.tmall.com/',\n",
    "'http://www.ifeng.com/']\n",
    "#把8个网站封装成列表。\n",
    "\n",
    "def crawler(url):\n",
    "    r = requests.get(url)\n",
    "    #用requests.get()函数爬取网站。\n",
    "    print(url,time.time()-start,r.status_code)\n",
    "    #打印网址、请求运行时间、状态码。\n",
    "\n",
    "for url in url_list:\n",
    "    crawler(url)\n",
    "    \n",
    "end = time.time()\n",
    "#记录程序结束时间。\n",
    "print(end-start)\n",
    "#打印程序最终所需时间。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "### gevent模块: 多协程运行\n",
    "from gevent import monkey  #monkey补丁可将程序转换成可异步的程序\n",
    "monkey.patch_all()\n",
    "#规范写法：monkey.patch_all()能把程序变成协作式运行，就是可以帮助程序实现异步。\n",
    "import gevent,time,requests\n",
    "#导入gevent、time、requests。\n",
    "\n",
    "start = time.time()\n",
    "#记录程序开始时间。\n",
    "\n",
    "url_list = ['https://www.baidu.com/',\n",
    "'https://www.sina.com.cn/',\n",
    "'http://www.sohu.com/',\n",
    "'https://www.qq.com/',\n",
    "'https://www.163.com/',\n",
    "'http://www.iqiyi.com/',\n",
    "'https://www.tmall.com/',\n",
    "'http://www.ifeng.com/']\n",
    "#把8个网站封装成列表。\n",
    "\n",
    "def crawler(url):\n",
    "# 定义单个任务的函数\n",
    "    r = requests.get(url)\n",
    "    #用requests.get()函数爬取网站。\n",
    "    print(url,time.time()-start,r.status_code)\n",
    "    #打印网址、请求运行时间、状态码。\n",
    "\n",
    "tasks_list = [ ]\n",
    "#创建空的任务列表。gevent只能处理gevent的任务对象\n",
    "\n",
    "for url in url_list:\n",
    "    task = gevent.spawn(crawler,url)\n",
    "    #创建一个任务\n",
    "    #★★★用gevent.spawn()将函数转化为任务对象，gevent只能处理任务对象。\n",
    "    #gevent.spawn()的参数需为(要调用的函数名,该函数的参数)\n",
    "    tasks_list.append(task)\n",
    "    #往任务列表添加任务。\n",
    "    \n",
    "gevent.joinall(tasks_list)\n",
    "#★★★开始执行任务列表里的所有异步任务。\n",
    "\n",
    "end = time.time()\n",
    "#记录程序结束时间。\n",
    "print(end-start)\n",
    "#打印程序最终所需时间。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 当url_list有1000个网址时：将任务分组，避免服务器崩溃\n",
    "from gevent import monkey \n",
    "monkey.patch_all()\n",
    "import gevent,time,requests\n",
    "\n",
    "start = time.time()\n",
    "\n",
    "url_list = ['https://www.baidu.com/',\n",
    "'https://www.sina.com.cn/',\n",
    "'http://www.sohu.com/',\n",
    "'https://www.qq.com/',\n",
    "'https://www.163.com/',\n",
    "'http://www.iqiyi.com/',\n",
    "'https://www.tmall.com/',\n",
    "'http://www.ifeng.com/']\n",
    "\n",
    "def crawler(url_list):\n",
    "    for url in url_list:\n",
    "        r = requests.get(url)\n",
    "        print(url,time.time()-start,r.status_code)\n",
    "\n",
    "tasks_list = [ ]\n",
    "\n",
    "for i in range(2):\n",
    "    task = gevent.spawn(crawler,url_list[i*4:(i+1)*4])\n",
    "# 分析：2个task之间是异步执行的，但是每个task（爬取4个网站）内部是同步的\n",
    "# 【问题提出】单个task里面仍然存在同步响应等待，短板效应\n",
    "    tasks_list.append(task)\n",
    "\n",
    "# print(tasks_list)\n",
    "gevent.joinall(tasks_list)\n",
    "\n",
    "end = time.time()\n",
    "print(end-start)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### queue模块：由叫号系统分配客户到不同的窗口去办理业务\n",
    "# 核心：队列创建>>数据存储进队列>>从队列里提取出数据\n",
    "from gevent import monkey\n",
    "monkey.patch_all()\n",
    "import gevent,time,requests\n",
    "from gevent.queue import Queue\n",
    "#从gevent库里导入queue模块\n",
    "\n",
    "start = time.time()\n",
    "\n",
    "url_list = ['https://www.baidu.com/',\n",
    "'https://www.sina.com.cn/',\n",
    "'http://www.sohu.com/',\n",
    "'https://www.qq.com/',\n",
    "'https://www.163.com/',\n",
    "'http://www.iqiyi.com/',\n",
    "'https://www.tmall.com/',\n",
    "'http://www.ifeng.com/']\n",
    "\n",
    "work = Queue()\n",
    "#创建队列对象，并赋值给work，\n",
    "#如果Queue(10)，则表示这个队列只能存储10个任务。\n",
    "for url in url_list:\n",
    "    work.put_nowait(url)\n",
    "    #用put_nowait()函数可以把网址都放进队列里。\n",
    "    \n",
    "def crawler():\n",
    "    while not work.empty():\n",
    "    #当队列不是空的时候，就执行下面的程序。\n",
    "        Url = work.get_nowait()\n",
    "        #用get_nowait()函数可以把队列里的网址都取出。\n",
    "        r = requests.get(Url)\n",
    "        print(Url,work.qsize(),r.status_code)\n",
    "        #打印网址、队列长度、抓取请求的状态码。\n",
    "        \n",
    "tasks_list  = [ ]\n",
    "for x in range(2):\n",
    "#★★★创建了两个爬虫\n",
    "    task = gevent.spawn(crawler)\n",
    "    # 创建一个爬虫任务。函数crawler无传入参数\n",
    "    tasks_list.append(task)\n",
    "    \n",
    "gevent.joinall(tasks_list)\n",
    "\n",
    "end = time.time()\n",
    "print(end-start)\n",
    "\n",
    "#说明：我们创建了两只可以异步爬取的爬虫。\n",
    "#它们会从队列里取走网址，执行爬取任务。\n",
    "#一旦一个网址被一只爬虫取走，另一只爬虫就取不到了，另一只爬虫就会取走下一个网址。\n",
    "#直至所有网址都被取走，队列为空时，爬虫就停止工作。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 拓展：多协程，其实只占用了CPU的一个核运行，没有充分利用到其他核。\n",
    "# 利用CPU的多个核同时执行任务的技术，我们把它叫做“多进程”。\n",
    "# 更大型的爬虫程序需要 分布式爬虫:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 11关作业：煲剧达人\n",
    "# 不使用多线程，花费46秒\n",
    "import requests\n",
    "from gevent import monkey\n",
    "monkey.patch_all()\n",
    "import gevent,time,requests\n",
    "from gevent.queue import Queue\n",
    "import csv\n",
    "\n",
    "url = 'http://front-gateway.mtime.com/library/index/app/topList.api?tt=1618382950538&'\n",
    "r = requests.get(url)\n",
    "tv_json = r.json()\n",
    "tv_lists = tv_json['data']['tvTopList']['topListInfos'][0]['items']\n",
    "tv_num_lists = []\n",
    "for tv_item in tv_lists:   # 避免使用内置名 list 作为变量名\n",
    "    tv_num_lists.append(tv_item['itemId'])\n",
    "\n",
    "start = time.time()\n",
    "\n",
    "Url = 'http://front-gateway.mtime.com/library/movie/detail.api?'\n",
    "for num in tv_num_lists:\n",
    "    params = {\n",
    "        #     'tt':'1618386950010',\n",
    "            'movieId': num,\n",
    "            'locationId': '290'\n",
    "        }\n",
    "    r2 = requests.get(Url,params=params)\n",
    "    print(r2.status_code)\n",
    "    \n",
    "    basic_info = r2.json()['data']['basic']\n",
    "    name = basic_info['name']\n",
    "    try:\n",
    "        director = basic_info['director']['name']\n",
    "    except:\n",
    "        director = '无'\n",
    "    actors = basic_info['actors']\n",
    "    actor = []\n",
    "    for a in actors:\n",
    "        act_name = a['name']\n",
    "        actor.append(act_name)\n",
    "    actor = ','.join(actor[:3])    #取前几个元素并用逗号分隔\n",
    "    story = basic_info['story']\n",
    "\n",
    "    with open (r'F:\\NOW\\工作\\web crawler\\00-风变python\\2-风变python课程资料【基础语法+爬虫精进】\\Python爬虫精进\\11~15关：第4个小目标：拨云见日\\test16.csv','a',newline = '',encoding='utf-8') as f:\n",
    "        f_csv = csv.writer(f) \n",
    "        f_csv.writerow([name,director,actor,story])\n",
    "\n",
    "end = time.time()\n",
    "print(end-start)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 11关作业：煲剧达人\n",
    "# 使用多协程和队列，爬取时光网电视剧TOP100的数据（剧名、导演、主演和简介），并用csv模块将数据存储下来\n",
    "# 花费13秒，问题在于：Queue提取数据不按顺序，收集到的剧情不按原来的顺序排列\n",
    "import requests\n",
    "from gevent import monkey\n",
    "monkey.patch_all()\n",
    "import gevent,time,requests\n",
    "from gevent.queue import Queue\n",
    "import csv\n",
    "\n",
    "url = 'http://front-gateway.mtime.com/library/index/app/topList.api?tt=1618382950538&'\n",
    "r = requests.get(url)\n",
    "tv_json = r.json()\n",
    "tv_lists = tv_json['data']['tvTopList']['topListInfos'][0]['items']\n",
    "tv_num_lists = []\n",
    "for tv_item in tv_lists:   # 避免使用内置名 list 作为变量名\n",
    "    tv_num_lists.append(tv_item['itemId'])\n",
    "\n",
    "start = time.time()\n",
    "\n",
    "work = Queue()\n",
    "for num in tv_num_lists:\n",
    "    work.put_nowait(num)\n",
    "    \n",
    "def crawler():\n",
    "    while not work.empty():\n",
    "        Url = 'http://front-gateway.mtime.com/library/movie/detail.api?'\n",
    "        Num = work.get_nowait()\n",
    "        params = {\n",
    "        #     'tt':'1618386950010',\n",
    "            'movieId': Num,\n",
    "            'locationId': '290'\n",
    "        }\n",
    "        r2 = requests.get(Url,params=params)\n",
    "        print(r2.status_code)\n",
    "        basic_info = r2.json()['data']['basic']\n",
    "        name = basic_info['name']\n",
    "        try:\n",
    "            director = basic_info['director']['name']\n",
    "        except:\n",
    "            director = '无'\n",
    "        actors = basic_info['actors']\n",
    "        actor = []\n",
    "        for a in actors:\n",
    "            act_name = a['name']\n",
    "            actor.append(act_name)\n",
    "        actor = ','.join(actor[:3])    #取前几个元素并用逗号分隔\n",
    "        story = basic_info['story']\n",
    "\n",
    "        with open (r'F:\\NOW\\工作\\web crawler\\00-风变python\\2-风变python课程资料【基础语法+爬虫精进】\\Python爬虫精进\\11~15关：第4个小目标：拨云见日\\test16.csv','a',newline = '',encoding='gbk') as f:\n",
    "            f_csv = csv.writer(f) \n",
    "            f_csv.writerow([name,director,actor,story])\n",
    "            \n",
    "tasks_list  = [ ]\n",
    "for x in range(3):\n",
    "    task = gevent.spawn(crawler)\n",
    "    tasks_list.append(task)\n",
    "    \n",
    "gevent.joinall(tasks_list)\n",
    "\n",
    "end = time.time()\n",
    "print(end-start)\n",
    "        \n"
   ]
  },
  {
   "source": [
    "# 爬取单个影片的程序\n",
    "import requests\n",
    "import csv\n",
    "\n",
    "Url = 'http://front-gateway.mtime.com/library/movie/detail.api?'\n",
    "\n",
    "params = {\n",
    "#     'tt':'1618386950010',\n",
    "    'movieId': '269369',\n",
    "    'locationId': '290'\n",
    "}\n",
    "r2 = requests.get(Url,params=params)\n",
    "print(r2.status_code)\n",
    "basic_info = r2.json()['data']['basic']\n",
    "name = basic_info['name']\n",
    "director = basic_info['director']['name']\n",
    "actors = basic_info['actors']\n",
    "actor = []\n",
    "for a in actors:\n",
    "    act_name = a['name']\n",
    "    actor.append(act_name)\n",
    "actor = ','.join(actor[:3])    #取前几个元素并用逗号分隔\n",
    "story = basic_info['story']\n",
    "print(name,director,actor,story)\n",
    "\n",
    "with open (r'F:\\NOW\\工作\\web crawler\\00-风变python\\2-风变python课程资料【基础语法+爬虫精进】\\Python爬虫精进\\11~15关：第4个小目标：拨云见日\\test16.csv','w',newline = '') as f:\n",
    "    f_csv = csv.writer(f) \n",
    "    f_csv.writerow(['片名','导演','演员','简介'])\n",
    "    f_csv.writerow([name,director,actor,story])"
   ],
   "cell_type": "code",
   "metadata": {},
   "execution_count": 2,
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "200\n亚森·罗宾 路易斯·莱特里尔 奥马·希,, None\n"
     ]
    }
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gevent,requests, bs4, csv\n",
    "from gevent.queue import Queue\n",
    "from gevent import monkey\n",
    "monkey.patch_all()\n",
    "#让程序变成异步模式。\n",
    "work = Queue()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "酸奶 热量：70 大卡(每100毫升)\n",
      "牛奶，又叫纯牛奶、牛乳、全脂牛奶 热量：65 大卡(每100毫升)\n",
      "无糖全脂拿铁，又叫拿铁咖啡、拿铁（全脂，无糖） 热量：44 大卡(每100毫升)\n",
      "奶酪，又叫乳酪、芝士、起司、计司 热量：328 大卡(每100克)\n",
      "酸奶(中脂) 热量：64 大卡(每100毫升)\n",
      "脱脂奶粉 热量：361 大卡(每100克)\n",
      "酸奶(调味) 热量：88 大卡(每100毫升)\n",
      "酸奶(果料)，又叫果料酸奶 热量：67 大卡(每100毫升)\n",
      "酸奶(果粒)，又叫果粒酸奶 热量：98 大卡(每100毫升)\n",
      "蒙牛 高钙牛奶，又叫蒙牛袋装高钙牛奶 热量：59 大卡(每100毫升)\n",
      "Easy Fun 高蛋白小酥鱼(藤椒味) 热量：351 大卡(每100克)\n",
      "鸡蛋，又叫鸡子、鸡卵、蛋 热量：139 大卡(每100克)\n",
      "Easy Fun 低脂鸡胸肉肠(香辣味)，又叫Easy Fun easy fun 低脂鸡胸肉肠、鸡胸肉肠 热量：136 大卡(每100克)\n",
      "Easy Fun 鸡胸肉丝(原味) 热量：418 大卡(每100克)\n",
      "Easy Fun 高蛋白小酥鱼(海苔味)，又叫Easy Fun 高蛋白海苔鱼酥 热量：305 大卡(每100克)\n",
      "Easy Fun 低脂鸡胸肉肠(原味)，又叫Easy Fun 低脂鸡胸肉肠、鸡胸肉肠、easyfun 低脂鸡胸肉肠 热量：149 大卡(每100克)\n",
      "猪小排，又叫排骨、猪排、猪脊骨 热量：295 大卡(每100克)\n",
      "鸡(土鸡，家养) 热量：124 大卡(每100克)\n",
      "鸡(母鸡，一年内) 热量：256 大卡(每100克)\n",
      "鸡(肉鸡，肥) 热量：389 大卡(每100克)\n",
      "Easy Fun 营养粉丝(香菇炖鸡)，又叫Easy Fun 营养粉丝（香菇炖鸡味） 热量：357 大卡(每100克)\n",
      "白粥，又叫白粥（粳米），稀饭，大米粥，白米粥，米粥，大米汤汤 热量：46 大卡(每100克)\n",
      "Easy Fun 营养粉丝(番茄鸡蛋)，又叫Easy Fun 营养粉丝（番茄鸡蛋味） 热量：348 大卡(每100克)\n",
      "Easy Fun 低脂咖喱鸡饭 热量：179 大卡(每100克)\n",
      "Easy Fun 抹茶红豆麦片 热量：381 大卡(每100克)\n",
      "Easy Fun 高蛋白微波蛋糕预拌粉(香浓可可味) 热量：389 大卡(每100克)\n",
      "Easy Fun 红枣黑米圈，又叫红枣黑米、Easy Fun 薄荷健康红枣黑米圈 热量：392 大卡(每100克)\n",
      "Easy Fun 山药紫薯圈 热量：391 大卡(每100克)\n",
      "稀饭，又叫白粥（籼米），大米粥，白米粥 热量：59 大卡(每100克)\n",
      "鲜玉米，又叫玉米（鲜）、苞谷、珍珠米、棒子、玉蜀黍、苞米、六谷、 热量：112 大卡(每100克)\n",
      "炒上海青，又叫炒青菜 热量：47 大卡(每100克)\n",
      "番茄炒蛋，又叫番茄炒鸡蛋、西红柿炒蛋、柿子炒鸡蛋、番茄炒鸡蛋、西红柿炒鸡蛋、西虹市炒鸡蛋、番茄炒蛋 热量：71 大卡(每100克)\n",
      "鸡蛋羹，又叫蒸蛋 热量：58 大卡(每100克)\n",
      "绿豆汤 热量：37 大卡(每100毫升)\n",
      "素炒小白菜，又叫小青菜 热量：38 大卡(每100克)\n",
      "烧茄子 热量：59 大卡(每100克)\n",
      "绿豆粥，又叫绿豆稀饭 热量：56 大卡(每100克)\n",
      "菜包子，又叫香菇菜包、菜包子、素包子、素包、香菇青菜包、素菜包、香菇青菜包、香菇包子 热量：140 大卡(每100克)\n",
      "蛋炒饭，又叫黄金炒饭、蛋炒饭 热量：143 大卡(每100克)\n",
      "红烧鳓鱼 热量：194 大卡(每100克)\n",
      "虾，又叫对虾、鲜虾仁、虾仁 热量：93 大卡(每100克)\n",
      "鸭肉，又叫鸭子、鹜肉、家凫肉 热量：240 大卡(每100克)\n",
      "猪蹄，又叫猪脚、猪手、猪蹄爪 热量：260 大卡(每100克)\n",
      "猪肉(瘦)，又叫猪精肉，瘦肉 热量：143 大卡(每100克)\n",
      "鸡蛋白(鸡蛋清)，又叫鸡蛋白、鸡蛋清、蛋清、蛋白 热量：60 大卡(每100克)\n",
      "火腿肠 热量：212 大卡(每100克)\n",
      "鸡胸肉，又叫鸡柳肉、鸡里脊肉、鸡胸、鸡胸脯肉 热量：118 大卡(每100克)\n",
      "荷包蛋(油煎)，又叫荷包蛋、煎蛋、煎荷包蛋、煎鸡蛋 热量：195 大卡(每100克)\n",
      "咸鸭蛋，又叫盐蛋、腌蛋、味蛋 热量：190 大卡(每100克)\n",
      "猪肉(肥瘦)，又叫豕肉、彘肉 热量：395 大卡(每100克)\n",
      "小麦粉(标准粉)，又叫面粉，小麦标准粉 热量：362 大卡(每100克)\n",
      "澳洲燕麦片 热量：359 大卡(每100克)\n",
      "挂面 热量：353 大卡(每100克)\n",
      "思念 奶香馒头 热量：268 大卡(每100克)\n",
      "顶味 鸡蛋面 热量：358 大卡(每100克)\n",
      "油面筋，又叫面筋泡、油炸生筋 热量：492 大卡(每100克)\n",
      "意大利面(通心粉)，又叫意面、通心粉 热量：351 大卡(每100克)\n",
      "面条(干切面) 热量：355 大卡(每100克)\n",
      "面条(富强粉，切面) 热量：277 大卡(每100克)\n",
      "水面筋(泡发后)，又叫生麸、子面筋 热量：142 大卡(每100克)\n",
      "光明 e+益生菌酸牛奶(原味)220ml (袋装) 热量：45 大卡(每100毫升)\n",
      "早餐奶 热量：68 大卡(每100毫升)\n",
      "酸奶(高蛋白) 热量：62 大卡(每100毫升)\n",
      "奶片 热量：472 大卡(每100克)\n",
      "全脂牛奶粉 热量：478 大卡(每100克)\n",
      "光明 纯牛奶，又叫光明牛奶 热量：66 大卡(每100毫升)\n",
      "光明 优倍 高品质鲜牛奶，又叫光明 优倍高品质鲜牛奶 热量：65 大卡(每100毫升)\n",
      "光明 优倍 0脂肪 高品质脱脂鲜牛奶 热量：35 大卡(每100毫升)\n",
      "光明 优倍 0乳糖 巴士杀菌调制乳 热量：67 大卡(每100毫升)\n",
      "光明 致优 全鲜乳，又叫光明 致优全鲜乳 热量：63 大卡(每100毫升)\n",
      "盐水虾，又叫焖鲜虾 热量：92 大卡(每100克)\n",
      "清炒绿豆芽，又叫有机活体豆苗、炒绿豆芽 热量：36 大卡(每100克)\n",
      "葱油饼，又叫葱花饼、葱油饼 热量：252 大卡(每100克)\n",
      "清炒西葫芦，又叫炒西葫、西葫芦丝 热量：44 大卡(每100克)\n",
      "西红柿鸡蛋面，又叫番茄蛋面、番茄鸡蛋面 热量：98 大卡(每100克)\n",
      "酸辣土豆丝 热量：101 大卡(每100克)\n",
      "红烧肉 热量：529 大卡(每100克)\n",
      "韭菜包子 热量：171 大卡(每100克)\n",
      "卤蛋，又叫卤鸡蛋 热量：141 大卡(每100克)\n",
      "清炒土豆丝 热量：96 大卡(每100克)\n",
      "曼可顿 粗粮吐司面包，又叫曼可顿粗粮土司面包、曼克顿 粗粮吐司面包 热量：247 大卡(每100克)\n",
      "面条(特粉，切面) 热量：287 大卡(每100克)\n",
      "空锅饼 热量：278 大卡(每100克)\n",
      "RiverMill 全麦土司 热量：244 大卡(每100克)\n",
      "曼可顿 特选系列高纤维全麦面包，又叫曼可顿高纤维全麦面包 热量：265 大卡(每100克)\n",
      "麦可麦 燕麦片 热量：359 大卡(每100克)\n",
      "思念 印度飞饼 热量：389 大卡(每100克)\n",
      "挂面(精制龙须面) 热量：348 大卡(每100克)\n",
      "面条(虾蓉面) 热量：436 大卡(每100克)\n",
      "小麦面粉(特制) 热量：366 大卡(每100克)\n",
      "烧麦，又叫烧卖、糯米烧卖 热量：221 大卡(每100克)\n",
      "炒大白菜，又叫大白菜 热量：45 大卡(每100克)\n",
      "西红柿鸡蛋汤，又叫西红柿蛋汤、西红柿蛋花汤 热量：27 大卡(每100克)\n",
      "大饼，又叫饼，家常饼，死面饼 热量：273 大卡(每100克)\n",
      "清蒸鱼，又叫清蒸鱼、蒸鱼、鱼、蒸洄鱼 热量：108 大卡(每100克)\n",
      "酸菜鱼，又叫酸汤鱼、酸辣鱼、酸菜鱼、酸辣鱼汤 热量：97 大卡(每100克)\n",
      "寿司 自制1，又叫寿司卷 热量：141 大卡(每100克)\n",
      "麻婆豆腐，又叫麻婆豆腐 热量：122 大卡(每100克)\n",
      "牛肉面，又叫兰州拉面、牛腩面、牛肉拌面 热量：102 大卡(每100克)\n",
      "烧包菜丝 热量：48 大卡(每100克)\n",
      "瓦罐鸡汤(含料)，又叫瓦罐汤 热量：190 大卡(每100克)\n",
      "瓦罐鸡汤(无料) 热量：27 大卡(每100克)\n",
      "猪小排(良杂猪) 热量：351 大卡(每100克)\n",
      "猪肉(奶脯)，又叫软五花、奶脯、五花肉 热量：349 大卡(每100克)\n",
      "猪大排，又叫猪排 热量：264 大卡(每100克)\n",
      "牛肉(腑肋)，又叫牛腩 热量：123 大卡(每100克)\n",
      "Easy Fun 低脂鸡胸肉肠(原味)，又叫Easy Fun 低脂鸡胸肉肠(原味)、鸡胸肉肠 热量：142 大卡(每100克)\n",
      "Easy Fun 低脂鸡蛋干(五香味) 热量：145 大卡(每100克)\n",
      "Easy Fun 低脂蛋清鸡肉饼(原味)，又叫Easy Fun 低脂蛋清鸡肉饼 热量：139 大卡(每100克)\n",
      "草鱼，又叫鲩鱼、混子、草鲩、草包鱼、草根鱼、草青、白鲩 热量：113 大卡(每100克)\n",
      "Birds Eye 玉米粒 热量：84 大卡(每100克)\n",
      "NATCO玉米面 热量：375 大卡(每100克)\n",
      "都乐 Dole 甜玉米 热量：95 大卡(每100克)\n",
      "得众玉米粥 热量：362 大卡(每100毫升)\n",
      "FUDCO爆米花玉米 热量：518 大卡(每100克)\n",
      "KATOZ 鸡蛋面 热量：334 大卡(每100克)\n",
      "Crispix 早餐谷物 热量：367 大卡(每100克)\n",
      "Kornmühle 炸玉米片 热量：379 大卡(每100克)\n",
      "旌胜 玉米粉 热量：314 大卡(每100克)\n",
      "Dempster‘s dempster's  墨西哥玉米卷饼 热量：265 大卡(每100克)\n"
     ]
    },
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "[<Greenlet at 0x1ac2bab3708: _run>,\n",
       " <Greenlet at 0x1ac2bab3948: _run>,\n",
       " <Greenlet at 0x1ac2b8cbdc8: _run>]"
      ]
     },
     "metadata": {},
     "execution_count": 1
    },
    {
     "output_type": "stream",
     "name": "stderr",
     "text": [
      "D:\\software\\Python\\conda\\Lib\\site-packages\\gevent\\hub.py:161: UserWarning: libuv only supports millisecond timer resolution; all times less will be set to 1 ms\n  with loop.timer(seconds, ref=ref) as t:\n"
     ]
    }
   ],
   "source": [
    "#用多协程爬取薄荷网11个常见食物分类里的食物信息（包含食物名、热量、食物详情页面链接）\n",
    "#导入所需的库和模块。\n",
    "from gevent.queue import Queue\n",
    "from gevent import monkey\n",
    "monkey.patch_all()\n",
    "import gevent,requests, bs4, csv\n",
    "#让程序变成异步模式。\n",
    "work = Queue()\n",
    "\n",
    "url_lists = []\n",
    "for i in range(1,4):\n",
    "    for j in range(1,4):\n",
    "        url = 'http://www.boohee.com/food/group/'+str(j)+'?'+'page='+str(i)\n",
    "        work.put_nowait(url)\n",
    "    url2 = 'http://www.boohee.com/food/view_menu?page='+str(i)\n",
    "    work.put_nowait(url2)\n",
    "#另一种写法\n",
    "# for x in range(1, 4):\n",
    "#     for y in range(1, 4):\n",
    "#         real_url = url_1.format(type=x, page=y)\n",
    "#         work.put_nowait(real_url)\n",
    "\n",
    "def crawler():    \n",
    "    headers = {\n",
    "    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'\n",
    "    }\n",
    "    while not work.empty():\n",
    "        Url = work.get_nowait()\n",
    "        r = requests.get(Url,headers=headers)\n",
    "        bs_res = bs4.BeautifulSoup(r.text,'html.parser')\n",
    "        foods = bs_res.find_all('li',class_='item clearfix')\n",
    "\n",
    "        for food in foods:\n",
    "            name = food.find_all('a')[1]['title']\n",
    "            calorie = food.find('p').text\n",
    "            print(name,calorie)\n",
    "\n",
    "        \n",
    "task_lists = []\n",
    "for x in range(3):\n",
    "    task = gevent.spawn(crawler)\n",
    "    task_lists.append(task)\n",
    "gevent.joinall(task_lists)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 爬虫框架 Scrapy （已自动实现异步）\n",
    "# Scrapy的结构：Scrapy Engine(引擎）、Scheduler(调度器)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Scrapy Engine(引擎）就是这家爬虫公司的大boss，负责统筹公司的4大部门，每个部门都只听从它的命令，并只向它汇报工作。\n",
    "\n",
    "#Scheduler(调度器)部门主要负责处理引擎发送过来的requests对象（即网页请求的相关信息集合，包括params，data，cookies，request headers…等）\n",
    "#会把请求的url以有序的方式排列成队，并等待引擎来提取（功能上类似于gevent库的queue模块）。\n",
    "\n",
    "#Downloader（下载器）部门则是负责处理引擎发送过来的requests，进行网页爬取，并将返回的response（爬取到的内容）交给引擎。\n",
    "#对应的是爬虫流程【获取数据】这一步。\n",
    "\n",
    "#Spiders(爬虫)部门是公司的核心业务部门，主要任务是创建requests对象和接受引擎发送过来的response（Downloader部门爬取到的内容），从中解析并提取出有用的数据。\n",
    "#对应的是爬虫流程【解析数据】和【提取数据】这两步。\n",
    "\n",
    "#Item Pipeline（数据管道）部门则是公司的数据部门，只负责存储和处理Spiders部门提取到的有用数据。\n",
    "#对应的是爬虫流程【存储数据】这一步。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#创建scrapy工程的步骤\n",
    "#在本地电脑打开终端（windows：Win+R，输入cmd >>\n",
    "# 跳转到你想要保存项目的目录下 >> 命令行输入e:，就会跳转到e盘，再输入cd Python，就能跳转到Python文件夹 >>\n",
    "# 输入一行能帮我们创建Scrapy项目的命令：scrapy startproject douban，douban就是Scrapy项目的名字。按下enter键，一个Scrapy项目就创建成功了。\n",
    "# 在pycharm中运行项目,在spiders目录下创建文件top250.py。\n",
    "# 工程文件见文件夹 douban"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#定义爬取数据item.py\n",
    "import scrapy\n",
    "class DoubanItem(scrapy.Item):     #定义一个类DoubanItem，它继承自scrapy.Item\n",
    "    title = scrapy.Field()   #让数据能以类似字典的形式记录\n",
    "    publish = scrapy.Field()\n",
    "    score = scrapy.Field()\n",
    "\n",
    "book = DoubanItem()\n",
    "# 实例化一个DoubanItem对象\n",
    "book['title'] = '海边的卡夫卡'\n",
    "book['publish'] = '[日] 村上春树 / 林少华 / 上海译文出版社 / 2003'\n",
    "book['score'] = '8.1'\n",
    "print(book)\n",
    "print(type(book))  #“自定义的Python字典”"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#创建和编写top250.py\n",
    "import scrapy # 用创建类的方式写这个爬虫\n",
    "import bs4\n",
    "from ..items import DoubanItem  #items在top250.py的上一级目录\n",
    "\n",
    "class DoubanSpider(scrapy.Spider):\n",
    "    name = 'douban'         # name是定义爬虫的名字，爬虫的唯一标识\n",
    "    allowed_domains = ['https://book.douban.com']       # allowed_domains限制了我们关联爬取的URL，不会跳转到某个奇怪的广告页面。\n",
    "    start_urls = []     #爬取的网址封装成requests对象\n",
    "    for x in range(3):\n",
    "        url = 'https://book.douban.com/top250?start=' + str(x * 25)\n",
    "        start_urls.append(url)\n",
    "\n",
    "    def parse(self, response):      #解析处理response\n",
    "        bs = bs4.BeautifulSoup(response.text,'html.parser')\n",
    "        datas = bs.find_all('tr',class_='item')\n",
    "        for data in datas:\n",
    "            item = DoubanItem() \n",
    "            item['title'] = data.find_all('a')[1]['title']\n",
    "            item['publish'] = data.find('p',class_='pl').text\n",
    "            item['score'] = data.find('span',class_='rating_nums').text\n",
    "            print(item)\n",
    "            yield item # 将获得的item传递给引擎\n",
    "            #有点类似return，不过它和return不同的点在于，它不会结束函数，且能多次返回信息。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "hello\nhello again\nhello\n"
     ]
    }
   ],
   "source": [
    "### yield的介绍\n",
    "def f():    # 生成器对象\n",
    "    while True:\n",
    "        print('hello')\n",
    "        yield 1\n",
    "        print('hello again')\n",
    "        yield 2    # 不写会报错StopIteration\n",
    "gen = f()\n",
    "\n",
    "res1 = next(gen)   # 执行到第一个yield，返回值为1\n",
    "res2 = next(gen)   # 从上一个yield开始继续执行到下一个yield，返回值为2\n",
    "res3 = next(gen) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 修改 settings.py文件\n",
    "# Scrapy是遵守robots协议，需要修改请求头\n",
    "# Crawl responsibly by identifying yourself (and your website) on the user-agent\n",
    "USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n",
    "\n",
    "# Obey robots.txt rules\n",
    "ROBOTSTXT_OBEY = False\n",
    "\n",
    "# 控制爬虫的速度\n",
    "# Configure a delay for requests for the same website (default: 0)\n",
    "# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay\n",
    "# See also autothrottle settings and docs\n",
    "DOWNLOAD_DELAY = 0.5    # 下载延迟时间为0.5秒\n",
    "\n",
    "# Ⅰ存储为csv文件\n",
    "FEED_URI='./storage/data/%(name)s.csv'  #存在与setting.py同级的storage的子文件夹data中\n",
    "FEED_FORMAT='CSV'\n",
    "FEED_EXPORT_ENCODING='ansi'     # windows通常用'ansi'编码\n",
    "\n",
    "# Ⅱ存储Excel文件：第一步启用 ITEM_PIPELINES\n",
    "# Configure item pipelines\n",
    "# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n",
    "ITEM_PIPELINES = {\n",
    "    'jobuitest.pipelines.JobuitestPipeline': 300,\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 存储Excel文件（续） \n",
    "import openpyxl\n",
    "\n",
    "class JobuiPipeline(object):\n",
    "#定义一个JobuiPipeline类，负责处理item\n",
    "    def __init__(self):\n",
    "    #初始化函数 当类实例化时这个方法会自启动\n",
    "        self.wb =openpyxl.Workbook()\n",
    "        #创建工作薄\n",
    "        self.ws = self.wb.active\n",
    "        #定位活动表\n",
    "        self.ws.append(['公司', '职位', '地址', '招聘信息'])\n",
    "        #用append函数往表格添加表头\n",
    "        \n",
    "    def process_item(self, item, spider):\n",
    "    #process_item是默认的处理item的方法，就像parse是默认处理response的方法\n",
    "        line = [item['company'], item['position'], item['address'], item['detail']]\n",
    "        #把公司名称、职位名称、工作地点和招聘要求都写成列表的形式，赋值给line\n",
    "        self.ws.append(line)\n",
    "        #用append函数把公司名称、职位名称、工作地点和招聘要求的数据都添加进表格\n",
    "        return item\n",
    "        #将item丢回给引擎，如果后面还有这个item需要经过的itempipeline，引擎会自己调度\n",
    "\n",
    "    def close_spider(self, spider):\n",
    "    #close_spider是当爬虫结束运行时，这个方法就会执行\n",
    "        self.wb.save('./jobui.xlsx')\n",
    "        #保存文件\n",
    "        self.wb.close()\n",
    "        #关闭文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 增加main函数 = 运行cmd程序\n",
    "from scrapy import cmdline\n",
    "#导入cmdline模块,可以实现控制终端命令行。\n",
    "cmdline.execute(['scrapy','crawl','douban'])\n",
    "#用execute（）方法，输入运行scrapy的命令。其中'douban'为爬虫的唯一标识"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 13关作业 见dangdang\n",
    "import requests\n",
    "import bs4\n",
    "\n",
    "r = requests.get('http://bang.dangdang.com/books/bestsellers/01.00.00.00.00.00-year-2018-0-1-1')\n",
    "bs = bs4.BeautifulSoup(r.text,'html.parser')\n",
    "datas = bs.find_all('ul',class_='bang_list clearfix bang_list_mode')\n",
    "for data in datas:\n",
    "    name = data.find(class_='name').find('a')['title']\n",
    "    publisher = data.find(class_=\"publisher_info\").find('a')['title']\n",
    "    price = data.find(class_=\"price\").find('p').find(class_=\"price_n\").text\n",
    "print(name,publisher,price)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### scrapy 爬取招聘信息: 在scarpy_test文件夹中有详细模板\n",
    "# 框架的使用目的在于让爬虫更快、更强\n",
    "import requests\n",
    "import bs4\n",
    "\n",
    "url = 'https://www.jobui.com/rank/company/'\n",
    "headers = {\n",
    "     'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'\n",
    "}\n",
    "r = requests.get(url,headers=headers)\n",
    "bs = bs4.BeautifulSoup(r.text,'html.parser')\n",
    "company_id = []\n",
    "ids = bs.find_all('ul',class_='textList flsty cfix')\n",
    "for Id in ids:\n",
    "    companys = Id.find_all('a',target='_blank')\n",
    "    for ID in companys:\n",
    "        company = ID['href']\n",
    "        company_id.append('https://www.jobui.com'+company+'jobs')  #爬取公司代码\n",
    "print(company_id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 爬取公司职位详情\n",
    "import requests\n",
    "import bs4\n",
    "\n",
    "url = 'https://www.jobui.com/company/10375749/jobs'\n",
    "headers = {\n",
    "     'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'\n",
    "}\n",
    "r = requests.get(url,headers=headers)\n",
    "bs = bs4.BeautifulSoup(r.text,'html.parser')\n",
    "company = bs.find('a',class_='company-banner-name').text\n",
    "jobs = bs.find_all('div',class_='c-job-list')\n",
    "for job in jobs:\n",
    "    name = job.find('a',class_='job-name').find('h3').text\n",
    "    loc = job.find('div',class_='job-desc').find_all('span')[0]['title']\n",
    "    info = job.find('div',class_='job-desc').find_all('span')[1]['title']\n",
    "print(company,name,loc,info)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#练习，无法运行\n",
    "import scrapy\n",
    "import bs4\n",
    "from ..items import JobuiItem\n",
    "\n",
    "class JobuiSpider(scrapy.Spider):\n",
    "    name = 'jobs'\n",
    "    allowed_domains = ['https://www.jobui.com']\n",
    "    start_urls = ['https://www.jobui.com/rank/company/']\n",
    "    \n",
    "#提取公司id标识和构造公司招聘信息的网址：\n",
    "    def parse(self, response):\n",
    "    #parse是默认处理response的方法\n",
    "        bs = bs4.BeautifulSoup(response.text, 'html.parser')\n",
    "        ul_list = bs.find_all('ul',class_=\"textList flsty cfix\")\n",
    "        for ul in ul_list:\n",
    "            a_list = ul.find_all('a')\n",
    "            for a in a_list:\n",
    "                company_id = a['href']\n",
    "                real_url = 'https://www.jobui.com{id}jobs'.format(id=company_id)\n",
    "                yield scrapy.Request(real_url, callback=self.parse_job)\n",
    "# 用yield语句把构造好的request对象real_url传递给引擎\n",
    "# scrapy.Request是构造requests对象的类\n",
    "# real_url是往requests对象里传入的每家公司招聘信息网址的参数\n",
    "# callback是回调。用于接收请求后的返回信息(parse_job的返回值)，若没指定，则默认为parse()函数\n",
    "\n",
    "    def parse_job(self, response):\n",
    "    #定义新的处理response的方法parse_job（方法的名字可以自己起）\n",
    "        bs = bs4.BeautifulSoup(response.text, 'html.parser')\n",
    "        #用BeautifulSoup解析response(公司招聘信息的网页源代码)\n",
    "        company = bs.find(id=\"companyH1\").text\n",
    "        #用find方法提取出公司名称\n",
    "        datas = bs.find_all('li',class_=\"company-job-list\")\n",
    "        #用find_all提取<li class_=\"company-job-list\">标签，里面含有招聘信息的数据\n",
    "        for data in datas:\n",
    "        #遍历datas\n",
    "            item = JobuiItem()\n",
    "            #实例化JobuiItem这个类\n",
    "            item['company'] = company\n",
    "            #把公司名称放回JobuiItem类的company属性里\n",
    "            item['position']=data.find('h3').find('a').text\n",
    "            #提取出职位名称，并把这个数据放回JobuiItem类的position属性里\n",
    "            item['address'] = data.find('span',class_=\"col80\").text\n",
    "            #提取出工作地点，并把这个数据放回JobuiItem类的address属性里\n",
    "            item['detail'] = data.find('span',class_=\"col150\").text\n",
    "            #提取出招聘要求，并把这个数据放回JobuiItem类的detail属性里\n",
    "            yield item\n",
    "            #用yield语句把item传递给引擎"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 反爬虫    共识：所谓的反爬虫，从不是将爬虫完全杜绝；而是想办法将爬虫的访问量限制在一个可接纳的范围，不要让它过于肆无忌惮\n",
    "# 限制请求头 Request Headers >> user-agent / origin / referer\n",
    "# 限制登陆  cookies / session 模拟登录\n",
    "# 验证码限制 Selenium自动输入验证码 / 图像处理库tesserocr/pytesserart/pillow\n",
    "# IP限制 time.sleep()限制爬虫速度 / IP代理池\n",
    "import requests\n",
    "url = 'https://…'\n",
    "proxies = {'http':'http://…'}\n",
    "# ip地址\n",
    "response = requests.get(url,proxies=proxies)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 进阶学习\n",
    "# 解析与提取 >> 存储 >> 数据分析、可视化 >> 更多爬虫框架\n",
    "# 解析数据  xpath/lxml\n",
    "# 提取数据  ★★★正则表达式re模块\n",
    "# 存储数据  MySQL[关系型数据库] / MongoDB[非关系型数据库]  【当数据量庞大时】  需要学习SQL数据库语言。\n",
    "# 数据分析  Pandas / Matplotlib / Numpy / Scikit-Learn / Scipy\n",
    "# 分布式爬虫 让多个设备跑一个项目\n",
    "# 爬虫框架   使用Scrapy模拟登录、存储数据库、使用HTTP代理、分布式爬虫\n",
    "# 多读一读，编程前辈们的博客、github主页学习案例；同时，探索更多的项目实操，丰富自己的爬虫经验。\n",
    "# 最重要的点  确认目标-分析过程-面向过程实现代码-代码封装"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "name": "python374jvsc74a57bd02a7e95a32014fc1ccf24626d45a98c6e7b4373277259c22f47a91d487fc3e8a5",
   "display_name": "Python 3.7.4 64-bit ('base': conda)"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}