{
 "cells": [
  {
   "source": [
    "### BeautifulSoup【解析、提取数据】 \n",
    "## [官方文档](https://www.crummy.com/software/BeautifulSoup/bs4/doc.zh/)\n",
    "解析html数据(替代浏览器翻译HTML语言)  \n",
    "★★数据提取:  \n",
    "点击'网页源代码'('Elements'是HTML+js渲染后的网页),搜索想要爬取的数据  \n",
    "> 搜索可得则数据存在静态页面  \n",
    "> 搜索不到则可能存在js文件中\n",
    ">>查找并爬取XHR文件 / 渲染网页代码后爬取  "
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 解析数据 ： bsobj = BeautifulSoup(要解析的文本【字符串类型】,'解析器')\n",
    "# '解析器' 以Python内置的html.parser为例\n",
    "import requests \n",
    "from bs4 import BeautifulSoup\n",
    "res = requests.get('https://localprod.pandateacher.com/python-manuscript/crawler-html/spider-men5.0.html') \n",
    "print(res.status_code) \n",
    "html = res.text \n",
    "soup = BeautifulSoup(html,'html.parser') #把网页源代码解析为BeautifulSoup对象，便于提取\n",
    "print(type(html)) #查看html的类型   <class 'str'>\n",
    "print(type(soup)) #查看soup的类型   <class 'bs4.BeautifulSoup'>\n",
    "print(soup)\n",
    "# response.text和soup打印出的内容表面上看长得一模一样\n",
    "# BeautifulSoup对象在直接打印它的时候会调用该对象内的__str__方法\n",
    "# 提取数据的过程是从<class 'bs4.BeautifulSoup'>进行提取的\n",
    "# 通过定位标签和属性提取我们想要的数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 提取数据   find()与find_all()  \n",
    "# find()只提取【首个】满足要求的数据，而find_all()提取出的是所有满足要求的数据\n",
    "# 示例 ： item = soup.find('div',class_='book')  (标签,属性)\n",
    "# 【借助 CTRL+F 寻找标签是否具代表性，尽量找html语法里标准的属性名】\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "url = 'https://localprod.pandateacher.com/python-manuscript/crawler-html/spder-men0.0.html'\n",
    "res = requests.get (url)\n",
    "# print(res.status_code)\n",
    "soup = BeautifulSoup(res.text,'html.parser')  \n",
    "item = soup.find('div') #提取首个<div>标签，并放到变量item里。\n",
    "print(type(item)) #打印item的数据类型   <.Tag类对象> = 单个元素\n",
    "print(item)     \n",
    "print('\\n') \n",
    "item2 = soup.find_all('div') #提取全部<div>标签，并放到列表item2里。\n",
    "print(type(item2)) #打印item2的数据类型   <.ResultSet对象> = 列表\n",
    "print(item2)       \n",
    "# 打印的内容仍包含HTML标签"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 练习\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "url = 'https://localprod.pandateacher.com/python-manuscript/crawler-html/spider-men5.0.html'\n",
    "res = requests.get (url)\n",
    "# print(res.status_code)   \n",
    "soup = BeautifulSoup(res.text,'html.parser')\n",
    "item = soup.find_all(class_='info')  # class_ 避免与类冲突\n",
    "item2 = soup.find_all(class_='title')\n",
    "for i in item2:\n",
    "    print(i)   # <class 'bs4.element.Tag'>\n",
    "    print(i.attrs)  # 通过字典形式存储所有属性内容\n",
    "    print(i.string,i.text)  # 打印正文"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 提取数据   Tag.find()/Tag.find_all()等\n",
    "# ★ Python是一门面向对象编程的语言，只有知道是什么对象，才能调用相关的对象属性和方法。\n",
    "# Tag对象的三种方法：Tag.find()/Tag.find_all()、Tag.text、Tag['属性']\n",
    "\n",
    "import requests\n",
    "from bs4 import BeautifulSoup \n",
    "res = requests.get('https://localprod.pandateacher.com/python-manuscript/crawler-html/spider-men5.0.html')\n",
    "html = res.text    # 把res的内容以字符串的形式返回\n",
    "soup = BeautifulSoup( html,'html.parser')    # 把网页解析为BeautifulSoup对象\n",
    "items = soup.find_all(class_='books') # 在【soup】中匹配\n",
    "for item in items:\n",
    "    kind = item.find('h2') # ①在【列表中】的每个元素里，匹配首个标签<h2>提取出数据，返回Tag对象类型\n",
    "    title = item.find(class_='title')  \n",
    "    titlename = title.text  # ② Tag.text 提取出Tag中的文字，返回str类型\n",
    "    titleurl = title['href']  # ③ Tag[''] 提取出Tag的属性，返回str类型\n",
    "    brief = item.find(class_='info')\n",
    "    print(kind,'\\n',title,'\\n',brief,'\\n')\n",
    "    print(titlename,'\\n',titleurl,'\\n')\n",
    "print(type(titlename),type(titleurl),type(brief))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 总结：面向对象编程！！\n",
    "import requests\n",
    "from bs4 import BeautifulSoup \n",
    "res = requests.get('https://localprod.pandateacher.com/python-manuscript/crawler-html/spider-men5.0.html')  # 返回一个response对象，获取网页HTML源代码赋值给res \n",
    "html = res.text    # 将response对象转化为字符串\n",
    "soup = BeautifulSoup( html,'html.parser') # 把网页解析为BeautifulSoup对象\n",
    "# items = soup.find_all(class_='books') # BS对象：在【soup】中匹配属性转化为列表；最小共同父级标签\n",
    "\n",
    "items = soup.find_all(attrs={'class':'books'})  # 第二种写法!!!\n",
    "for item in items:           # 遍历列表提取Tag对象\n",
    "    kind = item.find('h2')       # Tag对象：①在【列表中】的每个元素里，匹配首个标签<h2>提取出数据，返回Tag对象类型\n",
    "    title = item.find(class_='title')  \n",
    "    titlename = title.text       # ② 提取出Tag中的文字，返回str类型\n",
    "    titleurl = title['href']     # ③ 提取出Tag的属性，返回str类型\n",
    "print(items)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Ultra-compact version (object types converted step by step in one expression)\n",
    "# Fixed: the original re-downloaded and re-parsed the page on every loop iteration\n",
    "# (N+1 HTTP requests); fetch and parse once, then iterate over the resulting Tag list.\n",
    "import requests\n",
    "from bs4 import BeautifulSoup \n",
    "books = BeautifulSoup(requests.get('https://localprod.pandateacher.com/python-manuscript/crawler-html/spider-men5.0.html').text,'html.parser').find_all(class_='books')\n",
    "for book in books:\n",
    "    print(book.find(class_='title').text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#当我们在用text获取纯文本时，获取的是该标签内的所有纯文本信息，不论是直接在这个标签内，还是在它的子标签内。\n",
    "from bs4 import BeautifulSoup\n",
    "bs = BeautifulSoup('<p><a>惟有痴情难学佛</a>独无媚骨不如人</p>','html.parser')\n",
    "tag = bs.find('p')\n",
    "print(tag.text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#但是，父标签只能提取它自身的属性值['href']，不能提取子标签的属性值。\n",
    "from bs4 import BeautifulSoup\n",
    "bs = BeautifulSoup('<p><a href=\\'https://www.pypypy.cn\\'></a></p>','html.parser')\n",
    "# 此处多出来的\\，是转义字符。\n",
    "tag = bs.find('p')#.find('a')\n",
    "print(tag['href'])\n",
    "# 这样会报错，因为<p>标签没有属性href，href属于<a>标签"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 爬取热门菜谱清单代码 思路1\n",
    "# 找到爬取内容的最小父级标签\n",
    "\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "\n",
    "res_foods = requests.get('http://www.xiachufang.com/explore/')\n",
    "print(res_foods.status_code)\n",
    "bs_foods = BeautifulSoup(res_foods.text,'html.parser')\n",
    "list_all = []\n",
    "list_foods = bs_foods.find_all('div',class_='info pure-u')  #父类Tag <p class_='name'>\n",
    "# 查找最小父级标签，因为该标签是并列的，后续用循环\n",
    "# 实践过程中标签选取不当、或者网页本身的编写没做好板块区分，可能会多打印出一些奇怪的东西\n",
    "# 数量多无规律：换个标签提取   ；   数量少有规律：程序筛选\n",
    "for food in list_foods:  \n",
    "    tag_a = food.find('a')             # 子类Tag  <a>\n",
    "    # 提取第0个标签<p>中的<a>标签\n",
    "    name = tag_a.text[17:-13]\n",
    "    # 菜名，使用[17:-13]切掉了多余的信息\n",
    "    URL = 'http://www.xiachufang.com'+tag_a['href']\n",
    "    # 获取URL\n",
    "    tag_p = list_foods[0].find('p',class_='ing ellipsis')\n",
    "    # 提取第0个父级标签中的<p>标签\n",
    "    ingredients = tag_p.text[1:-1]\n",
    "    # 食材，使用[1:-1]切掉了多余的信息\n",
    "    list_all.append([name,URL,ingredients])\n",
    "print(list_all)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 爬取热门菜谱清单代码 思路2\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "\n",
    "res_foods = requests.get('http://www.xiachufang.com/explore/')\n",
    "bs_foods = BeautifulSoup(res_foods.text,'html.parser')\n",
    "# 转为BS对象\n",
    "tag_name = bs_foods.find_all('p',class_='name')\n",
    "# 查找包含菜名和URL的<p>标签\n",
    "tag_ingredients = bs_foods.find_all('p',class_='ing ellipsis')\n",
    "# 查找包含食材的<p>标签\n",
    "list_all = []\n",
    "# 创建一个空列表，用于存储信息\n",
    "for x in range(len(tag_name)):\n",
    "# 启动一个循环，次数等于菜名的数量【潜在问题：菜名和食材数量对不上】\n",
    "    list_food = [tag_name[x].text[18:-14],tag_name[x].find('a')['href'],tag_ingredients[x].text[1:-1]]\n",
    "    # 提取信息，封装为列表。注意此处[18:-14]切片和之前不同，是因为此处使用的是<p>标签，而之前是<a>\n",
    "    list_all.append(list_food)\n",
    "print(list_all)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 练习：网上书店\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "\n",
    "res = requests.get('http://books.toscrape.com/')\n",
    "book_sp = BeautifulSoup(res.text,'html.parser')\n",
    "book_tag = book_sp.find('ul',class_=\"nav nav-list\")  #  BS对象转化为Tag对象\n",
    "tag_names = book_tag.find('li').find_all('li')    # Tag对象需要提取好几层，抽丝剥茧\n",
    "for tag_name in tag_names:\n",
    "    print(tag_name.text.strip())  #去除特殊字符串（空格，\\n,\\t）\n",
    "#     print(tag_name.text[63:-55])   蠢办法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 提取书名、评分、价格\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "res = requests.get('http://books.toscrape.com/catalogue/category/books/travel_2/index.html')\n",
    "book_sp = BeautifulSoup(res.text,'html.parser')\n",
    "book_tags = book_sp.find_all('article',class_=\"product_pod\")\n",
    "list_all = []\n",
    "for book_tag in book_tags:\n",
    "    rating = book_tag.find('p')['class'][1]\n",
    "    title = book_tag.find('h3').find('a')['title']\n",
    "    price = book_tag.find('p',class_=\"price_color\").text[1:]\n",
    "    list_all.append([rating,title,price])\n",
    "print(list_all)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 豆瓣TOP100 序号、电影、评分、推荐语、链接\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "url = 'https://movie.douban.com/top250'\n",
    "page = [0,25,50,75]\n",
    "for i in range(len(page)):\n",
    "    params = {\n",
    "        'start': str(page[i]),\n",
    "        'filter': ' '\n",
    "    }\n",
    "    headers = {\n",
    "        'origin':'https://y.qq.com',\n",
    "        'referer':'https://y.qq.com/n/yqq/song/004Z8Ihr0JIu5s.html',\n",
    "        'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\n",
    "        }\n",
    "    res = requests.get(url,headers=headers,params=params)  # 后文会讲Headers用法\n",
    "    TopSoup = BeautifulSoup(res.text,'html.parser')\n",
    "    TopLists = TopSoup.find('ol',class_='grid_view')\n",
    "\n",
    "    for TopList in TopLists:   # ❌内部循环的子对象是？写法不规范！\n",
    "        if TopList.find('em') == -1:  # 出现有规律'-1'\n",
    "            pass\n",
    "        else:\n",
    "            num = TopList.find('em').text\n",
    "            name = TopList.find('span').text\n",
    "            star = TopList.find('span',class_='rating_num').text\n",
    "            quote = TopList.find('span',class_='inq').text\n",
    "            link = TopList.find('a')['href']\n",
    "            print('第%s部电影是《%s》,评分%s，推荐语“%s”，\\n观看连接为：%s'%(num,name,star,quote,link))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 上题修正\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "url = 'https://movie.douban.com/top250'\n",
    "page = [0,25,50,75]\n",
    "for i in range(len(page)):\n",
    "    params = {\n",
    "        'start': str(page[i]),\n",
    "        'filter': ' '\n",
    "    }\n",
    "    headers = {\n",
    "        # NOTE(review): origin/referer below were copied from a QQ-music example\n",
    "        # and do not match movie.douban.com — presumably ignored by the server, but confirm;\n",
    "        # the user-agent is what disguises the crawler as a real browser\n",
    "        'origin':'https://y.qq.com',\n",
    "        'referer':'https://y.qq.com/n/yqq/song/004Z8Ihr0JIu5s.html',\n",
    "        'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\n",
    "        }\n",
    "    res = requests.get(url,headers=headers,params=params) \n",
    "    TopSoup = BeautifulSoup(res.text,'html.parser')\n",
    "    TopLists = TopSoup.find('ol',class_='grid_view')\n",
    "\n",
    "    for TopList in TopLists.find_all('li'):  #★★★\n",
    "        num = TopList.find('em',class_=\"\").text\n",
    "        name = TopList.find('span').text\n",
    "        star = TopList.find('span',class_='rating_num').text\n",
    "        quote = TopList.find('span',class_='inq').text\n",
    "        link = TopList.find('a')['href']\n",
    "        print('第%s部电影是《%s》,评分%s，推荐语“%s”，\\n观看连接为：%s'%(num,name,star,quote,link))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 一键下电影  https://www.ygdy8.com/index.html\n",
    "# 输入电影名称[需完全匹配]即可爬取下载链接\n",
    "\n",
    "def WatchMovie():\n",
    "    import urllib\n",
    "    import requests\n",
    "    from bs4 import BeautifulSoup\n",
    "\n",
    "    # url_code_name = urllib.quote(poet_name)    #报错:无quote应用 >> 翻看urllib官方手册得到答案\n",
    "    movie_name = input(\"你想下载什么电影：\")\n",
    "    keyword = urllib.parse.quote(movie_name,encoding='gbk')   # 采用拼接\n",
    "\n",
    "    url = 'http://s.ygdy8.com/plus/s0.php?typeid=1&keyword='\n",
    "    res = requests.get(url+keyword)  \n",
    "    res.encoding='gbk'    # 定义res的编码类型为gbk\n",
    "    movieSoup = BeautifulSoup(res.text,'html.parser')\n",
    "    movieTags = movieSoup.find('div',class_='co_content8').find('ul').find_all('table',border='0',width='100%')\n",
    "    if movieTags == []:\n",
    "        print('电影天堂没有你要的电影')\n",
    "    else:\n",
    "        print('以下是你要的电影链接')\n",
    "        for movieTag in movieTags:\n",
    "            link = 'https://www.ygdy8.com/' + movieTag.find('b').find('a')['href']\n",
    "            print(link)\n",
    "            \n",
    "WatchMovie()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# res.get()函数再理解\n",
    "import requests\n",
    "import urllib\n",
    "\n",
    "print(urllib.parse.quote('勇敢的心',encoding='gbk'))\n",
    "print(urllib.parse.quote('勇敢的心',encoding='utf-8'))\n",
    "\n",
    "params = {\n",
    "    'typeid': '1',\n",
    "    'keyword':'勇敢的心'   \n",
    "}\n",
    "url = 'http://s.ygdy8.com/plus/s0.php'\n",
    "res = requests.get(url,params=params) # 库里面已对params采用'utf-8'解码,再发送至服务端\n",
    "res.encoding='gbk'\n",
    "print(res.url)    # 发现编码非网页url\n",
    "res2 = requests.get('http://s.ygdy8.com/plus/s0.php?typeid=1&keyword=%D3%C2%B8%D2%B5%C4%D0%C4')\n",
    "res2.encoding='gbk'\n",
    "print(res2.url)\n",
    "# 以上keyword是勇敢的心采用'gbk'解码\n",
    "# print(res.text[:])\n",
    "# print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')\n",
    "# print(res2.text[:])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 《寻找周杰伦》\n",
    "#★★★ 确认版权网站 >> 查看robots文件 >> 找出具体网页\n",
    "import requests\n",
    "\n",
    "res_music = requests.get('https://y.qq.com/portal/search.html#page=1&searchid=1&remoteplace=txt.yqq.top&t=song&w=%E5%91%A8%E6%9D%B0%E4%BC%A6')\n",
    "print(res_music.status_code)\n",
    "print(res_music.text)   # 【发现问题】源代码HTML中'CTRL+F'找不到歌单名？？？"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 静态网页：第1关用html写出的网页，就是静态网页。网页源代码中就包含着网页的所有信息，因此，网页地址栏的URL就是网页源代码的URL，因此使用BeautifulSoup爬取这类型网页。\n",
    "### 动态网页：爬取的数据不在HTML源代码中，而是在json中，不能直接使用网址栏的URL，而需要找到json数据的真实URL。\n",
    "# 解析json数据"
   ]
  },
  {
   "source": [
    "一些比较老（或比较轻量）的网站：将所有关键信息都放在第0个请求  \n",
    "先进网站：往往第0个请求加载的html源代码是骨架，而所需内容存放在XHR文件内  \n",
    "在【Network】中查找  \n",
    "启用监控/清空面板信息 >> 查看ALL/XHR >> 保留请求Preserve log  \n",
    "分类查看：ALL（查看全部）/XHR（在html源代码中找不到的信息一般在XHR）/Doc（Document，第0个请求一般在这里）...  \n",
    "统计：有多少个请求，流量，耗时。  \n",
    "\n",
    "XHR and fetch【上题中歌单所在的文件】  \n",
    "数据传输Ajax技术：不借助网页刷新即可传输数据  \n",
    "eg.购物网站，下滑自动加载出更多商品；在线翻译网站，输入中文实时变英文。  \n",
    "Ajax工作时会创建一个XHR/Fetch对象，然后利用XHR对象来实现服务器和浏览器之间传输数据。  \n",
    "判断信息所在位置：Network >> All（而非XHR）>>刷新网页 >> 点第0个请求 >> 点Preview看是否有所需信息 OR 清空Network后点击新内容  "
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import requests\n",
    "res_music = requests.get('https://c.y.qq.com/splcloud/fcgi-bin/gethotkey.fcg?g_tk_new_20200303=817340209&g_tk=817340209&loginUin=979747718&hostUin=0&format=json&inCharset=utf8&outCharset=utf-8&notice=0&platform=yqq.json&needNewCode=0')\n",
    "print(res_music.status_code)\n",
    "songList = res_music.text # 【发现问题】.text仅能将代码转为字符串\n",
    "# a = songList['data']   # 字典调用有错？\n",
    "print(type(res_music.text)) # 看似字典实际上是字符串！省略了''\n",
    "print(res_music.text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "c = '[1,2,3,4]'\n",
    "# 数据的组织：分层结构+层间排序(eg.列表)+层内对应(eg.字典)\n",
    "#  html格式  通过<head class=''> body </head>实现分层对应\n",
    "#  ★★★  引入【json格式】  用列表/字典的语法写成的字符串文本(eg.网页XHR文件中的'字典')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### JSON格式"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#  json数据的解析.json() [requests方法]\n",
    "#  从Response对象开始，分成了两种可能，一种可能是数据放在HTML里，所以我们用BeautifulSoup库去解析数据和提取数据；\n",
    "#  另一种可能，数据作为Json存储起来，所以我们用response.json()方法去解析，然后提取、存储数据。\n",
    "import requests\n",
    "\n",
    "res_music = requests.get('https://u.y.qq.com/cgi-bin/musics.fcg?-=getSingerSong08670443194277855&g_tk=817340209&sign=zzaenq1pme8c1f6be8abd941f9f2b62c18f39dd6266e6&loginUin=979747718&hostUin=0&format=json&inCharset=utf8&outCharset=utf-8&notice=0&platform=yqq.json&needNewCode=0&data=%7B%22comm%22%3A%7B%22ct%22%3A24%2C%22cv%22%3A0%7D%2C%22singerSongList%22%3A%7B%22method%22%3A%22GetSingerSongList%22%2C%22param%22%3A%7B%22order%22%3A1%2C%22singerMid%22%3A%220025NhlN2yWrP4%22%2C%22begin%22%3A0%2C%22num%22%3A10%7D%2C%22module%22%3A%22musichall.song_list_server%22%7D%7D')\n",
    "music_list = res_music.json() # 将返回的res对象转为字典格式\n",
    "# 【由于url返回json字典列表，不需要做源代码解析（BS模块）】\n",
    "songList = music_list['singerSongList']['data']['songList'] # 一层一层地取字典，获取歌单列表。是否需要从第一层开始？\n",
    "songs = []\n",
    "for song in songList:\n",
    "    songs.append([song['songInfo']['name'],song['songInfo']['interval'],song['songInfo']['album']['name']])\n",
    "print(songs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 另外地，在Python中实现列表/字典转json，json转列表/字典，则需要借助json模块\n",
    "# dumps和loads是在内存中转换（python对象和json字符串之间的转换），而dump和load则是对应于文件的处理\n",
    "import json\n",
    "\n",
    "a = [1,2,3,4]\n",
    "b = json.dumps(a)\n",
    "# 使用dumps()函数，将列表a转换为json格式的字符串，赋值给b。\n",
    "print(b)\n",
    "print(type(b))\n",
    "\n",
    "c = json.loads(b)\n",
    "# 使用loads()函数，将json格式的字符串b转为列表，赋值给c。\n",
    "print(c)\n",
    "print(type(c)) "
   ]
  },
  {
   "source": [
    "阶段总结：  \n",
    "制定一个目标（爬取周杰伦的歌曲清单）>>>  \n",
    "根据目标确认方案（爬取QQ音乐） >>>  \n",
    "带着方案，去分析它的网站结构 >>>  \n",
    "最后在写代码的过程当中，我们会遇到困难（如json数据不知如何解析）>>>   \n",
    "学习新知识，去网络上搜索官方文档找到解决方案 >>> 最终完成项目  \n",
    "【发现问题】 ： 怎么翻页  "
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "###  Requests Headers / params [带参数请求数据] —— 爬取歌曲歌词和评论\n",
    "# 网页URL结构: https://xx.xx.xxx?xx=xx&xx=xxx&……   请求的地址 ? 参数1=XX & 参数2=XX\n",
    "# Network/XHR/Name/Headers/General & Query String Parameters\n",
    "# “寻找”需要的内容对应的XHR文件(Network'清空后'点击出现的新XDR，同时Name栏更简洁)\n",
    "# “观察”指的是阅读参数的键与值，尝试理解它的含义\n",
    "# “比较”指的是比较两个相近的XHR有哪些不同，对应的页面显示内容有什么不同。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 改写requests.get() \n",
    "# 翻页\n",
    "import requests\n",
    "for p in range(1,6):\n",
    "    res_music = requests.get('https://c.y.qq.com/soso/fcgi-bin/client_search_cp?ct=24&qqmusic_ver=1298&new_json=1&remoteplace=txt.yqq.song&searchid=60997426243444153&t=0&aggr=1&cr=1&catZhida=1&lossless=0&flag_qc=0&p='+str(p)+'&n=20&w=%E5%91%A8%E6%9D%B0%E4%BC%A6&g_tk=5381&loginUin=0&hostUin=0&format=json&inCharset=utf8&outCharset=utf-8&notice=0&platform=yqq.json&needNewCode=0')\n",
    "    json_music = res_music.json()\n",
    "    list_music = json_music['data']['song']['list']\n",
    "    for music in list_music:\n",
    "        print(music['name'])\n",
    "        print('所属专辑：'+music['album']['name'])\n",
    "        print('播放时长：'+str(music['interval'])+'秒')\n",
    "        print('播放链接：https://y.qq.com/n/yqq/song/'+music['mid']+'.html\\n\\n')\n",
    "#  缺点:太长了"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ★★★ The params argument of requests.get()\n",
    "# Pass query-string parameters for the URL as a dict of strings\n",
    "payload = {'key1':'value1','key2':'value2'} # fixed: a dict literal needs {} — ('k':'v') is a SyntaxError; include every parameter that differs between requests!\n",
    "r = requests.get('url',params = payload)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 获取周杰伦歌单\n",
    "import requests\n",
    "url = 'https://c.y.qq.com/soso/fcgi-bin/client_search_cp'\n",
    "for x in range(1):\n",
    "    params = {\n",
    "    'ct':'24',\n",
    "    'qqmusic_ver': '1298',\n",
    "    'new_json':'1',\n",
    "    'remoteplace':'sizer.yqq.song_next',\n",
    "    'searchid':'64405487069162918',\n",
    "    't':'0',\n",
    "    'aggr':'1',\n",
    "    'cr':'1',\n",
    "    'catZhida':'1',\n",
    "    'lossless':'0',\n",
    "    'flag_qc':'0',\n",
    "    'p':str(x+1),\n",
    "    'n':'20',\n",
    "    'w':'周杰伦',\n",
    "    'g_tk':'5381',\n",
    "    'loginUin':'0',\n",
    "    'hostUin':'0',\n",
    "    'format':'json',\n",
    "    'inCharset':'utf8',\n",
    "    'outCharset':'utf-8',\n",
    "    'notice':'0',\n",
    "    'platform':'yqq.json',\n",
    "    'needNewCode':'0'    \n",
    "    }\n",
    "    # 将参数封装为字典\n",
    "    res_music = requests.get(url,params=params)\n",
    "    json_music = res_music.json()\n",
    "    list_music = json_music['data']['song']['list']\n",
    "    song = []\n",
    "    songList = []\n",
    "    for music in list_music:\n",
    "        song = [\n",
    "            music['name'],\n",
    "            music['album']['name'],\n",
    "            str(music['interval'])+'秒',\n",
    "            'https://y.qq.com/n/yqq/song/'+music['mid']+'.html'\n",
    "        ]\n",
    "        songList.append(song)\n",
    "        print(music['name'])\n",
    "        print('所属专辑：'+music['album']['name'])\n",
    "        print('播放时长：'+str(music['interval'])+'秒')\n",
    "        print('播放链接：https://y.qq.com/n/yqq/song/'+music['mid']+'.html\\n\\n')\n",
    "print(songList)\n",
    "# 下接存储数据2  Excel"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# 爬取评论[失败例子]\n",
    "import requests\n",
    "url = 'https://c.y.qq.com/base/fcgi-bin/fcg_global_comment_h5.fcg'\n",
    "for i in range(5):\n",
    "    qsp = {\n",
    "        'g_tk_new_20200303': '817340209',\n",
    "        'g_tk': '817340209',\n",
    "        'loginUin': '979747718',\n",
    "        'hostUin': '0',\n",
    "        'format': 'json',\n",
    "        'inCharset': 'utf8',\n",
    "        'outCharset': 'GB2312',\n",
    "        'notice': '0',\n",
    "        'platform': 'yqq.json',\n",
    "        'needNewCode': '0',\n",
    "        'cid': '205360772',\n",
    "        'reqtype': '2',\n",
    "        'biztype': '1',\n",
    "        'topid': '102065756',\n",
    "        'cmd': '8',\n",
    "        'needmusiccrit': '0',\n",
    "        'pagenum': str(i),\n",
    "        'pagesize': '25',\n",
    "        'lasthotcommentid': 'song_102065756_1152921504849507854_1590101017', # 【没有找到这个不同点！导致爬虫重复同样内容】\n",
    "        'domain': 'qq.com',\n",
    "        'ct': '24',\n",
    "        'cv': '10101010'\n",
    "    }\n",
    "    res_music = requests.get(url,params = qsp)\n",
    "    music_tab = res_music.json()\n",
    "    commentLists = music_tab['comment']['commentlist']\n",
    "    for i in range(len(commentLists)):\n",
    "        print(commentLists[i]['rootcommentcontent'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 爬取评论[参考答案]\n",
    "import requests\n",
    "\n",
    "url = 'https://c.y.qq.com/base/fcgi-bin/fcg_global_comment_h5.fcg'\n",
    "commentid = ''\n",
    "# 设置一个初始commentid\n",
    "for x in range(5):\n",
    "    \n",
    "    params = {\n",
    "    'g_tk':'5381',\n",
    "    'loginUin':'0',\n",
    "    'hostUin':'0',\n",
    "    'format':'json',\n",
    "    'inCharset':'utf8',\n",
    "    'outCharset':'GB2312',\n",
    "    'notice':'0',\n",
    "    'platform':'yqq.json',\n",
    "    'needNewCode':'0',\n",
    "    'cid':'205360772',\n",
    "    'reqtype':'2',\n",
    "    'biztype':'1',\n",
    "    'topid':'102065756',\n",
    "    'cmd':'8',\n",
    "    'needcommentcrit':'0',\n",
    "    'pagenum':str(x),\n",
    "    'pagesize':'25',\n",
    "    'lasthotcommentid':commentid,\n",
    "    'domain':'qq.com',\n",
    "    'ct':'24',\n",
    "    'cv':'101010  '\n",
    "    }\n",
    "    # 将参数封装为字典，其中pagenum和lastcommentid是特殊的变量\n",
    "    res_comment = requests.get(url,params=params)\n",
    "    json_comment = res_comment.json()\n",
    "    list_comment = json_comment['comment']['commentlist']\n",
    "    for comment in list_comment:\n",
    "        print(comment['rootcommentcontent'])\n",
    "    commentid = list_comment[24]['commentid']\n",
    "    # 将最后一个评论的id赋值给comment，准备开始下一次循环\n",
    "    # 【提出问题】服务器拒绝爬虫 'Disallowed'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### requests.get()中的 headers 参数\n",
    "# Network — Requests Headers 【包含请求的基本信息】服务器识别'浏览器or爬虫'的依据\n",
    "# user-agent 记录你电脑的信息和浏览器版本（eg.Mozilla/5.0 (Windows NT 10.0; Win64; x64)....)\n",
    "# origin和referer则记录请求最初的起源是来自哪个页面。是referer会比origin携带的信息更多些\n",
    "# 若不修改user-agent发送请求时默认为Python\n",
    "# 爬取某些特定信息，也要求你注明请求的来源\n",
    "\n",
    "import requests\n",
    "url = 'https://c.y.qq.com/base/fcgi-bin/fcg_global_comment_h5.fcg'\n",
    "# 这是那个，请求歌曲评论的url\n",
    "headers = {\n",
    "    'origin':'https://y.qq.com',\n",
    "    'referer':'https://y.qq.com/n/yqq/song/004Z8Ihr0JIu5s.html',\n",
    "    'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\n",
    "    # 标记了请求从什么设备，什么浏览器上发出 【爬虫伪装成浏览器】\n",
    "    }     # 直接从Network上的Requests Headers复制过来\n",
    "res_music = requests.get(url,headers=headers)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#★总结1：打开网址 >> Network刷新 >> 第0个请求无信息 >> 清空列表查看XHR \n",
    "# >> 查看变动的params >> 修改字典赋值.get(params=××)\n",
    "# 总结2：打印res.status_code >> 输出404 >> 创建headers字典传入.get()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 练习：爬取歌词    https://y.qq.com/n/yqq/singer/0025NhlN2yWrP4.html#stat=y_new.song.header.singernam\n",
    "import requests\n",
    "import re\n",
    "from bs4 import BeautifulSoup\n",
    "\n",
    "musicid = []\n",
    "urlx = 'https://y.qq.com/n/yqq/singer/0025NhlN2yWrP4.html#stat=y_new.song.header.singername'\n",
    "resx = requests.get(urlx)\n",
    "music_Bs = BeautifulSoup(resx.text,'html.parser')\n",
    "music_Tgs = music_Bs.find('ul',class_='songlist__list').find_all('li')\n",
    "for music_Tg in music_Tgs:\n",
    "    musicid.append(music_Tg['mid'])\n",
    "    \n",
    "url = 'https://c.y.qq.com/lyric/fcgi-bin/fcg_query_lyric_yqq.fcg'\n",
    "headers = {\n",
    "    'origin': 'https://y.qq.com',\n",
    "    'referer': 'https://y.qq.com/n/yqq/song/0039MnYb0qxYhV.html',\n",
    "    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'\n",
    "}\n",
    "for i in range(len(musicid)):\n",
    "    params = {\n",
    "        'nobase64': '1',\n",
    "        'musicid': musicid[i],\n",
    "        '-': 'jsonp1',\n",
    "        'g_tk_new_20200303': '817340209',\n",
    "        'g_tk': '817340209',\n",
    "        'loginUin': '979747718',\n",
    "        'hostUin': '0',\n",
    "        'format': 'json',\n",
    "        'inCharset': 'utf8',\n",
    "        'outCharset': 'utf-8',\n",
    "        'notice': '0',\n",
    "        'platform': 'yqq.json',\n",
    "        'needNewCode': '0'\n",
    "    }\n",
    "    res = requests.get(url,params = params,headers = headers) # 没有headers就出不来歌词\n",
    "    music_table = res.json()   # res自带的解析模块\n",
    "    print(music_table)\n",
    "    lyric = re.findall('[\\u4e00-\\u9fff]+',music_table['lyric'])  # 移除除汉字外的符号\n",
    "    # eg. [ a-z ]表示匹配所有的小写英文字符； [\\u4e00-\\u9fff]代表所有汉字\n",
    "    # 匹配测试网站 https://regex101.com/\n",
    "    # re正则表达式用法见 https://www.runoob.com/python3/python3-reg-expressions.html\n",
    "#     print(lyric[-1])  #仅显示最后一句歌词\n",
    "    print(lyric)  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 正确答案：https://y.qq.com/portal/search.html#page=1&searchid=1&remoteplace=txt.yqq.top&t=lyric&w=%E5%91%A8%E6%9D%B0%E4%BC%A6\n",
    "import requests\n",
    "import json    #使用json模块来解析res.text\n",
    "url = 'https://c.y.qq.com/soso/fcgi-bin/client_search_cp'\n",
    "for x in range(5):\n",
    "    params = {\n",
    "        'ct':'24',\n",
    "        'qqmusic_ver': '1298',\n",
    "        'new_json':'1',\n",
    "        'remoteplace':'sizer.yqq.lyric_next',\n",
    "        'searchid':'94267071827046963',\n",
    "        'aggr':'1',\n",
    "        'cr':'1',\n",
    "        'catZhida':'1',\n",
    "        'lossless':'0',\n",
    "        'sem':'1',\n",
    "        't':'7',\n",
    "        'p':str(x+1),\n",
    "        'n':'10',\n",
    "        'w':'周杰伦',\n",
    "        'g_tk':'1714057807',\n",
    "        'loginUin':'0',\n",
    "        'hostUin':'0',\n",
    "         'format':'json',\n",
    "        'inCharset':'utf8',\n",
    "        'outCharset':'utf-8',\n",
    "        'notice':'0',\n",
    "        'platform':'yqq.json',\n",
    "        'needNewCode':'0'\n",
    "    }\n",
    "    res = requests.get(url, params = params)\n",
    "    #下载该网页，赋值给res\n",
    "    jsonres = json.loads(res.text)\n",
    "#     print(jsonres)\n",
    "#     print('\\n')\n",
    "#     print(res.json())\n",
    "    list_lyric = jsonres['data']['lyric']['list']\n",
    "    #一层一层地取字典，获取歌词的列表\n",
    "    for lyric in list_lyric:\n",
    "        #lyric是一个列表，x是它里面的元素\n",
    "        print(lyric['content'])\n",
    "        #以content为键，查找歌词"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 一键查快递\n",
    "import requests\n",
    "#调用requests模块，负责上传和下载数据\n",
    "\n",
    "logisticsName = 'yuantong'\n",
    "courierNum = 'YT4523206520263'\n",
    "\n",
    "url = 'https://www.kuaidi100.com/query?'\n",
    "#使用get需要一个链接\n",
    "\n",
    "params = {\n",
    "    'type': logisticsName,\n",
    "    'postid': courierNum,\n",
    "    'temp': '0.9661515218223198',\n",
    "    'phone':''\n",
    "}\n",
    "#将需要get的内容，以字典的形式记录在params内\n",
    "\n",
    "r = requests.get(url, params = params)\n",
    "print(r.text)   # 同样的url每次运行得到的结果都不同\n",
    "#get需要输入两个参数，一个是刚才的链接，一个是params，返回的是一个Response对象\n",
    "# result = r.json()\n",
    "\n",
    "# print ('最新物流状态‘：'+ result['data'][0]['context'])\n",
    "# #记得观察preview里面的参数哦"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### 存储数据1  csv.py  >>  csv格式[https://yiyibooks.cn/xx/python_352/library/csv.html#module-csv]  \n",
    "# 相比excel优点：读写比较方便，易于实现，文件小\n",
    "# 缺点：不能嵌入图像和图表，不能生成公式\n",
    "# 存储的csv用excel打开后成乱码???\n",
    "\n",
    "#写入操作\n",
    "import csv\n",
    "routine = r'E:\\software\\Python\\crawler\\00-风变python\\2-风变python课程资料【基础语法+爬虫精进】\\Python爬虫精进\\2~7关：第2个小目标：爬虫小成\\demo.csv'\n",
    "# 加newline=' '参数的原因是避免csv文件出现两倍的行距;\n",
    "# encoding=''，可以避免编码问题导致的报错或乱码。\n",
    "with open(routine,'w',newline = '',encoding = 'gbk') as csv_file:    # 创建文件\n",
    "    mvw = csv.writer(csv_file)   # 创建对象\n",
    "    mvw.writerow(['电影','豆瓣评分'])   # 写入内容\n",
    "    mvw.writerow(['肖申克的救赎','9.5'])    # 完成写入后自动关闭\n",
    "    \n",
    "# 读取操作\n",
    "csv_file = open(routine,'r',newline = '') \n",
    "mvr = csv.reader(csv_file)\n",
    "for row in mvr:\n",
    "    print(row)\n",
    "# I/O operation on closed file.  with open后不能操作写之后再读取？？"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 存储数据2   openpyxl.py  >>  Excel xlsx/xls[https://openpyxl.readthedocs.io/en/stable/]  \n",
    "# workbook > worksheet > cell A1-Z21\n",
    "from openpyxl import Workbook\n",
    "\n",
    "wb = Workbook()  # 创建excel文件\n",
    "sheet1 = wb.active      #获取工作表。\n",
    "sheet1.title = 'new title'  #工作表重命名\n",
    "print(type(sheet1.title))\n",
    "\n",
    "sheet1['A1'] = '漫威宇宙'   # 写入单个单元格\n",
    "rows = [['美国队长','钢铁侠','蜘蛛侠'],['是','漫威','宇宙', '经典','人物']]  \n",
    "for i in rows:\n",
    "    sheet1.append(i)   # 写入内容 【写入操作类似列表】\n",
    "routine = r'E:\\software\\Python\\crawler\\00-风变python\\2-风变python课程资料【基础语法+爬虫精进】\\Python爬虫精进\\2~7关：第2个小目标：爬虫小成\\demo.xlsx'\n",
    "wb.save(routine)  # 保存文件\n",
    "# wb.close()   # 保存后不用关闭"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 读取Excel表格\n",
    "from openpyxl import load_workbook\n",
    "\n",
    "routine = r'E:\\software\\Python\\crawler\\00-风变python\\2-风变python课程资料【基础语法+爬虫精进】\\Python爬虫精进\\2~7关：第2个小目标：爬虫小成\\demo.xlsx'\n",
    "wb = load_workbook(routine)  # 打开文件\n",
    "sheet = wb['new title']  # 获取工作表\n",
    "# print(wb.sheetnames)   循环对sheet操作\n",
    "print(sheet['A1'])   \n",
    "A1_val = sheet['A1'].value   # 读取单元格。value是单元格的属性\n",
    "print(type(sheet))        # 每个工作表是一个sheet类\n",
    "print(type(sheet['A1']))  # 每个单元格是一个Cell类：【通过类调用对象不应该是[]】>> 阅读源文件\n",
    "print(type(sheet['A1'].value))   \n",
    "\n",
    "print(s1.dimensions)\n",
    "print(sheet['A'])         \n",
    "print(print(s1[1][1].value))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 上接周杰伦歌单\n",
    "# 存储\n",
    "import openpyxl\n",
    "songList = [['晴天', '叶惠美', '269秒', 'https://y.qq.com/n/yqq/song/0039MnYb0qxYhV.html'], \n",
    "            ['一路向北', 'J III MP3 Player', '295秒', 'https://y.qq.com/n/yqq/song/001xd0HI0X9GNq.html'], \n",
    "            ['七里香', '七里香', '299秒', 'https://y.qq.com/n/yqq/song/004Z8Ihr0JIu5s.html'], \n",
    "            ['稻香', '魔杰座', '223秒', 'https://y.qq.com/n/yqq/song/003aAYrm3GE0Ac.html'],\n",
    "            ['听妈妈的话', '依然范特西', '265秒', 'https://y.qq.com/n/yqq/song/002hXDfk0LX9KO.html'],\n",
    "            ['告白气球', '周杰伦的床边故事', '215秒', 'https://y.qq.com/n/yqq/song/003OUlho2HcRHC.html'], \n",
    "            ['搁浅', '七里香', '240秒', 'https://y.qq.com/n/yqq/song/001Bbywq2gicae.html'], \n",
    "            ['夜曲', '十一月的萧邦', '226秒', 'https://y.qq.com/n/yqq/song/001zMQr71F1Qo8.html'], \n",
    "            ['不能说的秘密', '不能说的秘密 电影原声带', '296秒', 'https://y.qq.com/n/yqq/song/002MXZNu1GToOk.html'], \n",
    "            ['等你下课', '等你下课', '270秒', 'https://y.qq.com/n/yqq/song/001J5QJL1pRQYB.html'], \n",
    "           ]   # 未全部摘录\n",
    "routine = r'E:\\software\\Python\\crawler\\00-风变python\\2-风变python课程资料【基础语法+爬虫精进】\\Python爬虫精进\\2~7关：第2个小目标：爬虫小成\\songList2.xlsx'\n",
    "\n",
    "wbw = openpyxl.Workbook()\n",
    "shtw = wbw.active\n",
    "shtw.title = 'songList of JZ'\n",
    "shtw['A1'] ='歌曲名'\n",
    "shtw['B1'] ='所属专辑'\n",
    "shtw['C1'] ='播放时长'\n",
    "shtw['D1'] ='播放链接'\n",
    "for line in songList:\n",
    "    shtw.append(line)   \n",
    "wbw.save(routine)\n",
    "\n",
    "# 读取\n",
    "wbr = openpyxl.load_workbook(routine)\n",
    "s1 = wbr['songList of JZ']\n",
    "\n",
    "ListSong = []\n",
    "for j in range(len(s1['A'])):\n",
    "    info = []\n",
    "    for i in range(len(s1[1])):   # 列表行数可获取，列数怎么获取  s1['A']\n",
    "        info.append(s1[j+1][i].value)\n",
    "    ListSong.append(info)\n",
    "    \n",
    "print(ListSong)\n",
    "wbr.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 完整代码\n",
    "import requests,openpyxl\n",
    "wb=openpyxl.Workbook()  \n",
    "#创建工作薄\n",
    "sheet=wb.active \n",
    "#获取工作薄的活动表\n",
    "sheet.title='restaurants' \n",
    "#工作表重命名\n",
    "\n",
    "sheet['A1'] ='歌曲名'   \n",
    "sheet['B1'] ='所属专辑' \n",
    "sheet['C1'] ='播放时长' \n",
    "sheet['D1'] ='播放链接' \n",
    "\n",
    "url = 'https://c.y.qq.com/soso/fcgi-bin/client_search_cp'\n",
    "for x in range(5):\n",
    "    params = {\n",
    "        'ct': '24',\n",
    "        'qqmusic_ver': '1298',\n",
    "        'new_json': '1',\n",
    "        'remoteplace': 'sizer.yqq.song_next',\n",
    "        'searchid': '64405487069162918',\n",
    "        't': '0',\n",
    "        'aggr': '1',\n",
    "        'cr': '1',\n",
    "        'catZhida': '1',\n",
    "        'lossless': '0',\n",
    "        'flag_qc': '0',\n",
    "        'p': str(x + 1),\n",
    "        'n': '20',\n",
    "        'w': '周杰伦',\n",
    "        'g_tk': '5381',\n",
    "        'loginUin': '0',\n",
    "        'hostUin': '0',\n",
    "        'format': 'json',\n",
    "        'inCharset': 'utf8',\n",
    "        'outCharset': 'utf-8',\n",
    "        'notice': '0',\n",
    "        'platform': 'yqq.json',\n",
    "        'needNewCode': '0'\n",
    "    }\n",
    "\n",
    "    res_music = requests.get(url, params=params)\n",
    "    json_music = res_music.json()\n",
    "    list_music = json_music['data']['song']['list']\n",
    "    for music in list_music:\n",
    "        name = music['name']\n",
    "        album = music['album']['name']\n",
    "        time = music['interval']\n",
    "        link = 'https://y.qq.com/n/yqq/song/' + str(music['file']['media_mid']) + '.html\\n\\n'\n",
    "        sheet.append([name, album, time,url])\n",
    "        # 把name、album、time和link写成列表，用append函数多行写入Excel\n",
    "        print('歌曲名：' + name + '\\n' + '所属专辑:' + album +'\\n' + '播放时长:' + str(time) + '\\n' + '播放链接:'+ url)\n",
    "        \n",
    "wb.save('Jay.xlsx')            \n",
    "#最后保存并命名这个Excel文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 豆瓣TOP100 Excel存储\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "from openpyxl import Workbook\n",
    "url = 'https://movie.douban.com/top250'\n",
    "routine = r'E:\\software\\Python\\crawler\\00-风变python\\2-风变python课程资料【基础语法+爬虫精进】\\Python爬虫精进\\2~7关：第2个小目标：爬虫小成\\movie.xlsx'\n",
    "page = [0,25,50,75]\n",
    "\n",
    "wb = Workbook()\n",
    "sheet = wb.active\n",
    "sheet.title ='Top 100'\n",
    "sheet['A1'] ='排名'   \n",
    "sheet['B1'] ='影名'  \n",
    "sheet['C1'] ='评分' \n",
    "sheet['D1'] ='推荐语'   \n",
    "sheet['E1'] ='观看链接'   \n",
    "for i in range(len(page)):\n",
    "    params = {\n",
    "        'start': str(page[i]),\n",
    "        'filter': ' '\n",
    "    }\n",
    "    headers = {\n",
    "        'origin':'https://y.qq.com',\n",
    "        'referer':'https://y.qq.com/n/yqq/song/004Z8Ihr0JIu5s.html',\n",
    "        'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\n",
    "        }\n",
    "    res = requests.get(url,headers=headers,params=params) \n",
    "    TopSoup = BeautifulSoup(res.text,'html.parser')\n",
    "    TopLists = TopSoup.find('ol',class_='grid_view')\n",
    "#     info = []\n",
    "    for TopList in TopLists.find_all('li'):  #★★★\n",
    "        num = TopList.find('em',class_=\"\").text\n",
    "        name = TopList.find('span').text\n",
    "        star = TopList.find('span',class_='rating_num').text\n",
    "        quote = TopList.find('span',class_='inq').text\n",
    "        link = TopList.find('a')['href']\n",
    "        sheet.append([num,name,star,quote,link])\n",
    "        print('第%s部电影是《%s》,评分%s，推荐语“%s”，\\n观看链接为：%s'%(num,name,star,quote,link))\n",
    "#     sheet.append(info)   #每次25个存入表格 ❌ \n",
    "wb.save(routine)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 实例操练   知乎张佳玮 文章标题、摘要、链接\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "from openpyxl import Workbook\n",
    "url = 'https://www.zhihu.com/api/v4/members/zhang-jia-wei/articles?'\n",
    "routine = r'E:\\software\\Python\\crawler\\00-风变python\\2-风变python课程资料【基础语法+爬虫精进】\\Python爬虫精进\\2~7关：第2个小目标：爬虫小成\\articles_end.xlsx'\n",
    "headers = {\n",
    "    'referer':'https://www.zhihu.com/people/zhang-jia-wei/posts/posts_by_votes?page=1',\n",
    "    'user-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n",
    "}\n",
    "\n",
    "wb = Workbook()\n",
    "sheet = wb.active\n",
    "sheet.title = '张佳玮文章集'\n",
    "sheet['A1'] = '标题'\n",
    "sheet['B1'] = '摘要'\n",
    "sheet['C1'] = '链接'\n",
    "con = []\n",
    "offset = 0\n",
    "while True:\n",
    "    params = {\n",
    "        'include': 'data[*].comment_count,suggest_edit,is_normal,thumbnail_extra_info,thumbnail,can_comment,comment_permission,admin_closed_comment,content,voteup_count,created,updated,upvoted_followees,voting,review_info,is_labeled,label_info;data[*].author.badge[?(type=best_answerer)].topics',\n",
    "        'offset': str(offset),\n",
    "        'limit': '20',\n",
    "        'sort_by': 'voteups'\n",
    "    }\n",
    "    offset += 20\n",
    "    if offset>40:\n",
    "        break\n",
    "    res = requests.get(url,headers=headers,params=params)\n",
    "    res.encoding = 'utf-8'\n",
    "    article = res.json()\n",
    "    print(article)\n",
    "    infos = article['data']\n",
    "    print(i)\n",
    "    print('----------------------')\n",
    "    for info in infos:\n",
    "        title = info['title']\n",
    "        abstract = info['excerpt']\n",
    "        link = info['url']\n",
    "        con = [title,abstract,link]\n",
    "        sheet.append(con)\n",
    "    if article['paging']['is_end'] == True:\n",
    "        break    #如果键is_end所对应的值是True，就结束while循环。\n",
    "wb.save(routine)\n",
    "\n",
    "    # 返回的html源代码中只有两个文章标题 >> 寻找信息其实在XHR里\n",
    "    # 返回的全是\\u的unicode字节编码  >>  res.encoding\n",
    "    # params = {\n",
    "\n",
    "    # }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 参考答案\n",
    "import requests\n",
    "import csv\n",
    "#引用csv。\n",
    "csv_file=open('articles.csv','w',newline='',encoding='utf-8')\n",
    "#调用open()函数打开csv文件，传入参数：文件名“articles.csv”、写入模式“w”、newline=''。\n",
    "writer = csv.writer(csv_file)\n",
    "# 用csv.writer()函数创建一个writer对象。\n",
    "list2=['标题','链接','摘要']\n",
    "#创建一个列表\n",
    "writer.writerow(list2)\n",
    "#调用writer对象的writerow()方法，可以在csv文件里写入一行文字 “标题”和“链接”和\"摘要\"。\n",
    "\n",
    "headers={'user-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'}\n",
    "url='https://www.zhihu.com/api/v4/members/zhang-jia-wei/articles?'\n",
    "offset=0\n",
    "#设置offset的起始值为0\n",
    "while True:\n",
    "    params={\n",
    "        'include':'data[*].comment_count,suggest_edit,is_normal,thumbnail_extra_info,thumbnail,can_comment,comment_permission,admin_closed_comment,content,voteup_count,created,updated,upvoted_followees,voting,review_info,is_labeled,label_info;data[*].author.badge[?(type=best_answerer)].topics',\n",
    "        'offset':str(offset),\n",
    "        'limit':'20',\n",
    "        'sort_by':'voteups',\n",
    "        }\n",
    "    #封装参数\n",
    "    res=requests.get(url,headers=headers,params=params)\n",
    "    #发送请求，并把响应内容赋值到变量res里面\n",
    "    articles=res.json()\n",
    "    print(articles)\n",
    "    data=articles['data']\n",
    "    #定位数据\n",
    "    for i in data:\n",
    "        list1=[i['title'],i['url'],i['excerpt']]\n",
    "        #把目标数据封装成一个列表\n",
    "        writer.writerow(list1)\n",
    "        #调用writerow()方法，把列表list1的内容写入\n",
    "    offset=offset+20\n",
    "    #在while循环内部，offset的值每次增加20\n",
    "    if offset > 40:\n",
    "        break\n",
    "csv_file.close()\n",
    "#写入完成后，关闭文件就大功告成\n",
    "print('okay')  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 练习 ： 单词测试\n",
    "import requests\n",
    "import csv\n",
    "\n",
    "numUrl = 'https://www.shanbay.com/api/v1/vocabtest/category/?_=1590400448321'\n",
    "resNum = requests.get(numUrl)\n",
    "NumLocate = resNum.json()['data']\n",
    "\n",
    "bianhao = int(input('''请输入你选择的词库编号，按Enter确认\n",
    "1，GMAT  2，考研  3，高考  4，四级  5，六级\n",
    "6，英专  7，托福  8，GRE  9，雅思  10，任意\n",
    ">'''))\n",
    "\n",
    "wordUrl = 'https://www.shanbay.com/api/v1/vocabtest/vocabularies/?'\n",
    "params = {'category': NumLocate[bianhao-1][0]}\n",
    "\n",
    "resWord = requests.get(wordUrl,params=params)  # 若params传入失败，可以用拼接实现\n",
    "wordLists = resWord.json()['data']\n",
    "wordTodo = []\n",
    "i = 0\n",
    "for wordList in wordLists:\n",
    "    wordTodo.append(wordList['content'])  # 获取数据并存入列表\n",
    "    i += 1\n",
    "    if(i==5):     # 只取前十个做测试\n",
    "        break\n",
    "print(wordTodo)\n",
    "wordEasy = []\n",
    "wordHard = []\n",
    "for word in wordTodo:\n",
    "    while True:\n",
    "        anwser = int(input('你是否记得这个单词：%s\\n记得选1，不记得选2\\n'% word))\n",
    "        if anwser == 1:\n",
    "            wordEasy.append(word)\n",
    "            break\n",
    "        elif anwser == 2:\n",
    "            wordHard.append(word)\n",
    "            break\n",
    "        else :\n",
    "            print('输入错误，请重新输入！')\n",
    "            \n",
    "with open('wordEasy.csv','w',newline='')as file1:\n",
    "    csv_f1 = csv.writer(file1)\n",
    "    csv_f1.writerow(wordEasy)\n",
    "    \n",
    "with open('wordHard.csv','w',newline='')as file2:\n",
    "    csv_f2 = csv.writer(file2)\n",
    "    csv_f2.writerow(wordHard)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "name": "python374jvsc74a57bd02a7e95a32014fc1ccf24626d45a98c6e7b4373277259c22f47a91d487fc3e8a5",
   "display_name": "Python 3.7.4 64-bit ('base': conda)"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}