{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6c5242d0",
   "metadata": {},
   "outputs": [],
   "source": [
    "### XPath [extracts from HTML only] / regular expressions [fundamental, harder to master] / CSS selectors ==> three reusable extraction techniques\n",
    "# Unlike bs4/json parsing, no need to drill down level by level through the code or XHR responses to find the target\n",
    "# Can XPath extract data from XHR files? They must first be rendered into HTML with a tool such as selenium\n",
    "# pip install parsel: a high-performance HTML/XML parser for Python; wraps data in Selector objects and returns lists\n",
    "# HTML tags make up the page source: a tree of elements, mainly used to DISPLAY data\n",
    "# Difference with XML: XML always uses paired (open/close) tags and is mainly used to TRANSPORT data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import requests \n",
    "# Fetch the XPath tutorial page used in the cells below.\n",
    "# timeout guards against hanging indefinitely: requests has NO default timeout.\n",
    "res = requests.get('https://www.runoob.com/xpath/xpath-nodes.html', timeout=10)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A node = a tag's position in the document tree: root node <html>, child node <head>, etc.  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    " # Turn the page into an XPath-queryable element tree\n",
    "from lxml import etree     \n",
    "rxml = etree.HTML(res.text) # parse the page source into an lxml element\n",
    "type(rxml)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Expression        Meaning\n",
    "# //                select matching nodes anywhere below (can skip levels)\n",
    "# /                 select one level at a time\n",
    "# *                 match any node/attribute\n",
    "# @                 select an attribute\n",
    "# text()            select text content\n",
    "# ..                select the parent node\n",
    "# .                 select the current node\n",
    "#//p | //div        select p OR div tags"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Absolute-path extraction: every level must be nested explicitly, no gaps allowed\n",
    "# NOTE: xpath() always returns a list\n",
    "rxml.xpath('/html/body/div[@class=\"container main\"]/div[@class=\"row\"]/div[@class=\"col middle-column\"]/div[@class=\"article\"]/div[@class=\"article-body\"]/div[@id=\"content\"]/p[1]/text()')  \n",
    "# TIP: press Ctrl+F in DevTools Inspect to search by XPath; replace the number inside [] with '*' to match every item\n",
    "# The XPath Helper extension can build paths automatically; SyntaxError: paths copied from it use single quotes, which clash with the single quotes wrapping the expression\n",
    "# Filter by attribute with [@class=\"container main\"]; combine attributes with [@class=\"container main\" and @href=\"\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Extract text using a relative path (// skips intermediate levels)\n",
    "rxml.xpath('//*[@id=\"content\"]/p[1]/text()') "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Extract an attribute value with @\n",
    "rxml.xpath('//div[@class=\"article-body\"]/div/@id') "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A few useful attributes of lxml.etree._Element objects\n",
    "eltex = rxml.xpath('//*[@id=\"content\"]/p')[0]\n",
    "print(type(eltex))\n",
    "print(eltex.text)   # a single 'lxml.etree._Element' object\n",
    "els = rxml.xpath('//*[@id=\"content\"]/p[1]')\n",
    "print(type(els))\n",
    "for i in els:     # xpath() returns a 'list'; iterate to inspect each element\n",
    "    print (i.tag)\n",
    "    print (i.text)\n",
    "    print (i.attrib)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Example: scrape the first 10 of IMDb's Top 250 movies\n",
    "import requests \n",
    "res = requests.get('https://www.imdb.com/chart/top/?ref_=nv_mv_250')\n",
    "res.status_code\n",
    "from lxml import etree\n",
    "rxml = etree.HTML(res.text)\n",
    "elems = rxml.xpath('//*[@id=\"main\"]/div/span/div/div/div[3]/table/tbody/tr[*]/td[2]/a/@href')[:10]\n",
    "# The hrefs are site-relative and start with '/', so the base URL must NOT end\n",
    "# with '/' — otherwise every link contains a malformed double slash '//'\n",
    "base_url = 'https://www.imdb.com'\n",
    "links = [base_url + elem for elem in elems]\n",
    "links   # compared with bs4: the whole program fits in under 10 lines!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Redo the lesson-15 homework with XPath: use coroutines to scrape food info\n",
    "# (name, calories, detail-page link) from boohee.com's common food categories.\n",
    "# NOTE(review): the original comment claimed 11 categories, but range(1,4)\n",
    "# only covers food groups 1-3 plus view_menu — confirm the intended scope.\n",
    "\n",
    "from gevent.queue import Queue\n",
    "from gevent import monkey\n",
    "monkey.patch_all()  # patch the stdlib so blocking I/O yields to other coroutines\n",
    "import requests , gevent\n",
    "from lxml import etree\n",
    "\n",
    "work = Queue()\n",
    "\n",
    "for i in range(1,4):\n",
    "    for j in range(1,4):\n",
    "        url = 'http://www.boohee.com/food/group/'+str(j)+'?'+'page='+str(i)\n",
    "        work.put_nowait(url)\n",
    "    url2 = 'http://www.boohee.com/food/view_menu?page='+str(i)\n",
    "    work.put_nowait(url2)\n",
    "\n",
    "def crawler():\n",
    "    \"\"\"Drain the work queue, storing {food name: calorie text} into the shared `foods` dict.\"\"\"\n",
    "    headers = {\n",
    "    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'\n",
    "    }\n",
    "    while not work.empty():\n",
    "        Url = work.get_nowait()\n",
    "        r = requests.get(Url,headers=headers,timeout=10)  # timeout: a stalled server must not hang the coroutine forever\n",
    "        rxml = etree.HTML(r.text)\n",
    "        names = rxml.xpath('//*[@id=\"main\"]/div/div[2]/ul/li[*]/div[2]/h4/a/text()') \n",
    "        # list items are <class 'lxml.etree._ElementUnicodeResult'>, a str subclass\n",
    "        calories = rxml.xpath('//*[@id=\"main\"]/div/div[2]/ul/li[*]/div[2]/p/text()')\n",
    "        # zip pairs names with calories safely; the old `for i in range(10)`\n",
    "        # raised IndexError whenever a page yielded fewer than 10 items\n",
    "        for name, calorie in zip(names, calories):\n",
    "            foods[name] = calorie\n",
    "\n",
    "        # bs4 equivalent, kept for comparison:\n",
    "        # bs_res = bs4.BeautifulSoup(r.text,'html.parser')\n",
    "        # foods = bs_res.find_all('li',class_='item clearfix')\n",
    "        # for food in foods:\n",
    "        #     name = food.find_all('a')[1]['title']\n",
    "        #     calorie = food.find('p').text\n",
    "\n",
    "foods = {}\n",
    "task_lists = []\n",
    "for _ in range(3):  # three concurrent crawler coroutines share one queue\n",
    "    task = gevent.spawn(crawler)\n",
    "    task_lists.append(task)\n",
    "gevent.joinall(task_lists)\n",
    "print(foods)  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Regular expressions\n",
    "# \"One line is worth a thousand words\": the most concise way to describe an \"infinite\" set of strings\n",
    "# A general-purpose framework for describing and matching strings\n",
    "# characters + operators\n",
    "# .     any single character\n",
    "# []    value range for a SINGLE character     [a-z] means one character from a to z\n",
    "# [^]   excluded range for a SINGLE character\n",
    "# *     previous char 0 or more times          abc* matches ab, abc, abcccc, ...\n",
    "# +     previous char 1 or more times          abc+ matches abc, abcccc, ...\n",
    "# ?     previous char 0 or 1 time              abc? matches ab, abc\n",
    "# ??    minimal (lazy) match                   regexes are greedy (longest match) by default; append ? to an operator for lazy matching\n",
    "# |     either alternative                     abc|def matches abc, def\n",
    "# {m}   previous char exactly m times          ab{2}c matches abbc\n",
    "# {m,n} previous char m to n times             ab{1,2}c matches abc, abbc (original example 'ab{z,2}c' was a typo)\n",
    "# ^     match start of string                  ^abc means strings starting with abc\n",
    "# $     match end of string                    abc$ means strings ending with abc\n",
    "# \\d    a digit, same as [0-9]\n",
    "# \\w    a word character, same as [A-Za-z0-9_] for ASCII (the original note omitted the underscore)"
   ]
  },
  {
   "source": [
    "!['re'](http://img.mukewang.com/587634120001851205900512.png)"
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "['dave@google.com', 'steve@gmail.com', 'rob@gmail.com']\n[('dave', 'google', 'com'), ('steve', 'gmail', 'com'), ('rob', 'gmail', 'com')]\n"
     ]
    }
   ],
   "source": [
    "# A regular expression that recognizes all e-mail addresses in a text\n",
    "import re\n",
    "data=\"\"\"DAVE dave@google.com\n",
    "        dsd steve@gmail.com\n",
    "        dsddsdaw rob@gmail.com\n",
    "        wes np.nan\"\"\"\n",
    "      \n",
    "pattern=r'[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,4}'  # NOTE(review): {2,4} rejects TLDs longer than 4 chars, e.g. .travel\n",
    "# When applying the same pattern to many strings, re.compile builds a reusable\n",
    "# regex object once and saves CPU cycles\n",
    "regex=re.compile(pattern,flags=re.IGNORECASE) # re.IGNORECASE makes matching case-insensitive\n",
    "print(regex.findall(data))\n",
    "\n",
    "pattern2=r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\\.([A-Z]{2,4})'    # () groups split each match into (user, domain, tld) tuples\n",
    "regex2=re.compile(pattern2,flags=re.IGNORECASE) \n",
    "print(regex2.findall(data))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "ls = re.findall(r'[1-9]\\d{5}','BIT100081 TSU100084')    # match ALL occurrences — style 1: module-level function\n",
    "\n",
    "pat = re.compile(r'[1-9]\\d{5}')\n",
    "ls2 = pat.findall('BIT100081 TSU100084')    # findall, match all — style 2: compiled pattern object\n",
    "\n",
    "ls3 = pat.search('BIT100081 TSU100084')     # search returns only the FIRST match (a Match object, hence .group(0))\n",
    "print(ls,ls2,ls3.group(0))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "mat = re.search(r'PY.*N','PYANVNCNDN')  # greedy match: .* takes as much as possible\n",
    "print(mat.group(0))    \n",
    "mat = re.search(r'PY.*?N','PYANVNCNDN')  # appending ? makes it lazy (minimal match)\n",
    "print(mat.group(0))  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Regular-expression parsing case study (placeholder)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.7.4 64-bit ('base': conda)"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  },
  "interpreter": {
   "hash": "2a7e95a32014fc1ccf24626d45a98c6e7b4373277259c22f47a91d487fc3e8a5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}