{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 第8关，学会cookies，就可以让浏览器记住你，你们可以更方便地长期保持联系，而不是在一次见面之后就相忘于江湖。\n",
    "# 注：用三个#表示该章节的每个分节"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 发表博客评论  >>  登陆   >>   提交数据\n",
    "### Requests Method : POST(not Get) >> DATA\n",
    "\n",
    "# post请求应用于向网页提交数据，比如提交表单类型数据（像账号密码就是网页表单的数据）\n",
    "# get请求多应用于获取网页数据 requests.get()\n",
    "# Get是不安全的，因为在传输过程，数据被放在请求的URL中；Post的所有操作对用户来说都是不可见的。\n",
    "# Get传送的数据量较小，这主要是因为受URL长度限制；Post传送的数据量较大，一般被默认为不受限制\n",
    "# 其他请求方式 head、options..."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Responds Headers 服务器响应信息  <<>>  Requests Headers 浏览器请求信息 \n",
    "# Set cookies : 服务器往浏览器写入的cookies，自动登录   <<>>  带账号密码的请求头往往带cookies\n",
    "# 自动登录时与账号绑定 >>> 带cookies登陆可以免账号密码  >>>  过时失效\n",
    "# Form Data ：上传的 信息 >> 存入data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "200\n"
     ]
    }
   ],
   "source": [
    "# 顺序： POST带参数请求登陆 >> 获得登录的cookies >>  带cookies请求发表评论 \n",
    "# >>  Headers - Form Data - commentContent:'😎😁😁'\n",
    "import requests\n",
    "\n",
    "url = 'https://www.nowcoder.com/nccommon/login/do?token='   #登录的网址\n",
    "\n",
    "headers = {\n",
    "    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'\n",
    "}   # 在“检查”中点击“Preserve log”，展开第0个请求/在页面中点CTRL+F搜索关键词所在请求，浏览【headers】，【response headers】服务器的响应信息,登陆信息位于名为[do?token=]的XHR文件内\n",
    "data = {\n",
    "    'email': '18571826829',\n",
    "    'remember': 'true',\n",
    "    'cipherPwd': 'Zqy3qUOYQUFwVllWVBNrEmzleDaBCOAwlpNzbLKnrH+ZzC+QpgTu1sQuCH09af1kCTyqTa7Z+xoGRDf/Tv6XfRdecGKu/8u28QDaplAra58I4txbV7YFROCo5Jg5MFA/3LETwt2cphdi8g0cEY4QTIUG0HuDa7CLyW7+EItRpPM='\n",
    "}   #把有关登录的参数(Form Data)封装成字典，赋值给data。\n",
    "\n",
    "login_in = requests.post(url,headers=headers,data=data)\n",
    "#用【requests.post】发起请求，放入参数：请求登录的网址、请求头和登录参数，然后赋值给login_in。\n",
    "cookies = login_in.cookies\n",
    "#【提取cookies】：调用requests对象的cookies属性获得登录的cookies，并赋值给变量cookies。\n",
    "\n",
    "url_1 = 'https://www.nowcoder.com/moment/create?token='\n",
    "#评论的网址。【如何寻找动作对应的URL——清空-“Preserve log”-输入框输入查看ALL】\n",
    "data_1 = {\n",
    "    'content': '学习一下python🤨',\n",
    "    'type': '0',\n",
    "    'circle': '-1'\n",
    "}\n",
    "#把有关评论的参数封装成字典。\n",
    "comment = requests.post(url_1,headers=headers,data=data_1,cookies=cookies)\n",
    "#用requests.post发起发表评论的请求，放入参数：文章网址、headers、评论参数、cookies参数\n",
    "#调用cookies的方法就是在post请求中传入cookies=cookies的参数。\n",
    "print(comment.status_code)\n",
    "# 返回405：一般由于post引起、或者评论区url选择错误"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 自制翻译机\n",
    "import requests\n",
    "\n",
    "url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'   #登录的网址\n",
    "headers = {\n",
    "    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'\n",
    "}\n",
    "TobeTrans = input('你想翻译什么：\\n')\n",
    "data = {\n",
    "    'i': TobeTrans,\n",
    "    'from': 'AUTO',\n",
    "    'to': 'AUTO',\n",
    "    'smartresult': 'dict',\n",
    "    'client': 'fanyideskweb',\n",
    "    'doctype': 'json',\n",
    "    'version': '2.1',\n",
    "    'keyfrom': 'fanyi.web',\n",
    "    'action': 'FY_BY_REALTlME'\n",
    "}    # 不是所有的请求参数都是必须的\n",
    "Trans = requests.post(url,headers=headers,data=data)\n",
    "Traned = Trans.json()['translateResult'][0][0]['tgt']\n",
    "print(Traned)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# session\n",
    "# 会话：浏览器上网的过程\n",
    "# session：会话过程中，服务器用来记录特定用户会话的信息。[我的足迹]\n",
    "# session[保存在服务器]和cookies[保存在前端]的关系：cookies中存储着session的编码信息，session中又存储了cookies的信息。\n",
    "# 通过创建一个session来处理cookies\n",
    "# 举例：这也是为什么你每次重新登录购物网站后，你之前在购物车放入的商品并不会消失的原因 >>\n",
    "# 因为你在登录时，服务器可以通过浏览器携带的cookies，找到保存了你购物车信息的session。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 代码修改 采用session [自带cookies的请求]\n",
    "# 发表评论 >>  浏览器找到评论区 >> 新请求 XHR - Headers - Form Data - commentContent:'😎😁😁'\n",
    "import requests\n",
    "\n",
    "Session = requests.session()\n",
    "#创建Session对象，相当于创建了一个特定的会话，帮我们【自动保持】cookies。\n",
    "\n",
    "url = 'https://www.nowcoder.com/nccommon/login/do?token=' \n",
    "headers = {\n",
    "    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'\n",
    "}\n",
    "data = {\n",
    "    'email': '18571826829',\n",
    "    'remember': 'true',\n",
    "    'cipherPwd': 'Zqy3qUOYQUFwVllWVBNrEmzleDaBCOAwlpNzbLKnrH+ZzC+QpgTu1sQuCH09af1kCTyqTa7Z+xoGRDf/Tv6XfRdecGKu/8u28QDaplAra58I4txbV7YFROCo5Jg5MFA/3LETwt2cphdi8g0cEY4QTIUG0HuDa7CLyW7+EItRpPM='\n",
    "}\n",
    "\n",
    "Session.post(url,headers=headers,data=data)\n",
    "#★发起[登录请求]，放入参数：请求登录的网址、请求头和登录参数。\n",
    "\n",
    "url_1 = 'https://www.nowcoder.com/moment/create?token='\n",
    "data_1 = {\n",
    "    'content': '再来学习一下python🤨',\n",
    "    'type': '0',\n",
    "    'circle': '-1'\n",
    "}\n",
    "comment = Session.post(url_1,headers=headers,data=data_1)\n",
    "# 发起[评论请求]，放入参数：文章网址，请求头和评论参数，并赋值给comment。\n",
    "print(comment.status_code)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### cookies 存储 & 读取\n",
    "import requests,json\n",
    "#引入requests和json模块。json把cookies转成字符串\n",
    "routine = r'E:\\software\\Python\\crawler\\00-风变python\\2-风变python课程资料【基础语法+爬虫精进】\\Python爬虫精进\\8~10关：第3个小目标：更上层楼\\cookies.txt'\n",
    "\n",
    "session = requests.session()   \n",
    "url = 'https://www.nowcoder.com/nccommon/login/do?token'\n",
    "headers = {\n",
    "'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n",
    "}\n",
    "data = {\n",
    "    'email': '18571826829',\n",
    "    'remember': 'true',\n",
    "    'cipherPwd': 'Zqy3qUOYQUFwVllWVBNrEmzleDaBCOAwlpNzbLKnrH+ZzC+QpgTu1sQuCH09af1kCTyqTa7Z+xoGRDf/Tv6XfRdecGKu/8u28QDaplAra58I4txbV7YFROCo5Jg5MFA/3LETwt2cphdi8g0cEY4QTIUG0HuDa7CLyW7+EItRpPM='\n",
    "}\n",
    "loading = session.post(url, headers=headers, data=data)\n",
    "print(loading.status_code)\n",
    "cookies_dict = requests.utils.dict_from_cookiejar(session.cookies)\n",
    "#调用方法：把cookies转化成字典。\n",
    "cookies_str = json.dumps(cookies_dict)\n",
    "#调用json模块的dumps函数，把cookies从字典再转成字符串。\n",
    "print(cookies_dict)\n",
    "print('--------------')\n",
    "print(cookies_str)\n",
    "f = open(routine, 'w')\n",
    "f.write(cookies_str)\n",
    "#把已经转成字符串的cookies写入文件。\n",
    "f.close()\n",
    "\n",
    "# cookies 读取\n",
    "cookies_txt = open('cookies.txt', 'r')\n",
    "cookies_dict = json.loads(cookies_txt.read())\n",
    "#调用json模块的loads函数，把字符串转成字典。\n",
    "cookies = requests.utils.cookiejar_from_dict(cookies_dict)\n",
    "#把转成字典的cookies再转成cookies本来的格式。\n",
    "session.cookies = cookies\n",
    "\n",
    "print('--------------')\n",
    "print(session.cookies)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 【两个问题待解决】第一次输入账号密码后将cookies保存,cookies过期后重新获取\n",
    "import requests,json\n",
    "\n",
    "routine = r'E:\\software\\Python\\crawler\\00-风变python\\2-风变python课程资料【基础语法+爬虫精进】\\Python爬虫精进\\8~10关：第3个小目标：更上层楼\\cookies2.txt'\n",
    "headers = {\n",
    "'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'\n",
    "}\n",
    "\n",
    "def cookies_read():\n",
    "    cookies_txt = open(routine, 'r')\n",
    "    cookies_dict = json.loads(cookies_txt.read())\n",
    "    cookies = requests.utils.cookiejar_from_dict(cookies_dict)\n",
    "    return (cookies)\n",
    "\n",
    "def sign_in():\n",
    "    url = 'https://www.nowcoder.com/nccommon/login/do?token'\n",
    "    data = {\n",
    "        'email': input('你的账号是：'),\n",
    "        'remember': 'true',\n",
    "        'cipherPwd': 'Zqy3qUOYQUFwVllWVBNrEmzleDaBCOAwlpNzbLKnrH+ZzC+QpgTu1sQuCH09af1kCTyqTa7Z+xoGRDf/Tv6XfRdecGKu/8u28QDaplAra58I4txbV7YFROCo5Jg5MFA/3LETwt2cphdi8g0cEY4QTIUG0HuDa7CLyW7+EItRpPM='\n",
    "    }\n",
    "    login = session.post(url, headers=headers, data=data)   # session已自动存入cookies\n",
    "    print(login.status_code)   # 【1】并不能作为登陆成功的标识！[只能说明向服务器成功传递数据]\n",
    "    cookies_dict = requests.utils.dict_from_cookiejar(session.cookies)\n",
    "    cookies_str = json.dumps(cookies_dict)\n",
    "    f = open(routine, 'w')\n",
    "    f.write(cookies_str)\n",
    "    f.close()\n",
    "    \n",
    "def write_message():\n",
    "    url_2 = 'https://www.nowcoder.com/moment/create?token='\n",
    "    data_1 = {\n",
    "        'content': input('发表你的评论：'),\n",
    "        'type': '0',\n",
    "        'circle': '-1'\n",
    "    }\n",
    "    return (session.post(url_2, headers=headers, data=data_1))\n",
    "    \n",
    "session = requests.session() \n",
    "try:                                    # 在本地读取cookies文件\n",
    "    session.cookies = cookies_read()   # 赋予session的cookies属性。\n",
    "except FileNotFoundError:           #读取不到cookies文件，重新登录获取cookies。\n",
    "    sign_in()\n",
    "comment = write_message()   \n",
    "if comment.status_code == 200:   # 【2】不能作为cookies有效的标识，错误的cookies也会返回200\n",
    "    print('成功啦！')\n",
    "else:\n",
    "    print('cookies可能失效，需要重新登陆')\n",
    "    sign_in()\n",
    "    session.cookies = cookies_read()\n",
    "    num = write_message()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 登陆美团\n",
    "import requests\n",
    "from datetime import datetime\n",
    "import zlib\n",
    "import base64\n",
    "\n",
    "# 生成美团反爬码：token\n",
    "def encode_token():\n",
    "    ts = int(datetime.now().timestamp() * 1000)\n",
    "    token_dict = {\n",
    "        'rId': 100900,\n",
    "        'ver': '1.0.6',\n",
    "        'ts': ts,\n",
    "        'cts': ts + 100 * 1000,\n",
    "        'brVD': [1010, 750],\n",
    "        'brR': [[1920, 1080], [1920, 1040], 24, 24],\n",
    "        'bI': ['https://gz.meituan.com/meishi/c11/', ''],\n",
    "        'mT': [],\n",
    "        'kT': [],\n",
    "        'aT': [],\n",
    "        'tT': [],\n",
    "        'aM': '',\n",
    "        'sign': 'eJwdjktOwzAQhu/ShXeJ4zYNKpIXqKtKFTsOMLUn6Yj4ofG4UjkM10CsOE3vgWH36df/2gAjnLwdlAPBBsYoR3J/hYD28f3z+PpUnmJEPqYa5UWEm0mlLBRqOSaP1qjEtFB849VeRXJ51nr56AOSVIi9S0E3LlfSzhitMix/mQwsrdWa7aTyCjInDk1mKu9nvOHauCQWq2rB/8laqd3cX+adv0zdzm3nbjTOdzCi69A/HQAHOOyHafMLmEtKXg=='\n",
    "    }\n",
    "    # 二进制编码\n",
    "    encode = str(token_dict).encode()\n",
    "    # 二进制压缩\n",
    "    compress = zlib.compress(encode)\n",
    "    # base64编码\n",
    "    b_encode = base64.b64encode(compress)\n",
    "    # 转为字符串\n",
    "    token = str(b_encode, encoding='utf-8')\n",
    "    return token\n",
    "token = encode_token()\n",
    "\n",
    "Session = requests.session()\n",
    "\n",
    "#生成requestCode\n",
    "url_rescode = 'https://passport.meituan.com/api/v3/account/mobileloginapply?uuid=58d9f58d358340fdb5f6.1590715700.1.0.0&partner=nodejs&sdkType=pc&risk_platform=1&risk_partner=0&risk_app=-1&risk_smsTemplateId=0&risk_smsPrefixId=0' \n",
    "headers = {\n",
    "    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'\n",
    "}\n",
    "mobile = input('输入你的电话\\n')\n",
    "data_code = {\n",
    "    'mobile':mobile\n",
    "}\n",
    "resCode = Session.post(url_rescode,headers=headers,data=data_code)\n",
    "requestCode = resCode.json()['error']['data']['requestCode']\n",
    "\n",
    "# 将生成的token和rescode作为query string code传入获取验证码\n",
    "url_val = 'https://verify.meituan.com/v2/ext_api/login/info' \n",
    "data_val = {\n",
    "    'id':'4',\n",
    "    'request_code': requestCode,\n",
    "    'fingerprint':' ',\n",
    "    'mobile':mobile ,\n",
    "    '_token':token\n",
    "}   \n",
    "valCode = Session.post(url_val,headers=headers,data=data_val)\n",
    "\n",
    "# 利用验证码进行登陆\n",
    "url_login = 'https://verify.meituan.com/v2/ext_api/login/verify' \n",
    "data_login = {\n",
    "    'id':'4',\n",
    "    'request_code':requestCode,\n",
    "    'fingerprint':' ',\n",
    "    'mobile': mobile,\n",
    "    '_token':token,\n",
    "    'smscode': input('输入验证码\\n')\n",
    "}\n",
    "Login = Session.post(url_login,headers=headers,data=data_login)\n",
    "print(Login.status_code)\n",
    "\n",
    "headers2 = {\n",
    "#     'Cookie': 'uuid=58d9f58d358340fdb5f6.1590543820.1.0.0; _lxsdk_cuid=17253cca5e0c8-04721d5793bef8-d373666-144000-17253cca5e0c8; __mta=220494503.1590543820292.1590544493903.1590544566385.5; mtcdn=K; userTicket=BuiIHNnGDJFGvyQWFXRdBejWfvfWuJryGZRwhEfI; lsu=; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; firstTime=1590714746178; ci=110; rvct=110%2C420; _lxsdk_s=1725dfcc5ac-050-bf-82f%7C%7C11; lt=gbIiEfw0sD2ygcIexAu-MnDB82gAAAAArwoAALc6eYnheFXH21GSfHopREjZjs_nUMRlXfByGZIHaECblZ9tdCTgV-Aok4HYF7Z8kA; u=125692730; n=WRN740106035; token2=gbIiEfw0sD2ygcIexAu-MnDB82gAAAAArwoAALc6eYnheFXH21GSfHopREjZjs_nUMRlXfByGZIHaECblZ9tdCTgV-Aok4HYF7Z8kA',\n",
    "    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'\n",
    "}             # 验证码通过后需要cookie才能在另一URL登陆\n",
    "url_verify = 'https://www.meituan.com/ptapi/getLoginedUserInfo'\n",
    "res = Session.get(url_verify,headers=headers2)\n",
    "print(res.status_code)\n",
    "print(res.json())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 饿了么登陆方法   [饿了么网站已更改，仅作阅读用]\n",
    "## 慢慢消化 ##\n",
    "import requests\n",
    "\n",
    "session = requests.session()   # 创建会话\n",
    "\n",
    "headers = {\n",
    "'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'\n",
    "}\n",
    "url_1 = 'https://h5.ele.me/restapi/eus/login/mobile_send_code'    # 发送验证码的网址\n",
    "tel = input('请输入手机号码：')\n",
    "data_1 = {\n",
    "    'captcha_hash':'',\n",
    "    'captcha_value':'',\n",
    "    'mobile':tel,\n",
    "    'scf':''\n",
    "}   # 发送验证码的参数\n",
    "token = session.post(url_1, headers=headers, data=data_1).json()['validate_token']\n",
    "# 在会话下，模拟获取验证码的请求后返回token值\n",
    "\n",
    "url_2 = 'https://h5.ele.me/restapi/eus/login/login_by_mobile'   # 登陆的网址\n",
    "code = input('请输入手机验证码：')\n",
    "data_2 = {\n",
    "    'mobile':tel,\n",
    "    'scf':'ms',\n",
    "    'validate_code':code,\n",
    "    'validate_token':token\n",
    "}\n",
    "session.post(url_2,headers=headers,data=data_2)\n",
    "# 实现登陆，登陆后才可访问餐厅\n",
    "\n",
    "address_url = 'https://www.ele.me/restapi/v2/pois?'\n",
    "place = input('请输入你的收货地址：')\n",
    "params ={\n",
    "    'extras[]':'count',\n",
    "    'geohash':'ws105rz9smwm',\n",
    "    'keyword':place,\n",
    "    'limit':'20',\n",
    "    'type':'nearby'\n",
    "}# 这里使用了深圳的geohash\n",
    "address_res = requests.get(address_url,params=params)\n",
    "address_json = address_res.json()\n",
    "print('以下，是与'+place+'相关的位置信息：\\n')\n",
    "n=0\n",
    "for address in address_json:\n",
    "    print(str(n)+'. '+address['name']+'：'+address['short_address']+'\\n')\n",
    "    n = n+1\n",
    "address_num = int(input('请输入您选择位置的序号：'))\n",
    "final_address = address_json[address_num]\n",
    "\n",
    "restaurants_url = 'https://www.ele.me/restapi/shopping/restaurants?'   # 使用带有餐馆列表的那个XHR地址。\n",
    "params = {\n",
    "    'extras[]':'activities',\n",
    "    'geohash':final_address['geohash'],\n",
    "    'latitude':final_address['latitude'],\n",
    "    'limit':'24',\n",
    "    'longitude':final_address['longitude'],\n",
    "    'offset':'0',\n",
    "    'terminal':'web'\n",
    "}    #其中geohash和经纬度，来自前面获取到的数据。\n",
    "restaurants_res = session.get(restaurants_url,params=params)\n",
    "# 发起请求，将响应的结果，赋值给restaurants_res\n",
    "restaurants = restaurants_res.json()\n",
    "for restaurant in restaurants:\n",
    "# restsurants最外层是一个列表，它可被遍历。restaurant则是字典，里面包含了单个餐厅的所有信息。\n",
    "    print(restaurant['name'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 创建图灵机器人\n",
    "import requests\n",
    "import json\n",
    "\n",
    "userid = 'YangYingjie'\n",
    "apikey = '4c17984edeff45d9a3c73b70470ed509'\n",
    "# 机器人识别码\n",
    "\n",
    "# 创建post函数\n",
    "def robot(content):\n",
    "    api = r'http://openapi.tuling123.com/openapi/api/v2'\n",
    "    data = {\n",
    "        \"perception\": {\n",
    "            \"inputText\": {\n",
    "            \"text\": content\n",
    "            }\n",
    "        },\n",
    "        \"userInfo\": {\n",
    "            \"apiKey\": apikey,\n",
    "            \"userId\": userid,\n",
    "        }\n",
    "    }\n",
    "     # 转化为json格式\n",
    "    jsondata = json.dumps(data)\n",
    "    # 发起post请求\n",
    "    response = requests.post(api, data = jsondata)\n",
    "    # 将返回的json数据解码\n",
    "    robot_res = json.loads(response.content)\n",
    "    # 提取对话数据\n",
    "    print(robot_res[\"results\"][0]['values']['text'])\n",
    "    \n",
    "#加一些stopwords，只要说了这些词就可以终止聊天\n",
    "while True:\n",
    "    content = input(\"talk:\")\n",
    "    # 输入对话内容\n",
    "    robot(content)\n",
    "    if content == 'bye':\n",
    "    # 设置stopwords\n",
    "        break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# NLPIR人工智能语义分析 http://ictclas.nlpir.org/nlpir/ [现在已经需要验证码才能用]\n",
    "import requests,json\n",
    "url = 'http://ictclas.nlpir.org/nlpir/index6/getWord2Vec.do'\n",
    "headers = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; WOW64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181Safari/537.36'}\n",
    "words = input('请输入你想查询的词汇：')\n",
    "data = {'content':words}\n",
    "res = requests.post(url,data=data,headers=headers)\n",
    "data = res.text\n",
    "print(data)\n",
    "data1=json.loads(data)\n",
    "print ('和“'+words+'”相关的词汇，至少还有：')\n",
    "\n",
    "f=0\n",
    "for i in data1['w2vlist']: \n",
    "    f=f+1\n",
    "    word = i.split(',') # 切割字符串\n",
    "    print ('('+str(f)+')'+word[0]+'，其相关度为'+word[1])\n",
    "# 【问题提出：需要验证码】"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 第9关，学习控制浏览器，来应对爬虫中一些更复杂的情况。\n",
    "\n",
    "### selenium模块 [控制浏览器，做出自动打开、输入、点击]\n",
    "# 作用场合：遇到页面交互复杂或是URL加密逻辑复杂的情况，不得已才用\n",
    "# 优点：真实地打开一个浏览器，【等待】所有数据都加载到开发者工具的Elements中之后，再把这个网页当做静态网页爬取\n",
    "# 缺点：相比原来牺牲了速度和更多资源\n",
    "\n",
    "# selenium库安装 >> 下载浏览器驱动[直观看到操作过程] >> 运行本地脚本"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "for i in sys.path:\n",
    "    print(i)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# selenium 用法案例\n",
    "import time\n",
    "from selenium import  webdriver   \n",
    "# 这里没找到selenium模块，将其C盘下的\\lib\\site-packages转移至D盘后才找到\n",
    "driver = webdriver.Chrome(r'C:\\Program Files\\Google\\Chrome\\Application\\chromedriver.exe') \n",
    "# 设置浏览器引擎为Chrome，控制这个实例化的浏览器，由于找不到chrome地址，将其赋值在括号内\n",
    "\n",
    "driver.get('https://localprod.pandateacher.com/python-manuscript/hello-spiderman/') \n",
    "time.sleep(2)  \n",
    "# 【获取数据】打开指定URL的网页\n",
    "\n",
    "teacher = driver.find_element_by_id('teacher')\n",
    "teacher.send_keys('必须是吴枫呀')\n",
    "# 【解析数据 + 提取数据】 selenium所解析的是Elements中的所有数据\n",
    "#  而BeautifulSoup所解析的只是Network中第0个请求的响应\n",
    "#  解析数据步骤在driver内是自动完成的，提取数据是driver的一个方法\n",
    "\n",
    "assistant = driver.find_element_by_name('assistant')\n",
    "assistant.clear()                # 预先清除按键输入内容  \n",
    "assistant.send_keys('都喜欢')    # 模拟按键输入，自动填写表单  类似 post\n",
    "time.sleep(1)\n",
    "button = driver.find_element_by_class_name('sub')\n",
    "time.sleep(1)\n",
    "button.click()  # 点击元素\n",
    "# 【鼠键操作】  .clear()  .send_keys('XXX')  .click()\n",
    "\n",
    "driver.close()  # 关闭浏览器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#提取数据的方法<一> 采用selenium.driver提取：find_element_by\n",
    "import time\n",
    "from selenium import  webdriver   \n",
    "from selenium.webdriver.chrome.options import Options # 从options模块中调用Options类\n",
    "chrome_options = Options() # 实例化Option对象\n",
    "chrome_options.add_argument('--headless') # 把Chrome浏览器设置为【静默模式】\n",
    "driver = webdriver.Chrome(r'C:\\Program Files\\Google\\Chrome\\Application\\chromedriver.exe',options = chrome_options) # 设置引擎为Chrome，后台默默运行\n",
    "\n",
    "driver.get('https://localprod.pandateacher.com/python-manuscript/hello-spiderman/') \n",
    "time.sleep(2)  \n",
    "\n",
    "teacher = driver.find_element_by_tag_name('label') # 解析网页+提取第一个'teacher'标签，放入列表\n",
    "print(teacher) # 打印teacher[描述]\n",
    "print(type(teacher)) # 打印teacher的数据类型：WE对象\n",
    "print(teacher.text) # 打印teacher的文本  与Tag.text一致 ：str\n",
    "print(teacher.get_attribute('type')) # 获取'style'这个属性的值  类似Tag['type']  ：str\n",
    "\n",
    "labels = driver.find_elements_by_tag_name('label')   #解析网页+提取所有'label'标签。只须将element改成复数。\n",
    "for label in labels:\n",
    "    print(label.text)\n",
    "print(type(labels)) # 打印labels的数据类型：list   ；   以列表形式存储\n",
    "\n",
    "driver.close() # 关闭浏览器\n",
    "\n",
    "# 其他提取数据的方法\n",
    "# 以下方法可以从网页中提取出'你好，蜘蛛侠！'这段文字\n",
    "# 如<h1>你好，蜘蛛侠！</h1> \n",
    "# 可以使用find_element_by_tag_name('h1')\n",
    "# 如<h1 class=\"title\">你好，蜘蛛侠！</h1>\n",
    "# 可以使用find_element_by_class_name('title')\n",
    "# 如<h1 id=\"title\">你好，蜘蛛侠！</h1> \n",
    "# 可以使用find_element_by_id('title')\n",
    "# 如<h1 name=\"hello\">你好，蜘蛛侠！</h1> \n",
    "# 可以使用find_element_by_name('hello')\n",
    "\n",
    "# 以下方法可以提取出超链接\n",
    "# 如<a href=\"spidermen.html\">你好，蜘蛛侠！</a>\n",
    "# 可以使用find_element_by_link_text('你好，蜘蛛侠！')\n",
    "# 如<a href=\"https://localprod.pandateacher.com/python-manuscript/hello-spiderman/\">你好，蜘蛛侠！</a>\n",
    "# 可以使用find_element_by_partial_link_text('你好')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 提取数据的方法<二> driver解析+BeautifulSoup提取\n",
    "import time\n",
    "from selenium import  webdriver   \n",
    "from bs4 import BeautifulSoup\n",
    "\n",
    "driver = webdriver.Chrome(r'C:\\Program Files\\Google\\Chrome\\Application\\chromedriver.exe')  \n",
    "driver.get('https://localprod.pandateacher.com/python-manuscript/hello-spiderman/') \n",
    "time.sleep(2)  \n",
    "\n",
    "pageSource = driver.page_source # 获取完整渲染的网页源代码[包含JS,CSS]\n",
    "print(type(pageSource)) # 打印pageSource的类型   <str>\n",
    "print(pageSource) # 打印pageSource\n",
    "\n",
    "soup = BeautifulSoup(pageSource,'html.parser') #把网页源代码解析为BeautifulSoup对象，便于提取\n",
    "items = soup.find_all('label')\n",
    "for item in items:\n",
    "    print(item.text)\n",
    "    \n",
    "driver.close() # 关闭浏览器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 用selenium爬取QQ音乐歌曲评论\n",
    "import time\n",
    "from selenium import  webdriver   \n",
    "from bs4 import BeautifulSoup\n",
    "\n",
    "driver = webdriver.Chrome(r'C:\\Program Files\\Google\\Chrome\\Application\\chromedriver.exe')  \n",
    "driver.get('https://y.qq.com/n/yqq/song/000xdZuV2LcQ19.html') \n",
    "time.sleep(2)  \n",
    "\n",
    "button = driver.find_element_by_class_name('js_get_more_hot')\n",
    "time.sleep(1)\n",
    "button.click()  # 点击元素\n",
    "time.sleep(1)\n",
    "\n",
    "pageSource = driver.page_source \n",
    "soup = BeautifulSoup(pageSource,'html.parser')\n",
    "items = soup.find(class_='comment__list js_hot_list').find_all(class_='comment__list_item c_b_normal js_cmt_li')\n",
    "for item in items:\n",
    "    print(item.find(class_='c_tx_normal comment__text js_hot_text').text.split())\n",
    "    \n",
    "driver.close() # 关闭浏览器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 拖动滑块验证 [调试中...]\n",
    "from selenium import webdriver\n",
    "from selenium.webdriver.common.by import By\n",
    "from PIL import Image, ImageEnhance\n",
    "from selenium.webdriver import ActionChains  \n",
    "from selenium.webdriver.common.keys import Keys  \n",
    "from selenium.webdriver.support import expected_conditions as EC  \n",
    "from selenium.webdriver.support.wait import WebDriverWait   \n",
    "import cv2\n",
    "import numpy as np\n",
    "from io import BytesIO\n",
    "import time, requests\n",
    "\n",
    "class CrackSlider():\n",
    "#     \"\"\"\n",
    "#     通过浏览器截图，识别验证码中缺口位置，获取需要滑动距离，并模仿人类行为破解滑动验证码\n",
    "#     \"\"\"\n",
    "    def __init__(self):\n",
    "        super(CrackSlider, self).__init__()\n",
    "        # 实际地址\n",
    "        self.url = 'http://ictclas.nlpir.org/nlpir/'\n",
    "        self.driver = webdriver.Chrome()\n",
    "        self.wait = WebDriverWait(self.driver, 20)\n",
    "        self.zoom = 2\n",
    "\n",
    "    def open(self):\n",
    "        self.driver.get(self.url)\n",
    "\n",
    "    def get_pic(self):\n",
    "        time.sleep(2)\n",
    "        target = browser.find_element_by_class_name(\"geetest_canvas_bg geetest_absolute\")\n",
    "        template = browser.find_element_by_class_name(\"yidun_jigsaw\")\n",
    "        target_link = target.get_attribute('src')\n",
    "        template_link = template.get_attribute('src')\n",
    "        target_img = Image.open(BytesIO(requests.get(target_link).content))\n",
    "        template_img = Image.open(BytesIO(requests.get(template_link).content))\n",
    "        target_img.save('target.jpg')\n",
    "        template_img.save('template.png')\n",
    "        size_orign = target.size\n",
    "        local_img = Image.open('target.jpg')\n",
    "        size_loc = local_img.size\n",
    "        self.zoom = 320 / int(size_loc[0])\n",
    "\n",
    "    def get_tracks(self, distance):\n",
    "        print(distance)\n",
    "        distance += 20\n",
    "        v = 0\n",
    "        t = 0.2\n",
    "        forward_tracks = []\n",
    "        current = 0\n",
    "        mid = distance * 3/5\n",
    "        while current < distance:\n",
    "            if current < mid:\n",
    "                a = 2\n",
    "            else:\n",
    "                a = -3\n",
    "            s = v * t + 0.5 * a * (t**2)\n",
    "            v = v + a * t\n",
    "            current += s\n",
    "            forward_tracks.append(round(s))\n",
    "\n",
    "        back_tracks = [-3,-3,-2,-2,-2,-2,-2,-1,-1,-1]\n",
    "        return {'forward_tracks':forward_tracks,'back_tracks':back_tracks}\n",
    "\n",
    "    def match(self, target, template):\n",
    "        img_rgb = cv2.imread(target)\n",
    "        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)\n",
    "        template = cv2.imread(template,0)\n",
    "        run = 1\n",
    "        w, h = template.shape[::-1]\n",
    "        print(w, h)\n",
    "        res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED) \n",
    "        \n",
    "        # 使用二分法查找阈值的精确值 \n",
    "        L = 0\n",
    "        R = 1\n",
    "        while run < 20:\n",
    "            run += 1\n",
    "            threshold = (R + L) / 2\n",
    "            if threshold < 0:\n",
    "                print('Error')\n",
    "                return None\n",
    "            loc = np.where( res >= threshold)\n",
    "            #print(len(loc[1]))\n",
    "            if len(loc[1]) > 1:\n",
    "                L += (R - L) / 2\n",
    "            elif len(loc[1]) == 1:\n",
    "                print('目标区域起点x坐标为：%d' % loc[1][0])\n",
    "                break\n",
    "            elif len(loc[1]) < 1:\n",
    "                R -= (R - L) / 2\n",
    "\n",
    "        return loc[1][0]\n",
    "\n",
    "    def crack_slider(self,browser):\n",
    "        #self.open()\n",
    "        target = 'target.jpg'\n",
    "        template = 'template.png'\n",
    "        self.get_pic()\n",
    "        distance = self.match(target, template)\n",
    "        zoo = 1.36 #缩放系数，需要自己调整大小\n",
    "        tracks = self.get_tracks((distance + 7 )*zoo) # 对位移的缩放计算\n",
    "        #print(tracks)\n",
    "        slider = browser.find_element_by_class_name(\"yidun_slider\")\n",
    "        ActionChains(browser).click_and_hold(slider).perform()\n",
    "\n",
    "        for track in tracks['forward_tracks']:\n",
    "            ActionChains(browser).move_by_offset(xoffset=track, yoffset=0).perform()\n",
    "\n",
    "        time.sleep(0.5)\n",
    "        for back_tracks in tracks['back_tracks']:\n",
    "            ActionChains(browser).move_by_offset(xoffset=back_tracks, yoffset=0).perform()\n",
    "\n",
    "        ActionChains(browser).move_by_offset(xoffset=-3, yoffset=0).perform()\n",
    "        ActionChains(browser).move_by_offset(xoffset=3, yoffset=0).perform()\n",
    "        time.sleep(0.5)\n",
    "        ActionChains(browser).release().perform()\n",
    "        try:\n",
    "            failure = WebDriverWait(browser, 5).until(EC.text_to_be_present_in_element((By.CLASS_NAME, 'yidun_tips__text'),'向右滑动滑块填充拼图'))\n",
    "            print('failure')\n",
    "        except:\n",
    "            print('验证成功')\n",
    "            return None\n",
    "\n",
    "        if failure:\n",
    "            self.crack_slider(browser)\n",
    "\n",
    "#if __name__ == '__main__':\n",
    "browser = webdriver.Chrome()\n",
    "browser.get( 'http://ictclas.nlpir.org/nlpir/')\n",
    "browser.implicitly_wait(10)\n",
    "elem=browser.find_element_by_class_name(\"geetest_radar_tip\")#.submit()\n",
    "#elem=browser.find_element_by_id(\"go\")\n",
    "elem.click()\n",
    "\n",
    "browser.implicitly_wait(10)\n",
    "browser.switch_to_window(browser.window_handles[-1])\n",
    "c = CrackSlider()\n",
    "k = 1\n",
    "for i in range(1,70000):\n",
    "    try:\n",
    "        elem=browser.find_element_by_class_name(\"idol_vote_info\")\n",
    "        elem.click()\n",
    "        time.sleep(0.2)\n",
    "        # 设置点击50次刷新一次\n",
    "        if k%50 == 0:\n",
    "            browser.refresh()   # 刷新方法 refresh \n",
    "            print ('test pass: refresh successful')\n",
    "        # 点击110次休眠50s，可以自己设置\n",
    "        if k%110 == 0:\n",
    "            print (\"click\",k)\n",
    "            time.sleep(50)             \n",
    "        k += 1\n",
    "    except:\n",
    "        print('-----需要验证-----')\n",
    "        c.crack_slider(browser)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Lesson 10: scheduled jobs -- run a scraper on a timer and deliver the results.\n",
    "# Example use cases: concert ticket grabbing / stock buy-sell alerts.\n",
    "\n",
    "# Scrape the local weather forecast from weather.com.cn.\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "\n",
    "headers={'user-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'}\n",
    "url='http://www.weather.com.cn/weather/101280601.shtml'\n",
    "res=requests.get(url,headers=headers)\n",
    "res.encoding='utf-8'  # encoding taken from the page's declared charset in the HTML source\n",
    "bsdata=BeautifulSoup(res.text,'html.parser')\n",
    "data1= bsdata.find(class_='tem')  # first element with class 'tem' (temperature)\n",
    "data2= bsdata.find(class_='wea')  # first element with class 'wea' (weather description)\n",
    "print(data1.text)\n",
    "print(data2.text)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### The `schedule` module for timed jobs\n",
    "# time and datetime could implement this too, but the bookkeeping is more\n",
    "# involved; schedule gives simple declarative scheduling with very little code.\n",
    "\n",
    "import schedule\n",
    "import time\n",
    "\n",
    "def job():\n",
    "    print(\"I'm working...\")\n",
    "\n",
    "schedule.every(2).seconds.do(job)        # run job() every 2 seconds\n",
    "while True:\n",
    "    schedule.run_pending()  # execute any job that is due\n",
    "    time.sleep(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "import smtplib \n",
    "from email.mime.text import MIMEText\n",
    "from email.header import Header\n",
    "import schedule\n",
    "import time\n",
    "\n",
    "def getData():\n",
    "    \"\"\"Fetch the weather.com.cn forecast page and return (temperature, weather) text.\"\"\"\n",
    "    ua = {'user-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'}\n",
    "    page = requests.get('http://www.weather.com.cn/weather/101280601.shtml', headers=ua)\n",
    "    page.encoding = 'utf-8'  # match the page's declared charset\n",
    "    soup = BeautifulSoup(page.text, 'html.parser')\n",
    "    # The first 'tem' / 'wea' elements hold temperature and weather description.\n",
    "    tem_tag = soup.find(class_='tem')\n",
    "    wea_tag = soup.find(class_='wea')\n",
    "    return tem_tag.text, wea_tag.text\n",
    "\n",
    "def sendData(tp,wthr):   \n",
    "    \"\"\"Email today's forecast: tp is the temperature text, wthr the weather text.\"\"\"\n",
    "    text = '''\n",
    "    泉州今日气象预报：\n",
    "    气温：%s\n",
    "    天气：%s\n",
    "    '''%(tp,wthr)\n",
    "    msg = MIMEText(text,'plain')   # message body\n",
    "    msg['From'] = Header('中国气象网','gbk')  # sender display name\n",
    "    msg['To'] = Header('M201871424@hust.edu.cn','utf-8')   # recipient\n",
    "    msg['Subject'] = Header('气象播报')  # subject line\n",
    "\n",
    "    # NOTE(review): the account and app password below are hardcoded -- move them\n",
    "    # to environment variables or getpass before sharing this notebook.\n",
    "    try:\n",
    "        server = smtplib.SMTP() \n",
    "        server.connect('smtp.qq.com',25)\n",
    "        server.login('y156596850@qq.com','jxxoyztydbpibjic') \n",
    "        server.sendmail('y156596850@qq.com','M201871424@hust.edu.cn',msg.as_string()) \n",
    "        server.quit() \n",
    "        print('邮件发送成功')\n",
    "    except smtplib.SMTPException:\n",
    "        print('邮件发送失败')\n",
    "\n",
    "def job():\n",
    "    \"\"\"One scheduled run: scrape the forecast, then email it.\"\"\"\n",
    "    print('开始一次任务')\n",
    "    temperature, description = getData()\n",
    "    sendData(temperature, description)\n",
    "    print('任务完成')\n",
    "\n",
    "schedule.every().day.at(\"11:30\").do(job)  # run job() daily at 11:30\n",
    "while True:\n",
    "    schedule.run_pending()\n",
    "    time.sleep(1)\n",
    "# The loop keeps the process alive; if the program exits or the machine shuts\n",
    "# down, the daily scrape stops. In real deployments this would run on an\n",
    "# always-on remote server -- that setup is extra learning beyond this lesson."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Email-sending walkthrough\n",
    "import smtplib \n",
    "from email.mime.text import MIMEText\n",
    "from email.header import Header\n",
    "# smtplib drives the SMTP conversation; MIMEText/Header build the message.\n",
    "\n",
    "mailhost='smtp.qq.com'\n",
    "# QQ mail's SMTP server address, as a string.\n",
    "qqmail = smtplib.SMTP()\n",
    "# Instantiate an SMTP client object so its methods can be called below.\n",
    "qqmail.connect(mailhost,25)\n",
    "# Connect: first argument is the server address, second the SMTP port number.\n",
    "# Everything above establishes the server connection.\n",
    "\n",
    "account = input('请输入你的邮箱：')\n",
    "# Mailbox account, as a string.\n",
    "password = input('请输入你的密码：')\n",
    "# Mailbox password, as a string.\n",
    "qqmail.login(account,password)\n",
    "# Log in: first argument is the account, second the password.\n",
    "# Everything above handles login.\n",
    "\n",
    "receiver=input('请输入收件人的邮箱：')\n",
    "# Recipient's address.\n",
    "\n",
    "content=input('请输入邮件正文：')\n",
    "# Message body, as a string.\n",
    "message = MIMEText(content, 'plain', 'utf-8')\n",
    "# Build the MIME message object: body text, subtype ('plain') and encoding.\n",
    "subject = input('请输入你的邮件主题：')\n",
    "# Message subject, as a string.\n",
    "message['Subject'] = Header(subject, 'utf-8')\n",
    "# Wrap the subject in a Header object (text + encoding) and attach it.\n",
    "# Everything above fills in the subject and body.\n",
    "\n",
    "try:\n",
    "    qqmail.sendmail(account, receiver, message.as_string())\n",
    "    print ('邮件发送成功')\n",
    "except smtplib.SMTPException:\n",
    "    # Catch only SMTP errors -- the original bare except also swallowed Ctrl-C\n",
    "    # and programming errors; this matches the scheduled-mail cell above.\n",
    "    print ('邮件发送失败')\n",
    "qqmail.quit()\n",
    "# Send the mail, report the outcome, and close the SMTP session."
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.7.4 64-bit ('base': conda)"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  },
  "interpreter": {
   "hash": "2a7e95a32014fc1ccf24626d45a98c6e7b4373277259c22f47a91d487fc3e8a5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}