# Essential web-scraping knowledge:
# 1. Sending requests: urllib (stdlib), requests
# 2. Parsing responses: bs4/BeautifulSoup, xpath (important), re

from typing import Dict
import requests
from lxml import etree
import json
import re
# Youdao web-translate endpoint (query params mimic the browser client).
url = 'https://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'

# Impersonate a desktop Chrome browser so the server accepts the request.
headers = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36",
}

# Form payload captured from a real browser session.
# NOTE(review): 'salt', 'sign', 'lts' and 'bv' look like anti-scraping tokens
# Youdao derives from the input text and a timestamp; these hard-coded values
# were captured for the phrase below and may be rejected for other inputs —
# confirm before generalizing.
data = {
    'i': '我爱你',                  # the text to translate
    'from': 'AUTO',                 # auto-detect source language
    'to': 'AUTO',                   # auto-detect target language
    'smartresult': 'dict',
    'client': 'fanyideskweb',
    'salt': '16290026148086',
    'sign': '6e85b71fde008dcddb19a7efd41db453',
    'lts': '1629002614808',
    'bv': '82e80c28d69a2cab7ec51944342775ae',
    'doctype': 'json',              # ask the server for a JSON body
    'version': '2.1',
    'keyfrom': 'fanyi.web',
    'action': 'FY_BY_REALTlME',
}

# POST the form and extract the translation from the JSON response.
# A timeout keeps the script from hanging forever if the server stalls.
response = requests.post(url, headers=headers, data=data, timeout=10)

if response.status_code == 200:
    try:
        # doctype=json in the payload requests a JSON body shaped like:
        # {"translateResult": [[{"src": "...", "tgt": "..."}]], ...}
        result = response.json()
        print(result['translateResult'][0][0]['tgt'])
    except (ValueError, KeyError, IndexError) as exc:
        # The hard-coded anti-scraping tokens in `data` may be stale, in
        # which case the server returns an error payload instead of the
        # expected translation structure.
        print('Unexpected response body:', exc)
else:
    print('Request failed with HTTP status', response.status_code)