# -*- coding:utf-8 -*-
# import urllib.request # 爬虫模块
# import re # 正则

'''爬虫测试1'''
# url = "http://www.baidu.com"
# res = urllib.request.urlopen(url)
# html = res.read().decode("utf-8")
# # print(len(html))
#
# dlist = re.findall("<title>(.*?)</title>", html)
# print(dlist)

'''爬虫测试2'''
# url = "http://news.baidu.com"
# # 伪装浏览器用户
# headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0'}
# req = urllib.request.Request(url, headers=headers) # 对url进行封装，可以附加header头信息
#
# # 执行请求获取响应信息
# res = urllib.request.urlopen(req)
#
# # 从响应对象中读取html数据
# html = res.read().decode('utf-8')
#
# # print(len(html))
# # 使用正则解析出新闻标题信息
# pat = '<a href="(.*?) target="_blank">(.*?)</a>'
# dlist = re.findall(pat, html)
#
# for v in dlist:
#     print(v[1] + ":" + v[0])

'''爬虫异常测试'''
# from urllib import request, error
#
# try:
#     url = "http://avatar.csdn.net/E/0/9/qwerdssdf.jpg"
#     # url = "http://www.baidu2355.com"
#     req = request.Request(url)
#
#     res = request.urlopen(req)
#     html = res.read().decode("utf-8")
#
#     print(len(html))
#
# except Exception as e:
#     if hasattr(e, "status"):
#         print("HTTPError")
#         print(e.reason)
#         print(e.status)
#     elif hasattr(e, "reason"):
#         print("URLError")
#         print(e.reason)
#
# print("OK")

'''urllib3库的使用'''
# import urllib3
# import re
#
# url = "http://www.baidu.com"
#
# http = urllib3.PoolManager() # 获取一个请求连接
#
# res = http.request("GET", url)
# print("status:%d" % res.status)
#
# data = res.data.decode("utf-8")
# print(re.findall("<title>(.*?)</title>", data))


'''requests库的使用'''
# import requests
# import re
#
# url = "http://www.baidu.com"
# res = requests.get(url)
# print("status:%d" % res.status_code)
#
# content = res.content.decode("utf-8")
# print(re.findall("<title>(.*?)</title>", content))

'''使用urllib爬取网页翻译'''
# from urllib import request, parse
# import json
#
# url = "https://fanyi.baidu.com/sug"
# # 定义请求的参数，并编码转换
# data = {'kw':"python"}
# data = parse.urlencode(data)
#
# # 设置头信息
# headers = {"Content-Length":len(data)}
#
# # 创建请求，发送请求，爬取信息
# req = request.Request(url, data=bytes(data, encoding="utf-8"), headers=headers)
# res = request.urlopen(req)
#
# # 解析结果
# str_json = res.read().decode('utf-8')
#
# print(str_json)
#
# myjson = json.loads(str_json)
# print(myjson)
#
# print(myjson['data'][0]['v'])


'''使用requests库爬取翻译页面'''
import requests
import json
def fanyi(keyword):
    """Look up *keyword* on Baidu Fanyi's suggestion API and print the first translation.

    Args:
        keyword: the word or phrase to translate.

    Raises:
        requests.RequestException: on network failure, timeout, or HTTP error status.
    """
    # Request endpoint and form payload.
    url = "https://fanyi.baidu.com/sug"
    data = {'kw': keyword}

    # POST the form data; a timeout prevents the call from hanging forever
    # on a dead network, and raise_for_status surfaces HTTP-level failures
    # instead of letting json parsing fail confusingly later.
    res = requests.post(url, data, timeout=10)
    res.raise_for_status()

    # requests decodes the JSON body directly — no manual bytes -> str -> json steps.
    myjson = res.json()

    # The API answers {'errno': ..., 'data': [...]}; guard against an empty
    # result list so an unknown word doesn't raise IndexError.
    entries = myjson.get('data') or []
    if not entries:
        print("未找到翻译结果")
        return
    print(entries[0]['v'])

if __name__ == "__main__":
    # Interactive loop: translate each entered word until the user quits with "q".
    while True:
        # Strip whitespace so "q " still quits and queries aren't padded.
        keyword = input("请输入要翻译的词：").strip()
        if keyword == "q":
            break
        if not keyword:
            # Skip blank input instead of firing a useless request.
            continue
        try:
            fanyi(keyword)
        except Exception as e:
            # Top-level boundary: report the error and keep the loop alive.
            print(e)
