# from urllib.request import urlopen
# from urllib.parse import quote
#
# import ssl
#
# ssl._create_default_https_context = ssl._create_unverified_context
# def search_baidu(wd='千峰'):
#     url='https://www.baidu.com/s?wd=%s'
#     response = urlopen(url % quote(wd))
#     assert response.code == 200
#     print('请求成功')
#     bytes_=response.read()
#     with open('%s.html' % wd, 'wb') as file:
#         file.write(bytes_)
#
# if __name__=='__main__':
#     search_baidu()


# from urllib.request import urlopen
#
# # 发起网络请求
# resp = urlopen('http://www.hao123.com')
# assert resp.code == 200
# print('请求成功')
# # 保存请求的网页
# # f 变量接收open()函数返回的对象的__enter__()返回结果
# with open('a.html', 'wb') as f:
#      f.write(resp.read())


# # 调用百度翻译实例 代码有误！！
# import encodings.idna
#
# import json
# from urllib.request import Request, urlopen
# from urllib.parse import urlencode
# import ssl
#
# ssl._create_default_https_context = ssl._create_unverified_context
#
# url = 'https://fanyi.baidu.com/sug'  # 请求的API接口
#
# headers = {
#     'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
#     'Cookie': 'BIDUPSID=16CECBB89822E3A2F26ECB8FC695AFE0; PSTM=1572182457; BAIDUID=16CECBB89822E3A2C554637A8C5F6E91:FG=1; Hm_lvt_64ecd82404c51e03dc91cb9e8c025574=1573184257; REALTIME_TRANS_SWITCH=1; FANYI_WORD_SWITCH=1; HISTORY_SWITCH=1; SOUND_SPD_SWITCH=1; SOUND_PREFER_SWITCH=1; H_PS_PSSID=1435_21084_30211_30283; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; delPer=0; PSINO=1; to_lang_often=%5B%7B%22value%22%3A%22en%22%2C%22text%22%3A%22%u82F1%u8BED%22%7D%2C%7B%22value%22%3A%22zh%22%2C%22text%22%3A%22%u4E2D%u6587%22%7D%5D; APPGUIDE_8_2_2=1; yjs_js_security_passport=0927713bf2c240ca607108086d07729426db4dbb_1577084843_js; __yjsv5_shitong=1.0_7_c3620451e4363f4aed30cbe954abf8942810_300_1577084847314_223.255.14.197_2d7151e0; from_lang_often=%5B%7B%22value%22%3A%22zh%22%2C%22text%22%3A%22%u4E2D%u6587%22%7D%2C%7B%22value%22%3A%22en%22%2C%22text%22%3A%22%u82F1%u8BED%22%7D%5D',
#     'x-requested-with': 'XMLHttpRequest',
#     'Content-Type':'text/html; charset=utf-8'
# }
#
# def fanyi(kw):
#     data = {
#         'kw': kw
#     }
#
#     # Request() 中的data参数是byte类型
#     # data不为空时，是post请求
#     req = Request(url,
#                   data=urlencode(data).encode('utf-8'),
#                   headers=headers)
#
#     resp = urlopen(req)
#     assert resp.code == 200
#
#     json_data = resp.read()  # byte
#
#     content_encode = resp.getheader('Content-Type')
#     #content_encode = 'utf-8' if content_encode is None else content_encode.split('/')[-1]
#     content_encode = 'utf-8'
#     print(content_encode)
#
#     return json.loads(json_data.decode(content_encode))
#
#
# if __name__ == '__main__':
#     print(fanyi('orange'))



# """
# 复杂的GET请求，多页面请求下载
# 保存了百度搜索python3.6后的n页
# """
#
# from urllib.request import Request, urlopen
# from urllib.parse import urlencode
#
# import ssl
#
# import time
#
# ssl._create_default_https_context = ssl._create_unverified_context
#
# url = 'https://www.baidu.com/s?'
#
# headers = {
#     'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
#     'Cookie': 'BIDUPSID=16CECBB89822E3A2F26ECB8FC695AFE0; PSTM=1572182457; BAIDUID=16CECBB89822E3A2C554637A8C5F6E91:FG=1; Hm_lvt_64ecd82404c51e03dc91cb9e8c025574=1573184257; REALTIME_TRANS_SWITCH=1; FANYI_WORD_SWITCH=1; HISTORY_SWITCH=1; SOUND_SPD_SWITCH=1; SOUND_PREFER_SWITCH=1; H_PS_PSSID=1435_21084_30211_30283; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; delPer=0; PSINO=1; to_lang_often=%5B%7B%22value%22%3A%22en%22%2C%22text%22%3A%22%u82F1%u8BED%22%7D%2C%7B%22value%22%3A%22zh%22%2C%22text%22%3A%22%u4E2D%u6587%22%7D%5D; APPGUIDE_8_2_2=1; yjs_js_security_passport=0927713bf2c240ca607108086d07729426db4dbb_1577084843_js; __yjsv5_shitong=1.0_7_c3620451e4363f4aed30cbe954abf8942810_300_1577084847314_223.255.14.197_2d7151e0; from_lang_often=%5B%7B%22value%22%3A%22zh%22%2C%22text%22%3A%22%u4E2D%u6587%22%7D%2C%7B%22value%22%3A%22en%22%2C%22text%22%3A%22%u82F1%u8BED%22%7D%5D',
#     'x-requested-with': 'XMLHttpRequest'
# }
#
# params = {
#     'wd': '',
#     'pn': 0  # 0, 10, 20, 30 ...  = (n-1)*10
# }
#
# def pages_get(wd):
#     params['wd'] = wd
#     for page in range(1, 5):
#         params['pn'] = (page-1)*10
#
#         page_url = url+urlencode(params)
#         resp = urlopen(Request(page_url,
#                                headers=headers))
#
#         assert resp.code == 200
#         file_name = 'baidu_pages/%s-%s.html' % (wd, page)
#         with open(file_name, 'wb') as f:
#             bytes_ = resp.read()
#             f.write(bytes_)
#             print(f'{file_name} 写入成功!')
#             time.sleep(0.5)
#
#     print('下载 %s 4页成功!' % wd)
#
#
# if __name__ == '__main__':
#     pages_get('Python3.6')


# """
# 模拟浏览器，增加不同的处理器Handler
# urllib.request.build_opener(*handlers)
# urllib.request.HTTPHandler  处理Http请求
# """
# from collections import namedtuple
#
# from urllib.request import HTTPHandler, build_opener
# import ssl
# ssl._create_default_https_context = ssl._create_unverified_context
#
# # 声明类 namedtuple 有命名的元组类
# Response = namedtuple('Response',
#                       field_names=['headers', 'code','text','body', 'encoding'])
#
# def get(url):
#     opener = build_opener(HTTPHandler())
#     resp = opener.open(url)
#     # 要求返回某一个类对象， 它的属性包含：
#     # headers-> dict,
#     # code-> int ,
#     # text 文本 ,
#     # body 字节码等相关属性
#
#     headers = dict(resp.getheaders())
#     try:
#         encoding = headers['Content-Type'].split('=')[-1]
#     except:
#         encoding = 'utf-8'
#     code = resp.code
#     body = resp.read()
#     text = body.decode(encoding)
#
#     return Response(headers=headers,
#                     encoding=encoding,
#                     code=code,
#                     body=body,
#                     text=text)
#
# if __name__ == '__main__':
#     resp: Response = get('http://jd.com')
#     print(resp.code)
#     print(resp.headers)
#
#     #resp.code = 300  # 禁止修改namedtuple类的属性
#     print('ok')



# import requests
# from requests import Response
#
# from urllib.parse import urlencode
#
# url = 'https://shanghai.anjuke.com/community/'
#
# # 变量名后跟 : 类型， 好处是编程时会自动提醒（提示）对象中的属性及方法
# # resp: Response = requests.get(url, params={'from': 'navigation'})
#
#
# # 声明函数时，参数名后 的` :类型 `表示参数值的类型
# # 在函数的() 后的 `-> 类型` 表示函数返回的数据（结果）类型
# def download(url: str) -> str:
#     # resp: Response = requests.get(url, params={'from': 'navigation'})
#     resp: Response = requests.request('get', url, params={'from': 'navigation'})
#     if resp.status_code == 200:
#         return resp.text  # 文本， resp.content 字节码
#     return '下载失败'
#
# def get_douban_json():
#     # url = 'https://movie.douban.com/j/chart/top_list?type=5&interval_id=100%3A90&action=&'  # 请求方法是post
#     url = 'https://movie.douban.com/j/chart/top_list'  # 请求方法是post
#
#     params = {
#         'type': 5,
#         'interval_id': '100:90',  # 100:90
#         'action': ''
#     }
#
#     data = {
#         'start': 1,
#         'limit': 20,
#     }
#
#     headers = {
#         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'
#     }
#
#     resp = requests.post(url, params=params, data=data, headers=headers)
#     assert resp.status_code == 200
#     print(resp.url)
#     if 'application/json' in resp.headers['content-type']:
#         return resp.json()
#
#     return resp.text
#
#
# # ret = download(url)
# ret = get_douban_json()
# print(ret)



















































