# -*- coding: utf-8 -*-
# @Time    : 2021/2/22 20:45
# @Author  : FangQianqian
# @File    : testUrllib.py
# @Software: PyCharm


import urllib.request   #请求模块
import urllib.parse   #url解析模块
import urllib.error   #异常处理模块


#获取一个get请求
# response = urllib.request.urlopen("http://www.baidu.com/")   #urlopen()打开网页,获取网页的所有信息
# print(response)    #网页信息保存在response
# print(response.read().decode("utf-8"))  #读取网页内容，用utf-8解码


#获取一个post请求,模拟用户真实登录，向网页提交账号密码等，这些信息需封装在urlopen()中。
# import urllib.parse    #parse解析器
# data = bytes(urllib.parse.urlencode({"username":"1","password":"2"}),encoding="utf-8")   #urlencode是一个函数，可将字符串以URL编码
# response = urllib.request.urlopen("http://httpbin.org/post",data=data)    #打开网页，同时向服务器提交封装的data信息
# print(response.read().decode("utf-8"))

#获取一个get请求
# response = urllib.request.urlopen("http://httpbin.org/get")    #打开网页，发送一个不带data的GET请求
# print(response.read().decode("utf-8"))


#超时处理
# try:
#     #未超时程序处理
#     response = urllib.request.urlopen("http://httpbin.org/get",timeout=0.01)    #打开网页，设置网站的访问超时时间
#     print(response.read().decode("utf-8"))
#
# except urllib.error.URLError as reason:   #超时程序处理
#     print('time out')


#响应头
# response = urllib.request.urlopen("https://douban.com/")    #打开网页，获取响应对象
# print(response.status)

# response = urllib.request.urlopen("https://baidu.com/")    #打开网页，获取响应对象
# print(response.status)
# print(response.getheader("Expires"))    #通过getheader获取响应头的属性


# url = "http://httpbin.org/post"
# data = bytes(urllib.parse.urlencode({"name":"naicha"}),encoding='utf-8')
# headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36"}
#
#
# req = urllib.request.Request(url=url,data=data,headers=headers,method="POST")   #通过Request对访问url的源进行补充
# response = urllib.request.urlopen(req)
# print(response.read().decode('utf-8'))

# Fetch the Douban homepage.
# Douban rejects requests that carry urllib's default User-Agent, so a
# browser-like User-Agent header is attached via a Request object
# (same technique as the httpbin POST experiment above).
url = "https://douban.com/"
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36"}
req = urllib.request.Request(url=url, headers=headers)  # Request lets us supply custom headers
try:
    # timeout keeps the script from hanging forever on a dead connection
    response = urllib.request.urlopen(req, timeout=10)
    print(response.read().decode("utf-8"))
except urllib.error.URLError as err:
    # URLError covers HTTPError (its subclass) as well as DNS failures,
    # refused connections, and timeouts — mirror the timeout demo above.
    print("request failed:", err)