# -*- coding: utf-8 -*-
# @Time : 2024/7/22 14:32
# @Author : 袁美玉
# @File : testUrllib.py
# @Software  ：PyCharm

import urllib.error
import urllib.parse
import urllib.request
# 解析网页
# 获取get请求
# response = urllib.request.urlopen("https://www.baidu.com")
# print(response.read().decode('utf-8')) # 对获取到的网页进行解码

# 获取post请求,模拟用户登录
# data = bytes() # 将数据转换成二进制数据包
# 键值对按照utf-8的格式进行解析，然后把解析的内容以encoding的方式封装成二进制数据包


# data = bytes(urllib.parse.urlencode({"hello":"world"}),encoding="utf-8")
# response = urllib.request.urlopen("https://httpbin.org/post",data=data)
# print(response.read().decode('utf-8'))

# 超时处理
# try:
#     response = urllib.request.urlopen("https://httpbin.org/get",timeout=0.01)
#     print(response.read().decode("utf-8"))
# except urllib.error.URLError as e:
#     print("time out!")

# 获取状态码，获取头部部分 信息
# response = urllib.request.urlopen("https://www.baidu.com")
# # print(response.status)
# print(response.getheader("Server"))

# 如何伪装的更像浏览器
# url = "https://httpbin.org/post"
# headers = {
# "User-Agent":
# "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
# }
# data = bytes(urllib.parse.urlencode({"hello":"world"}),encoding="utf-8")
# # 定义了一个请求对象
# req = urllib.request.Request(url=url, data=data, headers=headers, method="POST")
# # 发送请求
# response = urllib.request.urlopen(req)
# print(response.read().decode('utf-8'))


'''
  访问豆瓣
  会报错：urllib.error.HTTPError: HTTP Error 418:
  识别到是非正常浏览器，是一个爬虫
'''
# response = urllib.request.urlopen("https://www.douban.com")
# print(response.read().decode('utf-8'))

url = "https://www.douban.com"
# Spoof a real browser's User-Agent so the server does not reject the
# request as a bot (a plain urlopen gets HTTP 418 from douban, see note above).
headers = {
"User-Agent":
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
}
req = urllib.request.Request(url=url, headers=headers)
try:
    # `with` guarantees the HTTP connection is closed even if reading fails;
    # the timeout prevents the script from hanging indefinitely on a dead host.
    with urllib.request.urlopen(req, timeout=10) as response:
        print(response.read().decode('utf-8'))
except urllib.error.HTTPError as e:
    # Server answered with an error status (e.g. 418 when it detects a crawler).
    print(f"HTTP error: {e.code} {e.reason}")
except urllib.error.URLError as e:
    # Network-level failure: DNS, refused connection, or timeout.
    print(f"Failed to reach {url}: {e.reason}")