
# httpbin.org网站：可以模拟http网页，用来测试和开发http的请求和响应

# 获取一个get请求
'''
import urllib.request
response1 = urllib.request.urlopen("http://www.baidu.com")
print(response1.read().decode('utf-8'))
'''


"""
ctrl + a 全部选中，ctrl + c 复制  (或者，选中最后几个字，按住shift，点击右边的上滑按钮，拖动到最上面，点击最开始位置，即可全部复制想要复制的内容)
桌面新建txt文件，后缀改为html
右键，编辑，粘贴
删除最后一行和第一行多余部分，保存并关闭
双击html文件(看到的是百度一下界面)，右键，查看网页源代码
"""


# 获取一个post请求
'''
import urllib.request
import urllib.parse
data = bytes(urllib.parse.urlencode({"hello":"world"}), encoding="utf-8")
response2 = urllib.request.urlopen("http://httpbin.org/post", data=data)
print(response2.read().decode("utf-8"))
'''

# 超时处理
'''
import urllib.request
try:
    response = urllib.request.urlopen("http://httpbin.org/get",timeout=0.01)
    print(response.read().decode('utf-8'))
except Exception as e:
    print(f"报错了：{e}")
'''

'''
import urllib.request
response = urllib.request.urlopen("http://httpbin.org/get")
print(response.status)
# 结果：200
'''
'''
import urllib.request
response = urllib.request.urlopen("http://douban.com/")
print(response.status)
# 报错：418（被发现是爬虫了）
'''

'''
# 用谷歌浏览器打开，fn + f12 查看标头信息
import urllib.request
response = urllib.request.urlopen("http://www.baidu.com")
print(response.getheaders())         # 查看全部响应标头
# print(response.getheader("Server"))  # 查看指定响应标头
'''
'''
# 把爬虫身份封装一下，模拟访问http网页
import urllib.request
import urllib.parse
url = "http://httpbin.org/post"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36"
}
data = bytes(urllib.parse.urlencode({'name':'eric'}),encoding='utf-8')
req = urllib.request.Request(url=url, data=data, headers=headers, method="POST")
response = urllib.request.urlopen(req)
print(response.read().decode('utf-8'))
'''

# Disguise the crawler as a normal browser (via User-Agent) and fetch the douban page directly.
# Without the header, douban answers 418 (see the example above) because it detects the
# default Python urllib User-Agent.
import urllib.request
import urllib.error

url = "http://www.douban.com"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36"
}
req = urllib.request.Request(url=url, headers=headers)
try:
    # Context manager guarantees the connection is closed even if reading fails;
    # timeout prevents the script from hanging forever on a dead network.
    with urllib.request.urlopen(req, timeout=10) as response:
        print(response.read().decode('utf-8'))
except urllib.error.URLError as e:
    # URLError also covers HTTPError (e.g. 418 when the site still detects the crawler)
    # and low-level network failures such as DNS errors or timeouts.
    print(f"Request failed: {e}")







