from urllib import request
from urllib.request import Request
import urllib.parse

url = "http://www.hellobi.com"
storage_path = "../files/hellobi.html"  # fixed typo: "storge" -> "storage" (local to this section)

# urlretrieve() downloads the page straight to a local file and may leave
# cached data behind; urlcleanup() removes that cache.
urllib.request.urlretrieve(url, storage_path)
urllib.request.urlcleanup()

# Open the page and inspect response metadata. Use a context manager so the
# underlying socket is closed instead of leaked.
with urllib.request.urlopen(url) as html:
    print(f"log100022:{html.info()}")   # info(): response headers / metadata
    print(f"log100023:{html.getcode()}")  # getcode(): HTTP status code
    print(f"log100024:{html.geturl()}")  # geturl(): URL actually fetched (after redirects)

# Same fetch, but treat the request as failed if no response arrives in 300s.
with urllib.request.urlopen(url, timeout=300) as html:
    pass  # demo of the timeout= parameter only; response body not needed here

# Fetch the same page repeatedly, tolerating per-attempt failures.
for attempt in range(2):
    try:
        # timeout=1: treat a server that takes longer than 1s as a failure.
        # Close the response explicitly so each attempt releases its socket.
        with urllib.request.urlopen(url, timeout=1) as resp:
            data = resp.read()
        print(f"log100025:{len(data)}")
    except Exception as e:  # deliberately broad: log the error and keep looping
        print(f"error-10001:{str(e)}")

# Simulate an HTTP GET request (Baidu search for a keyword).
keyword = "付成"  # non-ASCII text must be percent-encoded before it goes into a URL
# urllib.parse.quote is the documented API; urllib.request.quote is only an
# undocumented re-export of the same function.
keyword = urllib.parse.quote(keyword)
url_get = "https://www.baidu.com/s?wd=" + keyword+"&rsv_spt=1&rsv_iqid=0x8e199eba0003ca2e&issp=1&f=8&rsv_bp=1&rsv_idx=2&ie=utf-8&tn=baiduhome_pg&rsv_enter=0&rsv_dl=tb&rsv_sug3=20&rsv_sug1=33&rsv_sug7=101&rsv_btype=i&prefixsug=%25E4%25BB%2598%25E6%2588%2590&rsp=9&inputT=14542&rsv_sug4=15217"
req_get = Request(url_get)  # wrap the URL in a Request object
# Close the response when done instead of leaking the socket.
with request.urlopen(req_get) as resp:
    data = resp.read()
print(f"log100027:{data}")
# Context manager guarantees the file is closed even if write() raises.
with open("../files/txt/关键字爬取百度.txt", "wb") as fh:
    fh.write(data)
print(f"log100028:爬取百度信息写入完成")  # fixed stale copy-pasted comment about getcode()

# Simulate an HTTP POST request.
post_url = "http://116.63.136.67:8080/xhcjs/login/checkRole/"  # fixed typo: "ulr" -> "url"
form_data = urllib.parse.urlencode({  # percent-encode the form fields
    "name": "15123972556"
}).encode("utf-8")  # urlopen() requires bytes, not str, for POST data
req = urllib.request.Request(post_url, form_data)  # passing data= makes this a POST
# req.add_header(...)  # could set e.g. User-Agent here to masquerade as a browser
# Submit the request; the context manager closes the connection afterwards.
with urllib.request.urlopen(req) as resp:
    sub_result = resp.read()
print(f"log100029:{sub_result}")