#防止多次请求时,被服务器识别出来是爬虫
#随机调用
#第一种:构建user-agent池
# import random
# UAList=[
#     'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0',
#     'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1 Edg/126.0.0.0',
#     'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Mobile Safari/537.36 Edg/126.0.0.0'
# ]
#
# print(random.choice(UAList))

# #第二种:利用fake_useragent库  可能会出现异常
# from fake_useragent import UserAgent
# print(UserAgent().random)


#3.url传参
#https://www.baidu.com/ssid=1388616263313832303536g707a01000000/from=844b/s?word=%E5%AD%A6%E4%B9%A0

#字符串被当作url提交时会被自动进行url编码处理

#输入 --- 学习                          原始字符串
#发送请求的时候 --- %E5%AD%A6%E4%B9%A0   URL编码(百分号编码)后的字符串,并非加密
#导入一个模块
from urllib.parse import quote,unquote
#quote()    #原始字符串转URL编码      传入参数类型:字符串
#unquote()  #URL编码转原始字符串      传入参数类型:%xx形式的字符串

# print(quote('参数'))  #%E5%8F%82%E6%95%B0
# print(unquote('%E5%AD%A6%E4%B9%A0'))    #转明文

#word=参数
#https://www.baidu.com/ssid=1388616263313832303536g707a01000000/from=844b/s?word=%E5%AD%A6%E4%B9%A0

import requests
# url='https://www.baidu.com/s?wd=%E5%AD%A6%E4%B9%A0'
# headers={
#     'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'
# }
# res = requests.get(url,headers=headers)
# print(res.content.decode())

#通过params携带参数字典
#1.构建请求参数字典
#2.发送请求的时候带上请求参数字典

# url = 'https://www.baidu.com/s?'
#
# headers={
#     'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'
# }
#
# #构建请求参数字典
# name = input('请输入关键字:')
# kw = {'word':name}
# res2 = requests.get(url,headers=headers,params=kw)
# print(res2.content.decode())















