import re
from urllib import request
from urllib import parse

# res = request.urlopen(url='https://www.baidu.com/')

# print(res)
# Send a request to the test site http://httpbin.org/get to inspect the User-Agent being sent

# html=res.read()
# html=res.read().decode()
# html=res.geturl()
# html=res.getcode()

# print(html)
# headers={'User-Agent':'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)'}
# Wrap the URL and headers into a Request object
# req=request.Request(url='http://httpbin.org/get',headers=headers)
# res1=request.urlopen(req)
# html2=res1.read().decode()
# print(html2)
# Request headers: present a browser-style User-Agent so the server
# treats this like a normal browser request.
headers = {'User-Agent': ' Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'}

# Read the search keyword from the user and URL-encode it.
# parse.urlencode joins multiple parameters with '&' automatically.
word = input("请输入")
params = parse.urlencode({'wd': word})
url = 'https://www.baidu.com/s?' + params

req = request.Request(url=url, headers=headers)
# Use a context manager so the HTTP response is always closed
# (the original left the connection open after reading).
with request.urlopen(req) as res:
    # Decode the response body bytes into a str (UTF-8 by default).
    html = res.read().decode()

# To save the page for offline inspection, uncomment:
# filename = word + '.html'
# with open(filename, 'w', encoding='utf-8') as f:
#     f.write(html)

# Greedy matching treats everything between tags as one run of characters;
# append '?' to a quantifier to make it non-greedy.
# NOTE(review): the pattern below is an empty placeholder — it matches at every
# position and yields nothing useful; fill in a real pattern with capture
# groups before relying on the results.
regex = ''
p = re.compile(regex, re.S)  # re.S: '.' also matches newlines, so patterns span tags
# With multiple capture groups, findall returns a list of tuples: [(), (), ()].
# Bind the result so it is not silently discarded.
matches = p.findall(html)


