import requests
# Fetch the Sogou homepage and persist it to a local HTML file.
# 1. Specify the target URL.
url = 'https://www.sogou.com/'
# 2. Send a GET request; the return value is a Response object.
#    timeout= prevents the script from hanging forever on a dead connection.
response = requests.get(url=url, timeout=10)
# Fail fast on HTTP errors (4xx/5xx) instead of silently saving an error page.
response.raise_for_status()
# 3. Extract the response body as decoded text.
page_text = response.text
# 4. Persist the page to disk.
with open('G:/爬虫代码/souhu/sougou1.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)

# # Simple web collector: crawl the result page for a user-supplied keyword.
# keyWord=input('enter a keyword: ')
# # To crawl pages for different keywords, the query part of the request URL
# # must be built dynamically; the params dict holds the request parameters
# # that requests will append to the URL.
# params={
#     'query':keyWord
# }
# url='https://www.sogou.com/web?query=zhou&_asf=www.sogou.com&_ast=&w=01019900&p=40040100&ie=utf8&from=index-nologin&s_from=index&sut=2858&sst0=1616305831070&lkt=4%2C1616305828212%2C1616305829179&sugsuv=1616305406346380&sugtime=1616305831070'
# response=requests.get(url=url)
# page_text=response.text

# with open('E:/爬虫代码/souhu/zhou.html','w',encoding='utf-8')as fp:
#     fp.write(page_text)
