from urllib import parse
from urllib import request

# 加载一个页面
def loadPage(url):
	"""Fetch *url* and return the response body decoded as UTF-8 text.

	Parameters:
		url: the absolute URL to request.

	Returns:
		The page content as a str.

	Raises:
		urllib.error.URLError on network failure, and
		UnicodeDecodeError if the body is not valid UTF-8.
	"""
	# Build the request object for the target URL.
	req = request.Request(url)

	# Use a context manager so the HTTP connection is always released;
	# the original version never closed the response object.
	with request.urlopen(req) as response:
		html = response.read()

	# Decode the raw bytes into text.
	return html.decode('utf-8')
# 把下载的内容保存到本地文件
def writePage(html, filename):
	"""Save *html* (a str) to *filename* encoded as UTF-8.

	Parameters:
		html: the text content to write.
		filename: path of the output file (overwritten if it exists).
	"""
	print('正在保存到:', filename)
	# with-statement guarantees the file is closed even if write() raises;
	# the original open()/close() pair leaked the handle on error.
	with open(filename, 'w', encoding='utf-8') as f:
		f.write(html)
# 设置起始页和终止页
def tiebaSpider(url, beginPage, endPage):
	"""Download tieba result pages beginPage..endPage (inclusive).

	For each page number, appends the 50-per-page offset to *url*,
	fetches the page with loadPage(), and stores it locally via
	writePage() under a per-page HTML filename.
	"""
	for current in range(beginPage, endPage + 1):
		# Tieba paginates in steps of 50 records per page.
		offset = 50 * (current - 1)
		page_url = url + str(offset)
		page_html = loadPage(page_url)
		out_name = '第' + str(current) + '页.html'
		writePage(page_html, out_name)
# NOTE: `from urllib import parse` was originally here, mid-file;
# it has been moved to the import block at the top of the file (PEP 8).
if __name__ == '__main__':
	# Example driver, left disabled in the original source:
	# kw = input('请输入要爬取的帖吧：')
	# beginPage = int(input('请输入起始页：'))
	# endPage = int(input('请输入终止页：'))

	# key = parse.urlencode({'kw': kw}) # kw=%E6%9F%AF%E5%8D%97

	# url = 'https://tieba.baidu.com/f?'
	# url += key + '&pn='
	# tiebaSpider(url, beginPage, endPage)
	print("1"+str("1")+"\n"+"1"+'\n')

