"""
项目：百度搜索功能，将搜索到的内容按规定页数下载下来

应用到的模块：
- urllib.request.urlopen()
- urllib.request.Request()
- urllib.parse.urlencode()
- HTTPResponse对象的 read()
技巧点：
使用 for...in 循环形成 页码 page
完整 url 的拼接
根据 wd 参数 和 page 页码自动在文件夹中创建文件：文件夹/文件
"""

import os
import time
from urllib.parse import urlencode
from urllib.request import Request, urlopen

# Baidu search: download the requested number of result pages.
# Base URL of Baidu's search endpoint; the query string is appended later.
url = 'https://www.baidu.com/s?'
# Headers copied from a real browser session so Baidu serves a normal
# result page instead of a bot-check / redirect page.
# NOTE(review): the Cookie value is session-specific and will eventually
# expire — requests may stop returning real results once it does.
headers = {
	'Host': 'www.baidu.com',
	'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
	'Cookie': 'BIDUPSID=1B3AE86D1BD840AF1BC36FECE91C076B; PSTM=1610086369; BAIDUID=1B3AE86D1BD840AFD51F2C61C653BC4C:FG=1; BD_UPN=12314353; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; __yjs_duid=1_164c67506903ec5c2882a145bbe2b9ed1610087866484; BDUSS=dDNDViNWg5ZFZRM2FkMC1ud3gzQVNHWXBFTGNaU3BZOE13aEUyZmYyUEo3UjlnSVFBQUFBJCQAAAAAAAAAAAEAAAAenUXox7OzqsDr6eTX7cKlwLwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMlg-F~JYPhfRm; BDUSS_BFESS=dDNDViNWg5ZFZRM2FkMC1ud3gzQVNHWXBFTGNaU3BZOE13aEUyZmYyUEo3UjlnSVFBQUFBJCQAAAAAAAAAAAEAAAAenUXox7OzqsDr6eTX7cKlwLwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMlg-F~JYPhfRm; yjs_js_security_passport=cc15c3ef5d050470f661c81b5e3902f288b4ba3e_1610522985_js; ab_sr=1.0.0_M2M1MzUwZGM2OTU3OGM2NjIxNWI5ZjExNWEyOTY3MTdlNWUwN2Q3ZWI3MGE2YzU0ODJkZTNhNGMwMDFiYzRjZGQ4YjA5N2Q0MGIzNjAwYTJlNDFlYzZlMWE2OTAwYmRm; H_PS_PSSID=33423_33442_33272_31660_33284_33287_33413_26350_33264_33394_33370; delPer=1; BD_CK_SAM=1; PSINO=3; rsv_jmp_slow=1610529749851; COOKIE_SESSION=24_0_7_0_3_16_1_1_6_5_0_4_105556_0_0_0_1610490740_0_1610529789%7C9%230_0_1610529789%7C1; sug=3; sugstore=0; ORIGIN=2; bdime=0; H_PS_645EC=07efUgSoro8NxNOsKDKOTryWAqVz9mN2U7C2f%2Ban8I%2FWzSNjLyV767R15MY; BA_HECTOR=808l0k0h018h2hakis1fvtfpt0r; BDSVRTM=252',
	'Accept-Language': 'zh-CN,zh;q=0.9',
}

# Query parameters for the simplified search API: 'wd' is the search
# keyword, 'pn' the page/offset value; both are filled in by
# baidu_search_pages() before each request.
params = {
	'wd': '',
	'pn': 0
}


def baidu_search_pages():
	"""Prompt for a search keyword and a page count, then download that
	many Baidu result pages into baidu_pages/<keyword>-<page>.html.

	Side effects: reads from stdin, mutates the module-level ``params``
	dict, performs HTTP requests, and writes files under ``baidu_pages/``.
	A negative page count is clamped to 0 (nothing is downloaded).
	"""
	wd = input('请输入您想要搜索的关键字：')
	pages = int(input('请输入您想下载的总页数：'))
	if pages < 0:
		pages = 0
	params['wd'] = wd
	# Create the output directory up front; without this, open() below
	# raises FileNotFoundError on a fresh checkout.
	os.makedirs('baidu_pages', exist_ok=True)
	for page in range(1, pages + 1):
		# Baidu's 'pn' parameter is the result OFFSET, not the page
		# number: page 1 -> 0, page 2 -> 10, ... (10 results per page).
		# Using the raw page number would fetch near-duplicate pages.
		params['pn'] = (page - 1) * 10

		# urlencode() turns the dict into key=value&key=value form;
		# appended to the endpoint it yields the complete request URL.
		page_url = url + urlencode(params)
		# Bundle the URL and browser-like headers into one request.
		req = Request(page_url, headers=headers)

		# The response is a context manager: closing it returns the
		# socket even if read() raises mid-transfer.
		with urlopen(req) as resp:
			page_resp = resp.read()

		# File name pattern: baidu_pages/<keyword>-<page>.html
		file_name = 'baidu_pages/%s-%s.html' % (wd, page)
		with open(file_name, "wb") as file:
			file.write(page_resp)  # write the raw HTML bytes
			print(f'{file_name} 写入成功！')
		# Delay between requests (outside the file 'with' so the file
		# handle is not held open while sleeping).
		time.sleep(0.1)


# Run the interactive downloader only when executed as a script,
# not when this module is imported.
if __name__ == '__main__':
	baidu_search_pages()
