"""
https://tieba.baidu.com/f?kw=浙江工贸&pn=0
"""
import requests

def tieba_spider(url, file_name):
    """Fetch the page at url and save its HTML to file_name."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    # decode() defaults to UTF-8, which matches the encoding Tieba pages use
    with open(file_name, 'w', encoding='utf-8') as f:
        f.write(response.content.decode())
    print(f'Saved {file_name}')
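
# Optional hardening (a sketch, not part of the original flow): the same fetch
# with a timeout and an HTTP status check, letting requests build the query
# string itself. params=, raise_for_status(), and response.text are standard
# requests features; the function name and 10-second timeout are my choices.
def tieba_spider_checked(kw, pn, file_name):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    }
    response = requests.get('https://tieba.baidu.com/f',
                            params={'kw': kw, 'pn': pn},
                            headers=headers, timeout=10)
    response.raise_for_status()  # fail loudly instead of saving an error page
    with open(file_name, 'w', encoding='utf-8') as f:
        f.write(response.text)  # decoded with the charset the server declares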


kw = input('Enter the name of the Tieba forum to crawl: ')
begin = int(input('Enter the start page: '))
end = int(input('Enter the end page: '))
# Append the forum name to the base URL
base_url = "https://tieba.baidu.com/f?kw=" + kw
# For each requested page: compute pn, build the page URL, fetch it, and save the HTML to a file
for i in range(begin, end + 1):
    # Tieba lists 50 posts per page, so page i starts at offset (i - 1) * 50
    pn = (i - 1) * 50
    url = base_url + "&pn=" + str(pn)
    # print(url)
    # Output file name; i is an int, so build the name with an f-string
    # (plain str + int concatenation would raise a TypeError)
    file_name = f'page_{i}.html'
    # Fetch the page at url and save it to file_name
    tieba_spider(url, file_name)
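    # Optional politeness delay between requests (an assumption: one second is
    # usually enough to avoid rate limiting; uncomment and `import time` to use)
    # time.sleep(1)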
