import requests

def load_page(url: str, file_name: str) -> None:
    """Download *url* and save the response body to *file_name*.

    Raises requests.HTTPError when the server returns an error status,
    instead of silently saving an error page as before.
    """
    headers = {
        # A desktop browser UA; Baidu serves a degraded page to unknown clients.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    }
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()  # fail loudly on 4xx/5xx rather than saving junk
    print(f'正在保存{file_name}')
    # Write the raw bytes: avoids UnicodeDecodeError when the page is not
    # UTF-8, and skips the pointless decode -> re-encode round-trip.
    with open(file_name, 'wb') as f:
        f.write(response.content)


def tieba_spider(base_url: str, begin: int, end: int) -> None:
    """Fetch Tieba result pages *begin* through *end* (inclusive) and save
    each one to a local HTML file.

    base_url already carries the '?kw=<name>' query, so extra parameters
    are appended with '&'. Tieba paginates with pn = 50 * (page - 1).
    """
    for page in range(begin, end + 1):
        pn = (page - 1) * 50
        # BUG FIX: was "$pn=" — '$' is not a query-string separator, so the
        # server ignored the offset and every request returned page 1.
        url = f"{base_url}&pn={pn}"
        # One output file per page.
        file_name = f'第{page}页.html'
        load_page(url, file_name)


def main() -> None:
    """Prompt for a Tieba forum name and a page range, then crawl it."""
    t_name = input("请输入吧名：")
    begin = int(input("请输入起始页："))
    end = int(input("请输入结束页："))
    base_url = "https://tieba.baidu.com/f?kw=" + t_name
    # Kick off the crawl.
    tieba_spider(base_url, begin, end)


if __name__ == "__main__":
    # Guarded entry point: importing this module no longer triggers
    # interactive prompts and network requests as a side effect.
    main()
