"""
使用requests爬取百度浙江工贸贴吧第1至3页数据，保存在文件中
https://tieba.baidu.com/f?kw=浙江工贸&pn=0
https://tieba.baidu.com/f?kw=浙江工贸&pn=50
https://tieba.baidu.com/f?kw=浙江工贸&pn=100
"""
import requests


def load_write_page(url_new, file_name):
    """Fetch ``url_new`` and save the response body to ``file_name`` as UTF-8.

    :param url_new: fully-built page URL (base URL plus ``&pn=`` offset).
    :param file_name: path of the HTML file to write.
    :raises requests.HTTPError: on a 4xx/5xx response.
    :raises requests.Timeout: if the server does not respond in time.
    """
    headers = {
        # Desktop-Chrome UA so Baidu serves the normal page instead of
        # blocking/redirecting the default python-requests user agent.
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/86.0.4240.183 Safari/537.36"}
    # timeout= keeps a stalled connection from hanging the whole crawl forever
    response = requests.get(url_new, headers=headers, timeout=10)
    # Fail loudly on an error status instead of silently saving an error page.
    response.raise_for_status()
    print(f'正在保存{file_name}')
    with open(file_name, 'w', encoding='utf-8') as f:
        # errors='replace' so one malformed byte cannot abort the crawl
        f.write(response.content.decode('utf-8', errors='replace'))


def tieba_spider(url, begin, end):
    """Download pages ``begin`` through ``end`` (inclusive) of a tieba
    listing and save each one via :func:`load_write_page`.

    :param url: base listing URL (``https://tieba.baidu.com/f?kw=...``).
    :param begin: first page number, 1-based.
    :param end: last page number, inclusive.
    """
    for page_no in range(begin, end + 1):
        # Baidu paginates with an item offset: pn = 50 * (page - 1)
        offset = (page_no - 1) * 50
        page_url = f"{url}&pn={offset}"
        load_write_page(page_url, f'第{page_no}页.html')


if __name__ == '__main__':
    # Interactive entry point: ask for the forum name and page range,
    # then crawl those listing pages.
    kw = input('请输入需要爬取的贴吧名：')
    begin_page = int(input('请输入起始页：'))
    end_page = int(input('请输入结束页：'))
    tieba_spider(f'https://tieba.baidu.com/f?kw={kw}', begin_page, end_page)
