import requests


class TieBa_spider(object):
    """Simple Baidu Tieba crawler.

    Downloads the first 5 result pages of a forum (identified by a
    keyword) and saves each page's raw HTML to a local file.
    """

    def __init__(self, text):
        # Forum keyword typed by the user; also embedded in saved file names.
        self.text = text
        # `pn` is Tieba's paging offset: 50 posts per page.
        self.url = 'https://tieba.baidu.com/f?kw=' + text + '&pn={}'
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36"}

    def get_url_list(self):
        """Build the URLs of the first 5 result pages (offsets 0, 50, ..., 200)."""
        return [self.url.format(i * 50) for i in range(5)]

    def get_data_fromurl(self, url):
        """Fetch one page from the server and return its body decoded as text."""
        # timeout prevents the crawler from hanging forever on a dead connection
        response = requests.get(url, headers=self.headers, timeout=10)
        return response.content.decode()

    def save_html(self, html_str, num):
        """Save one page's HTML locally as '贴吧_<keyword>第<num>页.html'."""
        # Bug fix: use self.text rather than the module-level global `text`,
        # so the class also works when imported without the __main__ block.
        file_name = '贴吧_' + self.text + "第{}页".format(num) + '.html'
        with open(file_name, 'w', encoding='utf-8') as f:
            f.write(html_str)

    def run(self):
        """Download every page in the URL list and save it to disk."""
        # enumerate replaces url_list.index(item_url), which was an O(n)
        # lookup per page and would mis-number duplicate URLs.
        for num, item_url in enumerate(self.get_url_list(), start=1):
            html_str = self.get_data_fromurl(item_url)
            self.save_html(html_str, num)


if __name__ == '__main__':
    # Ask the user which forum to crawl, then run the spider over it.
    # NOTE: `text` stays a module-level name on purpose.
    text = input('请输入贴吧内容:\n')
    TieBa_spider(text).run()
