# 爬取百度贴吧
# https://tieba.baidu.com/f?kw=python&ie=utf-8&pn=0
# https://tieba.baidu.com/f?kw=python&ie=utf-8&pn=50
# https://tieba.baidu.com/f?kw=python&ie=utf-8&pn=100
import requests


class TiebaSpider(object):
    """Crawl a Baidu Tieba forum's listing pages and save each one as an HTML file."""

    def __init__(self, tieba_name):
        """Store the forum name and build the paginated URL template.

        :param tieba_name: name of the tieba (forum) to crawl, e.g. "python"
        """
        self.tieba_name = tieba_name
        # pn is the thread offset: 0, 50, 100, ... (50 threads per listing page)
        self.url = "https://tieba.baidu.com/f?kw=" + tieba_name + "&ie=utf-8&pn={}"

    def get_url_list(self):
        """Return the URLs of the first 10 listing pages."""
        # Offsets step by 50 per page: 0, 50, ..., 450.
        return [self.url.format(i * 50) for i in range(10)]

    def parse_url(self, url):
        """Fetch *url* and return the response body as text.

        A User-Agent header is sent so Baidu is less likely to serve a
        bot-detection page, and a timeout prevents the request from
        hanging forever. Raises requests.HTTPError on a bad status so an
        error page is never silently saved as content.
        """
        print(url)
        headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}
        r = requests.get(url, headers=headers, timeout=10)
        r.raise_for_status()  # fail loudly instead of writing an error page to disk
        return r.text

    def save_html(self, page_num, tb_html):
        """Write one page's HTML to '<tieba_name>-第<page_num>页.html'."""
        file_path = '{}-第{}页.html'.format(self.tieba_name, page_num)
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(tb_html)

    def run(self):
        """Fetch every listing page and save it to disk."""
        # enumerate replaces the original list.index(tburl) lookup, which was
        # O(n) per iteration and would mis-number pages if the list ever
        # contained duplicate URLs.
        for page_num, tb_url in enumerate(self.get_url_list(), start=1):
            tb_html = self.parse_url(tb_url)
            self.save_html(page_num, tb_html)


if __name__ == '__main__':
    # Script entry point: crawl the "python" tieba and save each listing page.
    spider = TiebaSpider("python")
    spider.run()

