# 抓取指定贴吧的指定页的数据，保存到本地文件
from urllib import request, parse
import time
import random


class BaiduTieBSpider:
    """Scrape a page range of a given Baidu Tieba forum and save each page's HTML locally."""

    def __init__(self):
        # URL template: kw = forum name (URL-encoded), pn = thread-list offset.
        self.url = 'https://tieba.baidu.com/f?kw={}&pn={}'
        # Browser-like User-Agent so the site does not reject us as a bot.
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50'}

    def get_html(self, url):
        """Fetch *url* with the spider's headers and return the decoded body.

        The response is used as a context manager so the underlying
        connection is always closed, even if read()/decode() raises
        (the original version leaked the response handle).
        """
        req = request.Request(url=url, headers=self.headers)
        with request.urlopen(req) as res:
            return res.read().decode()

    def parse_html(self):
        # Placeholder: parsing of the fetched HTML is not implemented yet.
        pass

    def save_html(self, filename, html):
        """Write *html* to *filename* as UTF-8 text."""
        with open(filename, "w", encoding='utf-8') as f:
            f.write(html)

    def run(self):
        """Prompt for a forum name and page range, then fetch and save each page."""
        name = input('请输入贴吧名称')
        start = int(input('请输入起始页'))
        end = int(input('请输入终止页'))
        # URL-encode the forum name for the kw query parameter.
        parms = parse.quote(name)
        for page in range(start, end + 1):
            pn = (page - 1) * 50  # the site lists 50 threads per page
            url = self.url.format(parms, pn)
            html = self.get_html(url)
            filename = '{}_第{}页.html'.format(name, page)
            self.save_html(filename, html)
            print('第%d页抓取成功' % page)
            # Random pause between requests to avoid hammering the server.
            time.sleep(random.randint(1, 2))


if __name__ == '__main__':
    # Script entry point: build the spider and start the interactive crawl.
    BaiduTieBSpider().run()
