# -*- coding: utf-8 -*-
# @Time : 2022/1/13 0013 16:40
# @Author : 小猿圈爬虫
# @Project : 0.达内_百度贴吧爬虫案例

import requests
import os
import random
import time

class BaiduTiebaSpider:
    def __init__(self):
        """定义常用变量"""
        # UA伪装：将对应的User-Agent封装到一个字典中
        #self.url = 'http://tieba.baidu.com/f?kw={}&pn{}'
        self.url = 'http://tieba.baidu.com/f'
        # UA伪装：将对应的User-Agent封装到一个字典中
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
        }

    def get_html(self,url,params):
        """获取相应内容的函数"""
        req = requests.get(url=url, params=params, headers=self.headers)
        return req

    def parse_html(self):
        """解析提取数据的函数"""
        pass

    def save_html(self,fileName,html):
        """数据持久化函数"""
        with open(fileName,'w',encoding='utf-8') as fq:
            fq.write(html)

    def run(self):
        """程序入口函数"""
        name = input('请输入贴吧名：')
        start = int(input('请输入起始页：'))
        end = int(input('请输入终止页：'))

        #1、拼接URL地址
        for page in range(start,end+1):
            pn = (page-1)*5
            params = {
                'kw': name,
                'pn': pn
            }
            #url = self.url.format(params,pn)
            #2、发起请求，解析，保存
            html = self.get_html(self.url, params)
            fileName = '{}_第{}页.html'.format(name,page)
            self.save_html(fileName,html.text)
            # 终端打印提示
            print('第%d页抓取成功'% page)


if __name__ == "__main__":
    # Build the spider and hand control to its interactive entry point.
    BaiduTiebaSpider().run()