# Crawl Baidu Tieba (Baidu forum) search-result pages and save them as HTML files
import requests


#
# kw = input("输入爬取的内容:")
# fn = input("输入爬取的页数:")
#
# url = 'https://tieba.baidu.com/f'
#
# header = {
#     'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"
# }
#
# for i in range(int(fn)):
#     # 正在爬取的页数
#     page = i * 5
#     print("正在爬取的页数:", i + 1)
#
#     param = {
#         'kw': kw,
#         'ie': 'utf-8',
#         'fn': page
#     }
#
#     resp = requests.get(url, param, headers=header)
#
#     with open(f'../../file/{kw}{i + 1}.html', 'wb') as f:
#         f.write(resp.content)
#         print(f"{kw}{i + 1}爬取完成")


# Class-based rewrite of the crawler script commented out above

class Reptile(object):
    """Simple crawler that downloads Baidu Tieba search-result pages to HTML files."""

    def __init__(self):
        # Base search endpoint for Baidu Tieba.
        self.url = 'https://tieba.baidu.com/f'
        # Browser-like User-Agent so the server does not reject the request.
        self.header = {
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"
        }

    def send(self, param):
        """Fetch one result page and return its decoded body as text.

        param: dict of query-string parameters ('kw' keyword, 'pn' post offset).
        Raises requests.HTTPError on a non-2xx response instead of silently
        saving an error page to disk.
        """
        # Pass the query dict via the explicit `params=` keyword (the original
        # relied on it being requests.get's second positional argument).
        resp = requests.get(self.url, params=param, headers=self.header)
        resp.raise_for_status()
        # NOTE(review): Tieba serves UTF-8; if the response headers omit a
        # charset, requests falls back to a guess — force UTF-8 in that case
        # so resp.text does not mojibake. Confirm against live responses.
        if resp.encoding is None:
            resp.encoding = 'utf-8'
        return resp.text

    @staticmethod
    def save(kw, page, con):
        """Write one page's HTML text to ../../file/<kw><page>.html.

        kw: search keyword (used in the filename).
        page: 1-based page number (used in the filename).
        con: HTML text to write.
        """
        with open(f'../../file/{kw}{page}.html', 'w', encoding='utf-8') as f:
            f.write(con)
            print(f"{kw}{page}爬取完成")

    def run(self):
        """Prompt for a keyword and page count, then fetch and save each page."""
        kw = input("要爬取的内容:")
        fn = input("输入爬取的页数:")

        for i in range(int(fn)):
            # Tieba paginates with 'pn' in steps of 50 posts per page.
            page = i * 50
            print("正在爬取的页数:", i + 1)

            param = {
                'kw': kw,
                'pn': page
            }

            con = self.send(param)
            self.save(kw, i + 1, con)


# Entry point. Guarded so importing this module does not immediately start
# prompting for input and hitting the network; the variable is named
# `reptile` (not `re`) to avoid shadowing the stdlib `re` module.
if __name__ == "__main__":
    reptile = Reptile()
    reptile.run()
