# -*- coding:utf-8 -*-
# @Author:🎈RedBalloon
# @Time:2022/9/28-22:31
# @File:05-贴吧的案例.py
from urllib.request import urlopen, Request
from urllib.parse import urlencode
from fake_useragent import UserAgent
import os


def get_html(url):
    """Fetch *url* and return the response body decoded as text.

    :param url: fully built request URL (query string already encoded)
    :return: the decoded HTML of the response
    """
    headers = {
        # A random desktop User-Agent per request makes simple bot filters
        # less likely to trigger on a repeated identical UA string.
        "user-agent": UserAgent().random,
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    }
    request = Request(url, headers=headers)
    # Close the connection deterministically instead of leaking it until GC:
    # the HTTPResponse returned by urlopen supports the context manager protocol.
    with urlopen(request) as response:
        html_data = response.read().decode()
    print(html_data)
    return html_data


def save_page(title, page, html):
    """Save one page of HTML to ``<title>/<title><page>.html``.

    :param title: tieba name; used as both the folder name and file-name prefix
    :param page:  zero-based page index, used in the file name
    :param html:  the HTML text to write
    :return: None
    """
    # Create the folder on first use; exist_ok avoids the check-then-create
    # race of os.path.exists() + os.mkdir(), and os.path.join keeps the path
    # portable (the old '{}\\' concatenation was Windows-only).
    folder = title
    os.makedirs(folder, exist_ok=True)

    # Write exactly this page's HTML to its own file. The previous version
    # looped over range(page) and wrote the same html to every file, so each
    # call clobbered all previously saved pages with the current page's content.
    path = os.path.join(folder, '{}{}.html'.format(title, page))
    with open(path, mode='w', encoding='utf-8') as f:
        f.write(html)


def main():
    """Prompt for a tieba name and page count, then fetch and save each page."""
    # Example target:
    # https://tieba.baidu.com/f?kw=%E8%8B%B1%E9%9B%84%E8%81%94%E7%9B%9F&ie=utf-8&pn=0
    content = input("请输入想要获取的贴吧:")
    pagination = int(input("输入想获取的页数:"))
    # base_url stays outside the loop: str.format() returns a new string each
    # time and never mutates base_url itself.
    base_url = "https://tieba.baidu.com/f?&ie=utf-8{}"

    for pn in range(pagination):
        # Tieba paginates in steps of 50 posts per page.
        args = urlencode({
            "pn": pn * 50,
            "kw": content,
        })
        page_data = get_html(base_url.format(args))
        # Pass the page index (pn), not the total count, so every fetched
        # page lands in its own file instead of overwriting the others.
        save_page(content, pn, page_data)


if __name__ == '__main__':
    # NOTE: repeated runs will trigger Baidu's anti-bot check (a CAPTCHA /
    # security-verification page); at that point you need an IP proxy or to
    # handle the verification yourself.
    # Adding time.sleep(3) before each request can also delay the trigger.
    main()
