import requests
# 爬取网址
# http://bbs.itheima.com/forum-425-1.html

def load_page(url):
    '''
    Purpose: send a GET request to *url* and return the server's response body.
    url: the page URL to download.
    Returns: the decoded HTML text of the response.
    Raises: requests.HTTPError on a non-2xx status,
            requests.Timeout / requests.ConnectionError on network failure.
    '''
    headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident / 5.0;"}
    # Timeout keeps the scraper from hanging forever on an unresponsive server.
    response = requests.get(url, headers=headers, timeout=10)
    # Fail loudly on HTTP errors instead of silently saving an error page to disk.
    response.raise_for_status()
    # If the server omits a charset header, requests decodes .text as ISO-8859-1,
    # which garbles the Chinese forum pages; use the detected encoding instead.
    response.encoding = response.apparent_encoding
    return response.text


def save_file(html, filename):
    '''
    Purpose: persist an HTML document to the local filesystem.
    html: the page content returned by the server.
    filename: path of the file to create or overwrite (written as UTF-8).
    '''
    print("正在保存" + filename)
    with open(filename, mode='w', encoding='utf-8') as out:
        out.write(html)


def heima_forum(begin_page, end_page):
    '''
    Purpose: scraper scheduler for the Heima forum — builds the URL for
    every page in the inclusive range, downloads it, and saves it locally.
    begin_page: first page number (inclusive)
    end_page: last page number (inclusive)
    '''
    page = begin_page
    while page <= end_page:
        page_url = f'http://bbs.itheima.com/forum-425-{page}.html'
        target_name = f"第{page}页.html"
        save_file(load_page(page_url), target_name)
        page += 1


if __name__ == "__main__":
    # Prompt the user for the inclusive page range, then run the scraper.
    first_page = int(input("请输入起始页："))
    last_page = int(input("请输入结束页："))
    heima_forum(first_page, last_page)
