import json
from lxml import etree
import requests


def load_page(url):
    """Fetch *url* and return the response body as text.

    A browser-like User-Agent header is sent so the forum does not
    reject the request as an obvious bot.

    Args:
        url: absolute URL of the page to download.

    Returns:
        The decoded response body (``str``).
    """
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.183'}
    # timeout keeps the crawler from hanging forever on a stalled server;
    # the original call had no timeout, which blocks indefinitely.
    response = requests.get(url, headers=header, timeout=10)
    return response.text


def parse_html(html):
    """Parse one forum listing page and extract per-article metadata.

    Args:
        html: raw HTML source of a forum list page.

    Returns:
        A list of dicts, one per article, with keys
        "文章标题" (title), "文章链接" (link), "文章作者" (author)
        and "发布时间" (release time).
    """
    tree = etree.HTML(html)     # parse the page source into an element tree
    # Each article row lives in a <th class="new forumtit"> cell.
    node_list = tree.xpath('//th[contains(@class,"new") and contains(@class,"forumtit")]')

    items = []      # collected article records
    for node in node_list:      # iterate over each article node
        try:
            # Article title
            title = node.xpath('./a[1]/text()')[0]
            # Article link
            url = node.xpath('./a[1]/@href')[0]
            # Article author
            author = node.xpath('./div[@class="foruminfo"]//a/span/text()')[0]
            # Absolute post time; the leading '@' is just decoration.
            release_time = node.xpath('./div[2]/i/span[1]/text()')[0].strip().replace('@', '')
            # Relative times ("N days ago") carry the real date in the
            # <span title="..."> attribute — prefer it when present.
            # BUGFIX: the original used an absolute '//div[2]/...' XPath,
            # which searched the WHOLE document from every node and needed
            # a fragile running counter to pick the matching entry; a
            # node-relative './div[2]/...' path makes the counter unneeded.
            titled = node.xpath('./div[2]/i/span[1]/span/@title')
            if titled:
                release_time = titled[0]

            # Assemble one JSON-serializable record.
            item = {
                "文章标题": title,
                "文章链接": url,
                "文章作者": author,
                "发布时间": release_time
            }
            items.append(item)

        except IndexError:
            # A node missing an expected field (ads, pinned rows with a
            # different layout) is skipped rather than aborting the page.
            # Narrowed from a silent bare `except Exception: pass`.
            continue

    return items


def save_file(items):
    """Serialize *items* to ``heima.json`` as pretty-printed UTF-8 JSON.

    Errors are reported to stdout instead of being raised, so a failed
    save does not crash the crawl.
    """
    try:
        with open('heima.json', mode='w+', encoding='utf-8') as fp:
            # Stream straight to the file handle; non-ASCII (Chinese)
            # text is written as-is rather than \u-escaped.
            json.dump(items, fp, ensure_ascii=False, indent=2)
    except Exception as err:
        print(err)


def heima_forum(begin_page, end_page):
    """Crawl forum list pages ``begin_page``..``end_page`` (inclusive)
    and persist every extracted article record to heima.json."""
    collected = []

    for page in range(begin_page, end_page + 1):
        page_url = f'http://bbs.itheima.com/forum-425-{page}.html'
        # Progress indicator for the current page.
        print("正在请求第" + str(page) + "页")
        page_html = load_page(page_url)
        collected.extend(parse_html(page_html))

    save_file(collected)


if __name__ == "__main__":
    begin_page = int(input("请输入起始页码："))
    end_page = int(input("请输入结束页码："))
    heima_forum(begin_page, end_page)