import json
import time

from lxml import etree
import requests


# Target site (listing page):
# http://bbs.itheima.com/forum-425-1.html
# NOTE(review): the crawler below actually requests forum-424-{page}.html,
# not forum-425 — confirm which board is intended.

def load_page(url, timeout=10):
    """Fetch *url* and return the response body as text.

    Args:
        url: page URL to crawl.
        timeout: seconds to wait for the server before giving up
            (backward-compatible addition; without a timeout,
            requests.get can block forever).

    Returns:
        Decoded response body (str).
    """
    headers = {
        # Fixed: the original UA string was truncated (unclosed parenthesis)
        # and had stray spaces around "Trident/5.0".
        "User-Agent": ("Mozilla/5.0 (compatible; MSIE 9.0; "
                       "Windows NT 6.1; Trident/5.0)"),
    }
    response = requests.get(url, headers=headers, timeout=timeout)
    # The site serves Chinese text; fall back to the content-sniffed
    # encoding so .text decodes correctly when headers omit a charset.
    response.encoding = response.apparent_encoding
    return response.text


def parse_html(html):
    """Parse one forum listing page and extract per-thread metadata.

    Args:
        html: raw HTML of a forum listing page.

    Returns:
        List of dicts with keys 文章标题 (title), 文章链接 (link),
        文章作者 (author), 发布时间 (publish time). Rows missing an
        expected field are skipped.
    """
    text = etree.HTML(html)
    # Each thread row is a <th> whose class contains both "new" and "forumtit".
    node_list = text.xpath('//th[contains(@class,"new") '
                           'and contains(@class,"forumtit")]')
    items = []  # collected thread records
    num = 0  # rows processed so far; indexes into the relative-date titles
    for node in node_list:
        try:
            # Thread title and link come from the first anchor in the row.
            title = node.xpath('./a[1]/text()')[0]
            url = node.xpath('./a[1]/@href')[0]
            author = node.xpath(
                './div[@class="foruminfo"]//a/span/text()')[0]
            # Absolute publish date, e.g. "@ 2020-1-1" -> "2020-1-1".
            release_time = node.xpath('./div[2]/i/span[1]/text()'
                                      )[0].strip().replace('@', '')
            # Rows shown as "N days ago" keep the absolute date in the
            # span's @title. NOTE(review): this '//' xpath searches the
            # whole document, not just this node — the num index is what
            # pairs results with rows; verify against the live markup.
            one_page = node.xpath('//div[2]/i/span[1]/span/@title')
            if num < len(one_page):
                # Reuse the already-computed result instead of re-running
                # the identical xpath (the original queried twice).
                release_time = one_page[num]
            item = {
                "文章标题": title,
                "文章链接": url,
                "文章作者": author,
                '发布时间': release_time,
            }
            items.append(item)
            num += 1
        except IndexError:
            # A field's xpath matched nothing ([0] failed) — skip this row.
            # Narrowed from a silent `except Exception` so real bugs surface.
            pass
    return items


def save_file(items):
    """Write *items* to heima.json as pretty-printed UTF-8 JSON.

    Args:
        items: list of JSON-serializable dicts.

    Best-effort: any failure is printed rather than raised, so a save
    problem does not abort the crawl.
    """
    try:
        # 'w' (not 'w+'): we only write. json.dump streams straight to the
        # file instead of building an intermediate string.
        with open('heima.json', mode='w', encoding='utf-8') as f:
            # ensure_ascii=False keeps the Chinese keys/values readable.
            json.dump(items, f, ensure_ascii=False, indent=2)
    except Exception as e:
        print(e)


def heima_forum(begin_page, end_page):
    """Crawl listing pages begin_page..end_page (inclusive) and save all
    parsed thread records to heima.json.

    Args:
        begin_page: first page number to request.
        end_page: last page number to request (inclusive).
    """
    collected = []
    for page_no in range(begin_page, end_page + 1):
        page_url = f'http://bbs.itheima.com/forum-424-{page_no}.html'
        # Progress message: "requesting page N"
        print("正在请求第" + str(page_no) + "页")
        collected.extend(parse_html(load_page(page_url)))
    save_file(collected)


if __name__ == "__main__":
    # Prompt for the page range to crawl (start / end page numbers).
    begin_page = int(input("请输入起始页码："))
    end_page = int(input("请输入结束页码："))
    # perf_counter is monotonic and high-resolution — the right clock for
    # measuring elapsed time (time.time() can jump with wall-clock changes).
    s_time = time.perf_counter()
    heima_forum(begin_page, end_page)
    e_time = time.perf_counter()
    print(f'总用时: {e_time - s_time}秒')
