import re
import requests
from lxml import etree
from csv_save import write_to_csv, log

# Request headers mimicking a desktop browser (Edge on Windows) so the
# server does not reject the crawler as an automated client.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0"}

# Current list-page counter and running total of collected articles.
page = 1
item_count = 0

# Walk the news list pages in reverse order (page 79 down to 1).
# NOTE: the original comment claimed 882 pages; the code only covers 1-79.
for i in range(79, 0, -1):
    # Build the list-page URL.
    url = f'https://news.aust.edu.cn/xxyw/{i}.htm'
    # Fetch the list page; a timeout keeps a stalled server from hanging
    # the whole crawl indefinitely.
    res = requests.get(url=url, headers=headers, timeout=30)
    res.encoding = 'utf-8'  # force UTF-8 decoding

    # Parse the list page into an element tree. Kept in its own name so
    # the inner loop's detail-page tree cannot clobber it.
    list_html = etree.HTML(res.text)

    # News items collected from the current list page.
    item_list = []

    # Visit every article linked from the list page.
    for a in list_html.xpath("//li[@class='clearfix']/a"):
        # Rewrite the relative href into an absolute URL.
        link = a.xpath("./@href")[0].replace("../", "https://news.aust.edu.cn/")

        # Fetch and parse the article detail page.
        res_news = requests.get(link, headers=headers, timeout=30)
        res_news.encoding = 'utf-8'
        detail_html = etree.HTML(res_news.text)

        # Extract the headline; skip pages without an <h2> instead of
        # crashing the entire run with an IndexError.
        title_nodes = detail_html.xpath("//h2/text()")
        if not title_nodes:
            continue
        title = title_nodes[0]

        # Extract the article body and collapse line breaks to spaces.
        text = detail_html.xpath("string(//div[@class='v_news_content'])").replace("\n", " ").replace("\r", " ").strip()
        # The 'bar' div contains both the publish date and the source.
        time_str = detail_html.xpath("string(//div[@class='bar'])")
        # Date appears after the label 日期：, source after 来源：.
        time_index = re.findall(r'日期：(.*?) ', time_str)
        author = re.findall(r'来源：(.*?)\r', time_str)

        # Skip articles whose publish date could not be parsed.
        if not time_index:
            continue

        # Assemble the record for this article.
        item = {
            "title": title,
            'time': time_index[0],
            'text': text,
            'author': author[0] if author else '',
            "url": link
        }
        item_list.append(item)
        log.info(f"完成正文解析《{item['title']}》内容 ")
        item_count += 1

    # Persist this page's items via the project helper.
    # NOTE(review): the path has an .xls suffix but the helper is named
    # write_to_csv — confirm the intended output format.
    file_path = "./file/news_list.xls"
    header = ["title", "time", "text", "author", "url"]
    write_to_csv(file_path, item_list, header)
    log.info(f"第 {page} 页采集完成,共采集信息 {item_count} 条")
    page += 1

log.info('全部采集完成')