import requests
from bs4 import BeautifulSoup
import json
from datetime import datetime

def fetch_news():
    """Scrape the Star Walk (zh-Hans) news listing page.

    Returns:
        list[dict]: one dict per news card with keys ``title`` (str),
        ``summary`` (str | None), ``datetime`` (datetime | None),
        ``thumbnail`` (str | None) and ``link`` (str | None).
        Returns an empty list when the HTTP request fails.
    """
    base_url = "https://starwalk.space/zh-Hans/news"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}

    # timeout prevents the script from hanging forever on a stalled server.
    response = requests.get(base_url, headers=headers, timeout=30)

    if response.status_code != 200:
        print(f"请求失败, 状态码: {response.status_code}, URL: {base_url}")
        return []

    soup = BeautifulSoup(response.content, 'html.parser')

    news_items = []

    # NOTE(review): 'o2bzl80' looks like a generated CSS-module class name —
    # it is likely to break when the site redeploys; verify periodically.
    for item in soup.select('div.o2bzl80'):
        # Title is mandatory; skip cards without one.
        title_tag = item.select_one('h2')
        if not title_tag:
            continue

        title = title_tag.get_text(strip=True)

        # Summary paragraph (optional).
        summary_tag = item.select_one('p')
        summary = summary_tag.get_text(strip=True) if summary_tag else None

        # Publication time from <time datetime="...">; tolerate a missing
        # tag/attribute and a malformed timestamp rather than aborting
        # the whole scrape.
        time_tag = item.select_one('time')
        datetime_str = time_tag['datetime'] if time_tag and 'datetime' in time_tag.attrs else None
        try:
            datetime_obj = datetime.fromisoformat(datetime_str) if datetime_str else None
        except ValueError:
            datetime_obj = None

        # Thumbnail (optional). Tag.get avoids a KeyError when the <img>
        # has no src attribute.
        thumbnail_tag = item.select_one('img')
        thumbnail = thumbnail_tag.get('src') if thumbnail_tag else None

        # Article hyperlink (optional; may be site-relative).
        link_tag = item.select_one('a')
        link = link_tag.get('href') if link_tag else None

        news_items.append({
            'title': title,
            'summary': summary,
            'datetime': datetime_obj,
            'thumbnail': thumbnail,
            'link': link,
        })

    return news_items


def read_existing_news(file_path):
    """Load previously saved news items from a local JSON file.

    Args:
        file_path: path of the JSON file written by ``save_news_items``.

    Returns:
        list[dict]: stored items with each ``datetime`` value converted
        back to a ``datetime.date`` (or ``None`` when missing/invalid).
        Returns an empty list when the file is absent or not valid JSON.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            existing_news = json.load(file)
    except FileNotFoundError:
        print(f"文件未找到: {file_path}")
        return []
    except json.JSONDecodeError:
        print(f"文件格式错误: {file_path}")
        return []

    # Convert stored ISO strings back to date objects. Entries saved with
    # a null/absent/malformed timestamp become None instead of raising
    # TypeError/ValueError and killing the run.
    for item in existing_news:
        raw = item.get('datetime')
        try:
            item['datetime'] = datetime.fromisoformat(raw).date() if raw else None
        except ValueError:
            item['datetime'] = None

    return existing_news


def update_news_items(new_news, existing_news):
    """Merge freshly fetched news into the existing cached list.

    New links are prepended; items already cached are refreshed in place
    when any field changed. The result is sorted newest-first by date.

    Args:
        new_news: items from ``fetch_news`` (``datetime`` may be a
            ``datetime``, a ``date`` or ``None``).
        existing_news: items from ``read_existing_news``.

    Returns:
        list[dict]: merged items with ``datetime`` normalized to
        ``datetime.date`` (or ``None``), sorted descending by date.
    """
    # Normalize every timestamp to a date object *before* comparing:
    # fetched items carry datetime objects while cached items carry
    # dates, so a raw != comparison would flag every overlapping item
    # as "updated" on every run.
    def _as_date(value):
        return value.date() if isinstance(value, datetime) else value

    for item in new_news:
        item['datetime'] = _as_date(item['datetime'])
    for item in existing_news:
        item['datetime'] = _as_date(item['datetime'])

    existing_links = {item['link'] for item in existing_news}
    new_by_link = {item['link']: item for item in new_news}
    updated = False

    # Items whose link is not cached yet are genuinely new.
    new_news_items = []
    for item in new_news:
        if item['link'] not in existing_links:
            new_news_items.append(item)
            print(f"新增新闻: {item['title']}")
            updated = True

    # New items first, then the previously cached ones.
    updated_news_items = new_news_items + existing_news

    # Refresh cached items whose fields changed; dict lookup replaces
    # the original O(n*m) nested scan.
    for item in updated_news_items:
        new_item = new_by_link.get(item['link'])
        if new_item is None or new_item is item:
            continue
        if any(item[key] != new_item[key]
               for key in ('title', 'summary', 'datetime', 'thumbnail')):
            print(f"更新新闻: {item['title']}")
            item.update(new_item)
            updated = True

    # Newest first; undated items sort last instead of raising TypeError.
    updated_news_items.sort(
        key=lambda x: x['datetime'] or datetime.min.date(), reverse=True)

    if not updated:
        print("无更新")

    return updated_news_items



def save_news_items(file_path, news_items):
    """Write news items to ``file_path`` as pretty-printed UTF-8 JSON.

    Args:
        file_path: destination JSON file (overwritten).
        news_items: items whose ``datetime`` is a ``date``/``datetime``
            object or ``None``.
    """
    # Serialize shallow copies so the caller's dicts are not mutated,
    # and tolerate items without a date (None stays null in JSON;
    # the original .isoformat() call crashed on None).
    serializable = []
    for item in news_items:
        entry = dict(item)
        value = entry.get('datetime')
        entry['datetime'] = value.isoformat() if value is not None else None
        serializable.append(entry)

    with open(file_path, 'w', encoding='utf-8') as f:
        json.dump(serializable, f, ensure_ascii=False, indent=4)
    print(f"新闻内容已保存到 {file_path} 文件中")


if __name__ == '__main__':
    # Scrape the current news listing from the site.
    latest_items = fetch_news()

    if latest_items:
        # Local cache file for previously seen news.
        json_path = 'news_items.json'

        # Merge what we just fetched with what is already on disk,
        # then persist the combined, sorted result.
        known_items = read_existing_news(json_path)
        merged_items = update_news_items(latest_items, known_items)
        save_news_items(json_path, merged_items)
    else:
        print("没有找到新闻内容")
