from fastapi import APIRouter
import json
import requests
from bs4 import BeautifulSoup
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, BrowserConfig, CacheMode
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy

# Router exposing the AI-news endpoints; expected to be mounted by the main FastAPI app.
ai_news_app = APIRouter()


@ai_news_app.get("/news")
async def get_news(limit: int = 5, schema=None):
    """HTTP endpoint: return up to ``limit`` scraped news items.

    Delegates to :func:`fetch_news` and wraps its two results in a JSON
    object with the extracted articles and the concatenated detail text.
    """
    articles, detail_text = await fetch_news(limit, schema)
    return {"news": articles, "newsdetail": detail_text}


async def fetch_news(limit: int = 5, schema=None):
    """Fetch and process news data.

    Retrieves news links from the configured news source and extracts the
    article content for up to ``limit`` of them.

    Args:
        limit (int): Maximum number of news items to fetch. Defaults to 5.
            Non-positive values yield an empty result.
        schema (dict | None): CSS-extraction schema handed to crawl4ai's
            ``JsonCssExtractionStrategy``. When ``None``, a default schema
            targeting aibase.com article pages is used.

    Returns:
        tuple: A two-element tuple:
            - news_data_list (list): One dict of extracted fields per article.
            - newsdetail (str): All article contents concatenated into a
              single detail string.
    """
    if schema is None:
        # Default extraction schema for aibase.com article pages.
        schema = {
            "name": "AIbase News Article",
            "baseSelector": "div.pb-10",  # CSS selector of the main article container
            "fields": [
                {
                    "name": "title",
                    "selector": "h1",
                    "type": "text",
                },
                {
                    "name": "publication_date",
                    # NOTE(review): positional selector — brittle if the site layout changes.
                    "selector": "div.flex.flex-col > div.flex.flex-wrap > span:nth-child(6)",
                    "type": "text",
                },
                {
                    "name": "content",
                    "selector": "div.post-content",
                    "type": "text",
                },
            ],
        }
    news_urls = get_news_urls()
    print(f"共找到 {len(news_urls)} 条新闻链接")
    # Clamp the limit: a negative value would silently drop *trailing* items
    # via slicing instead of limiting the count.
    news_urls = news_urls[:max(limit, 0)]
    news_data_list = []
    detail_parts = []  # collect pieces and join once instead of quadratic +=
    # Process each news URL in turn (1-based index used in the detail text).
    for index, url in enumerate(news_urls, start=1):
        news_data = await extract_ai_news_article(url, schema)
        if news_data:
            news_data_list.append(news_data)
            content = news_data.get("content", "无法提取内容")
            detail_parts.append(f"今天新闻第{index}条内容：{content}；\n")
    return news_data_list, "".join(detail_parts)


# 获取新闻列表页面的所有新闻URL
# Collect all news-article URLs from the news listing page.
def get_news_urls():
    """Scrape the aibase.com news listing page for article URLs.

    Returns:
        list: Absolute URLs of news detail pages, in page order with
        duplicates removed. Empty list on any request failure.
    """
    url = "https://www.aibase.com/zh/news"
    try:
        # Without a timeout this request could hang indefinitely.
        response = requests.get(url, timeout=10)
    except requests.RequestException as err:
        print(f"请求异常: {err}")
        return []
    news_urls = []
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, 'html.parser')
        seen = set()  # the listing page can repeat the same link
        for item in soup.find_all('a', href=True):
            link = item['href']
            # Keep only links shaped like news detail pages (/news/<id>).
            if '/news/' in link and len(link.split('/')) == 3:
                full_url = f"https://www.aibase.com/zh{link}"
                if full_url not in seen:
                    seen.add(full_url)
                    news_urls.append(full_url)
    else:
        print(f"请求失败，状态码: {response.status_code}")
    return news_urls


# 提取单个新闻文章的数据
# Extract the structured data of a single news article.
async def extract_ai_news_article(url, schema):
    """Crawl a single article page and extract structured fields via CSS schema.

    Args:
        url (str): Absolute URL of the article page.
        schema (dict): CSS-extraction schema for ``JsonCssExtractionStrategy``.

    Returns:
        dict | None: Extracted fields of the first matched container, or
        ``None`` when crawling fails or the extraction yields no data.
    """
    print(f"\n--- 提取新闻文章数据: {url} ---")
    # Build the extraction strategy and crawl configuration.
    extraction_strategy = JsonCssExtractionStrategy(schema, verbose=True)
    browser_config = BrowserConfig(headless=True, verbose=True)
    run_config = CrawlerRunConfig(
        word_count_threshold=10,  # minimum word count per content block
        extraction_strategy=extraction_strategy,
        cache_mode=CacheMode.BYPASS)  # always fetch fresh news, never serve cache

    # Crawl the page with AsyncWebCrawler.
    async with AsyncWebCrawler(verbose=True, config=browser_config) as crawler:
        result = await crawler.arun(
            url=url,
            config=run_config
        )
        if not result.success:
            print(f"页面爬取失败: {url}")
            return None

        if not result.extracted_content:
            print(f"无法提取新闻数据: {url}")
            return None
        extracted_data = json.loads(result.extracted_content)
        # The strategy returns a JSON list of matches; it can legitimately be
        # empty ("[]") when the selectors match nothing — indexing [0]
        # unguarded previously raised IndexError here.
        if not extracted_data:
            print(f"无法提取新闻数据: {url}")
            return None
        # .get() avoids a KeyError if the page has no title field.
        print(f"成功提取新闻: {extracted_data[0].get('title')}")
        return extracted_data[0]
