"""
AI新闻数据源
获取最新的AI行业新闻和动态
"""
import requests
from typing import List, Dict, Any
import json
import os
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
import re

CACHE_DIR = "./cache"
CACHE_FILE = os.path.join(CACHE_DIR, "ai_news_cache.json")
CACHE_EXPIRATION_HOURS = 1

def extract_article_images(article_url: str) -> List[Dict[str, str]]:
    """
    Extract relevant images from an article page.

    Collects content-looking <img> tags (filtering out icons, logos,
    avatars and ad banners), then the Open Graph and Twitter Card meta
    images, deduplicating by resolved URL.

    Args:
        article_url: URL of the article page to scrape.

    Returns:
        At most 2 dicts with keys 'url', 'alt' and 'type'.
        Returns an empty list on any network or parsing error.
    """
    images: List[Dict[str, str]] = []
    seen_urls: set = set()  # dedupe across <img>, og:image and twitter:image

    def _resolve(src: str) -> str:
        """Resolve protocol-relative / root-relative URLs; '' if unusable."""
        if src.startswith('//'):
            return 'https:' + src
        if src.startswith('/'):
            domain = re.search(r'https?://([^/]+)', article_url)
            return f"https://{domain.group(1)}{src}" if domain else ''
        # Previously og/twitter URLs were kept even when relative and
        # unresolvable; now all branches consistently require http(s).
        return src if src.startswith('http') else ''

    def _add(url: str, alt: str, img_type: str) -> None:
        """Append a resolved image once, keyed by its final URL."""
        resolved = _resolve(url)
        if resolved and resolved not in seen_urls:
            seen_urls.add(resolved)
            images.append({'url': resolved, 'alt': alt, 'type': img_type})

    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }

        response = requests.get(article_url, headers=headers, timeout=15)
        response.raise_for_status()

        soup = BeautifulSoup(response.content, 'html.parser')

        skip_keywords = ('icon', 'logo', 'avatar', 'ad', 'banner')
        want_keywords = ('article', 'content', 'image', 'photo', 'pic',
                         'jpg', 'png', 'jpeg')

        # Article body images: filter out small icons and ad creatives,
        # keep srcs that look like real content images.
        for img in soup.find_all('img'):
            src = img.get('src', '')
            lowered = src.lower()
            if (src and
                    not any(k in lowered for k in skip_keywords) and
                    any(k in lowered for k in want_keywords)):
                _add(src, img.get('alt', '') or '文章配图', 'article_image')

        # Open Graph image (usually the canonical hero image).
        og_image = soup.find('meta', property='og:image')
        if og_image and og_image.get('content'):
            _add(og_image.get('content'), 'Open Graph图片', 'og_image')

        # Twitter Card image.
        twitter_image = soup.find('meta', attrs={'name': 'twitter:image'})
        if twitter_image and twitter_image.get('content'):
            _add(twitter_image.get('content'), 'Twitter Card图片', 'twitter_image')

    except Exception as e:
        # Best-effort: image extraction failure must not break news fetching.
        print(f"获取文章图片时出错 {article_url}: {e}")

    return images[:2]  # cap at 2 images per article

def fetch_ai_research_news(limit: int = 10) -> List[Dict[str, Any]]:
    """
    Fetch AI research news from several public RSS feeds.

    Results are cached on disk for CACHE_EXPIRATION_HOURS; the cache is
    only reused when it was written for the same `limit`.

    Args:
        limit: Maximum number of articles to return.

    Returns:
        Article dicts (title, url, published, description, source,
        authors, images), newest first. Sources that fail are skipped.
    """
    from email.utils import parsedate_to_datetime  # stdlib RFC-2822 date parser

    os.makedirs(CACHE_DIR, exist_ok=True)

    # Check cache. A corrupt / truncated cache file or a missing key must
    # not crash the fetch — fall through to a fresh download instead.
    if os.path.exists(CACHE_FILE):
        try:
            with open(CACHE_FILE, "r", encoding="utf-8") as f:
                cache_data = json.load(f)
            cached_time = datetime.fromisoformat(cache_data["timestamp"])
            if (datetime.now() - cached_time < timedelta(hours=CACHE_EXPIRATION_HOURS) and
                    cache_data.get("limit") == limit):
                print(f"[DEBUG] Returning cached AI news results.")
                return cache_data["articles"]
        except (json.JSONDecodeError, KeyError, ValueError, OSError):
            pass  # stale/broken cache: re-fetch below

    articles = []

    # Multiple AI news sources; each is best-effort.
    sources = [
        {
            'name': 'AI Research News',
            'rss_url': 'https://www.artificialintelligence-news.com/feed/',
            'base_url': 'https://www.artificialintelligence-news.com'
        },
        {
            'name': 'VentureBeat AI',
            'rss_url': 'https://venturebeat.com/ai/feed/',
            'base_url': 'https://venturebeat.com'
        },
        {
            'name': 'MIT Technology Review AI',
            'rss_url': 'https://www.technologyreview.com/topic/artificial-intelligence/feed/',
            'base_url': 'https://www.technologyreview.com'
        }
    ]

    for source in sources:
        try:
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
            }

            response = requests.get(source['rss_url'], headers=headers, timeout=30)
            response.raise_for_status()

            soup = BeautifulSoup(response.content, 'xml')
            # Spread the quota roughly evenly across sources.
            items = soup.find_all('item')[:limit//len(sources) + 1]

            for item in items:
                title = item.find('title')
                link = item.find('link')
                pub_date = item.find('pubDate')
                description = item.find('description')

                if title and link:
                    article_url = link.text.strip()

                    # Fetch article images (best-effort, may be empty).
                    images = extract_article_images(article_url)

                    article_info = {
                        'title': title.text.strip(),
                        'url': article_url,
                        'published': pub_date.text.strip() if pub_date else '',
                        'description': description.text.strip() if description else '',
                        'source': source['name'],
                        'authors': source['name'],
                        'images': images
                    }
                    articles.append(article_info)

        except Exception as e:
            print(f"获取 {source['name']} 新闻时出错: {e}")
            continue

    def _published_ts(article: Dict[str, Any]) -> float:
        """RFC-2822 pubDate -> POSIX timestamp; unparsable dates sort last."""
        try:
            return parsedate_to_datetime(article.get('published', '')).timestamp()
        except (TypeError, ValueError, OverflowError):
            return float('-inf')

    # Sort newest first by the actual publication date. The previous
    # version compared the raw pubDate strings, which is lexicographic
    # ("Fri, ..." < "Mon, ...") and not chronological.
    articles.sort(key=_published_ts, reverse=True)
    articles = articles[:limit]

    # Persist to cache; a failed write must not discard the fetched results.
    cache_data = {
        "timestamp": datetime.now().isoformat(),
        "limit": limit,
        "articles": articles
    }
    try:
        with open(CACHE_FILE, "w", encoding="utf-8") as f:
            json.dump(cache_data, f, ensure_ascii=False, indent=2)
    except OSError:
        pass  # cache is an optimization only

    return articles

def fetch_openai_blog(limit: int = 5) -> List[Dict[str, Any]]:
    """
    Fetch posts from the official OpenAI blog RSS feed.

    Args:
        limit: Maximum number of posts to return.

    Returns:
        Article dicts (title, url, published, description, source,
        authors, images); empty list if the feed cannot be fetched.
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }
        response = requests.get("https://openai.com/blog/rss.xml",
                                headers=headers, timeout=30)
        response.raise_for_status()

        feed = BeautifulSoup(response.content, 'xml')

        articles = []
        for item in feed.find_all('item')[:limit]:
            title_tag = item.find('title')
            link_tag = item.find('link')
            # Skip malformed items lacking a title or link.
            if not (title_tag and link_tag):
                continue

            url = link_tag.text.strip()
            date_tag = item.find('pubDate')
            desc_tag = item.find('description')

            articles.append({
                'title': title_tag.text.strip(),
                'url': url,
                'published': date_tag.text.strip() if date_tag else '',
                'description': desc_tag.text.strip() if desc_tag else '',
                'source': 'OpenAI Official Blog',
                'authors': 'OpenAI Team',
                # Best-effort image scrape of the article page.
                'images': extract_article_images(url),
            })

        return articles

    except Exception as e:
        print(f"获取OpenAI博客时出错: {e}")
        return []

def fetch_anthropic_blog(limit: int = 5) -> List[Dict[str, Any]]:
    """
    Scrape article links from the Anthropic news page.

    The page has no RSS feed, so this collects anchors whose href
    contains '/news/' (selector may need adjusting if the page changes).

    Args:
        limit: Maximum number of articles to return.

    Returns:
        Article dicts (title, url, published, description, source,
        authors, images); empty list if the page cannot be fetched.
    """
    try:
        blog_url = "https://www.anthropic.com/news"
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }

        response = requests.get(blog_url, headers=headers, timeout=30)
        response.raise_for_status()

        soup = BeautifulSoup(response.content, 'html.parser')

        articles = []
        seen_urls = set()  # the same article is often linked twice (image + title)

        # BUG FIX: the previous version sliced the anchor list to `limit`
        # BEFORE filtering for '/news/' hrefs, so nav/header links consumed
        # the quota and few (often zero) articles were returned. Filter
        # first, then stop once `limit` real articles are collected.
        for link in soup.find_all('a', href=True):
            if len(articles) >= limit:
                break

            href = link.get('href')
            if not href or '/news/' not in href:
                continue
            if not href.startswith('http'):
                href = f"https://www.anthropic.com{href}"
            if href in seen_urls:
                continue

            title = link.get_text(strip=True)
            if title and len(title) > 10:  # skip anchors with too-short titles
                seen_urls.add(href)
                articles.append({
                    'title': title,
                    'url': href,
                    # The listing page exposes neither date nor summary.
                    'published': '',
                    'description': '',
                    'source': 'Anthropic Official',
                    'authors': 'Anthropic Team',
                    'images': extract_article_images(href),
                })

        return articles

    except Exception as e:
        print(f"获取Anthropic博客时出错: {e}")
        return []