"""
NewsAPI聚合服务 - 聚合全球新闻热点
支持多国家、多分类的新闻数据获取
"""
import asyncio
import logging
import os
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, List, Optional

import aiohttp

logger = logging.getLogger(__name__)

class NewsAPIAggregator:
    """News aggregator built on top of NewsAPI (https://newsapi.org).

    Fetches global headlines and full-text search results across multiple
    countries, languages and categories, and normalizes every article into
    a common dictionary schema (see ``_format_article``).
    """

    def __init__(self, api_key: Optional[str] = None):
        """Initialize the NewsAPI aggregator.

        Args:
            api_key: NewsAPI key. When omitted, the ``NEWSAPI_KEY``
                environment variable is consulted instead.
        """
        self.api_key = api_key or os.getenv('NEWSAPI_KEY')
        if not self.api_key:
            logger.warning("NewsAPI key not found. Using free tier with limitations.")
            # Placeholder key: real requests made with it will be rejected by
            # the API, but the object stays constructible for offline use.
            self.api_key = "demo_key_with_limitations"

        self.base_url = "https://newsapi.org/v2"
        # Lazily created aiohttp.ClientSession (see __aenter__/_make_request).
        self.session = None
        self.name = "NewsAPI"
        self.enabled = True

        # Curated well-known NewsAPI source IDs, grouped by topic.
        self.top_sources = {
            'international': [
                'bbc-news', 'cnn', 'reuters', 'the-guardian-uk',
                'the-new-york-times', 'the-washington-post', 'bloomberg'
            ],
            'technology': [
                'techcrunch', 'the-verge', 'wired', 'ars-technica',
                'hacker-news', 'engadget', 'techradar'
            ],
            'business': [
                'bloomberg', 'business-insider', 'financial-times',
                'the-wall-street-journal', 'fortune', 'cnbc'
            ],
            'science': [
                'new-scientist', 'national-geographic', 'nature'
            ],
            'sports': [
                'espn', 'bbc-sport', 'the-sport-bible', 'fox-sports'
            ],
            'entertainment': [
                'entertainment-weekly', 'ign', 'polygon', 'buzzfeed', 'mtv-news'
            ],
            'health': [
                'medical-news-today', 'the-lancet'
            ]
        }

        # Country codes accepted by the /top-headlines endpoint.
        self.country_codes = [
            'us', 'gb', 'cn', 'jp', 'de', 'fr', 'in', 'br', 'ca', 'au',
            'kr', 'it', 'es', 'ru', 'mx', 'nl', 'se', 'no', 'dk', 'fi'
        ]

        # News categories accepted by the /top-headlines endpoint.
        self.categories = [
            'general', 'business', 'entertainment', 'health',
            'science', 'sports', 'technology'
        ]

    async def __aenter__(self):
        """Open a shared HTTP session when entering ``async with``."""
        self.session = aiohttp.ClientSession()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Close the shared HTTP session when leaving ``async with``."""
        if self.session:
            await self.session.close()
            # Drop the reference so a later call lazily creates a fresh
            # session instead of reusing a closed one.
            self.session = None

    async def _make_request(self, endpoint: str, params: Dict[str, Any],
                          max_retries: int = 3, timeout: int = 60) -> Dict[str, Any]:
        """Send an API GET request with retry and timeout support.

        Retries use exponential backoff (1s, 2s, 4s, ...) on rate limiting,
        timeouts, transport errors and unexpected exceptions.

        Args:
            endpoint: API endpoint path, e.g. ``top-headlines``.
            params: Query parameters. Not mutated: the API key is added to
                a private copy rather than to the caller's dict.
            max_retries: Maximum number of attempts.
            timeout: Total per-request timeout in seconds.

        Returns:
            Decoded JSON payload on success, otherwise
            ``{'status': 'error', 'articles': []}``.
        """
        if not self.api_key:
            logger.error("NewsAPI key is required")
            return {'status': 'error', 'articles': []}

        # Copy so the caller's dict is not polluted with the secret key.
        query = dict(params)
        query['apiKey'] = self.api_key

        url = f"{self.base_url}/{endpoint}"
        # aiohttp expects a ClientTimeout object; passing a bare number to
        # session.get(timeout=...) is deprecated.
        request_timeout = aiohttp.ClientTimeout(total=timeout)

        for attempt in range(max_retries):
            try:
                if not self.session:
                    # Lazily create a session when used outside ``async with``.
                    self.session = aiohttp.ClientSession()

                async with self.session.get(url, params=query, timeout=request_timeout) as response:
                    data = await response.json()

                    if response.status == 200 and data.get('status') == 'ok':
                        return data
                    if response.status == 429:  # rate limited
                        wait_time = 2 ** attempt  # exponential backoff
                        logger.warning("NewsAPI rate limit hit, waiting %s seconds...", wait_time)
                        await asyncio.sleep(wait_time)
                        continue
                    logger.error("NewsAPI error: %s", data.get('message', 'Unknown error'))
                    if attempt < max_retries - 1:
                        await asyncio.sleep(2 ** attempt)
                        continue
                    return {'status': 'error', 'articles': []}

            except asyncio.TimeoutError:
                logger.warning("Request timeout (attempt %d/%d)", attempt + 1, max_retries)
                if attempt < max_retries - 1:
                    await asyncio.sleep(2 ** attempt)
                    continue
                logger.error("All retry attempts failed due to timeout")
                return {'status': 'error', 'articles': []}

            except aiohttp.ClientError as e:
                logger.warning("Request failed (attempt %d/%d): %s", attempt + 1, max_retries, e)
                if attempt < max_retries - 1:
                    await asyncio.sleep(2 ** attempt)
                    continue
                logger.error("All retry attempts failed: %s", e)
                return {'status': 'error', 'articles': []}

            except Exception as e:
                logger.warning("Unexpected error (attempt %d/%d): %s", attempt + 1, max_retries, e)
                if attempt < max_retries - 1:
                    await asyncio.sleep(2 ** attempt)
                    continue
                logger.error("All retry attempts failed: %s", e)
                return {'status': 'error', 'articles': []}

        # All attempts exhausted (e.g. repeated rate limiting).
        return {'status': 'error', 'articles': []}

    async def get_top_headlines(
        self,
        country: str = None,
        category: str = None,
        sources: str = None,
        q: str = None,
        page_size: int = 20
    ) -> List[Dict[str, Any]]:
        """Fetch top headlines.

        Args:
            country: Two-letter country code (``us``, ``gb``, ...); silently
                ignored when not in ``self.country_codes``.
            category: News category (``business``, ``technology``, ...);
                silently ignored when not in ``self.categories``.
            sources: Comma-separated NewsAPI source IDs.
            q: Free-text search keywords.
            page_size: Number of articles to request.

        Returns:
            List of normalized article dicts that have a non-empty title.
        """
        params: Dict[str, Any] = {
            'pageSize': min(page_size, 100)  # the API caps results at 100
        }

        if country and country in self.country_codes:
            params['country'] = country
        if category and category in self.categories:
            params['category'] = category
        if sources:
            params['sources'] = sources
        if q:
            params['q'] = q

        data = await self._make_request('top-headlines', params)

        # Normalize and drop entries without a title.
        return [
            formatted
            for formatted in (self._format_article(a) for a in data.get('articles', []))
            if formatted.get('title')
        ]

    async def crawl(self) -> List[Dict[str, Any]]:
        """Crawl global trending news (legacy-compatible entry point)."""
        return await self.get_trending_topics(50)

    async def search_everything(
        self,
        q: str,
        sources: str = None,
        domains: str = None,
        from_date: str = None,
        to_date: str = None,
        language: str = 'en',
        sort_by: str = 'popularity',
        page_size: int = 20
    ) -> List[Dict[str, Any]]:
        """Search the full news archive (``/everything`` endpoint).

        Args:
            q: Search keywords.
            sources: Comma-separated NewsAPI source IDs.
            domains: Comma-separated domain restrictions.
            from_date: Start date (``YYYY-MM-DD``).
            to_date: End date (``YYYY-MM-DD``).
            language: Two-letter language code.
            sort_by: Sort order (``relevancy``, ``popularity``, ``publishedAt``).
            page_size: Number of articles to request.

        Returns:
            List of normalized article dicts that have a non-empty title.
        """
        params: Dict[str, Any] = {
            'q': q,
            'language': language,
            'sortBy': sort_by,
            'pageSize': min(page_size, 100)
        }

        if sources:
            params['sources'] = sources
        if domains:
            params['domains'] = domains
        if from_date:
            params['from'] = from_date
        if to_date:
            params['to'] = to_date

        data = await self._make_request('everything', params)

        return [
            formatted
            for formatted in (self._format_article(a) for a in data.get('articles', []))
            if formatted.get('title')
        ]

    async def get_trending_topics(self, limit: int = 50) -> List[Dict[str, Any]]:
        """Identify globally trending topics.

        Aggregates headlines from several major countries and categories
        concurrently, deduplicates near-identical titles, and sorts the
        result newest-first.

        Args:
            limit: Maximum number of articles to return.

        Returns:
            Deduplicated, newest-first list of normalized articles.
        """
        # Fan out the per-country and per-category requests concurrently.
        tasks = [
            self.get_top_headlines(country=country, page_size=10)
            for country in ['us', 'gb', 'de', 'fr', 'jp']
        ]
        tasks += [
            self.get_top_headlines(category=category, page_size=10)
            for category in ['technology', 'business', 'science', 'health']
        ]

        results = await asyncio.gather(*tasks, return_exceptions=True)

        all_articles: List[Dict[str, Any]] = []
        for result in results:
            # Failed sub-requests come back as exception objects; skip them.
            if isinstance(result, list):
                all_articles.extend(result)

        # Deduplicate on the first 50 title characters to collapse
        # syndicated copies of the same headline.
        seen_titles = set()
        unique_articles = []
        for article in all_articles:
            title_key = article['title'][:50] if article.get('title') else ''
            if title_key and title_key not in seen_titles:
                seen_titles.add(title_key)
                unique_articles.append(article)

        # Newest first; ISO-8601 timestamps sort correctly as strings.
        unique_articles.sort(key=lambda a: a.get('publishedAt', ''), reverse=True)

        return unique_articles[:limit]

    async def get_hot_by_category(self, category: str = 'general') -> List[Dict[str, Any]]:
        """Fetch hot news for one category.

        Args:
            category: News category name.

        Returns:
            Hot articles for that category.
        """
        # Pick the curated sources for this category, if any.
        sources = self.top_sources.get(category, [])
        sources_str = ','.join(sources) if sources else None

        # Categories unknown to the API are dropped rather than rejected.
        headlines = await self.get_top_headlines(
            category=category if category in self.categories else None,
            sources=sources_str,
            page_size=30
        )

        return headlines

    async def aggregate_all_sources(self) -> Dict[str, List[Dict[str, Any]]]:
        """Aggregate all data sources.

        Returns:
            News data keyed by category, plus a ``'trending'`` entry with
            the global trending topics.
        """
        aggregated_data: Dict[str, List[Dict[str, Any]]] = {}

        # Fetch every category concurrently.
        categories = ['general', 'technology', 'business', 'science', 'sports', 'entertainment']
        tasks = [self.get_hot_by_category(category) for category in categories]

        results = await asyncio.gather(*tasks, return_exceptions=True)

        for category, result in zip(categories, results):
            if isinstance(result, list):
                aggregated_data[category] = result
            else:
                # Degrade gracefully: log the failure and keep an empty slot.
                logger.error("Failed to get %s news: %s", category, result)
                aggregated_data[category] = []

        # Append the global trending view.
        aggregated_data['trending'] = await self.get_trending_topics(30)

        return aggregated_data

    def _format_article(self, article: Dict[str, Any]) -> Dict[str, Any]:
        """Normalize a raw NewsAPI article into the common schema.

        Args:
            article: Raw article payload from the API.

        Returns:
            Dict with a flattened source name, platform/category tags and
            an ISO-8601 UTC ``fetch_time`` ending in ``Z``.
        """
        source = article.get('source')
        # NewsAPI nests the source as {'id': ..., 'name': ...}; tolerate
        # plain values too, and map a missing/None source to ''.
        source_name = source.get('name', '') if isinstance(source, dict) else str(source or '')
        return {
            'title': article.get('title', ''),
            'description': article.get('description', ''),
            'url': article.get('url', ''),
            'source': source_name,
            'author': article.get('author', ''),
            'publishedAt': article.get('publishedAt', ''),
            'urlToImage': article.get('urlToImage', ''),
            'content': article.get('content', ''),
            'platform': 'newsapi',
            'category': 'news',
            # Timezone-aware now(); rewrite the offset as 'Z' to keep the
            # historical '...Z' format (utcnow() is deprecated).
            'fetch_time': datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z')
        }

    async def search_trend(self, keyword: str, days: int = 7) -> Dict[str, Any]:
        """Compute a simple publication trend for a keyword.

        Args:
            keyword: Search keyword.
            days: Look-back window in days.

        Returns:
            Dict with total article count, per-day distribution, top
            sources and up to five sample articles.
        """
        # Build the [from, to] date window in UTC.
        to_date = datetime.now(timezone.utc)
        from_date = to_date - timedelta(days=days)

        articles = await self.search_everything(
            q=keyword,
            from_date=from_date.strftime('%Y-%m-%d'),
            to_date=to_date.strftime('%Y-%m-%d'),
            sort_by='publishedAt',
            page_size=100
        )

        # Count articles per calendar day (first 10 chars of publishedAt).
        daily_counts: Dict[str, int] = {}
        for article in articles:
            if article.get('publishedAt'):
                date = article['publishedAt'][:10]
                daily_counts[date] = daily_counts.get(date, 0) + 1

        return {
            'keyword': keyword,
            'total_articles': len(articles),
            'daily_distribution': daily_counts,
            'top_sources': self._get_top_sources(articles),
            'sample_articles': articles[:5]
        }

    def _get_top_sources(self, articles: List[Dict[str, Any]], limit: int = 5) -> List[Dict[str, Any]]:
        """Count the most frequent news sources in a list of articles.

        Args:
            articles: Normalized article dicts.
            limit: Maximum number of sources to return.

        Returns:
            ``[{'source': name, 'count': n}, ...]`` sorted by count,
            descending.
        """
        source_counts: Dict[str, int] = {}
        for article in articles:
            source = article.get('source', 'Unknown')
            source_counts[source] = source_counts.get(source, 0) + 1

        sorted_sources = sorted(
            source_counts.items(),
            key=lambda item: item[1],
            reverse=True
        )

        return [
            {'source': source, 'count': count}
            for source, count in sorted_sources[:limit]
        ]


# Manual smoke test below (duplicated legacy code previously here was removed).
async def main():
    """Manual smoke test: fetch headlines, category news and trending topics.

    Returns:
        True when at least one US headline was fetched.
    """
    # Use the module-level logging import; the previous function-local
    # ``import logging`` was redundant shadowing.
    logging.basicConfig(level=logging.INFO)

    async with NewsAPIAggregator() as aggregator:
        # Top headlines for the United States.
        print("\n=== 获取美国头条新闻 ===")
        us_headlines = await aggregator.get_top_headlines(country='us', page_size=5)
        for article in us_headlines:
            print(f"- {article['title']}")
            print(f"  来源: {article['source']}")
            print(f"  时间: {article['publishedAt']}")

        # Technology-category news.
        print("\n=== 获取科技类新闻 ===")
        tech_news = await aggregator.get_hot_by_category('technology')
        for article in tech_news[:5]:
            print(f"- {article['title']}")

        # Global trending topics.
        print("\n=== 全球热门话题 ===")
        trending = await aggregator.get_trending_topics(10)
        for idx, article in enumerate(trending, 1):
            print(f"{idx}. {article['title']}")

        return len(us_headlines) > 0



if __name__ == "__main__":
    import asyncio
    result = asyncio.run(main())
    print(f"\n测试{'成功' if result else '失败'}")