"""
RSSHub聚合器 - 集成RSSHub获取多平台内容
支持自托管RSSHub实例和公共实例
"""
import asyncio
import aiohttp
import feedparser
import logging
from typing import List, Dict, Any, Optional
from datetime import datetime, timedelta
import hashlib
import os

logger = logging.getLogger(__name__)


class RSSHubAggregator:
    """RSSHub聚合器
    
    通过RSSHub获取各平台的RSS订阅内容
    支持自托管和公共RSSHub实例
    """
    
    def __init__(self, rsshub_url: Optional[str] = None):
        """Initialize the RSSHub aggregator.

        Args:
            rsshub_url: Base URL of the RSSHub instance to use. Falls back to
                the ``RSSHUB_URL`` environment variable, then to the official
                public instance.
        """
        # RSSHub instance base URL (a self-hosted instance can be supplied).
        self.rsshub_url = rsshub_url or os.getenv('RSSHUB_URL', 'https://rsshub.app')
        # Shared aiohttp session; created lazily or via the async context manager.
        self.session = None
        # Default total timeout for the session opened in __aenter__.
        self.timeout = aiohttp.ClientTimeout(total=30)
        
        # Known RSS route paths, keyed by a short source name.
        self.rss_routes = {
            # News
            'bbc_news': '/bbc/world',
            'cnn_top': '/cnn/top',
            'reuters_world': '/reuters/world',
            'guardian_world': '/guardian/world',
            'nytimes_index': '/nytimes/index',
            'wsj_news': '/wsj/news/opinion',
            
            # Technology
            'hackernews': '/hackernews/story',
            'producthunt': '/producthunt/today',
            'techcrunch': '/techcrunch/news',
            'theverge': '/theverge/index',
            'wired': '/wired/news',
            'arstechnica': '/arstechnica/index',
            
            # Social media
            'twitter_trends': '/twitter/trends/worldwide',
            'reddit_popular': '/reddit/hot',
            'v2ex_hot': '/v2ex/topics/hot',
            'hackernews_best': '/hackernews/best/weekly',
            
            # Chinese communities
            'zhihu_hot': '/zhihu/hotlist',
            'zhihu_daily': '/zhihu/daily',
            'weibo_search': '/weibo/search/hot',
            'douban_movie': '/douban/movie/weekly',
            'bilibili_ranking': '/bilibili/ranking/all/1/3',
            'juejin_trending': '/juejin/trending/all/weekly',
            
            # Developer
            'github_trending': '/github/trending/daily',
            'github_trending_python': '/github/trending/daily/python',
            'github_trending_js': '/github/trending/daily/javascript',
            'stackoverflow_hot': '/stackoverflow/hot',
            
            # Finance
            'economist_world': '/economist/world',
            'ft_myft': '/ft/myft',
            'bloomberg_top': '/bloomberg/top',
            
            # Academic
            'nature_news': '/nature/news',
            'science_news': '/science/news',
            'arxiv_cs': '/arxiv/cs',
            
            # Entertainment
            'youtube_trending': '/youtube/trending',
            'spotify_top': '/spotify/top/tracks/global/weekly',
            'imdb_top': '/imdb/top',
            
            # Placeholder slot for user-registered feeds (see add_custom_feed).
            'custom_feed': None
        }
        
        # Category -> list of route names (subset of rss_routes keys).
        self.category_map = {
            'news': ['bbc_news', 'cnn_top', 'reuters_world', 'guardian_world', 'nytimes_index'],
            'tech': ['hackernews', 'producthunt', 'techcrunch', 'theverge', 'wired'],
            'social': ['twitter_trends', 'reddit_popular', 'v2ex_hot'],
            'chinese': ['zhihu_hot', 'weibo_search', 'bilibili_ranking', 'juejin_trending'],
            'developer': ['github_trending', 'stackoverflow_hot'],
            'finance': ['economist_world', 'ft_myft', 'bloomberg_top'],
            'academic': ['nature_news', 'science_news', 'arxiv_cs'],
            'entertainment': ['youtube_trending', 'spotify_top', 'imdb_top']
        }
    
    async def __aenter__(self):
        """Open the shared HTTP session when entering the async context."""
        session = aiohttp.ClientSession(timeout=self.timeout)
        self.session = session
        return self
    
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """异步上下文管理器出口"""
        if self.session:
            await self.session.close()
    
    async def fetch_rss(self, route: str, custom_url: Optional[str] = None,
                       max_retries: int = 3, timeout: int = 60) -> List[Dict[str, Any]]:
        """Fetch and parse one RSS feed, retrying with exponential backoff.

        Args:
            route: RSSHub route path (e.g. ``/hackernews/story``). If it is
                already an absolute http(s) URL — which happens for feeds
                registered via add_custom_feed() — it is used as-is.
            custom_url: Explicit full RSS URL; overrides ``route`` when given.
            max_retries: Maximum number of fetch attempts.
            timeout: Total request timeout in seconds; only applied when the
                session is created lazily here (not in __aenter__).

        Returns:
            Up to 20 parsed entries; an empty list on any persistent failure.
        """
        # Build the request URL.
        if custom_url:
            url = custom_url
        elif route.startswith(('http://', 'https://')):
            # add_custom_feed() stores full URLs in rss_routes; do not prefix
            # them with the RSSHub base URL (that produced a broken URL before).
            url = route
        else:
            url = f"{self.rsshub_url}{route}"
        
        # Retry loop with exponential backoff (1s, 2s, 4s, ...).
        for attempt in range(max_retries):
            try:
                if not self.session:
                    # Lazily create a session when used outside the async
                    # context manager.
                    self.session = aiohttp.ClientSession(
                        timeout=aiohttp.ClientTimeout(total=timeout)
                    )
                
                async with self.session.get(url) as response:
                    if response.status == 200:
                        content = await response.text()
                        
                        feed = feedparser.parse(content)
                        
                        # feedparser sets bozo on malformed XML; still try to
                        # use whatever entries it managed to extract.
                        if feed.bozo:
                            logger.warning(f"RSS parse warning for {url}: {feed.bozo_exception}")
                        
                        items = []
                        for entry in feed.entries[:20]:  # cap entries per feed
                            item = self._parse_entry(entry, route)
                            if item:
                                items.append(item)
                        
                        return items
                        
                    elif response.status == 429:  # rate limited by the server
                        wait_time = 2 ** attempt
                        logger.warning(f"Rate limit hit for {url}, waiting {wait_time}s...")
                        await asyncio.sleep(wait_time)
                        continue
                        
                    else:
                        logger.error(f"Failed to fetch RSS from {url}: {response.status}")
                        if attempt < max_retries - 1:
                            await asyncio.sleep(2 ** attempt)
                            continue
                        return []
                
            except asyncio.TimeoutError:
                logger.warning(f"Timeout fetching RSS from {route} (attempt {attempt + 1}/{max_retries})")
                if attempt < max_retries - 1:
                    await asyncio.sleep(2 ** attempt)
                    continue
                logger.error(f"All retry attempts failed for {route}")
                return []
                
            except Exception as e:
                logger.warning(f"Error fetching RSS from {route} (attempt {attempt + 1}/{max_retries}): {e}")
                if attempt < max_retries - 1:
                    await asyncio.sleep(2 ** attempt)
                    continue
                logger.error(f"All retry attempts failed for {route}: {e}")
                return []
        
        # Reached only if the final attempt ended in a 429 `continue`.
        return []
    
    def _parse_entry(self, entry: Any, source: str) -> Optional[Dict[str, Any]]:
        """解析RSS条目
        
        Args:
            entry: RSS条目
            source: 来源标识
            
        Returns:
            格式化的条目数据
        """
        try:
            # 提取基本信息
            title = entry.get('title', '')
            link = entry.get('link', '')
            description = entry.get('description', '') or entry.get('summary', '')
            
            # 解析时间
            published = None
            if hasattr(entry, 'published_parsed'):
                published = datetime(*entry.published_parsed[:6])
            elif hasattr(entry, 'updated_parsed'):
                published = datetime(*entry.updated_parsed[:6])
            else:
                published = datetime.utcnow()
            
            # 提取作者
            author = entry.get('author', '')
            if not author and hasattr(entry, 'authors'):
                author = ', '.join([a.get('name', '') for a in entry.authors])
            
            # 提取标签
            tags = []
            if hasattr(entry, 'tags'):
                tags = [tag.get('term', '') for tag in entry.tags]
            
            # 生成唯一ID
            content_hash = hashlib.md5(f"{title}{link}".encode()).hexdigest()
            
            return {
                'id': content_hash,
                'title': title,
                'description': description[:500],  # 限制描述长度
                'url': link,
                'author': author,
                'published_at': published.isoformat() if published else None,
                'tags': tags,
                'source': source.replace('/', '_'),
                'platform': 'rsshub',
                'fetch_time': datetime.utcnow().isoformat() + 'Z'
            }
            
        except Exception as e:
            logger.error(f"Error parsing RSS entry: {e}")
            return None
    
    async def get_category_feeds(self, category: str) -> List[Dict[str, Any]]:
        """获取分类下的所有RSS源
        
        Args:
            category: 分类名称
            
        Returns:
            该分类下所有RSS源的内容
        """
        routes = self.category_map.get(category, [])
        if not routes:
            logger.warning(f"Unknown category: {category}")
            return []
        
        all_items = []
        tasks = []
        
        for route_name in routes:
            if route_name in self.rss_routes and self.rss_routes[route_name]:
                tasks.append(self.fetch_rss(self.rss_routes[route_name]))
        
        results = await asyncio.gather(*tasks, return_exceptions=True)
        
        for result in results:
            if isinstance(result, list):
                all_items.extend(result)
        
        # 按发布时间排序
        all_items.sort(key=lambda x: x.get('published_at', ''), reverse=True)
        
        return all_items
    
    async def get_trending_topics(self, limit: int = 50) -> List[Dict[str, Any]]:
        """获取热门话题
        
        通过聚合多个平台的热门内容识别趋势话题
        
        Args:
            limit: 返回数量限制
            
        Returns:
            热门话题列表
        """
        # 获取关键平台的热门内容
        key_routes = [
            'hackernews', 'reddit_popular', 'twitter_trends',
            'zhihu_hot', 'github_trending', 'producthunt'
        ]
        
        all_items = []
        tasks = []
        
        for route_name in key_routes:
            if route_name in self.rss_routes and self.rss_routes[route_name]:
                tasks.append(self.fetch_rss(self.rss_routes[route_name]))
        
        results = await asyncio.gather(*tasks, return_exceptions=True)
        
        for result in results:
            if isinstance(result, list):
                all_items.extend(result)
        
        # 简单去重（基于标题相似度）
        unique_items = []
        seen_titles = set()
        
        for item in all_items:
            title_key = item['title'][:30].lower() if item.get('title') else ''
            if title_key and title_key not in seen_titles:
                seen_titles.add(title_key)
                unique_items.append(item)
        
        # 按时间排序并返回
        unique_items.sort(key=lambda x: x.get('published_at', ''), reverse=True)
        
        return unique_items[:limit]
    
    async def search_feeds(self, keyword: str, sources: List[str] = None) -> List[Dict[str, Any]]:
        """搜索RSS源中的内容
        
        Args:
            keyword: 搜索关键词
            sources: 指定搜索的源列表
            
        Returns:
            匹配的内容列表
        """
        # 确定要搜索的源
        if sources:
            routes_to_search = [self.rss_routes.get(s) for s in sources if s in self.rss_routes]
        else:
            # 搜索所有源
            routes_to_search = [r for r in self.rss_routes.values() if r]
        
        all_items = []
        tasks = []
        
        for route in routes_to_search:
            tasks.append(self.fetch_rss(route))
        
        # 限制并发数
        semaphore = asyncio.Semaphore(5)
        
        async def fetch_with_limit(route):
            async with semaphore:
                return await self.fetch_rss(route)
        
        results = await asyncio.gather(*[fetch_with_limit(r) for r in routes_to_search], return_exceptions=True)
        
        # 过滤匹配关键词的内容
        keyword_lower = keyword.lower()
        
        for result in results:
            if isinstance(result, list):
                for item in result:
                    if (keyword_lower in item.get('title', '').lower() or 
                        keyword_lower in item.get('description', '').lower()):
                        all_items.append(item)
        
        return all_items
    
    async def aggregate_all_sources(self) -> Dict[str, List[Dict[str, Any]]]:
        """聚合所有数据源
        
        Returns:
            按分类组织的RSS数据
        """
        aggregated_data = {}
        
        # 并发获取所有分类的内容
        tasks = []
        categories = list(self.category_map.keys())
        
        for category in categories:
            tasks.append(self.get_category_feeds(category))
        
        results = await asyncio.gather(*tasks, return_exceptions=True)
        
        for category, result in zip(categories, results):
            if isinstance(result, list):
                aggregated_data[category] = result
                logger.info(f"Category {category}: {len(result)} items")
            else:
                logger.error(f"Failed to get {category} feeds: {result}")
                aggregated_data[category] = []
        
        # 添加热门话题
        trending = await self.get_trending_topics(30)
        aggregated_data['trending'] = trending
        
        return aggregated_data
    
    async def add_custom_feed(self, name: str, url: str) -> List[Dict[str, Any]]:
        """添加自定义RSS源
        
        Args:
            name: 源名称
            url: RSS URL
            
        Returns:
            RSS内容
        """
        items = await self.fetch_rss(name, custom_url=url)
        
        # 保存自定义源供后续使用
        self.rss_routes[name] = url
        
        return items


async def main():
    """Smoke-test the RSSHub aggregator end to end against live feeds."""
    logging.basicConfig(level=logging.INFO)
    
    async with RSSHubAggregator() as aggregator:
        # Tech-category feeds
        print("\n=== 获取科技类RSS ===")
        tech_items = await aggregator.get_category_feeds('tech')
        print(f"获取到 {len(tech_items)} 条科技新闻")
        for entry in tech_items[:5]:
            print(f"- {entry['title']}")
            print(f"  来源: {entry['source']}")
            print(f"  时间: {entry['published_at']}")
        
        # Global trending topics
        print("\n=== 全球热门话题 ===")
        hot = await aggregator.get_trending_topics(10)
        for rank, entry in enumerate(hot, 1):
            print(f"{rank}. {entry['title']}")
            print(f"   平台: {entry['source']}")
        
        # Keyword search
        print("\n=== 搜索AI相关内容 ===")
        hits = await aggregator.search_feeds('AI', sources=['hackernews', 'reddit_popular'])
        print(f"找到 {len(hits)} 条相关内容")
        for entry in hits[:3]:
            print(f"- {entry['title']}")
        
        # Aggregate every source by category
        print("\n=== 聚合所有数据源 ===")
        everything = await aggregator.aggregate_all_sources()
        for category, items in everything.items():
            print(f"{category}: {len(items)} 条")
        
        # Success = at least one tech item fetched.
        return len(tech_items) > 0


if __name__ == "__main__":
    # Run the live smoke test and print an overall pass/fail verdict.
    result = asyncio.run(main())
    print(f"\n测试{'成功' if result else '失败'}")