"""免费热榜数据服务
整合多个免费数据源，提供统一的热榜数据接口
"""
import asyncio
import json
import logging
from datetime import datetime, timezone
from typing import Dict, Any, List, Optional

import aiohttp
import feedparser
from bs4 import BeautifulSoup

# Module-level logger, one per module per the stdlib logging convention.
logger = logging.getLogger(__name__)


class FreeHotService:
    """Aggregated free hot-list data service.

    Combines several free data sources -- RSSHub, the V2EX / GitHub /
    HackerNews public APIs, and a tophub.today web-scrape fallback --
    behind a unified interface.  Intended to be used as an async context
    manager::

        async with FreeHotService() as service:
            results = await service.get_multi_platform_hot(['v2ex', 'github'])

    All fetchers are best-effort: network or parse failures are logged and
    yield an empty list rather than raising.
    """

    def __init__(self):
        # Shared aiohttp session.  Opened by __aenter__, or lazily by
        # _ensure_session() when the service is used without `async with`.
        self.session = None
        self.timeout = aiohttp.ClientTimeout(total=30)

        # Public RSSHub instance used for the RSS-based sources.
        self.rsshub_base = "https://rsshub.app"

        # Free API endpoints keyed by platform name.  'type' records the
        # response-handling strategy (plain JSON, JSON POST, or the
        # two-step HackerNews id-list + per-item detail flow).
        self.free_apis = {
            'v2ex': {
                'url': 'https://www.v2ex.com/api/topics/hot.json',
                'type': 'json'
            },
            'github': {
                'url': 'https://api.github.com/search/repositories?q=stars:>1&sort=stars&order=desc&per_page=30',
                'type': 'json'
            },
            'hackernews': {
                'url': 'https://hacker-news.firebaseio.com/v0/topstories.json',
                'detail_url': 'https://hacker-news.firebaseio.com/v0/item/{}.json',
                'type': 'hackernews'
            },
            'juejin': {
                'url': 'https://api.juejin.cn/recommend_api/v1/article/recommend_all_feed',
                'type': 'json_post'
            }
        }

        # RSSHub route per platform (appended to rsshub_base).
        self.rsshub_routes = {
            'weibo': '/weibo/search/hot',
            'zhihu': '/zhihu/hotlist',
            'baidu': '/baidu/tieba/hot',
            'bilibili': '/bilibili/ranking/0/3',
            'douban': '/douban/movie/playing',
            'sspai': '/sspai/index',
            '36kr': '/36kr/hot',
            'toutiao': '/toutiao/today',
        }

    async def __aenter__(self):
        """Async context manager entry: open the shared HTTP session."""
        self.session = aiohttp.ClientSession(timeout=self.timeout)
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit: close and drop the HTTP session."""
        if self.session:
            await self.session.close()
            # Drop the reference so a later call lazily creates a fresh
            # session instead of reusing a closed one.
            self.session = None

    def _ensure_session(self) -> "aiohttp.ClientSession":
        """Return the shared session, creating it lazily on first use."""
        if not self.session:
            self.session = aiohttp.ClientSession(timeout=self.timeout)
        return self.session

    async def fetch_rsshub(self, route: str) -> List[Dict[str, Any]]:
        """Fetch and parse an RSS feed from the public RSSHub instance.

        Args:
            route: RSSHub route path, e.g. '/weibo/search/hot'.

        Returns:
            Up to 30 items with title/url/description/published fields;
            an empty list on any network or parse failure.
        """
        try:
            url = f"{self.rsshub_base}{route}"
            logger.info(f"Fetching RSSHub: {url}")

            session = self._ensure_session()
            async with session.get(url) as response:
                if response.status != 200:
                    logger.error(f"RSSHub request failed: {response.status}")
                    return []
                content = await response.text()

            # Parse the RSS payload outside the response context manager.
            feed = feedparser.parse(content)
            return [
                {
                    'title': entry.get('title', ''),
                    'url': entry.get('link', ''),
                    # Truncate descriptions for downstream display.
                    'description': entry.get('description', '')[:200],
                    'published': entry.get('published', ''),
                }
                for entry in feed.entries[:30]
            ]

        except Exception as e:
            logger.error(f"Error fetching RSSHub {route}: {e}")
            return []

    async def fetch_v2ex_hot(self) -> List[Dict[str, Any]]:
        """Fetch the top 20 V2EX hot topics via the public JSON API.

        Returns:
            Items with title/url/description/replies/member fields;
            an empty list on failure.
        """
        try:
            session = self._ensure_session()
            url = self.free_apis['v2ex']['url']
            async with session.get(url) as response:
                if response.status != 200:
                    return []
                data = await response.json()

            items = []
            for topic in data[:20]:
                items.append({
                    'title': topic.get('title', ''),
                    'url': f"https://www.v2ex.com/t/{topic.get('id', '')}",
                    # 'content' may be None, not just missing.
                    'description': topic.get('content', '')[:200] if topic.get('content') else '',
                    'replies': topic.get('replies', 0),
                    'member': topic.get('member', {}).get('username', '')
                })

            return items

        except Exception as e:
            logger.error(f"Error fetching V2EX: {e}")
            return []

    async def fetch_github_trending(self) -> List[Dict[str, Any]]:
        """Fetch the 20 most-starred repositories from the GitHub search API.

        The API works unauthenticated (subject to rate limits).

        Returns:
            Items with title/url/description/stars/language/topics fields;
            an empty list on failure.
        """
        try:
            session = self._ensure_session()
            url = self.free_apis['github']['url']
            headers = {'Accept': 'application/vnd.github.v3+json'}

            async with session.get(url, headers=headers) as response:
                if response.status != 200:
                    return []
                data = await response.json()

            items = []
            for repo in data.get('items', [])[:20]:
                items.append({
                    'title': repo.get('full_name', ''),
                    'url': repo.get('html_url', ''),
                    # 'description' may be None, not just missing.
                    'description': repo.get('description', '')[:200] if repo.get('description') else '',
                    'stars': repo.get('stargazers_count', 0),
                    'language': repo.get('language', ''),
                    'topics': repo.get('topics', [])
                })

            return items

        except Exception as e:
            logger.error(f"Error fetching GitHub: {e}")
            return []

    async def fetch_hackernews_top(self, limit: int = 20) -> List[Dict[str, Any]]:
        """Fetch top HackerNews stories via the Firebase API.

        Args:
            limit: maximum number of story ids to inspect.  The result is
                additionally capped at 10 items to bound the number of
                per-item detail requests.

        Returns:
            Items with title/url/score/by/descendants/time fields;
            an empty list on failure.
        """
        try:
            session = self._ensure_session()

            # Step 1: flat list of top story ids.
            url = self.free_apis['hackernews']['url']
            async with session.get(url) as response:
                if response.status != 200:
                    return []
                story_ids = await response.json()

            # Step 2: one detail request per id until a limit is hit.
            items = []
            for story_id in story_ids[:limit]:
                detail_url = self.free_apis['hackernews']['detail_url'].format(story_id)

                async with session.get(detail_url) as response:
                    if response.status == 200:
                        story = await response.json()
                        # Skip deleted (null) items and non-story types
                        # such as jobs and polls.
                        if story and story.get('type') == 'story':
                            items.append({
                                'title': story.get('title', ''),
                                # Ask/Show HN posts have no external url;
                                # fall back to the HN item page.
                                'url': story.get('url', f"https://news.ycombinator.com/item?id={story_id}"),
                                'score': story.get('score', 0),
                                'by': story.get('by', ''),
                                'descendants': story.get('descendants', 0),
                                'time': story.get('time', 0)
                            })

                if len(items) >= 10:  # hard cap on detail requests
                    break

                await asyncio.sleep(0.1)  # throttle to be polite to the API

            return items

        except Exception as e:
            logger.error(f"Error fetching HackerNews: {e}")
            return []

    async def scrape_tophub_web(self, platform: str = 'all') -> List[Dict[str, Any]]:
        """Scrape hot-list entries from the tophub.today web pages.

        Used as a fallback for platforms without a free API or RSSHub route.

        Args:
            platform: platform name, or 'all' for the aggregate front page.

        Returns:
            Up to 30 items with rank/title/heat/platform fields;
            an empty list on failure.
        """
        try:
            session = self._ensure_session()

            # Per-platform page paths on tophub.today; unknown platforms
            # fall back to the front page.
            paths = {
                'weibo': '/n/KqndgxeLl9',
                'zhihu': '/n/mproPpoq6O',
                'baidu': '/n/Jb0vmloB1G',
                'bilibili': '/n/74KvxwXwoO',
                'douyin': '/n/DpQvNABoNE',
                'all': '/'
            }

            path = paths.get(platform, '/')
            url = f"https://tophub.today{path}"

            # A browser-like UA avoids trivial bot blocking.
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
            }

            async with session.get(url, headers=headers) as response:
                if response.status != 200:
                    logger.error(f"Failed to fetch tophub web: {response.status}")
                    return []
                html = await response.text()

            # Parse the hot-list entries out of the HTML.
            soup = BeautifulSoup(html, 'html.parser')
            items = []

            for item in soup.select('.cc-cd-cb-ll'):
                try:
                    rank_elem = item.select_one('.cc-cd-cb-ll-nu')
                    title_elem = item.select_one('.cc-cd-cb-ll-tt')
                    heat_elem = item.select_one('.cc-cd-cb-ll-cl')

                    if title_elem:
                        items.append({
                            'rank': rank_elem.text.strip() if rank_elem else '',
                            'title': title_elem.text.strip(),
                            'heat': heat_elem.text.strip() if heat_elem else '',
                            'platform': platform
                        })
                except Exception:
                    # Malformed entries are skipped; was a bare `except:`
                    # which would also swallow KeyboardInterrupt/SystemExit.
                    continue

            return items[:30]

        except Exception as e:
            logger.error(f"Error scraping tophub web: {e}")
            return []

    def normalize_data(self, source: str, data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Normalize raw items from any source into a common record shape.

        Args:
            source: data-source / platform name.
            data: raw items as returned by one of the fetchers.

        Returns:
            Records with platform/source/rank/title/url/description/
            fetch_time/heat_value/extra fields; rank is 1-based input order.
        """
        normalized = []

        # One UTC timestamp for the whole batch.  datetime.utcnow() is
        # deprecated; use an aware datetime and keep the trailing-'Z' format.
        fetch_time = datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z')

        for idx, item in enumerate(data):
            normalized_item = {
                'platform': source,
                'source': 'free_api',
                'rank': idx + 1,
                'title': item.get('title', ''),
                'url': item.get('url', ''),
                'description': item.get('description', ''),
                'fetch_time': fetch_time,
                'heat_value': 0,  # most free APIs expose no heat metric
                'extra': {}
            }

            # Per-platform heat metric and extra metadata.
            if source == 'v2ex':
                normalized_item['heat_value'] = item.get('replies', 0)
                normalized_item['extra']['author'] = item.get('member', '')
            elif source == 'github':
                normalized_item['heat_value'] = item.get('stars', 0)
                normalized_item['extra']['language'] = item.get('language', '')
                normalized_item['extra']['topics'] = item.get('topics', [])
            elif source == 'hackernews':
                normalized_item['heat_value'] = item.get('score', 0)
                normalized_item['extra']['by'] = item.get('by', '')
                normalized_item['extra']['comments'] = item.get('descendants', 0)

            normalized.append(normalized_item)

        return normalized

    async def _fetch_platform(self, platform: str) -> List[Dict[str, Any]]:
        """Route one platform name to its fetcher: free API > RSSHub > scrape."""
        if platform == 'v2ex':
            return await self.fetch_v2ex_hot()
        if platform == 'github':
            return await self.fetch_github_trending()
        if platform == 'hackernews':
            return await self.fetch_hackernews_top()
        if platform in self.rsshub_routes:
            return await self.fetch_rsshub(self.rsshub_routes[platform])
        # Web scraping as the last resort.
        return await self.scrape_tophub_web(platform)

    async def get_multi_platform_hot(self, platforms: Optional[List[str]] = None) -> Dict[str, List[Dict[str, Any]]]:
        """Fetch and normalize hot lists for several platforms sequentially.

        Args:
            platforms: platform names; defaults to a representative mix.

        Returns:
            Mapping of platform name to its normalized item list (empty
            list when that platform failed).
        """
        if not platforms:
            platforms = ['v2ex', 'github', 'hackernews', 'weibo', 'zhihu']

        results = {}

        for i, platform in enumerate(platforms):
            try:
                data = await self._fetch_platform(platform)

                if data:
                    normalized = self.normalize_data(platform, data)
                    results[platform] = normalized
                    logger.info(f"获取 {platform} 热榜成功: {len(normalized)} 条")
                else:
                    results[platform] = []
                    logger.warning(f"获取 {platform} 热榜失败")

            except Exception as e:
                logger.error(f"获取 {platform} 热榜出错: {e}")
                results[platform] = []

            # Throttle between platforms (no need to sleep after the last).
            if i < len(platforms) - 1:
                await asyncio.sleep(1)

        return results


async def main():
    """测试函数"""
    logging.basicConfig(level=logging.INFO)
    
    async with FreeHotService() as service:
        print("\n=== 免费数据源测试 ===")
        
        # 测试 V2EX
        print("\n1. V2EX 热门话题:")
        v2ex_data = await service.fetch_v2ex_hot()
        for item in v2ex_data[:3]:
            print(f"  - {item['title'][:50]}")
        
        # 测试 GitHub
        print("\n2. GitHub 热门项目:")
        github_data = await service.fetch_github_trending()
        for item in github_data[:3]:
            print(f"  - {item['title']} ⭐ {item.get('stars', 0)}")
        
        # 测试 HackerNews
        print("\n3. HackerNews 热门:")
        hn_data = await service.fetch_hackernews_top(5)
        for item in hn_data:
            print(f"  - {item['title'][:50]} (score: {item.get('score', 0)})")
        
        # 测试 RSSHub
        print("\n4. RSSHub 微博热搜:")
        weibo_data = await service.fetch_rsshub('/weibo/search/hot')
        for item in weibo_data[:3]:
            print(f"  - {item['title'][:50]}")
        
        # 测试多平台
        print("\n5. 多平台热榜:")
        multi_data = await service.get_multi_platform_hot(['v2ex', 'github'])
        for platform, items in multi_data.items():
            if items:
                print(f"\n{platform}: {len(items)} 条")
                for item in items[:2]:
                    print(f"  {item['rank']}. {item['title'][:40]}")
        
        return True


if __name__ == "__main__":
    result = asyncio.run(main())
    print(f"\n测试{'成功' if result else '失败'}")