"""热点新闻聚合服务
整合多个免费热榜网站的数据
"""
import asyncio
import json
import logging
import re
from datetime import datetime, timezone
from typing import Dict, Any, List, Optional

import aiohttp
from bs4 import BeautifulSoup

logger = logging.getLogger(__name__)


class HotNewsAggregator:
    """Aggregator for hot/trending news.

    Scrapes several free trending-list sites and public APIs and exposes
    their data through a single normalized schema.

    Intended to be used as an async context manager, which owns the
    shared aiohttp session::

        async with HotNewsAggregator() as agg:
            results = await agg.get_all_hot_news()
    """

    def __init__(self):
        # Shared aiohttp session; created in __aenter__, closed in __aexit__.
        self.session = None
        # Browser-like headers so the scraped sites serve their normal HTML.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        }

        # Static configuration for known hot-list sites.
        # NOTE(review): only a subset of these has a matching fetch_*
        # implementation below; the rest appear to be config for future
        # scrapers — confirm before removing.
        self.hot_sites = {
            'mofish': {
                'name': '鱼塘热榜',
                'url': 'https://mo.fish',
                'selector': '.list-group-item',  # CSS selector for list entries
                'parse_method': 'mofish'
            },
            'anyknew': {
                'name': 'AnyKnew',
                'url': 'https://www.anyknew.com/hot',
                'selector': '.hot-item',
                'parse_method': 'anyknew'
            },
            'rebang': {
                'name': '热榜中心',
                'url': 'https://rebang.today',
                'selector': '.rb-item',
                'parse_method': 'rebang'
            },
            'baidu': {
                'name': '百度热搜',
                'url': 'https://top.baidu.com/board?tab=realtime',
                'selector': '.c-single-text-ellipsis',
                'parse_method': 'baidu'
            },
            'sogou': {
                'name': '搜狗热搜',
                'url': 'https://www.sogou.com/hot',
                'selector': '.hot-item',
                'parse_method': 'sogou'
            },
            'so360': {
                'name': '360热搜',
                'url': 'https://www.so.com/hot',
                'selector': '.hot-item',
                'parse_method': 'so360'
            },
            'juejin': {
                'name': '掘金热榜',
                'url': 'https://juejin.cn/hot',
                'selector': '.entry-link',
                'parse_method': 'juejin'
            },
            'toutiao_io': {
                'name': '开发者头条',
                'url': 'https://toutiao.io',
                'selector': '.post',
                'parse_method': 'toutiao_io'
            }
        }

    async def __aenter__(self):
        """Async context manager entry: create the shared HTTP session."""
        connector = aiohttp.TCPConnector(
            force_close=True,  # no keep-alive: avoids stale connections to scraped sites
            limit=10,
            limit_per_host=2
        )
        timeout = aiohttp.ClientTimeout(total=30, connect=10)
        self.session = aiohttp.ClientSession(
            connector=connector,
            timeout=timeout,
            headers=self.headers
        )
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit: close and drop the shared session."""
        if self.session:
            await self.session.close()
            # Drop the reference so a closed session is never reused.
            self.session = None

    @staticmethod
    def _heat_to_number(value) -> float:
        """Best-effort conversion of a heat/score value to a float.

        Sources report heat either as a number or as a Chinese-formatted
        string such as '123万' (x10^4) or '1.2亿' (x10^8).  Anything that
        cannot be parsed sorts as 0.0, so mixed types never break sorting.
        """
        if isinstance(value, (int, float)):
            return float(value)
        if isinstance(value, str):
            match = re.search(r'(\d+(?:\.\d+)?)', value)
            if match:
                number = float(match.group(1))
                if '亿' in value:
                    number *= 1e8
                elif '万' in value:
                    number *= 1e4
                return number
        return 0.0

    async def fetch_mofish(self) -> List[Dict[str, Any]]:
        """Scrape the mo.fish aggregated hot lists.

        Returns a flat list of raw item dicts; empty list on any error.
        """
        try:
            url = self.hot_sites['mofish']['url']
            logger.info("Fetching %s", url)

            async with self.session.get(url) as response:
                if response.status != 200:
                    logger.error("Failed to fetch mofish: %s", response.status)
                    return []

                html = await response.text()

            soup = BeautifulSoup(html, 'html.parser')
            items = []

            # Each 'col-md-4' column is one platform's board; keep the first 3.
            sections = soup.find_all('div', class_='col-md-4')

            for section in sections[:3]:
                platform = section.find('h3')
                if not platform:
                    continue

                platform_name = platform.text.strip()

                # Entries of this board, capped at the top 10.
                hot_items = section.find_all('a', class_='list-group-item')

                for idx, item in enumerate(hot_items[:10], 1):
                    items.append({
                        'rank': idx,
                        'title': item.text.strip(),
                        'url': item.get('href', ''),
                        'platform': platform_name,
                        'source': 'mofish'
                    })

            logger.info("Found %d items from mofish", len(items))
            return items

        except Exception as e:
            # Best-effort scraper: never propagate, just log and return nothing.
            logger.error("Error fetching mofish: %s", e)
            return []

    async def fetch_dailyhot_api(self) -> List[Dict[str, Any]]:
        """Fetch hot lists from the free vvhan (DailyHot-style) public APIs.

        Queries one endpoint per platform; a failing platform is skipped,
        and the combined raw items of the rest are returned.
        """
        try:
            # Public endpoints, one per platform.
            urls = {
                'weibo': 'https://api.vvhan.com/api/wbhot',
                'zhihu': 'https://api.vvhan.com/api/hotlist?type=zhihu',
                'douyin': 'https://api.vvhan.com/api/hotlist?type=douyinHot',
                'baidu': 'https://api.vvhan.com/api/hotlist?type=baiduRD'
            }

            all_items = []

            for platform, url in urls.items():
                try:
                    async with self.session.get(url) as response:
                        if response.status == 200:
                            data = await response.json()

                            # Response shape varies across endpoints; accept
                            # either a success flag or a bare 'data' payload.
                            if data.get('success') or 'data' in data:
                                items_data = data.get('data', [])

                                for idx, item in enumerate(items_data[:10], 1):
                                    all_items.append({
                                        'rank': idx,
                                        'title': item.get('title', item.get('word', '')),
                                        'url': item.get('url', ''),
                                        'heat': item.get('hot', item.get('hotScore', '')),
                                        'platform': platform,
                                        'source': 'dailyhot_api'
                                    })

                                logger.info("Got %d items from %s", len(items_data), platform)

                    await asyncio.sleep(0.5)  # polite rate limiting between endpoints

                except Exception as e:
                    logger.error("Error fetching %s: %s", platform, e)
                    continue

            return all_items

        except Exception as e:
            logger.error("Error fetching dailyhot API: %s", e)
            return []

    async def fetch_juejin_api(self) -> List[Dict[str, Any]]:
        """Fetch the Juejin article ranking via its public JSON API."""
        try:
            url = 'https://api.juejin.cn/content_api/v1/content/article_rank'
            params = {
                'category_id': '1',
                'type': 'hot'
            }

            async with self.session.get(url, params=params) as response:
                if response.status != 200:
                    return []

                data = await response.json()

            items = []
            articles = data.get('data', [])

            for idx, article in enumerate(articles[:20], 1):
                # Hoist the nested 'content' dict instead of fetching it twice.
                content = article.get('content', {})
                items.append({
                    'rank': idx,
                    'title': content.get('title', ''),
                    'url': f"https://juejin.cn/post/{content.get('content_id', '')}",
                    'author': article.get('author', {}).get('user_name', ''),
                    'views': article.get('content_counter', {}).get('view', 0),
                    'platform': 'juejin',
                    'source': 'juejin_api'
                })

            logger.info("Got %d items from Juejin", len(items))
            return items

        except Exception as e:
            logger.error("Error fetching Juejin: %s", e)
            return []

    async def fetch_tophub_web(self) -> List[Dict[str, Any]]:
        """Scrape the tophub.today web page (used as a fallback source)."""
        try:
            url = 'https://tophub.today'

            async with self.session.get(url) as response:
                if response.status != 200:
                    return []

                html = await response.text()

            soup = BeautifulSoup(html, 'html.parser')
            items = []

            # Each 'cc-cd' card is one platform's board; keep the first 3.
            containers = soup.find_all('div', class_='cc-cd')[:3]

            for container in containers:
                platform = container.find('div', class_='cc-cd-lb')
                if not platform:
                    continue

                platform_name = platform.text.strip()

                entries = container.find_all('a', class_='cc-cd-cb-l')[:10]

                for idx, entry in enumerate(entries, 1):
                    title_elem = entry.find('span', class_='cc-cd-cb-tt')
                    if title_elem:
                        items.append({
                            'rank': idx,
                            'title': title_elem.text.strip(),
                            'url': entry.get('href', ''),
                            'platform': platform_name,
                            'source': 'tophub_web'
                        })

            logger.info("Got %d items from tophub web", len(items))
            return items

        except Exception as e:
            logger.error("Error fetching tophub web: %s", e)
            return []

    def normalize_data(self, items: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Map raw scraper items onto the unified output schema.

        Known optional fields (author, views, replies, tags) are preserved
        under 'extra'; every other key gets a defaulted top-level slot.
        """
        normalized = []

        for item in items:
            normalized_item = {
                'platform': item.get('platform', 'unknown'),
                'source': item.get('source', 'unknown'),
                'rank': item.get('rank', 0),
                'title': item.get('title', ''),
                'url': item.get('url', ''),
                # Raw heat as reported by the source (may be a string like '123万').
                'heat_value': item.get('heat', item.get('heat_value', 0)),
                'description': item.get('description', ''),
                # Timezone-aware UTC timestamp (datetime.utcnow() is deprecated);
                # keep the trailing 'Z' format the original emitted.
                'fetch_time': datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z'),
                'extra': {}
            }

            # Preserve well-known optional fields.
            for key in ['author', 'views', 'replies', 'tags']:
                if key in item:
                    normalized_item['extra'][key] = item[key]

            normalized.append(normalized_item)

        return normalized

    async def get_all_hot_news(self) -> Dict[str, List[Dict[str, Any]]]:
        """Fetch every source concurrently and return normalized results.

        Returns a dict keyed by source name plus 'all', which holds every
        item sorted by numeric heat, descending.
        """
        results = {
            'mofish': [],
            'dailyhot': [],
            'juejin': [],
            'tophub': [],
            'all': []
        }

        # Fire all source fetches concurrently.
        tasks = [
            self.fetch_mofish(),
            self.fetch_dailyhot_api(),
            self.fetch_juejin_api(),
            self.fetch_tophub_web()
        ]

        try:
            responses = await asyncio.gather(*tasks, return_exceptions=True)

            sources = ['mofish', 'dailyhot', 'juejin', 'tophub']

            for source, response in zip(sources, responses):
                if isinstance(response, Exception):
                    logger.error("Error from %s: %s", source, response)
                    results[source] = []
                else:
                    normalized = self.normalize_data(response)
                    results[source] = normalized
                    results['all'].extend(normalized)
                    logger.info("%s: %d items", source, len(normalized))

            # Sort by heat.  heat_value mixes ints and strings like '123万',
            # so coerce to a number first — sorting the raw values raises
            # TypeError on mixed types.
            results['all'].sort(
                key=lambda item: self._heat_to_number(item.get('heat_value', 0)),
                reverse=True
            )

        except Exception as e:
            logger.error("Error getting all hot news: %s", e)

        return results


async def main():
    """Smoke-test entry point: fetch every source and print a summary.

    Returns True when at least one hot-news item was collected.
    """
    logging.basicConfig(level=logging.INFO)

    async with HotNewsAggregator() as aggregator:
        print("\n=== 热点新闻聚合测试 ===\n")

        # Pull everything in one shot.
        results = await aggregator.get_all_hot_news()

        # Per-source summary (the merged 'all' bucket is printed separately).
        for source_name, entries in results.items():
            if source_name == 'all':
                continue

            print(f"\n{source_name}: {len(entries)} 条")
            for entry in entries[:3]:
                print(f"  {entry['rank']}. {entry['title'][:40]}...")

        print(f"\n总计: {len(results['all'])} 条热点新闻")

        # Combined top 10 across all sources.
        print("\n=== 综合热点 Top 10 ===")
        for position, entry in enumerate(results['all'][:10], 1):
            print(f"{position}. [{entry['platform']}] {entry['title'][:50]}")

        return len(results['all']) > 0


if __name__ == "__main__":
    result = asyncio.run(main())
    print(f"\n测试{'成功' if result else '失败'}")