"""可靠的热点新闻服务
使用最稳定的免费API和网页爬取
"""
import asyncio
import json
import logging
import re
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from urllib.parse import quote

import aiohttp
from bs4 import BeautifulSoup

# Module-level logger; handlers/level are configured by the application
# (see the logging.basicConfig call in main()).
logger = logging.getLogger(__name__)


class ReliableHotService:
    """Reliable trending-news aggregation service.

    Pulls hot/trending items from stable free APIs (Juejin, HackerNews)
    and lightweight page scraping (Baidu, 36kr), then normalizes every
    item into a common record schema.

    Must be used as an async context manager so the shared aiohttp
    session is opened and closed correctly::

        async with ReliableHotService() as service:
            data = await service.get_all_hot_news()
    """

    def __init__(self):
        # The aiohttp session is created lazily in __aenter__ so the
        # constructor stays synchronous and free of side effects.
        self.session = None
        # Browser-like headers: some endpoints reject requests that do
        # not carry a realistic User-Agent.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'application/json, text/plain, */*'
        }

    async def __aenter__(self):
        """Open the shared HTTP session with a 15-second total timeout."""
        connector = aiohttp.TCPConnector(force_close=True)
        timeout = aiohttp.ClientTimeout(total=15)
        self.session = aiohttp.ClientSession(
            connector=connector,
            timeout=timeout,
            headers=self.headers
        )
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Close the shared HTTP session on context exit."""
        if self.session:
            await self.session.close()

    async def fetch_juejin_hot(self) -> List[Dict[str, Any]]:
        """Fetch the Juejin article hot list (Chinese tech news).

        Returns:
            Up to 30 raw item dicts; an empty list on any error.
        """
        try:
            url = 'https://api.juejin.cn/content_api/v1/content/article_rank'
            params = {
                'category_id': '1',
                'type': 'hot'
            }

            async with self.session.get(url, params=params) as response:
                if response.status != 200:
                    logger.error(f"Juejin API failed: {response.status}")
                    return []

                data = await response.json()

            items = []
            articles = data.get('data', [])

            for idx, article in enumerate(articles[:30], 1):
                content = article.get('content', {})
                counter = article.get('content_counter', {})
                author = article.get('author', {})

                items.append({
                    'rank': idx,
                    'title': content.get('title', ''),
                    'url': f"https://juejin.cn/post/{content.get('content_id', '')}",
                    'heat_value': counter.get('view', 0),
                    'platform': '掘金',
                    'category': '技术',
                    'extra': {
                        'author': author.get('user_name', ''),
                        'likes': counter.get('like', 0),
                        'comments': counter.get('comment', 0)
                    }
                })

            logger.info(f"Got {len(items)} items from Juejin")
            return items

        except Exception as e:
            # Best effort: a failing source must not break the aggregate.
            logger.error(f"Error fetching Juejin: {e}")
            return []

    async def fetch_hackernews_hot(self) -> List[Dict[str, Any]]:
        """Fetch HackerNews top stories (international tech news).

        Returns:
            Up to 15 raw item dicts; an empty list on any error.
        """
        try:
            # Step 1: fetch the list of top story IDs.
            url = 'https://hacker-news.firebaseio.com/v0/topstories.json'
            async with self.session.get(url) as response:
                if response.status != 200:
                    return []
                story_ids = await response.json()

            items = []
            # Step 2: fetch details for the first 15 stories, sequentially
            # with a small delay to stay friendly to the API.
            for story_id in story_ids[:15]:
                detail_url = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json'

                try:
                    async with self.session.get(detail_url) as response:
                        if response.status == 200:
                            story = await response.json()

                            # Only real stories; skip jobs/polls/deleted entries.
                            if story and story.get('type') == 'story':
                                items.append({
                                    'rank': len(items) + 1,
                                    'title': story.get('title', ''),
                                    # "Ask HN" posts carry no external URL;
                                    # fall back to the discussion page.
                                    'url': story.get('url', f"https://news.ycombinator.com/item?id={story_id}"),
                                    'heat_value': story.get('score', 0),
                                    'platform': 'HackerNews',
                                    'category': '国际科技',
                                    'extra': {
                                        'author': story.get('by', ''),
                                        'comments': story.get('descendants', 0),
                                        'time': story.get('time', 0)
                                    }
                                })
                except Exception:
                    # One broken story must not abort the whole list.
                    continue

                await asyncio.sleep(0.1)  # avoid hammering the API

            logger.info(f"Got {len(items)} items from HackerNews")
            return items

        except Exception as e:
            logger.error(f"Error fetching HackerNews: {e}")
            return []

    async def scrape_baidu_hot(self) -> List[Dict[str, Any]]:
        """Scrape the Baidu realtime hot-search board (general news).

        Returns:
            Up to 30 raw item dicts; an empty list on any error.
        """
        try:
            url = 'https://top.baidu.com/board?tab=realtime'

            async with self.session.get(url) as response:
                if response.status != 200:
                    return []
                html = await response.text()

            soup = BeautifulSoup(html, 'html.parser')
            items = []

            # Titles live in ellipsis-truncated divs. NOTE(review): the
            # class names below are Baidu build artifacts and may change
            # without notice.
            hot_items = soup.find_all('div', class_='c-single-text-ellipsis')[:30]

            for idx, item in enumerate(hot_items, 1):
                title = item.text.strip()
                if title:
                    # The heat figure sits in a sibling node inside the
                    # entry container, when present.
                    heat_elem = item.find_parent('div', class_='content_1YWBm')
                    if heat_elem:
                        heat_text = heat_elem.find('div', class_='hot-index_1Bl1a')
                        heat_value = heat_text.text.strip() if heat_text else '0'
                    else:
                        heat_value = '0'

                    items.append({
                        'rank': idx,
                        'title': title,
                        # Link to a Baidu search for the topic; quote()
                        # keeps the URL valid for CJK/special characters.
                        'url': f"https://www.baidu.com/s?wd={quote(title)}",
                        'heat_value': heat_value,
                        'platform': '百度',
                        'category': '综合热搜',
                        'extra': {}
                    })

            logger.info(f"Got {len(items)} items from Baidu")
            return items

        except Exception as e:
            logger.error(f"Error scraping Baidu: {e}")
            return []

    async def scrape_36kr_hot(self) -> List[Dict[str, Any]]:
        """Scrape the 36kr hot list (startup / tech news).

        Returns:
            Up to 20 raw item dicts; an empty list on any error.
        """
        try:
            url = 'https://www.36kr.com/hot-list/catalog'

            async with self.session.get(url) as response:
                if response.status != 200:
                    return []
                html = await response.text()

            soup = BeautifulSoup(html, 'html.parser')
            items = []

            # Hot-list entries are anchor tags with a site-specific class.
            articles = soup.find_all('a', class_='article-item-title')[:20]

            for idx, article in enumerate(articles, 1):
                title = article.text.strip()
                href = article.get('href', '')

                if title:
                    items.append({
                        'rank': idx,
                        'title': title,
                        # Relative hrefs need the site prefix.
                        'url': f"https://www.36kr.com{href}" if href.startswith('/') else href,
                        'heat_value': 0,  # 36kr exposes no heat metric
                        'platform': '36氪',
                        'category': '创业科技',
                        'extra': {}
                    })

            logger.info(f"Got {len(items)} items from 36kr")
            return items

        except Exception as e:
            logger.error(f"Error scraping 36kr: {e}")
            return []

    @staticmethod
    def _parse_heat(text: str) -> int:
        """Parse a human-readable heat string into an integer.

        Handles plain digits as well as Chinese magnitude suffixes:
        '123' -> 123, '1.2万' -> 12000, '3亿' -> 300000000.
        Returns 0 when no number can be found.
        """
        match = re.search(r'(\d+(?:\.\d+)?)\s*([万亿])?', text)
        if not match:
            return 0
        value = float(match.group(1))
        unit = match.group(2)
        if unit == '万':
            value *= 10_000
        elif unit == '亿':
            value *= 100_000_000
        return int(value)

    def normalize_data(self, items: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Normalize raw source items into the common record schema.

        Every record gets an integer heat_value and a UTC ISO-8601
        fetch_time ending in 'Z'; missing fields fall back to defaults.

        Args:
            items: Raw item dicts as produced by the fetch/scrape methods.

        Returns:
            A new list of normalized record dicts (input is not mutated).
        """
        # One timestamp for the whole batch keeps records comparable.
        fetch_time = datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z')
        normalized = []

        for item in items:
            heat_value = item.get('heat_value', 0)
            if isinstance(heat_value, str):
                heat_value = self._parse_heat(heat_value)

            normalized.append({
                'platform': item.get('platform', 'unknown'),
                'category': item.get('category', 'unknown'),
                'rank': item.get('rank', 0),
                'title': item.get('title', ''),
                'url': item.get('url', ''),
                'heat_value': heat_value,
                'fetch_time': fetch_time,
                'extra': item.get('extra', {})
            })

        return normalized

    async def get_all_hot_news(self) -> Dict[str, Any]:
        """Fetch every source concurrently and merge the results.

        Returns:
            Dict with per-source normalized lists ('sources'), a merged
            list sorted by heat descending ('all'), and fetch statistics
            ('summary').
        """
        results = {
            'status': 'success',
            'timestamp': datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z'),
            'sources': {},
            'all': [],
            'summary': {
                'total_sources': 0,
                'successful': 0,
                'total_items': 0
            }
        }

        # Source name -> fetch coroutine.
        source_tasks = [
            ('juejin', self.fetch_juejin_hot()),
            ('hackernews', self.fetch_hackernews_hot()),
            ('baidu', self.scrape_baidu_hot()),
            ('36kr', self.scrape_36kr_hot())
        ]

        # Run all fetchers concurrently. Each fetcher already catches its
        # own errors; return_exceptions guards against anything unexpected.
        outcomes = await asyncio.gather(
            *(coro for _, coro in source_tasks),
            return_exceptions=True
        )

        for (source_name, _), outcome in zip(source_tasks, outcomes):
            try:
                if isinstance(outcome, BaseException):
                    raise outcome
                normalized = self.normalize_data(outcome)

                results['sources'][source_name] = normalized
                results['all'].extend(normalized)

                if normalized:
                    results['summary']['successful'] += 1
                    results['summary']['total_items'] += len(normalized)
                    logger.info(f"{source_name}: {len(normalized)} items")
                else:
                    logger.warning(f"{source_name}: 0 items")

            except Exception as e:
                logger.error(f"Error processing {source_name}: {e}")
                results['sources'][source_name] = []

            results['summary']['total_sources'] += 1

        # Hottest first across all platforms.
        results['all'].sort(key=lambda x: x.get('heat_value', 0), reverse=True)

        return results

    async def get_multi_platform_hot(self, platforms: Optional[List[str]] = None) -> Dict[str, List[Dict[str, Any]]]:
        """Return per-platform item lists (Tophub-compatible shape).

        Args:
            platforms: Optional subset of source names; unknown names map
                to empty lists. None (or empty) means "all sources".

        Returns:
            Mapping of platform name to its normalized item list.
        """
        all_data = await self.get_all_hot_news()

        if not platforms:
            return all_data['sources']

        return {
            platform: all_data['sources'].get(platform, [])
            for platform in platforms
        }


async def main():
    """测试函数"""
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    
    async with ReliableHotService() as service:
        print("\n=== 可靠热点服务测试 ===\n")
        
        # 获取所有热点
        results = await service.get_all_hot_news()
        
        # 显示摘要
        summary = results['summary']
        print(f"数据源: {summary['successful']}/{summary['total_sources']} 成功")
        print(f"总数据: {summary['total_items']} 条\n")
        
        # 显示各平台数据
        for source, items in results['sources'].items():
            if items:
                print(f"{source}: {len(items)} 条")
                for item in items[:3]:
                    print(f"  {item['rank']}. {item['title'][:40]}...")
                print()
        
        # 显示综合热点 Top 10
        print("=== 综合热点 Top 10 ===")
        for idx, item in enumerate(results['all'][:10], 1):
            print(f"{idx}. [{item['platform']}] {item['title'][:40]} (热度: {item['heat_value']})")
        
        return summary['successful'] > 0


if __name__ == "__main__":
    result = asyncio.run(main())
    print(f"\n测试{'成功' if result else '失败'}")