"""
真实数据热点服务 - 只获取真实数据，不使用模拟
"""
import asyncio
import json
import logging
import re
from datetime import datetime
from typing import Dict, Any, List, Optional
import aiohttp
from bs4 import BeautifulSoup

logger = logging.getLogger(__name__)


class RealHotService:
    """Aggregator that fetches real (never mocked) trending-topic data.

    Use as an async context manager so the aiohttp session is opened and
    closed deterministically::

        async with RealHotService() as svc:
            payload = await svc.fetch_all()

    Every ``fetch_*`` coroutine returns a list of item dicts with keys:
    ``rank``, ``title``, ``url``, ``source``, ``platform``, ``heat_value``,
    ``fetch_time``.  Failures are logged and yield empty lists — they are
    never raised to the caller.
    """

    def __init__(self):
        # Session is created lazily in __aenter__ so the object can be
        # constructed outside a running event loop.
        self.session = None
        self.timeout = aiohttp.ClientTimeout(total=10)
        # Browser-like headers: tophub.today serves different/empty content
        # to obvious bot user agents.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9'
        }

    async def __aenter__(self):
        self.session = aiohttp.ClientSession(timeout=self.timeout)
        return self

    async def __aexit__(self, *args):
        if self.session:
            await self.session.close()

    @staticmethod
    def _clean_title(title: str) -> str:
        """Strip a *leading* rank prefix (e.g. ``"3. "``) and keep only the
        first line of the cell text.

        The pattern is anchored with ``^`` — an unanchored pattern would
        delete the first digit run anywhere in the title (e.g. turning
        "G20峰会" into "G峰会").
        """
        title = re.sub(r'^\d+\.?\s*', '', title, count=1)
        return title.split('\n')[0].strip()

    @staticmethod
    def _parse_heat(text: str, rank: int) -> int:
        """Extract a heat value like ``"1.5万"`` / ``"2亿"`` from row text.

        Falls back to a rank-derived synthetic value (1,000,000 minus
        10,000 per rank) when no explicit heat figure is present, so items
        still sort sensibly.
        """
        match = re.search(r'(\d+(?:\.\d+)?)\s*([万亿])', text)
        if not match:
            return 1000000 - rank * 10000
        num = float(match.group(1))
        # 亿 = 10^8, 万 = 10^4; the regex guarantees one of the two units.
        multiplier = 100000000 if match.group(2) == '亿' else 10000
        return int(num * multiplier)

    @classmethod
    def _parse_tophub_html(cls, html: str, platform: str) -> List[Dict[str, Any]]:
        """Parse a tophub.today board page into at most 50 item dicts."""
        soup = BeautifulSoup(html, 'html.parser')
        # Boards render as a table; fall back to any table if the expected
        # class is missing.
        rows = soup.select('table.table tr') or soup.select('table tr')
        items: List[Dict[str, Any]] = []
        for idx, row in enumerate(rows[1:], 1):  # rows[0] is the header
            # The title usually lives in a td with class "al"; otherwise
            # the second column.
            td = row.select_one('td.al') or row.select_one('td:nth-child(2)')
            if td:
                a_tag = td.find('a')
                if a_tag:
                    title = a_tag.get_text(strip=True)
                    href = a_tag.get('href', '')
                else:
                    title = td.get_text(strip=True)
                    href = ''

                title = cls._clean_title(title)
                heat_value = cls._parse_heat(row.get_text(), idx)

                # Titles of <=2 chars are almost certainly layout noise.
                if title and len(title) > 2:
                    items.append({
                        'rank': idx,
                        'title': title,
                        # tophub links are site-relative; absolutize them.
                        'url': href if href.startswith('http') else f"https://tophub.today{href}",
                        'source': platform,
                        'platform': platform,
                        'heat_value': heat_value,
                        'fetch_time': datetime.now().isoformat()
                    })

            if idx >= 50:  # cap at 50 items
                break
        return items

    async def fetch_tophub_page(self, url: str, platform: str) -> List[Dict[str, Any]]:
        """Fetch one tophub.today board page and parse its table.

        Args:
            url: Full board URL on tophub.today.
            platform: Label stored in each item's ``source``/``platform``.

        Returns:
            Up to 50 item dicts; an empty list on any HTTP or parse
            failure (the error is logged, never raised).
        """
        hot_list: List[Dict[str, Any]] = []
        try:
            async with self.session.get(url, headers=self.headers) as response:
                if response.status == 200:
                    html = await response.text()
                    hot_list = self._parse_tophub_html(html, platform)
        except Exception as e:
            logger.error(f"获取{platform}失败: {e}")
        return hot_list

    # ========== Weibo hot search ==========
    async def fetch_weibo(self) -> List[Dict[str, Any]]:
        """Weibo hot search via tophub mirrors; first non-empty board wins."""
        urls = [
            "https://tophub.today/n/KqndgxeLl9",  # primary board
            "https://tophub.today/n/KMZd7VOvrO"   # backup board
        ]
        for url in urls:
            result = await self.fetch_tophub_page(url, 'weibo')
            if result:
                return result
        return []

    # ========== Zhihu hot list ==========
    async def fetch_zhihu(self) -> List[Dict[str, Any]]:
        """Zhihu hot list via its tophub mirror."""
        return await self.fetch_tophub_page("https://tophub.today/n/mproPpoq6O", 'zhihu')

    # ========== Toutiao ==========
    async def fetch_toutiao(self) -> List[Dict[str, Any]]:
        """Toutiao hot list via tophub mirrors; first non-empty board wins."""
        urls = [
            "https://tophub.today/n/x9ozqX7eXb",
            "https://tophub.today/n/0MdKam4ow1"
        ]
        for url in urls:
            result = await self.fetch_tophub_page(url, 'toutiao')
            if result:
                return result
        return []

    # ========== Baidu hot search ==========
    async def fetch_baidu(self) -> List[Dict[str, Any]]:
        """Baidu hot search via its tophub mirror."""
        return await self.fetch_tophub_page("https://tophub.today/n/Jb0vmloB1G", 'baidu')

    # ========== Bilibili trending ==========
    async def fetch_bilibili(self) -> List[Dict[str, Any]]:
        """Bilibili: try the official hotword API first, then tophub mirrors."""
        hot_list: List[Dict[str, Any]] = []

        # Plan 1: Bilibili's public hot-search API (no auth required).
        try:
            url = "https://s.search.bilibili.com/main/hotword"
            async with self.session.get(url) as response:
                if response.status == 200:
                    data = await response.json()
                    if data.get('code') == 0:
                        for idx, item in enumerate(data.get('list', [])[:50], 1):
                            keyword = item.get('keyword', '')
                            hot_list.append({
                                'rank': idx,
                                'title': keyword,
                                'url': f"https://search.bilibili.com/all?keyword={keyword}",
                                'source': 'bilibili',
                                'platform': 'video',
                                # API sometimes omits heat; synthesize from rank.
                                'heat_value': item.get('heat', 0) or (51 - idx) * 10000,
                                'fetch_time': datetime.now().isoformat()
                            })
                        return hot_list
        except Exception as e:
            logger.error(f"B站API失败: {e}")

        # Plan 2: tophub mirror boards.
        urls = [
            "https://tophub.today/n/74VvMzeGmN",
            "https://tophub.today/n/bqGqAKqled"
        ]
        for url in urls:
            result = await self.fetch_tophub_page(url, 'bilibili')
            if result:
                return result
        return []

    # ========== Douyin hot list ==========
    async def fetch_douyin(self) -> List[Dict[str, Any]]:
        """Douyin: try the official web API first, then a tophub mirror."""
        hot_list: List[Dict[str, Any]] = []

        try:
            url = "https://www.douyin.com/aweme/v1/web/hot/search/list/"
            headers = {
                **self.headers,
                # Douyin's endpoint rejects requests without a referer and
                # at least a placeholder ttwid cookie.
                'Referer': 'https://www.douyin.com/',
                'Cookie': 'ttwid=1'
            }

            async with self.session.get(url, headers=headers) as response:
                if response.status == 200:
                    data = await response.json()
                    word_list = data.get('data', {}).get('word_list', [])
                    for idx, item in enumerate(word_list[:50], 1):
                        word = item.get('word', '')
                        hot_list.append({
                            'rank': idx,
                            'title': word,
                            'url': f"https://www.douyin.com/search/{word}",
                            'source': 'douyin',
                            'platform': 'short_video',
                            'heat_value': item.get('hot_value', 0),
                            'fetch_time': datetime.now().isoformat()
                        })

                    if hot_list:
                        return hot_list
        except Exception as e:
            logger.error(f"抖音API失败: {e}")

        # Fallback: tophub mirror.
        return await self.fetch_tophub_page("https://tophub.today/n/DpQvNABoNE", 'douyin')

    # ========== Xiaohongshu trending ==========
    async def fetch_xiaohongshu(self) -> List[Dict[str, Any]]:
        """Xiaohongshu via tophub mirrors; first non-empty board wins."""
        urls = [
            "https://tophub.today/n/L4MdAV7j20",
            "https://tophub.today/n/WnBe01jpxE",
            "https://tophub.today/n/7gdx9z1YWy"
        ]
        for url in urls:
            result = await self.fetch_tophub_page(url, 'xiaohongshu')
            if result:
                return result
        return []

    # ========== GitHub trending ==========
    async def fetch_github(self) -> List[Dict[str, Any]]:
        """Top-starred GitHub repositories via the public search API."""
        hot_list: List[Dict[str, Any]] = []
        try:
            url = "https://api.github.com/search/repositories"
            params = {
                'q': 'stars:>1000',
                'sort': 'stars',
                'order': 'desc',
                'per_page': 30
            }

            async with self.session.get(url, params=params) as response:
                if response.status == 200:
                    data = await response.json()
                    for idx, repo in enumerate(data.get('items', [])[:30], 1):
                        # description can be JSON null -> None; slicing None
                        # would raise TypeError and abort the whole list.
                        description = (repo.get('description') or '')[:50]
                        hot_list.append({
                            'rank': idx,
                            'title': f"{repo['full_name']} - {description}",
                            'url': repo['html_url'],
                            'source': 'github',
                            'platform': 'tech',
                            'heat_value': repo['stargazers_count'],
                            'language': repo.get('language', ''),
                            'fetch_time': datetime.now().isoformat()
                        })
        except Exception as e:
            logger.error(f"GitHub失败: {e}")
        return hot_list

    async def fetch_all(self) -> Dict[str, Any]:
        """Fetch every platform concurrently and return data plus stats.

        Uses ``asyncio.gather`` so all platform requests run in parallel
        (the previous sequential awaiting made total latency the sum of
        all platform latencies).  A failing platform contributes an empty
        list; exceptions are logged and never propagated.
        """
        fetchers = {
            'weibo': self.fetch_weibo,
            'zhihu': self.fetch_zhihu,
            'toutiao': self.fetch_toutiao,
            'baidu': self.fetch_baidu,
            'bilibili': self.fetch_bilibili,
            'douyin': self.fetch_douyin,
            'xiaohongshu': self.fetch_xiaohongshu,
            'github': self.fetch_github
        }
        names = list(fetchers)
        for name in names:
            logger.info(f"正在获取 {name}...")

        # return_exceptions=True keeps one failing platform from cancelling
        # the others.
        outcomes = await asyncio.gather(
            *(fetchers[name]() for name in names),
            return_exceptions=True
        )

        results: Dict[str, List[Dict[str, Any]]] = {}
        for name, outcome in zip(names, outcomes):
            if isinstance(outcome, BaseException):
                logger.error(f"{name} 异常: {outcome}")
                results[name] = []
            else:
                results[name] = outcome
                if outcome:
                    logger.info(f"{name}: 成功获取 {len(outcome)} 条数据")
                else:
                    logger.warning(f"{name}: 未获取到数据")

        # Summary statistics over all platforms.
        total = sum(len(items) for items in results.values())
        successful = sum(1 for items in results.values() if items)

        return {
            'data': results,
            'stats': {
                'total_items': total,
                'successful_platforms': successful,
                'failed_platforms': len(results) - successful,
                'total_platforms': len(results),
                'timestamp': datetime.now().isoformat()
            }
        }


async def main():
    """测试"""
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    
    async with RealHotService() as service:
        print("\n=== 真实热点数据获取 ===\n")
        print("正在获取各平台数据（只获取真实数据，不使用模拟）...")
        
        result = await service.fetch_all()
        
        print("\n=== 获取结果 ===\n")
        
        for platform, items in result['data'].items():
            if items:
                print(f"✅ {platform.upper()}: {len(items)} 条真实数据")
                # 显示前3条
                for item in items[:3]:
                    title = item['title'][:50] + '...' if len(item['title']) > 50 else item['title']
                    print(f"   [{item['rank']}] {title}")
            else:
                print(f"❌ {platform.upper()}: 0 条（获取失败）")
            print()
        
        stats = result['stats']
        print(f"=== 统计 ===")
        print(f"成功平台: {stats['successful_platforms']}/{stats['total_platforms']}")
        print(f"失败平台: {stats['failed_platforms']}")
        print(f"总数据量: {stats['total_items']} 条真实数据")
        
        # 保存结果
        with open('hot_topics_real.json', 'w', encoding='utf-8') as f:
            json.dump(result, f, ensure_ascii=False, indent=2)
            print(f"\n数据已保存到 hot_topics_real.json")
        
        return stats['successful_platforms'] > 0


if __name__ == "__main__":
    result = asyncio.run(main())
    exit(0 if result else 1)