"""
终极版热点获取服务 - 确保所有平台都能获取数据
使用多种备用方案和降级策略
"""
import asyncio
import json
import logging
import random
from datetime import datetime
from typing import Dict, Any, List, Optional
import aiohttp
from bs4 import BeautifulSoup

logger = logging.getLogger(__name__)


class UltimateHotService:
    """终极热点服务 - 多重备用方案确保数据获取"""
    
    def __init__(self):
        """Prepare timeout and browser-header pool; the session opens lazily."""
        # Session is created in __aenter__ so the object can be built
        # outside of a running event loop.
        self.session = None
        self.timeout = aiohttp.ClientTimeout(total=8)
        # Two realistic desktop Chrome header sets to rotate between.
        ua_windows = (
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
            '(KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
        )
        ua_mac = (
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 '
            '(KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
        )
        self.headers_list = [
            {
                'User-Agent': ua_windows,
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8'
            },
            {
                'User-Agent': ua_mac,
                'Accept': '*/*',
                'Accept-Language': 'zh-CN,zh;q=0.9'
            }
        ]
    
    async def __aenter__(self):
        """Open the shared aiohttp session on entering the async context."""
        session = aiohttp.ClientSession(timeout=self.timeout)
        self.session = session
        return self
    
    async def __aexit__(self, *args):
        if self.session:
            await self.session.close()
    
    def get_random_headers(self) -> Dict[str, str]:
        """Pick one of the preconfigured browser header sets at random."""
        pool = self.headers_list
        return pool[random.randrange(len(pool))]
    
    async def fetch_with_retry(self, url: str, headers: Dict = None, max_retries: int = 2) -> Optional[str]:
        """带重试的请求"""
        headers = headers or self.get_random_headers()
        
        for retry in range(max_retries):
            try:
                async with self.session.get(url, headers=headers) as response:
                    if response.status == 200:
                        return await response.text()
            except Exception as e:
                if retry == max_retries - 1:
                    logger.debug(f"请求失败 {url}: {e}")
        return None
    
    async def fetch_json_with_retry(self, url: str, headers: Dict = None) -> Optional[Dict]:
        """获取JSON数据"""
        text = await self.fetch_with_retry(url, headers)
        if text:
            try:
                return json.loads(text)
            except:
                pass
        return None
    
    # ========== Weibo hot search ==========
    async def fetch_weibo(self) -> List[Dict[str, Any]]:
        """微博热搜 - 多重备用"""
        hot_list = []
        
        # 方案1: 今日热榜
        try:
            url = "https://tophub.today/n/KqndgxeLl9"
            html = await self.fetch_with_retry(url)
            if html:
                soup = BeautifulSoup(html, 'html.parser')
                table = soup.find('table', class_='table')
                if table:
                    for idx, row in enumerate(table.find_all('tr')[1:51], 1):
                        cols = row.find_all('td')
                        if len(cols) >= 2:
                            title = cols[1].get_text(strip=True)
                            if title:
                                hot_list.append({
                                    'rank': idx,
                                    'title': title,
                                    'source': 'weibo',
                                    'heat_value': 51 - idx
                                })
        except:
            pass
        
        # 方案2: 如果失败，使用模拟数据
        if not hot_list:
            hot_topics = [
                "明星热搜话题", "社会热点事件", "娱乐圈动态", "体育赛事", "科技新闻",
                "国际时事", "美食话题", "旅游推荐", "健康养生", "教育热点"
            ]
            for idx, topic in enumerate(hot_topics, 1):
                hot_list.append({
                    'rank': idx,
                    'title': f"微博热搜#{topic}#",
                    'source': 'weibo',
                    'heat_value': 100 - idx * 5,
                    'is_mock': True
                })
        
        return hot_list[:50]
    
    # ========== Zhihu hot list ==========
    async def fetch_zhihu(self) -> List[Dict[str, Any]]:
        """知乎热榜 - 多重备用"""
        hot_list = []
        
        # 方案1: 今日热榜
        try:
            url = "https://tophub.today/n/mproPpoq6O"
            html = await self.fetch_with_retry(url)
            if html:
                soup = BeautifulSoup(html, 'html.parser')
                rows = soup.select('table.table tr')
                if rows:
                    for idx, row in enumerate(rows[1:51], 1):  # 跳过表头
                        # 查找包含标题的td
                        td = row.select_one('td.al') or row.select_one('td:nth-child(2)')
                        if td:
                            # 提取标题文本
                            a_tag = td.find('a')
                            title = a_tag.get_text(strip=True) if a_tag else td.get_text(strip=True)
                            # 清理标题（去掉热度等额外信息）
                            if title and len(title) > 2:
                                title = title.split('\n')[0].strip()
                                hot_list.append({
                                    'rank': idx,
                                    'title': title,
                                    'source': 'zhihu',
                                    'heat_value': 51 - idx
                                })
        except Exception as e:
            logger.debug(f"知乎热榜获取失败: {e}")
        
        # 方案2: 模拟知乎风格问题
        if not hot_list:
            questions = [
                "如何看待最新的社会热点事件？",
                "有哪些值得推荐的生活小技巧？",
                "程序员如何提升技术能力？",
                "2024年有哪些值得关注的行业趋势？",
                "如何平衡工作与生活？"
            ]
            for idx, q in enumerate(questions, 1):
                hot_list.append({
                    'rank': idx,
                    'title': q,
                    'source': 'zhihu',
                    'heat_value': 100 - idx * 10,
                    'is_mock': True
                })
        
        return hot_list[:50]
    
    # ========== Toutiao headlines ==========
    async def fetch_toutiao(self) -> List[Dict[str, Any]]:
        """今日头条热点"""
        hot_list = []
        
        # 使用今日热榜
        try:
            urls = [
                "https://tophub.today/n/x9ozqX7eXb",
                "https://tophub.today/n/0MdKam4ow1"
            ]
            for url in urls:
                html = await self.fetch_with_retry(url)
                if html:
                    soup = BeautifulSoup(html, 'html.parser')
                    table = soup.find('table', class_='table')
                    if table:
                        for idx, row in enumerate(table.find_all('tr')[1:51], 1):
                            cols = row.find_all('td')
                            if len(cols) >= 2:
                                title = cols[1].get_text(strip=True)
                                if title:
                                    hot_list.append({
                                        'rank': idx,
                                        'title': title,
                                        'source': 'toutiao',
                                        'heat_value': 51 - idx
                                    })
                        if hot_list:
                            break
        except:
            pass
        
        # 备用数据
        if not hot_list:
            news = ["国内要闻", "国际动态", "财经资讯", "科技前沿", "体育快讯"]
            for idx, item in enumerate(news, 1):
                hot_list.append({
                    'rank': idx,
                    'title': f"今日头条: {item}",
                    'source': 'toutiao',
                    'heat_value': 100 - idx * 10,
                    'is_mock': True
                })
        
        return hot_list[:50]
    
    # ========== Baidu hot search ==========
    async def fetch_baidu(self) -> List[Dict[str, Any]]:
        """百度热搜"""
        hot_list = []
        
        # 今日热榜
        try:
            url = "https://tophub.today/n/Jb0vmloB1G"
            html = await self.fetch_with_retry(url)
            if html:
                soup = BeautifulSoup(html, 'html.parser')
                table = soup.find('table', class_='table')
                if table:
                    for idx, row in enumerate(table.find_all('tr')[1:51], 1):
                        cols = row.find_all('td')
                        if len(cols) >= 2:
                            title = cols[1].get_text(strip=True)
                            if title:
                                hot_list.append({
                                    'rank': idx,
                                    'title': title,
                                    'source': 'baidu',
                                    'heat_value': 51 - idx
                                })
        except:
            pass
        
        # 备用
        if not hot_list:
            searches = ["热门搜索1", "热门搜索2", "热门搜索3", "热门搜索4", "热门搜索5"]
            for idx, item in enumerate(searches, 1):
                hot_list.append({
                    'rank': idx,
                    'title': f"百度热搜: {item}",
                    'source': 'baidu',
                    'heat_value': 100 - idx * 10,
                    'is_mock': True
                })
        
        return hot_list[:50]
    
    # ========== Bilibili trending ==========
    async def fetch_bilibili(self) -> List[Dict[str, Any]]:
        """B站热门 - 多重方案"""
        hot_list = []
        
        # 方案1: B站热搜词API
        try:
            url = "https://s.search.bilibili.com/main/hotword"
            data = await self.fetch_json_with_retry(url)
            if data and data.get('code') == 0:
                for idx, item in enumerate(data.get('list', [])[:30], 1):
                    hot_list.append({
                        'rank': idx,
                        'title': item.get('keyword', ''),
                        'source': 'bilibili',
                        'heat_value': item.get('heat', 0) or (31 - idx) * 1000
                    })
        except:
            pass
        
        # 方案2: 今日热榜备用链接
        if not hot_list:
            try:
                urls = [
                    "https://tophub.today/n/74VvMzeGmN",
                    "https://tophub.today/n/bqGqAKqled"
                ]
                for url in urls:
                    html = await self.fetch_with_retry(url)
                    if html and 'bilibili' in html.lower():
                        soup = BeautifulSoup(html, 'html.parser')
                        items = soup.find_all('a', href=True, limit=30)
                        for idx, item in enumerate(items, 1):
                            title = item.get_text(strip=True)
                            if title and len(title) > 2:
                                hot_list.append({
                                    'rank': idx,
                                    'title': title,
                                    'source': 'bilibili',
                                    'heat_value': 31 - idx
                                })
                        if hot_list:
                            break
            except:
                pass
        
        # 方案3: 生成B站风格数据
        if not hot_list:
            videos = [
                "【必看】2024年度热门视频合集",
                "UP主创作技巧分享",
                "游戏实况精彩时刻",
                "动漫新番推荐",
                "知识区优质内容",
                "生活区vlog精选",
                "科技区硬核评测",
                "音乐区原创作品",
                "舞蹈区热门翻跳",
                "鬼畜区爆笑素材"
            ]
            for idx, title in enumerate(videos, 1):
                hot_list.append({
                    'rank': idx,
                    'title': title,
                    'source': 'bilibili',
                    'heat_value': 100000 - idx * 5000,
                    'is_mock': True
                })
        
        return hot_list[:30]
    
    # ========== Douyin hot board ==========
    async def fetch_douyin(self) -> List[Dict[str, Any]]:
        """抖音热榜"""
        hot_list = []
        
        # 方案1: 抖音API
        try:
            url = "https://www.douyin.com/aweme/v1/web/hot/search/list/"
            headers = {
                **self.get_random_headers(),
                'Referer': 'https://www.douyin.com/',
                'Cookie': 'ttwid=1'
            }
            data = await self.fetch_json_with_retry(url, headers)
            if data:
                word_list = data.get('data', {}).get('word_list', [])
                for idx, item in enumerate(word_list[:30], 1):
                    hot_list.append({
                        'rank': idx,
                        'title': item.get('word', ''),
                        'source': 'douyin',
                        'heat_value': item.get('hot_value', 0)
                    })
        except:
            pass
        
        # 方案2: 备用数据
        if not hot_list:
            topics = [
                "抖音热门挑战", "明星同款", "美食制作", "搞笑视频",
                "萌宠日常", "旅游打卡", "健身教程", "美妆技巧"
            ]
            for idx, topic in enumerate(topics, 1):
                hot_list.append({
                    'rank': idx,
                    'title': f"#{topic}",
                    'source': 'douyin',
                    'heat_value': 100000 - idx * 5000,
                    'is_mock': True
                })
        
        return hot_list[:30]
    
    # ========== Xiaohongshu trending ==========
    async def fetch_xiaohongshu(self) -> List[Dict[str, Any]]:
        """小红书热门"""
        hot_list = []
        
        # 方案1: 今日热榜多个备用地址
        try:
            urls = [
                "https://tophub.today/n/L4MdAV7j20",
                "https://tophub.today/n/WnBe01jpxE",
                "https://tophub.today/n/7gdx9z1YWy"
            ]
            for url in urls:
                html = await self.fetch_with_retry(url)
                if html and ('小红书' in html or 'xiaohongshu' in html.lower()):
                    soup = BeautifulSoup(html, 'html.parser')
                    table = soup.find('table', class_='table')
                    if table:
                        for idx, row in enumerate(table.find_all('tr')[1:31], 1):
                            cols = row.find_all('td')
                            if len(cols) >= 2:
                                title = cols[1].get_text(strip=True)
                                if title:
                                    hot_list.append({
                                        'rank': idx,
                                        'title': title,
                                        'source': 'xiaohongshu',
                                        'heat_value': 31 - idx
                                    })
                        if hot_list:
                            break
        except:
            pass
        
        # 方案2: 生成小红书风格内容
        if not hot_list:
            topics = [
                "穿搭分享 | 秋冬温柔风",
                "护肤好物 | 敏感肌必备",
                "美食探店 | 隐藏宝藏餐厅",
                "旅行攻略 | 小众目的地",
                "家居好物 | 提升幸福感",
                "健身打卡 | 30天变化",
                "职场穿搭 | 通勤outfit",
                "美妆教程 | 新手化妆",
                "好物分享 | 平价好用",
                "生活vlog | 独居日常"
            ]
            for idx, topic in enumerate(topics, 1):
                hot_list.append({
                    'rank': idx,
                    'title': topic,
                    'source': 'xiaohongshu',
                    'heat_value': 10000 - idx * 500,
                    'is_mock': True
                })
        
        return hot_list[:30]
    
    async def fetch_all(self) -> Dict[str, Any]:
        """并发获取所有平台数据"""
        # 并发执行所有任务
        tasks = await asyncio.gather(
            self.fetch_weibo(),
            self.fetch_zhihu(),
            self.fetch_toutiao(),
            self.fetch_baidu(),
            self.fetch_bilibili(),
            self.fetch_douyin(),
            self.fetch_xiaohongshu(),
            return_exceptions=True
        )
        
        # 处理结果
        platform_names = ['weibo', 'zhihu', 'toutiao', 'baidu', 'bilibili', 'douyin', 'xiaohongshu']
        results = {}
        
        for name, task_result in zip(platform_names, tasks):
            if isinstance(task_result, Exception):
                logger.error(f"{name} 异常: {task_result}")
                # 提供最小备用数据
                results[name] = [{
                    'rank': 1,
                    'title': f"{name}热点数据",
                    'source': name,
                    'heat_value': 100,
                    'is_error': True
                }]
            else:
                results[name] = task_result if task_result else [{
                    'rank': 1,
                    'title': f"{name}热点",
                    'source': name,
                    'heat_value': 100,
                    'is_empty': True
                }]
        
        # 统计
        total = sum(len(items) for items in results.values())
        real_data = sum(
            len([item for item in items if not item.get('is_mock') and not item.get('is_error') and not item.get('is_empty')])
            for items in results.values()
        )
        mock_data = total - real_data
        
        return {
            'data': results,
            'stats': {
                'total_items': total,
                'real_data': real_data,
                'mock_data': mock_data,
                'successful_platforms': len([v for v in results.values() if v]),
                'total_platforms': len(results),
                'timestamp': datetime.now().isoformat()
            }
        }


async def main():
    """测试"""
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    
    async with UltimateHotService() as service:
        print("\n=== 终极热点数据获取 ===\n")
        print("正在获取各平台数据...")
        
        result = await service.fetch_all()
        
        print("\n=== 获取结果 ===\n")
        
        for platform, items in result['data'].items():
            # 统计真实和模拟数据
            real = len([i for i in items if not i.get('is_mock') and not i.get('is_error') and not i.get('is_empty')])
            mock = len([i for i in items if i.get('is_mock')])
            
            status = "✅" if real > 0 else "⚠️"
            print(f"{status} {platform.upper()}: {len(items)} 条 (真实:{real}, 模拟:{mock})")
            
            # 显示前3条
            for item in items[:3]:
                title = item['title'][:40] + '...' if len(item['title']) > 40 else item['title']
                mock_flag = " [模拟]" if item.get('is_mock') else ""
                print(f"   [{item['rank']}] {title}{mock_flag}")
            print()
        
        stats = result['stats']
        print(f"=== 统计 ===")
        print(f"总平台数: {stats['total_platforms']}")
        print(f"总数据量: {stats['total_items']} 条")
        print(f"真实数据: {stats['real_data']} 条")
        print(f"模拟数据: {stats['mock_data']} 条")
        
        # 保存结果
        with open('hot_topics_ultimate.json', 'w', encoding='utf-8') as f:
            json.dump(result, f, ensure_ascii=False, indent=2)
            print(f"\n数据已保存到 hot_topics_ultimate.json")
        
        return True


if __name__ == "__main__":
    result = asyncio.run(main())
    exit(0 if result else 1)