"""
整合多数据源的热点服务
使用多种方式获取各平台热点数据
"""
import asyncio
import hashlib
import json
import logging
import re
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, List, Optional, Tuple
from urllib.parse import quote

import aiohttp
from bs4 import BeautifulSoup

# Module-level logger named after this module, per the stdlib logging convention.
logger = logging.getLogger(__name__)


class IntegratedHotService:
    """Aggregated hot-topic service combining several acquisition strategies.

    Scrapes third-party aggregator pages (tophub.today, AnyKnew) and, where
    available, a platform web API (Douyin) to collect trending lists from
    Weibo, Zhihu, Toutiao, Baidu, Bilibili, Douyin and Xiaohongshu.

    Must be used as an async context manager so the shared aiohttp session
    is opened and closed correctly::

        async with IntegratedHotService() as svc:
            result = await svc.fetch_all_platforms()
    """

    def __init__(self):
        # The session is created lazily in __aenter__ so the instance can be
        # constructed outside of a running event loop.
        self.session = None
        self.timeout = aiohttp.ClientTimeout(total=15)
        # Browser-like headers; aggregator sites may reject obvious bots.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8'
        }

    async def __aenter__(self):
        """Open the shared HTTP session."""
        self.session = aiohttp.ClientSession(timeout=self.timeout)
        return self

    async def __aexit__(self, *args):
        """Close the shared HTTP session, if one was opened."""
        if self.session:
            await self.session.close()

    def generate_hash(self, text: str) -> str:
        """Return the MD5 hex digest of *text* (used for de-duplication)."""
        return hashlib.md5(text.encode()).hexdigest()

    @staticmethod
    def _now_iso() -> str:
        """Current UTC time as an ISO-8601 string with a trailing 'Z'."""
        # Timezone-aware now() replaces the deprecated datetime.utcnow();
        # the replace() keeps the exact '...Z' format the old code produced.
        return datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z')

    def _make_item(self, rank: int, title: str, url: str, source: str,
                   platform: str, heat_value: int) -> Dict[str, Any]:
        """Build one normalized hot-list entry shared by every fetcher."""
        return {
            'rank': rank,
            'title': title,
            'url': url,
            'source': source,
            'platform': platform,
            'heat_value': heat_value,
            'fetch_time': self._now_iso()
        }

    async def _scrape_tophub_rows(self, url: str, limit: int) -> List[Tuple[str, str]]:
        """Scrape a tophub.today page rendered as bare <tr>/<a> rows.

        Returns up to *limit* ``(title, href)`` pairs; empty on non-200
        responses or when no matching rows are found.
        """
        pairs: List[Tuple[str, str]] = []
        async with self.session.get(url, headers=self.headers) as response:
            if response.status != 200:
                return pairs
            soup = BeautifulSoup(await response.text(), 'html.parser')
            for row in soup.find_all('tr', class_='')[:limit]:
                link = row.find('a', class_='')
                if link:
                    pairs.append((link.get_text(strip=True), link.get('href', '')))
        return pairs

    async def _scrape_tophub_table(self, url: str, limit: int) -> List[Tuple[str, str]]:
        """Scrape a tophub.today page rendered as a ``<table class="table">``.

        Skips the header row; returns up to *limit* ``(title, href)`` pairs.
        """
        pairs: List[Tuple[str, str]] = []
        async with self.session.get(url, headers=self.headers) as response:
            if response.status != 200:
                return pairs
            soup = BeautifulSoup(await response.text(), 'html.parser')
            table = soup.find('table', class_='table')
            if not table:
                return pairs
            for row in table.find_all('tr')[1:limit + 1]:  # row [0] is the header
                cols = row.find_all('td')
                if len(cols) >= 2:
                    link = cols[1].find('a')
                    if link:
                        pairs.append((link.get_text(strip=True), link.get('href', '')))
        return pairs

    # ========== Weibo hot search (fallback sources) ==========
    async def fetch_weibo_alternative(self) -> List[Dict[str, Any]]:
        """Fetch the Weibo hot-search list via third-party aggregators."""
        hot_list = []

        # Strategy 1: scrape tophub.today's Weibo hot-search page.
        try:
            rows = await self._scrape_tophub_rows("https://tophub.today/n/KqndgxeLl9", 50)
            for idx, (title, _href) in enumerate(rows, 1):
                # Link to the Weibo hashtag search; the title is URL-encoded so
                # spaces and punctuation don't break the query string.
                search_url = f"https://s.weibo.com/weibo?q=%23{quote(title)}%23"
                hot_list.append(self._make_item(idx, title, search_url,
                                                'weibo', 'social', 51 - idx))
        except Exception as e:
            logger.warning(f"今日热榜获取微博失败: {e}")

        # Strategy 2: fall back to the AnyKnew aggregation API.
        if not hot_list:
            try:
                url = "https://www.anyknew.com/api/v1/sites/weibo/pages"
                async with self.session.get(url) as response:
                    if response.status == 200:
                        data = await response.json()
                        items = data.get('data', {}).get('items', [])[:50]
                        for idx, item in enumerate(items, 1):
                            hot_list.append(self._make_item(
                                idx, item.get('title', ''), item.get('url', ''),
                                'weibo', 'social', item.get('heat', 0)))
            except Exception as e:
                logger.warning(f"AnyKnew获取微博失败: {e}")

        return hot_list

    # ========== Zhihu hot list ==========
    async def fetch_zhihu_alternative(self) -> List[Dict[str, Any]]:
        """Fetch the Zhihu hot list via tophub.today."""
        hot_list = []
        try:
            rows = await self._scrape_tophub_rows("https://tophub.today/n/mproPpoq6O", 50)
            for idx, (title, href) in enumerate(rows, 1):
                hot_list.append(self._make_item(idx, title, href,
                                                'zhihu', 'qa', 51 - idx))
        except Exception as e:
            logger.warning(f"今日热榜获取知乎失败: {e}")
        return hot_list

    # ========== Toutiao hot list ==========
    async def fetch_toutiao_alternative(self) -> List[Dict[str, Any]]:
        """Fetch the Toutiao (news) hot list via tophub.today."""
        hot_list = []
        try:
            rows = await self._scrape_tophub_rows("https://tophub.today/n/x9ozqX7eXb", 50)
            for idx, (title, href) in enumerate(rows, 1):
                hot_list.append(self._make_item(idx, title, href,
                                                'toutiao', 'news', 51 - idx))
        except Exception as e:
            logger.warning(f"今日热榜获取头条失败: {e}")
        return hot_list

    # ========== Baidu hot search ==========
    async def fetch_baidu_hot(self) -> List[Dict[str, Any]]:
        """Fetch Baidu hot searches via tophub.today's table layout."""
        hot_list = []
        try:
            rows = await self._scrape_tophub_table("https://tophub.today/n/Jb0vmloB1G", 50)
            for idx, (title, _href) in enumerate(rows, 1):
                # Build a Baidu search URL; quote() keeps the query valid.
                hot_list.append(self._make_item(
                    idx, title, f"https://www.baidu.com/s?wd={quote(title)}",
                    'baidu', 'search', 51 - idx))
        except Exception as e:
            logger.warning(f"获取百度热搜失败: {e}")
        return hot_list

    # ========== Bilibili ranking ==========
    async def fetch_bilibili_hot(self) -> List[Dict[str, Any]]:
        """Fetch the Bilibili all-site ranking via tophub.today."""
        hot_list = []
        try:
            rows = await self._scrape_tophub_table("https://tophub.today/n/74VvMZeGmN", 30)
            for idx, (title, href) in enumerate(rows, 1):
                # tophub stores some links absolute and some site-relative.
                video_url = href if href.startswith('http') else f"https://www.bilibili.com{href}"
                hot_list.append(self._make_item(idx, title, video_url,
                                                'bilibili', 'video', 31 - idx))
        except Exception as e:
            logger.warning(f"获取B站热门失败: {e}")
        return hot_list

    # ========== Douyin hot board ==========
    async def fetch_douyin_hot(self) -> List[Dict[str, Any]]:
        """Fetch the Douyin hot-search board from its web API."""
        hot_list = []
        try:
            url = "https://www.douyin.com/aweme/v1/web/hot/search/list/"
            headers = {
                **self.headers,
                'Referer': 'https://www.douyin.com/',
                'Cookie': 'ttwid=1'  # minimal cookie the endpoint accepts
            }
            async with self.session.get(url, headers=headers) as response:
                if response.status == 200:
                    data = await response.json()
                    word_list = data.get('data', {}).get('word_list', [])
                    for idx, item in enumerate(word_list[:30], 1):
                        word = item.get('word', '')
                        # URL-encode the word so the search link stays valid.
                        hot_list.append(self._make_item(
                            idx, word, f"https://www.douyin.com/search/{quote(word)}",
                            'douyin', 'short_video', item.get('hot_value', 0)))
        except Exception as e:
            logger.warning(f"获取抖音热榜失败: {e}")
        return hot_list

    # ========== Xiaohongshu topics ==========
    async def fetch_xiaohongshu_hot(self) -> List[Dict[str, Any]]:
        """Fetch the Xiaohongshu topic board via tophub.today."""
        hot_list = []
        try:
            rows = await self._scrape_tophub_table("https://tophub.today/n/L4MdAV7j20", 30)
            for idx, (title, href) in enumerate(rows, 1):
                hot_list.append(self._make_item(idx, title, href,
                                                'xiaohongshu', 'social', 31 - idx))
        except Exception as e:
            logger.warning(f"获取小红书热门失败: {e}")
        return hot_list

    async def fetch_all_platforms(self) -> Dict[str, Any]:
        """Fetch every platform concurrently; return per-platform data + stats.

        Bug fix: the original awaited each coroutine sequentially despite the
        "concurrent" docstring. asyncio.gather now runs them in parallel;
        ``return_exceptions=True`` turns a failing platform into an empty
        list instead of aborting the whole batch.
        """
        fetchers = {
            'weibo': self.fetch_weibo_alternative(),
            'zhihu': self.fetch_zhihu_alternative(),
            'toutiao': self.fetch_toutiao_alternative(),
            'baidu': self.fetch_baidu_hot(),
            'bilibili': self.fetch_bilibili_hot(),
            'douyin': self.fetch_douyin_hot(),
            'xiaohongshu': self.fetch_xiaohongshu_hot()
        }
        for name in fetchers:
            logger.info(f"正在获取 {name} 数据...")
        outcomes = await asyncio.gather(*fetchers.values(), return_exceptions=True)

        results: Dict[str, List[Dict[str, Any]]] = {}
        for name, outcome in zip(fetchers, outcomes):
            if isinstance(outcome, Exception):
                logger.error(f"{name} 失败: {outcome}")
                results[name] = []
            else:
                results[name] = outcome
                logger.info(f"{name}: 成功获取 {len(outcome)} 条数据")

        # Summary statistics for the whole batch.
        total = sum(len(items) for items in results.values())
        successful = sum(1 for items in results.values() if items)

        return {
            'data': results,
            'stats': {
                'total_items': total,
                'successful_platforms': successful,
                'total_platforms': len(results),
                'fetch_time': self._now_iso()
            }
        }

    def save_to_json(self, data: Dict[str, Any], filename: str = "hot_topics.json"):
        """Write *data* to *filename* as pretty-printed UTF-8 JSON."""
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2, default=str)
        # Bug fix: the log previously printed a literal placeholder instead
        # of the actual destination filename.
        logger.info(f"数据已保存到 {filename}")


async def main():
    """Demo entry point: fetch all platforms, print a summary, save to JSON.

    Returns True when at least one hot-list item was collected.
    """
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    async with IntegratedHotService() as svc:
        print("=== 开始获取各平台热点数据 ===\n")

        outcome = await svc.fetch_all_platforms()

        print("\n=== 获取结果汇总 ===\n")

        for name, entries in outcome['data'].items():
            if not entries:
                print(f"{name.upper()}: 获取失败")
            else:
                print(f"{name.upper()}: {len(entries)} 条")
                # Preview the top three entries, truncating long titles.
                for entry in entries[:3]:
                    text = entry['title']
                    if len(text) > 50:
                        text = text[:50] + '...'
                    print(f"  [{entry['rank']}] {text}")
            print()

        summary = outcome['stats']
        print("=== 统计 ===")
        print(f"成功平台: {summary['successful_platforms']}/{summary['total_platforms']}")
        print(f"总数据量: {summary['total_items']} 条")

        # Persist the full result set alongside the console summary.
        svc.save_to_json(outcome, "hot_topics_integrated.json")

        return summary['total_items'] > 0


if __name__ == "__main__":
    # asyncio is already imported at module top; the redundant re-import was
    # removed. SystemExit is raised instead of calling the site-provided
    # exit() helper, which is not guaranteed to exist (e.g. under python -S).
    got_data = asyncio.run(main())
    raise SystemExit(0 if got_data else 1)