"""百度热搜爬虫实现"""
import asyncio
import hashlib
import json
import logging
import random
import re
from datetime import datetime, timedelta
from typing import Dict, Any, List, Optional, Tuple
from urllib.parse import quote, unquote, urlparse

import aiohttp
from bs4 import BeautifulSoup

from .base_spider import BaseSpider

logger = logging.getLogger(__name__)


class BaiduHotSpider(BaseSpider):
    """Spider for Baidu hot-search boards (top.baidu.com).

    Crawls board pages, normalizes entries into Mongo-ready documents,
    optionally enriches the hottest entries with related searches and
    search-result snippets, and tracks per-topic rank history.
    """
    
    # Hot-search board endpoints, keyed by board type.
    BAIDU_HOT_BOARDS = {
        "realtime": "https://top.baidu.com/board?tab=realtime",  # realtime hot topics
        "homepage": "https://top.baidu.com/board?tab=homepage",  # main hot-search board
        "novel": "https://top.baidu.com/board?tab=novel",       # novels
        "movie": "https://top.baidu.com/board?tab=movie",       # movies
        "teleplay": "https://top.baidu.com/board?tab=teleplay", # TV series
        "car": "https://top.baidu.com/board?tab=car",           # cars
        "game": "https://top.baidu.com/board?tab=game"          # games
    }
    
    # Web-search endpoint used for enrichment queries.
    BAIDU_SEARCH_URL = "https://www.baidu.com/s"
    
    # User-Agent pool, rotated per request to reduce fingerprinting.
    USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/121.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.1 Safari/605.1.15',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) Edge/120.0.0.0',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:121.0) Gecko/20100101 Firefox/121.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:121.0) Gecko/20100101 Firefox/121.0'
    ]
    
    # Keyword -> category mapping used for automatic title categorization.
    # (Keys/keywords are runtime data and intentionally stay in Chinese.)
    CATEGORY_KEYWORDS = {
        "娱乐": ["明星", "演员", "歌手", "电影", "电视剧", "综艺", "音乐", "演唱会", "偶像", "粉丝"],
        "社会": ["事件", "案件", "警方", "调查", "受害", "纠纷", "社会", "民生", "公益", "救援"],
        "科技": ["科技", "互联网", "AI", "手机", "电脑", "软件", "应用", "数码", "技术", "创新"],
        "体育": ["体育", "足球", "篮球", "比赛", "冠军", "球队", "运动员", "奥运", "世界杯", "联赛"],
        "财经": ["股票", "基金", "投资", "经济", "金融", "银行", "房价", "公司", "上市", "财报"],
        "国际": ["美国", "俄罗斯", "日本", "韩国", "欧洲", "国际", "外交", "战争", "联合国", "全球"],
        "教育": ["高考", "中考", "大学", "学校", "教育", "学生", "老师", "考试", "招生", "培训"],
        "健康": ["疫情", "病毒", "医院", "医生", "健康", "疾病", "治疗", "疫苗", "防控", "症状"],
        "汽车": ["汽车", "车型", "新能源", "特斯拉", "比亚迪", "驾驶", "车展", "油价", "充电", "自动驾驶"],
        "游戏": ["游戏", "王者", "原神", "英雄联盟", "和平精英", "Steam", "主机", "手游", "电竞", "玩家"]
    }
    
    # Throttling configuration.
    RATE_LIMIT = 2000  # minimum gap between requests, in ms (Baidu throttles aggressively)
    MAX_CONCURRENT_REQUESTS = 3  # cap on simultaneous in-flight requests
    UPDATE_INTERVAL = 900  # minimum seconds between full crawls (15 minutes)
    
    def __init__(self, mongodb_client=None, redis_client=None):
        """Set up spider state.

        Args:
            mongodb_client: optional MongoDB client used for persistence.
            redis_client: optional Redis client used for the update-interval
                check; without it every crawl request proceeds.
        """
        super().__init__(name="BaiduHotSpider")
        # Optional backing stores; features degrade gracefully without them.
        self.mongodb_client = mongodb_client
        self.redis_client = redis_client
        # Event-loop timestamp of the most recent outbound request.
        self.last_request_time = 0
        # Bounds the number of simultaneous in-flight HTTP requests.
        self.request_semaphore = asyncio.Semaphore(self.MAX_CONCURRENT_REQUESTS)
        
    def get_random_user_agent(self) -> str:
        """获取随机User-Agent"""
        return random.choice(self.USER_AGENTS)
    
    async def fetch_with_rate_limit(self, url: str, **kwargs) -> Optional[str]:
        """Rate-limited, UA-rotating wrapper around ``self.fetch``.

        Enforces a minimum RATE_LIMIT-millisecond gap (plus 0-1s random
        jitter) between outbound requests, and caps concurrency via the
        shared semaphore.

        Args:
            url: target URL.
            **kwargs: forwarded verbatim to ``self.fetch``.

        Returns:
            Response body, or None on failure (as returned by the base
            ``fetch`` — assumed from BaseSpider; TODO confirm contract).
        """
        async with self.request_semaphore:
            # get_running_loop() is the modern replacement for
            # get_event_loop() inside a coroutine (deprecated pattern
            # since Python 3.10); hoist the handle for both time() reads.
            loop = asyncio.get_running_loop()
            elapsed_ms = (loop.time() - self.last_request_time) * 1000

            if elapsed_ms < self.RATE_LIMIT:
                # Wait out the remaining window plus jitter to look less bot-like.
                wait_time = (self.RATE_LIMIT - elapsed_ms) / 1000 + random.uniform(0, 1)
                await asyncio.sleep(wait_time)

            # Rotate the UA on every request.
            self.headers['User-Agent'] = self.get_random_user_agent()
            self.last_request_time = loop.time()

            return await self.fetch(url, **kwargs)
    
    def parse_hot_value(self, hot_text: str) -> Tuple[int, str]:
        """解析热度值
        
        Args:
            hot_text: 热度文本，如"4835万"
            
        Returns:
            (数值, 显示文本)
        """
        if not hot_text:
            return 0, ""
        
        # 去除空白字符
        hot_text = hot_text.strip()
        
        # 提取数字和单位
        match = re.match(r'([\d.]+)([万亿]?)', hot_text)
        if not match:
            return 0, hot_text
        
        num_str, unit = match.groups()
        try:
            num = float(num_str)
            if unit == '万':
                num = int(num * 10000)
            elif unit == '亿':
                num = int(num * 100000000)
            else:
                num = int(num)
            return num, hot_text
        except ValueError:
            return 0, hot_text
    
    def auto_categorize(self, title: str) -> List[str]:
        """基于关键词自动分类
        
        Args:
            title: 热搜标题
            
        Returns:
            分类列表
        """
        categories = []
        for category, keywords in self.CATEGORY_KEYWORDS.items():
            for keyword in keywords:
                if keyword in title:
                    categories.append(category)
                    break
        
        # 如果没有匹配的分类，返回"其他"
        if not categories:
            categories = ["其他"]
        
        return categories
    
    def calculate_trend(self, current_rank: int, history: List[Dict]) -> Tuple[str, float, bool]:
        """计算热度趋势
        
        Args:
            current_rank: 当前排名
            history: 历史记录
            
        Returns:
            (趋势类型, 变化百分比, 是否爆款)
        """
        if not history:
            return "new", 0.0, True  # 新上榜视为爆款
        
        # 获取最近一次的排名
        last_record = history[-1]
        last_rank = last_record.get('rank', 0)
        
        if last_rank == 0:
            return "new", 0.0, True
        
        # 计算排名变化
        rank_change = last_rank - current_rank
        change_percent = abs(rank_change / last_rank * 100)
        
        # 判断趋势
        if rank_change > 5:  # 上升超过5名
            trend = "rise"
            is_explosive = rank_change > 10  # 上升超过10名视为爆款
        elif rank_change < -5:  # 下降超过5名
            trend = "fall"
            is_explosive = False
        else:
            trend = "stable"
            is_explosive = False
        
        return trend, change_percent, is_explosive
    
    async def parse(self, html: str, url: str = None) -> List[Dict[str, Any]]:
        """Parse a Baidu hot-search board page into standardized items.

        Tries CSS selectors for the React-rendered DOM first; if the
        expected containers are missing, falls back to extracting the
        ``hotList`` JSON embedded in a <script> tag.

        Args:
            html: raw page HTML.
            url: page URL (accepted for interface symmetry; unused here).

        Returns:
            List of standardized hot-item dicts; empty list when nothing
            could be parsed.
        """
        soup = BeautifulSoup(html, 'html.parser')
        hot_list = []
        
        # Locate the hot-list container. Baidu renders the board with React,
        # so these are its generated, hash-suffixed class names.
        hot_items = soup.select('.category-wrap_iQLoo .content_1YWBm')
        
        if not hot_items:
            # Fall back to an alternative selector set.
            hot_items = soup.select('.theme-hot-list .list-item')
        
        if not hot_items:
            logger.warning("未找到热搜列表，页面结构可能已更改")
            # Last resort: pull the hotList JSON out of an inline script tag.
            scripts = soup.find_all('script')
            for script in scripts:
                if script.string and 'hotList' in script.string:
                    # Extract the JSON array (non-greedy; assumes no nested
                    # ']' tricks in Baidu's payload — TODO confirm).
                    match = re.search(r'hotList["\']:\s*(\[.*?\])', script.string, re.DOTALL)
                    if match:
                        try:
                            hot_data = json.loads(match.group(1))
                            return await self.parse_json_data(hot_data)
                        except json.JSONDecodeError:
                            continue
            return []
        
        # Parse each hot-search entry; index doubles as the rank.
        for idx, item in enumerate(hot_items, 1):
            try:
                # Title (entries without one are skipped).
                title_elem = item.select_one('.c-single-text-ellipsis, .title-text')
                if not title_elem:
                    continue
                title = title_elem.get_text(strip=True)
                
                # Link; fall back to a Baidu search URL for the title.
                link_elem = item.select_one('a')
                href = link_elem.get('href', '') if link_elem else ''
                if not href.startswith('http'):
                    href = f"https://www.baidu.com/s?wd={quote(title)}"
                
                # Heat value; when absent, synthesize a rank-descending score.
                hot_elem = item.select_one('.hot-index, .hot-score, .trend_2RttY')
                hot_value, hot_text = self.parse_hot_value(
                    hot_elem.get_text(strip=True) if hot_elem else str(1000000 - idx * 10000)
                )
                
                # Optional short description.
                desc_elem = item.select_one('.c-color-text, .desc_3CTjT')
                description = desc_elem.get_text(strip=True) if desc_elem else ""
                
                # Title hash, used for de-duplication across crawls.
                url_hash = self.generate_hash(title)
                
                # Keyword-based automatic categorization.
                categories = self.auto_categorize(title)
                
                # Assemble the standardized document.
                hot_item = {
                    "_id": url_hash,  # the hash doubles as the Mongo _id
                    "url_hash": url_hash,
                    "title": title,
                    "url": href,
                    "rank": idx,
                    "category": "|".join(categories),
                    "source": "baidu",
                    "content": {
                        "search_index": hot_value,
                        "search_index_text": hot_text or f"{hot_value:,}",
                        "trend": "stable",  # recomputed later against stored history
                        "trend_percent": 0.0,
                        "is_explosive": False,
                        "description": description,
                        "related_searches": [],  # filled by enrich_hot_item
                        "search_results": [],  # filled by enrich_hot_item
                        "history": []  # rank/score snapshots from prior crawls
                    },
                    "fetch_time": datetime.now(),
                    "update_time": datetime.now(),
                    "expire_at": datetime.now() + timedelta(days=30)
                }
                
                hot_list.append(hot_item)
                
            except Exception as e:
                logger.error(f"解析热搜项失败: {e}")
                continue
        
        return hot_list
    
    async def parse_json_data(self, hot_data: List[Dict]) -> List[Dict[str, Any]]:
        """Normalize hot-search entries extracted from embedded page JSON.

        Args:
            hot_data: raw list of dicts with keys like ``word`` /
                ``hotScore`` / ``desc`` / ``label`` (schema observed from
                Baidu pages — missing or None values are tolerated).

        Returns:
            Standardized hot items, same shape as produced by ``parse``.
        """
        hot_list = []

        for idx, item in enumerate(hot_data, 1):
            try:
                # 'word' may be absent or None; either way skip the entry.
                title = (item.get('word') or '').strip()
                if not title:
                    continue

                # Build a Baidu search URL for the keyword.
                url = f"https://www.baidu.com/s?wd={quote(title)}"

                # hotScore may arrive as "4835万", a bare number, or be
                # absent/None. Previously a None value raised TypeError in
                # the f-string below and the item was silently dropped.
                hot_value = item.get('hotScore', 0)
                if isinstance(hot_value, str):
                    hot_value, hot_text = self.parse_hot_value(hot_value)
                elif isinstance(hot_value, (int, float)):
                    hot_text = f"{hot_value:,}"
                else:
                    hot_value, hot_text = 0, "0"

                description = item.get('desc', '')

                # Title hash, used for de-duplication across crawls.
                url_hash = self.generate_hash(title)

                # Keyword-based automatic categorization.
                categories = self.auto_categorize(title)

                # Labels such as "爆" (explosive) / "新" (new) mark hot entries;
                # guard against a None label.
                label = item.get('label') or ''
                is_explosive = '爆' in label or '新' in label

                hot_item = {
                    "_id": url_hash,  # the hash doubles as the Mongo _id
                    "url_hash": url_hash,
                    "title": title,
                    "url": url,
                    "rank": idx,
                    "category": "|".join(categories),
                    "source": "baidu",
                    "content": {
                        "search_index": hot_value,
                        "search_index_text": hot_text,
                        "trend": "new" if is_explosive else "stable",
                        "trend_percent": 0.0,
                        "is_explosive": is_explosive,
                        "description": description,
                        "related_searches": [],  # filled by enrich_hot_item
                        "search_results": [],  # filled by enrich_hot_item
                        "history": []  # rank/score snapshots from prior crawls
                    },
                    "fetch_time": datetime.now(),
                    "update_time": datetime.now(),
                    "expire_at": datetime.now() + timedelta(days=30)
                }

                hot_list.append(hot_item)

            except Exception as e:
                logger.error(f"解析JSON数据项失败: {e}")
                continue

        return hot_list
    
    async def fetch_related_searches(self, keyword: str) -> List[str]:
        """Scrape Baidu's "related searches" box for a keyword.

        Args:
            keyword: the query term.

        Returns:
            Up to 10 related query strings (never the keyword itself);
            empty list on any failure.
        """
        try:
            page = await self.fetch_with_rate_limit(
                f"{self.BAIDU_SEARCH_URL}?wd={quote(keyword)}"
            )
            if not page:
                return []

            # Anchors inside the related-searches table at the page bottom.
            anchors = BeautifulSoup(page, 'html.parser').select('#rs table a, .rs-link')

            related = []
            for anchor in anchors[:10]:  # cap at 10 suggestions
                text = anchor.get_text(strip=True)
                if text and text != keyword:
                    related.append(text)
            return related

        except Exception as e:
            logger.error(f"获取相关搜索失败 {keyword}: {e}")
            return []
    
    async def fetch_search_results(self, keyword: str, limit: int = 3) -> List[Dict[str, str]]:
        """Scrape the first organic search-result snippets for a keyword.

        Args:
            keyword: the query term.
            limit: maximum number of result blocks to inspect.

        Returns:
            List of dicts with ``title`` / ``snippet`` / ``source`` /
            ``url`` keys; empty list on any failure.
        """
        try:
            page = await self.fetch_with_rate_limit(
                f"{self.BAIDU_SEARCH_URL}?wd={quote(keyword)}"
            )
            if not page:
                return []

            soup = BeautifulSoup(page, 'html.parser')
            collected: List[Dict[str, str]] = []

            for block in soup.select('#content_left .result, .c-container')[:limit]:
                try:
                    # Skip blocks flagged as ads via their CSS class list.
                    if '广告' in str(block.get('class', [])):
                        continue

                    # A result without a title link is not usable.
                    title_link = block.select_one('h3 a, .t a')
                    if title_link is None:
                        continue

                    snippet_node = block.select_one('.c-abstract, .c-span-last')
                    source_node = block.select_one('.c-showurl, .c-color-gray')

                    collected.append({
                        "title": title_link.get_text(strip=True),
                        # Truncate long abstracts to 200 chars.
                        "snippet": (snippet_node.get_text(strip=True) if snippet_node else "")[:200],
                        "source": source_node.get_text(strip=True) if source_node else "百度",
                        "url": title_link.get('href', ''),
                    })

                except Exception as e:
                    logger.debug(f"解析搜索结果项失败: {e}")
                    continue

            return collected

        except Exception as e:
            logger.error(f"获取搜索结果失败 {keyword}: {e}")
            return []
    
    async def enrich_hot_item(self, hot_item: Dict[str, Any]) -> Dict[str, Any]:
        """Enrich one hot item with related searches and result snippets.

        Args:
            hot_item: standardized hot item (mutated in place).

        Returns:
            The same item with ``content.related_searches`` and
            ``content.search_results`` populated.
        """
        title = hot_item['title']

        # The two lookups are independent, so issue them concurrently;
        # the shared semaphore inside fetch_with_rate_limit still caps
        # overall request concurrency and pacing.
        related_searches, search_results = await asyncio.gather(
            self.fetch_related_searches(title),
            self.fetch_search_results(title),
        )

        hot_item['content']['related_searches'] = related_searches
        hot_item['content']['search_results'] = search_results

        return hot_item
    
    async def crawl_hot_board(self, board_type: str = "realtime") -> List[Dict[str, Any]]:
        """Crawl one Baidu hot board and enrich its top entries.

        Args:
            board_type: key into BAIDU_HOT_BOARDS (e.g. "realtime").

        Returns:
            Parsed hot items — the first 10 enriched with related
            searches and result snippets — or an empty list on failure.
        """
        board_url = self.BAIDU_HOT_BOARDS.get(board_type)
        if board_url is None:
            logger.error(f"不支持的榜单类型: {board_type}")
            return []

        logger.info(f"开始爬取百度{board_type}榜: {board_url}")

        try:
            html = await self.fetch_with_rate_limit(board_url)
            if not html:
                logger.error(f"获取{board_type}榜失败")
                return []

            items = await self.parse(html, board_url)

            # Only the first 10 entries get the expensive per-item lookups;
            # the rest keep their basic fields.
            enriched = [await self.enrich_hot_item(item) for item in items[:10]]
            enriched += items[10:]

            logger.info(f"成功爬取{len(enriched)}条{board_type}热搜")
            return enriched

        except Exception as e:
            logger.error(f"爬取{board_type}榜失败: {e}")
            return []
    
    async def update_with_history(self, hot_item: Dict[str, Any]) -> Dict[str, Any]:
        """Merge a freshly crawled item with its stored record, then upsert.

        Appends the stored rank/score snapshot to the item's history,
        prunes snapshots older than 24 hours, recomputes the trend from
        what remains, preserves previously fetched enrichments when the
        new item lacks them, and writes the merged document back.

        Args:
            hot_item: newly crawled, standardized hot item (mutated in place).

        Returns:
            The (possibly enriched) hot item; returned unchanged when no
            MongoDB client is configured or when storage fails.
        """
        if not self.mongodb_client:
            return hot_item
        
        try:
            db = self.mongodb_client.aipaper
            collection = db.hot_topics
            
            # Look up the stored record for this topic.
            existing = await collection.find_one({"url_hash": hot_item["url_hash"]})
            
            if existing:
                # Prior rank/score snapshots.
                history = existing.get("content", {}).get("history", [])
                
                # Snapshot the stored state before it is overwritten.
                history.append({
                    "rank": existing.get("rank"),
                    "search_index": existing.get("content", {}).get("search_index", 0),
                    "fetch_time": existing.get("fetch_time")
                })
                
                # Keep only the last 24 hours of history.
                # NOTE(review): a snapshot whose fetch_time is None makes this
                # comparison raise TypeError; the outer except absorbs it and
                # the whole merge is skipped — confirm fetch_time is always set.
                cutoff_time = datetime.now() - timedelta(hours=24)
                history = [h for h in history if h.get("fetch_time", datetime.min) > cutoff_time]
                
                # Recompute the trend from the pruned history.
                trend, trend_percent, is_explosive = self.calculate_trend(
                    hot_item["rank"], 
                    history
                )
                
                # Fold the derived values back into the new item.
                hot_item["content"]["history"] = history[-50:]  # cap at 50 snapshots
                hot_item["content"]["trend"] = trend
                hot_item["content"]["trend_percent"] = trend_percent
                hot_item["content"]["is_explosive"] = is_explosive
                
                # Keep previously fetched enrichments if the new item has none.
                if not hot_item["content"]["related_searches"] and existing.get("content"):
                    hot_item["content"]["related_searches"] = existing["content"].get("related_searches", [])
                if not hot_item["content"]["search_results"] and existing.get("content"):
                    hot_item["content"]["search_results"] = existing["content"].get("search_results", [])
            
            # Upsert the merged document.
            await collection.replace_one(
                {"url_hash": hot_item["url_hash"]},
                hot_item,
                upsert=True
            )
            
        except Exception as e:
            logger.error(f"更新历史记录失败: {e}")
        
        return hot_item
    
    async def check_update_interval(self) -> bool:
        """检查是否需要更新
        
        Returns:
            True表示需要更新，False表示跳过
        """
        if not self.redis_client:
            return True
        
        try:
            last_update_key = "baidu_hot_spider:last_update"
            last_update = await self.redis_client.get(last_update_key)
            
            if last_update:
                last_time = datetime.fromisoformat(last_update.decode())
                if datetime.now() - last_time < timedelta(seconds=self.UPDATE_INTERVAL):
                    logger.info("距离上次更新未满15分钟，跳过本次爬取")
                    return False
            
            # 更新最后爬取时间
            await self.redis_client.set(
                last_update_key, 
                datetime.now().isoformat(),
                ex=self.UPDATE_INTERVAL
            )
            return True
            
        except Exception as e:
            logger.error(f"检查更新间隔失败: {e}")
            return True
    
    async def crawl_all_boards(self) -> Dict[str, List[Dict[str, Any]]]:
        """Crawl the main boards (realtime + homepage) with history merge.

        Returns:
            Mapping of board name to its updated item list; empty dict
            when the minimum update interval has not elapsed yet.
        """
        # Respect the Redis-backed update interval.
        if not await self.check_update_interval():
            return {}

        results: Dict[str, List[Dict[str, Any]]] = {}

        # Only the two primary boards are crawled on the regular cycle.
        for board in ("realtime", "homepage"):
            try:
                fresh = await self.crawl_hot_board(board)

                # Merge each item with its stored history before reporting.
                results[board] = [
                    await self.update_with_history(item) for item in fresh
                ]

                # Randomized pause between boards to stay polite.
                await asyncio.sleep(random.uniform(2, 3))

            except Exception as e:
                logger.error(f"爬取{board}榜失败: {e}")
                results[board] = []

        return results
    
    async def store(self, data: List[Dict[str, Any]]) -> bool:
        """Persist hot items to MongoDB, upserting by ``url_hash``.

        Args:
            data: standardized hot items.

        Returns:
            True when at least one document was written; False when no
            client is configured, *data* is empty, or storage failed.
        """
        if not self.mongodb_client:
            logger.warning("MongoDB客户端未初始化")
            return False

        if not data:
            return False

        try:
            db = self.mongodb_client.aipaper
            collection = db.hot_topics

            # BUGFIX: the previous implementation passed raw dicts like
            # {"replaceOne": {...}} to bulk_write, but PyMongo/Motor's
            # bulk_write only accepts operation objects (pymongo.ReplaceOne
            # etc.) and raises TypeError on dicts. Upsert per item with
            # replace_one instead — same semantics, no new import.
            upserted = 0
            modified = 0
            for item in data:
                result = await collection.replace_one(
                    {"url_hash": item["url_hash"]},
                    item,
                    upsert=True
                )
                if result.upserted_id is not None:
                    upserted += 1
                else:
                    modified += result.modified_count

            logger.info(f"存储成功: 插入{upserted}条，更新{modified}条")
            return True

        except Exception as e:
            logger.error(f"存储数据失败: {e}")
            return False


async def main():
    """Smoke-test entry point: crawl the realtime board and print a sample.

    Returns:
        True when at least one hot item was fetched.
    """
    logging.basicConfig(level=logging.INFO)

    async with BaiduHotSpider() as spider:
        # Fetch the realtime hot-topic board.
        hot_items = await spider.crawl_hot_board('realtime')

        if hot_items:
            print("\n=== 百度热搜测试结果 ===")
            print(f"获取到 {len(hot_items)} 条热搜")

            # Show the top 5 entries.
            for item in hot_items[:5]:
                # BUGFIX: items never carry a 'heat_value' key — the heat
                # text lives under content.search_index_text, so the old
                # lookup always printed "N/A".
                heat = item.get('content', {}).get('search_index_text', 'N/A')
                print(f"{item.get('rank', 0)}. {item.get('title', 'N/A')} - 热度: {heat}")
        else:
            print("未能获取热搜数据")

        return len(hot_items) > 0
        # (An unreachable, misplaced nested `async def crawl_hot(self)` that
        # followed this return was removed — it could never be called.)

if __name__ == "__main__":
    success = asyncio.run(main())
    # exit() is injected by the `site` module and not guaranteed in all
    # non-interactive runs; raise SystemExit directly instead.
    raise SystemExit(0 if success else 1)