"""微博热搜爬虫"""
import asyncio
import json
import logging
import os
import re
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from urllib.parse import quote, urljoin

from .base_spider import BaseSpider
from ..utils.anti_crawler import anti_crawler_strategy, weibo_cookie_manager

logger = logging.getLogger(__name__)


class WeiboHotSpider(BaseSpider):
    """Weibo hot-search spider.

    Crawls the Weibo hot-search Top 50, including detail pages and hot
    comments. Supports two modes: official API and web scraping.
    """

    def __init__(self, cookie_pool: Optional[List[str]] = None, use_api: bool = False):
        """Initialize the Weibo spider.

        Args:
            cookie_pool: Optional pool of cookie strings to rotate through.
            use_api: Whether to use API mode (requires an API key to be
                configured via the WEIBO_API_KEY environment variable).
        """
        super().__init__(name="WeiboHotSpider")

        # API-mode configuration.
        self.use_api = use_api
        self.api_key = os.getenv('WEIBO_API_KEY')
        self.api_base = "https://api.weibo.com/2"

        # Weibo hot-search related URLs.
        self.hot_search_url = "https://s.weibo.com/top/summary"
        self.detail_url_template = "https://s.weibo.com/weibo?q={keyword}&topic_ad="
        self.comment_api_template = "https://weibo.com/ajax/statuses/buildComments?is_reload=1&id={mid}&is_show_bulletin=2&is_mix=0&count=20"

        # Round-robin state for the local cookie pool.
        # FIX: these attributes were previously never initialized, so
        # _get_next_cookie() raised AttributeError whenever it was called.
        self.cookie_pool: List[str] = list(cookie_pool) if cookie_pool else []
        self.current_cookie_index: int = 0

        # Register supplied cookies with the shared cookie manager as well.
        if cookie_pool:
            for cookie in cookie_pool:
                weibo_cookie_manager.add_cookie(cookie)

        # Best-effort: load additional cookies from the config file.
        cookie_file = os.path.join(os.path.dirname(__file__), '..', '..', 'config', 'weibo_cookies.txt')
        weibo_cookie_manager.load_from_file(cookie_file)

        # Baseline request headers produced by the anti-crawler strategy.
        self.headers = anti_crawler_strategy.get_headers_with_referer(self.hot_search_url)

    @staticmethod
    def _utc_iso() -> str:
        """Return the current UTC time as an ISO-8601 string with a 'Z' suffix.

        Uses timezone-aware ``datetime.now(timezone.utc)`` instead of the
        deprecated ``datetime.utcnow()``; the rendered text format is
        identical to the previous ``utcnow().isoformat() + 'Z'``.
        """
        return datetime.now(timezone.utc).replace(tzinfo=None).isoformat() + 'Z'

    def _get_next_cookie(self) -> Optional[str]:
        """Return the next cookie from the local pool, rotating round-robin.

        Returns:
            A cookie string, or None when the pool is empty.
        """
        if not self.cookie_pool:
            return None

        cookie = self.cookie_pool[self.current_cookie_index]
        self.current_cookie_index = (self.current_cookie_index + 1) % len(self.cookie_pool)
        return cookie

    async def fetch(self, url: str, **kwargs) -> Optional[str]:
        """Override fetch to add cookie and anti-crawler support.

        Args:
            url: Target URL.
            **kwargs: Extra request parameters forwarded to the base fetch.

        Returns:
            Page content, or None on failure.
        """
        # Honor the per-domain request-rate budget; back off when exhausted.
        if not anti_crawler_strategy.check_rate_limit('s.weibo.com', max_requests_per_minute=20):
            logger.warning("Rate limit reached, waiting...")
            await asyncio.sleep(60)  # wait one minute

        # Apply a random 1-3 second delay between requests.
        await anti_crawler_strategy.apply_delay(1, 3)

        # Rotate through the shared cookie manager (not the local pool).
        cookie = weibo_cookie_manager.get_next_cookie()

        # Make sure a headers dict exists before mutating it.
        if 'headers' not in kwargs:
            kwargs['headers'] = {}

        # Randomize the User-Agent on every request.
        kwargs['headers']['User-Agent'] = anti_crawler_strategy.get_random_user_agent()

        if cookie:
            kwargs['headers']['Cookie'] = cookie
        else:
            logger.warning("No cookie available for Weibo")

        # Optionally route through a proxy supplied by the strategy.
        proxy = anti_crawler_strategy.get_proxy()
        if proxy:
            kwargs['proxy'] = proxy

        return await super().fetch(url, **kwargs)

    async def parse(self, html: str, url: str = None) -> Any:
        """Dispatch parsing based on the page URL.

        Args:
            html: HTML content.
            url: Page URL used to pick the parser.

        Returns:
            A list of hot-search dicts for the summary page, a single detail
            dict for a detail page, or an empty list for unknown URLs.
            (Annotated ``Any`` because the detail branch returns a dict,
            not a list — the previous ``List[...]`` annotation was wrong.)
        """
        if url and "top/summary" in url:
            return await self.parse_hot_search_list(html)
        elif url and "weibo?" in url:
            return await self.parse_detail_page(html, url)
        else:
            logger.warning(f"未知的URL类型: {url}")
            return []

    async def parse_hot_search_list(self, html: str) -> List[Dict[str, Any]]:
        """Parse the hot-search ranking page.

        Args:
            html: Hot-search page HTML.

        Returns:
            List of hot-search item dicts.
        """
        hot_list = []

        # Sanity check: did we get a real hot-search page, or a login wall?
        if '热搜' not in html and 'realtimehot' not in html:
            logger.warning("页面可能不是热搜页面或需要登录")
            # Fall back to a structural (BeautifulSoup) parse.
            from bs4 import BeautifulSoup
            soup = BeautifulSoup(html, 'html.parser')

            # The ranking is rendered as a table; find its body.
            tbody = soup.find('tbody')
            if tbody:
                trs = tbody.find_all('tr')
                for idx, tr in enumerate(trs[1:51], 1):  # skip header row, take top 50
                    try:
                        # Title link.
                        a_tag = tr.find('a', target='_blank')
                        if not a_tag:
                            continue

                        title = a_tag.get_text(strip=True)
                        href = a_tag.get('href', '')

                        # Heat value (digits only; missing span means 0).
                        span_tag = tr.find('span')
                        heat_value = span_tag.get_text(strip=True) if span_tag else '0'
                        heat_num = int(re.sub(r'[^\d]', '', heat_value)) if heat_value else 0

                        hot_item = {
                            'rank': idx,
                            'title': title,
                            'url': urljoin("https://s.weibo.com", href),
                            'heat_value': heat_num,
                            'fetch_time': self._utc_iso(),
                            'source': 'weibo',
                            'platform': 'weibo'
                        }
                        hot_list.append(hot_item)
                    except Exception as e:
                        logger.debug(f"解析热搜项失败: {e}")
                        continue

            if hot_list:
                logger.info(f"使用BeautifulSoup解析出 {len(hot_list)} 条热搜")
                return hot_list

        # Regex extraction (Weibo's HTML structure changes frequently).
        # Matches rank, link, title, and heat-value cells.
        pattern = r'<td class="td-01.*?">.*?(\d+).*?</td>.*?<td class="td-02.*?">.*?<a href="(.*?)".*?>(.*?)</a>.*?<span>(.*?)</span>'
        matches = re.findall(pattern, html, re.DOTALL)

        for rank, href, title, heat_value in matches[:50]:  # Top 50 only
            try:
                # Strip any residual tags from the title.
                title = re.sub(r'<.*?>', '', title).strip()

                # Parse the numeric heat value.
                heat_str = re.sub(r'[^\d]', '', heat_value)
                heat_num = int(heat_str) if heat_str else 0

                # Ad heuristic: the "荐" marker appears shortly before the
                # link in the raw HTML. FIX: guard against href not being
                # found — str.find() returning -1 previously produced a
                # meaningless slice of the end of the document.
                href_pos = html.find(href)
                is_ad = href_pos != -1 and '荐' in html[max(0, href_pos - 100):href_pos]
                is_pinned = '置顶' in title or rank == '•'

                # Build the detail-page URL from the (quoted) title.
                keyword = quote(title)
                detail_url = self.detail_url_template.format(keyword=keyword)

                hot_item = {
                    'rank': int(rank) if rank.isdigit() else 0,
                    'title': title,
                    'url': urljoin("https://s.weibo.com", href),
                    'detail_url': detail_url,
                    'heat_value': heat_num,
                    'is_ad': is_ad,
                    'is_pinned': is_pinned,
                    'fetch_time': self._utc_iso(),
                    'source': 'weibo',
                    'platform': 'weibo'
                }

                hot_list.append(hot_item)

            except Exception as e:
                logger.error(f"解析热搜项失败: {e}")
                continue

        logger.info(f"解析出 {len(hot_list)} 条热搜")
        return hot_list

    async def parse_detail_page(self, html: str, url: str) -> Dict[str, Any]:
        """Parse a hot-search detail page.

        Args:
            html: Detail-page HTML.
            url: Detail-page URL.

        Returns:
            Detail data dict (fields default to empty/zero on parse failure).
        """
        detail_data = {
            'url': url,
            'description': '',
            'discussion_count': 0,
            'read_count': 0,
            'category': '',
            'tags': [],
            'parse_time': self._utc_iso()
        }

        try:
            # Topic description from the meta tag.
            desc_pattern = r'<meta name="description" content="(.*?)"'
            desc_match = re.search(desc_pattern, html)
            if desc_match:
                detail_data['description'] = desc_match.group(1)

            # Discussion and read counts (may carry 万/亿 suffixes).
            stats_pattern = r'<span>讨论<em>(\d+\.?\d*[万亿]?)</em></span>.*?<span>阅读<em>(\d+\.?\d*[万亿]?)</em></span>'
            stats_match = re.search(stats_pattern, html, re.DOTALL)
            if stats_match:
                detail_data['discussion_count'] = self._parse_number(stats_match.group(1))
                detail_data['read_count'] = self._parse_number(stats_match.group(2))

            # Up to five category tags, stripped of markup.
            tag_pattern = r'<a class="tag".*?>(.*?)</a>'
            tags = re.findall(tag_pattern, html)
            detail_data['tags'] = [re.sub(r'<.*?>', '', tag).strip() for tag in tags[:5]]

            # Keyword-based category guess from the description.
            if any(tag in detail_data['description'] for tag in ['娱乐', '明星', '电影', '音乐']):
                detail_data['category'] = '娱乐'
            elif any(tag in detail_data['description'] for tag in ['科技', '手机', '互联网', 'AI']):
                detail_data['category'] = '科技'
            elif any(tag in detail_data['description'] for tag in ['体育', '足球', '篮球', '奥运']):
                detail_data['category'] = '体育'
            else:
                detail_data['category'] = '社会'

        except Exception as e:
            logger.error(f"解析详情页失败: {e}")

        return detail_data

    async def fetch_comments(self, mid: str) -> List[Dict[str, Any]]:
        """Fetch hot comments for a post.

        Args:
            mid: Weibo post ID.

        Returns:
            List of comment dicts (empty on any failure).
        """
        comments = []
        url = self.comment_api_template.format(mid=mid)

        try:
            response_text = await self.fetch(url)
            if not response_text:
                return comments

            data = json.loads(response_text)

            # NOTE(review): assumes the API's 'data' field is a list of
            # comment objects — confirm against the live response schema.
            for comment in data.get('data', [])[:20]:  # first 20 only
                comment_item = {
                    'author': comment.get('user', {}).get('screen_name', ''),
                    'content': comment.get('text_raw', ''),
                    'likes': comment.get('like_counts', 0),
                    'time': comment.get('created_at', ''),
                    'comment_id': comment.get('id', '')
                }
                comments.append(comment_item)

        except Exception as e:
            logger.error(f"获取评论失败: {e}")

        return comments

    def _parse_number(self, text: str) -> int:
        """Parse a Chinese-suffixed number string.

        Args:
            text: Text containing a number, e.g. "1.2万".

        Returns:
            Integer value (万 = 1e4, 亿 = 1e8); 0 on any parse failure.
        """
        try:
            if '亿' in text:
                return int(float(text.replace('亿', '')) * 100000000)
            elif '万' in text:
                return int(float(text.replace('万', '')) * 10000)
            else:
                return int(float(text))
        except (ValueError, TypeError, AttributeError) as e:
            logger.debug(f"解析数字失败: {text}, 错误: {e}")
            return 0

    async def crawl_hot_search(self) -> List[Dict[str, Any]]:
        """Crawl the full hot-search data set.

        Returns:
            Hot-search items, the first few enriched with detail content.
        """
        # Fetch the ranking page.
        hot_list = await self.crawl(self.hot_search_url)

        if not hot_list:
            logger.error("获取热搜榜失败")
            return []

        logger.info(f"获取到 {len(hot_list)} 条热搜")

        # Fetch per-item details with bounded concurrency.
        semaphore = asyncio.Semaphore(5)  # at most 5 concurrent requests

        async def fetch_detail_with_limit(item):
            async with semaphore:
                detail_html = await self.fetch(item['detail_url'])
                if detail_html:
                    detail_data = await self.parse_detail_page(detail_html, item['detail_url'])
                    item['content'] = {
                        'description': detail_data['description'],
                        'discussion_count': detail_data['discussion_count'],
                        'read_count': detail_data['read_count'],
                        'metadata': {
                            'category': detail_data['category'],
                            'tags': detail_data['tags'],
                            'is_ad': item.get('is_ad', False),
                            'is_pinned': item.get('is_pinned', False)
                        }
                    }
                    # Stable hash for deduplication downstream.
                    item['url_hash'] = self.generate_hash(item['url'])

                await asyncio.sleep(3)  # 3-second gap between requests

        # Details for the first 10 items only (politeness / latency budget).
        tasks = [fetch_detail_with_limit(item) for item in hot_list[:10]]
        await asyncio.gather(*tasks, return_exceptions=True)

        return hot_list

    async def crawl_hot(self) -> List[Dict[str, Any]]:
        """Unified interface: return the hot-search data."""
        return await self.crawl_hot_search()


async def main():
    """Manual smoke test: crawl the hot list and print a short sample.

    Returns:
        True when at least one hot-search item was fetched.
    """
    # `logging` is already imported at module level; the local re-import
    # that used to be here was redundant.
    logging.basicConfig(level=logging.INFO)

    async with WeiboHotSpider() as spider:
        # Fetch the hot-search list (details for the first items included).
        hot_items = await spider.crawl_hot_search()

        if hot_items:
            print("\n=== 微博热搜测试结果 ===")
            print(f"获取到 {len(hot_items)} 条热搜")

            # Show the top five entries.
            for item in hot_items[:5]:
                print(f"{item.get('rank', 0)}. {item.get('title', 'N/A')} - 热度: {item.get('heat_value', 'N/A')}")
        else:
            print("未能获取热搜数据")

        return len(hot_items) > 0


if __name__ == "__main__":
    # asyncio is already imported at module level; the re-import was redundant.
    result = asyncio.run(main())
    # raise SystemExit instead of the site-injected exit() helper, which is
    # intended for interactive use and is not guaranteed in all run modes.
    raise SystemExit(0 if result else 1)