"""知乎热榜爬虫实现"""
import asyncio
import hashlib
import json
import logging
import re
from datetime import datetime, timedelta
from typing import Dict, Any, List, Optional
from urllib.parse import urljoin, urlparse, parse_qs

import aiohttp
from bs4 import BeautifulSoup

from .base_spider import BaseSpider

logger = logging.getLogger(__name__)


class ZhihuHotSpider(BaseSpider):
    """Spider for the Zhihu hot list (知乎热榜).

    Fetches the hot-question billboard (no login required) with an API
    fallback, optionally enriches questions with details/answers/comments,
    and persists results to MongoDB with a TTL for expiry.
    """

    # API endpoint configuration
    ZHIHU_HOT_LIST_API = "https://www.zhihu.com/api/v3/feed/topstory/hot-lists/total"
    ZHIHU_RISING_API = "https://www.zhihu.com/api/v3/feed/topstory/hot-lists/rising"
    ZHIHU_BILLBOARD_API = "https://www.zhihu.com/billboard"  # billboard page (no login required)
    ZHIHU_QUESTION_API = "https://www.zhihu.com/api/v4/questions/{question_id}"
    ZHIHU_ANSWERS_API = "https://www.zhihu.com/api/v4/questions/{question_id}/answers"
    ZHIHU_COMMENTS_API = "https://www.zhihu.com/api/v4/answers/{answer_id}/root_comments"

    # Rate-limit configuration (milliseconds between requests)
    RATE_LIMIT = 2000  # 2-second interval
    MAX_CONCURRENT_REQUESTS = 3

    def __init__(self, mongodb_client=None, redis_client=None):
        """Initialize the Zhihu spider.

        Args:
            mongodb_client: MongoDB client (pymongo-style, synchronous API).
            redis_client: Redis client (reserved for rate limiting; currently unused here).
        """
        super().__init__(name="ZhihuHotSpider")
        self.mongodb_client = mongodb_client
        self.redis_client = redis_client
        self.last_request_time = 0
        self.request_semaphore = asyncio.Semaphore(self.MAX_CONCURRENT_REQUESTS)

        # Augment default headers to mimic a real browser session.
        # NOTE(review): the d_c0 cookie and X-Zse-* values are hard-coded and
        # will likely go stale — confirm they are refreshed elsewhere or rotate them.
        self.headers.update({
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Referer': 'https://www.zhihu.com/hot',
            'Origin': 'https://www.zhihu.com',
            'X-Requested-With': 'XMLHttpRequest',
            'X-Zse-93': '101_3_3.0',
            'X-Zse-96': '2.0',
            'Cookie': 'd_c0=AKCTdX_B-xiPTpx_nYsz0X6H3d7oPKv9Gj0=|1700000000;'
        })

    async def fetch_with_rate_limit(self, url: str, **kwargs) -> Optional[str]:
        """Fetch a URL while enforcing concurrency and request-spacing limits.

        Args:
            url: Target URL.
            **kwargs: Extra keyword arguments forwarded to ``self.fetch``.

        Returns:
            Response body as text, or None on failure.
        """
        async with self.request_semaphore:
            # Enforce a minimum interval between consecutive requests.
            # NOTE(review): last_request_time is shared and not lock-protected,
            # so with concurrency > 1 the spacing is best-effort, not exact.
            current_time = asyncio.get_running_loop().time()
            time_since_last = (current_time - self.last_request_time) * 1000
            if time_since_last < self.RATE_LIMIT:
                await asyncio.sleep((self.RATE_LIMIT - time_since_last) / 1000)

            self.last_request_time = asyncio.get_running_loop().time()
            return await self.fetch(url, **kwargs)

    async def fetch_hot_list_from_billboard(self) -> List[Dict[str, Any]]:
        """Fetch the hot list from the billboard page (no login required).

        Returns:
            List of hot-question dicts (possibly empty on failure).
        """
        url = self.ZHIHU_BILLBOARD_API

        try:
            # Use plain browser-like headers (the API-style headers can
            # trigger anti-bot checks on the HTML page).
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
                'Accept-Encoding': 'gzip, deflate, br'
            }

            response_text = await self.fetch(url, headers=headers)
            if not response_text:
                logger.error("无法获取Billboard页面")
                return []

            # Parse the HTML with BeautifulSoup.
            soup = BeautifulSoup(response_text, 'html.parser')
            hot_list = []

            # The hot list is usually embedded as JSON inside <script type="text/json"> tags.
            scripts = soup.find_all('script', type='text/json')

            for script in scripts:
                try:
                    data = json.loads(script.string)
                    # Only descend into payloads that mention the hot list.
                    if 'hotList' in str(data):
                        hot_items = self._extract_hot_items_from_json(data)
                        hot_list.extend(hot_items)
                except (json.JSONDecodeError, TypeError):
                    # TypeError: script.string is None for empty tags;
                    # JSONDecodeError: non-JSON payloads. Skip either.
                    continue

            # Fall back to scraping the HTML structure if no JSON payload matched.
            if not hot_list:
                hot_list = self._parse_billboard_html(soup)

            logger.info(f"从Billboard获取热榜 {len(hot_list)} 条")
            return hot_list

        except Exception as e:
            logger.error(f"获取Billboard热榜失败: {e}")
            return []

    def _extract_hot_items_from_json(self, data: dict) -> List[Dict[str, Any]]:
        """Recursively extract hot-list items from a nested JSON payload.

        Args:
            data: Decoded JSON data (dict or list at any nesting level).

        Returns:
            List of parsed hot-list items found anywhere in the structure.
        """
        hot_items = []
        # Walk the structure depth-first, harvesting any list stored under
        # a likely hot-list key. Other keys may embed data[] too, so
        # duplicates are possible; the caller de-dupes by question_id.
        if isinstance(data, dict):
            for key, value in data.items():
                if key in ['hotList', 'data', 'list']:
                    if isinstance(value, list):
                        for idx, item in enumerate(value):
                            if isinstance(item, dict):
                                hot_item = self._parse_hot_item(item, idx + 1)
                                if hot_item:
                                    hot_items.append(hot_item)
                elif isinstance(value, (dict, list)):
                    hot_items.extend(self._extract_hot_items_from_json(value))
        elif isinstance(data, list):
            for item in data:
                hot_items.extend(self._extract_hot_items_from_json(item))
        return hot_items

    def _parse_hot_item(self, item: dict, rank: int) -> Optional[Dict[str, Any]]:
        """Parse a single hot-list entry.

        Args:
            item: Raw hot-list entry (one of several known shapes).
            rank: 1-based rank on the board.

        Returns:
            Normalized hot-item dict, or None if the entry lacks id/title.
        """
        try:
            # Probe the alternative field layouts Zhihu has used.
            question_id = item.get('id') or item.get('questionId') or item.get('target', {}).get('id')
            title = item.get('title') or item.get('question', {}).get('title') or item.get('target', {}).get('title')

            if not (question_id and title):
                return None

            return {
                'question_id': str(question_id),
                'title': title,
                'rank': rank,
                'heat_value': item.get('hotScore', '') or item.get('score', '') or item.get('heat', ''),
                'url': f"https://www.zhihu.com/question/{question_id}",
                'excerpt': item.get('excerpt', '') or item.get('description', ''),
                'updated_time': datetime.now()
            }
        except (AttributeError, TypeError):
            # Raised when 'target'/'question' is not a dict in this payload shape.
            return None

    def _parse_billboard_html(self, soup: BeautifulSoup) -> List[Dict[str, Any]]:
        """Scrape hot-list items from the billboard HTML markup.

        Args:
            soup: Parsed billboard page.

        Returns:
            List of hot-item dicts in document order.
        """
        hot_list = []

        # Locate hot-item containers by their known CSS classes.
        hot_containers = soup.find_all('div', class_=['HotList-list', 'HotItem'])

        for idx, container in enumerate(hot_containers):
            # Each item should carry a link to its question.
            link = container.find('a', href=True)
            if not link:
                continue

            # Extract the numeric question id from the href.
            href = link.get('href', '')
            question_id_match = re.search(r'/question/(\d+)', href)
            if not question_id_match:
                continue

            question_id = question_id_match.group(1)

            # Title lives in a heading when present, otherwise use the link text.
            title_elem = container.find(['h2', 'h3']) or link
            title = title_elem.get_text(strip=True)

            # Heat/score metric, if rendered.
            heat_elem = container.find(['span', 'div'], class_=['HotItem-metrics', 'HotItem-score'])
            heat_value = heat_elem.get_text(strip=True) if heat_elem else ''

            hot_item = {
                'question_id': question_id,
                'title': title,
                'rank': idx + 1,
                'heat_value': heat_value,
                'url': f"https://www.zhihu.com/question/{question_id}",
                'updated_time': datetime.now()
            }

            hot_list.append(hot_item)

        return hot_list

    async def fetch_hot_list(self, list_type: str = 'total') -> List[Dict[str, Any]]:
        """Fetch the hot list via the logged-in API.

        Args:
            list_type: Board type ('total' = overall board, 'rising' = rising board).

        Returns:
            List of hot-question dicts (empty on failure).
        """
        url = self.ZHIHU_HOT_LIST_API if list_type == 'total' else self.ZHIHU_RISING_API

        try:
            response_text = await self.fetch_with_rate_limit(url)
            if not response_text:
                return []

            data = json.loads(response_text)
            hot_list = []

            for idx, item in enumerate(data.get('data', [])):
                target = item.get('target', {})

                # Entries without a question id are not actionable; skip them.
                question_id = target.get('id')
                if not question_id:
                    continue

                hot_item = {
                    'question_id': str(question_id),
                    'title': target.get('title', ''),
                    'rank': idx + 1,
                    # detail_text looks like "1234 万热度"; strip the label.
                    'heat_value': item.get('detail_text', '').replace('热度', '').strip(),
                    'list_type': list_type,
                    'url': f"https://www.zhihu.com/question/{question_id}",
                    'excerpt': target.get('excerpt', ''),
                    'created': target.get('created', 0),
                    'updated_time': datetime.now()
                }
                hot_list.append(hot_item)

            logger.info(f"获取{list_type}热榜 {len(hot_list)} 条")
            return hot_list

        except Exception as e:
            logger.error(f"获取热榜失败: {e}")
            return []

    async def fetch_question_detail(self, question_id: str) -> Optional[Dict[str, Any]]:
        """Fetch details for one question.

        Args:
            question_id: Question id.

        Returns:
            Question-detail dict, or None on failure.
        """
        url = self.ZHIHU_QUESTION_API.format(question_id=question_id)

        try:
            response_text = await self.fetch_with_rate_limit(url)
            if not response_text:
                return None

            data = json.loads(response_text)

            return {
                'question_id': str(data.get('id', '')),
                'title': data.get('title', ''),
                'description': data.get('detail', ''),
                'follower_count': data.get('follower_count', 0),
                'answer_count': data.get('answer_count', 0),
                'view_count': data.get('visit_count', 0),
                'topics': [topic.get('name', '') for topic in data.get('topics', [])],
                'created_time': data.get('created', 0),
                'updated_time': data.get('updated_time', 0)
            }

        except Exception as e:
            logger.error(f"获取问题详情失败 {question_id}: {e}")
            return None

    async def fetch_answers(self, question_id: str, limit: int = 5) -> List[Dict[str, Any]]:
        """Fetch top answers for a question.

        Args:
            question_id: Question id.
            limit: Number of answers to fetch.

        Returns:
            List of answer dicts (author names replaced by hashes).
        """
        url = self.ZHIHU_ANSWERS_API.format(question_id=question_id)
        params = {
            'include': 'data[*].content,voteup_count,comment_count,created_time',
            'offset': 0,
            'limit': limit,
            'sort_by': 'default'  # default sort surfaces highest-voted answers
        }

        try:
            response_text = await self.fetch_with_rate_limit(url, params=params)
            if not response_text:
                return []

            data = json.loads(response_text)
            answers = []

            for answer in data.get('data', []):
                # Pseudonymize the author: store a hash, never the raw name.
                author = answer.get('author', {})
                author_name = author.get('name', 'anonymous')
                author_hash = self.generate_hash(author_name)

                answer_data = {
                    'answer_id': str(answer.get('id', '')),
                    'content': answer.get('content', ''),
                    'author_hash': author_hash,
                    'author_type': author.get('type', 'people'),
                    'voteup_count': answer.get('voteup_count', 0),
                    'comment_count': answer.get('comment_count', 0),
                    'created_time': answer.get('created_time', 0),
                    'updated_time': answer.get('updated_time', 0)
                }

                # Collect embedded image URLs, if any.
                images = self.extract_images(answer.get('content', ''))
                if images:
                    answer_data['images'] = images

                answers.append(answer_data)

            logger.info(f"获取问题 {question_id} 的 {len(answers)} 个回答")
            return answers

        except Exception as e:
            logger.error(f"获取回答失败 {question_id}: {e}")
            return []

    async def fetch_comments(self, answer_id: str, limit: int = 10) -> List[Dict[str, Any]]:
        """Fetch root comments for an answer.

        Args:
            answer_id: Answer id.
            limit: Number of comments to fetch.

        Returns:
            List of comment dicts (author names replaced by hashes).
        """
        url = self.ZHIHU_COMMENTS_API.format(answer_id=answer_id)
        params = {
            'order': 'normal',
            'limit': limit,
            'offset': 0,
            'status': 'open'
        }

        try:
            response_text = await self.fetch_with_rate_limit(url, params=params)
            if not response_text:
                return []

            data = json.loads(response_text)
            comments = []

            for comment in data.get('data', []):
                # Comment authors are nested one level deeper than answer authors.
                author = comment.get('author', {}).get('member', {})
                author_name = author.get('name', 'anonymous')
                author_hash = self.generate_hash(author_name)

                comment_data = {
                    'comment_id': str(comment.get('id', '')),
                    'content': comment.get('content', ''),
                    'author_hash': author_hash,
                    'vote_count': comment.get('vote_count', 0),
                    'created_time': comment.get('created_time', 0),
                    'reply_to_author': comment.get('reply_to_author', {}).get('member', {}).get('name', '')
                }
                comments.append(comment_data)

            return comments

        except Exception as e:
            logger.error(f"获取评论失败 {answer_id}: {e}")
            return []

    def extract_images(self, html_content: str) -> List[str]:
        """Extract image URLs from an HTML fragment.

        Args:
            html_content: HTML content (e.g. an answer body).

        Returns:
            De-duplicated list of image URLs, upgraded to high-resolution
            variants where the filename suffix allows it. Order is not preserved.
        """
        if not html_content:
            return []

        try:
            soup = BeautifulSoup(html_content, 'html.parser')
            images = []

            # <img> tags: prefer the lazy-load data-original attribute.
            for img in soup.find_all('img'):
                src = img.get('data-original') or img.get('src')
                if src:
                    # Swap thumbnail suffixes for the high-res (_r) variant.
                    src = src.replace('_b.jpg', '_r.jpg').replace('_s.jpg', '_r.jpg')
                    images.append(src)

            # Zhihu also ships images inside <noscript> fallbacks.
            for noscript in soup.find_all('noscript'):
                noscript_soup = BeautifulSoup(noscript.string or '', 'html.parser')
                for img in noscript_soup.find_all('img'):
                    src = img.get('src')
                    if src:
                        src = src.replace('_b.jpg', '_r.jpg').replace('_s.jpg', '_r.jpg')
                        images.append(src)

            # De-duplicate (set-based, so ordering is arbitrary).
            images = list(set(images))
            return images

        except Exception as e:
            logger.error(f"提取图片失败: {e}")
            return []

    async def parse(self, html: str, url: str = None) -> List[Dict[str, Any]]:
        """Parse a page (base-class compatibility shim).

        Args:
            html: HTML content.
            url: Page URL.

        Returns:
            Always an empty list; real parsing lives in the dedicated methods.
        """
        # Exists only to satisfy the BaseSpider interface.
        return []

    async def crawl_hot(self) -> List[Dict[str, Any]]:
        """Unified cross-spider interface: fetch hot-search data.

        Returns:
            Same payload as :meth:`crawl_hot_topics`.
        """
        return await self.crawl_hot_topics()

    async def crawl_hot_topics(self) -> List[Dict[str, Any]]:
        """Main crawl flow for the hot list.

        Returns:
            List of normalized, de-duplicated topic dicts.
        """
        all_topics = []

        try:
            # Prefer the billboard page first (works without login).
            hot_list = await self.fetch_hot_list_from_billboard()

            # Fall back to the API (requires login) if the billboard failed.
            if not hot_list:
                logger.info("Billboard获取失败，尝试API方式")
                total_list = await self.fetch_hot_list('total')
                rising_list = await self.fetch_hot_list('rising')
                hot_list = total_list + rising_list

            # Merge boards, de-duplicating by question id (last wins).
            hot_questions = {}
            for item in hot_list:
                if item.get('question_id'):
                    hot_questions[item['question_id']] = item

            logger.info(f"共获取 {len(hot_questions)} 个热榜问题")

            # Simplified flow: keep only basic info; skip details/answers
            # to avoid endpoints that require login.
            for question_id, topic_data in hot_questions.items():
                # URL hash doubles as the de-duplication key in storage.
                url_hash = self.generate_hash(topic_data['url'])
                topic_data['url_hash'] = url_hash

                # Stamp provenance and lifecycle metadata.
                topic_data['source'] = 'zhihu'
                topic_data['fetch_time'] = datetime.now()
                topic_data['expire_at'] = datetime.now() + timedelta(days=30)
                topic_data['platform'] = 'zhihu'

                all_topics.append(topic_data)

            logger.info(f"处理完成 {len(all_topics)} 个话题")
            return all_topics

        except Exception as e:
            logger.error(f"爬取热榜失败: {e}")
            return all_topics

    async def store(self, data: List[Dict[str, Any]]) -> bool:
        """Persist topics to MongoDB (upsert-by-hash semantics).

        Args:
            data: Topic dicts to store.

        Returns:
            True on success, False when storage is unavailable or fails.
        """
        if not self.mongodb_client:
            logger.warning("MongoDB客户端未配置，跳过存储")
            return False

        try:
            # NOTE(review): these are synchronous pymongo calls inside an async
            # method — they block the event loop; consider motor or a thread.
            db = self.mongodb_client['crawler']
            collection = db['hot_topics']

            # Ensure indexes (idempotent, but runs on every store call).
            collection.create_index('url_hash', unique=True)
            collection.create_index([('source', 1), ('fetch_time', -1)])
            # Descending index on heat_value. (pymongo has no `reverse` kwarg;
            # direction is expressed as -1 in the key specification.)
            collection.create_index([('heat_value', -1)])
            collection.create_index('expire_at', expireAfterSeconds=0)  # TTL index

            # Upsert one-by-one so we can log adds vs. updates.
            for item in data:
                existing = collection.find_one({'url_hash': item['url_hash']})

                if existing:
                    # Only rewrite the document when the heat value changed.
                    if item.get('heat_value', 0) != existing.get('heat_value', 0):
                        collection.update_one(
                            {'url_hash': item['url_hash']},
                            {'$set': item}
                        )
                        logger.info(f"更新问题: {item['title']}")
                else:
                    collection.insert_one(item)
                    logger.info(f"新增问题: {item['title']}")

            return True

        except Exception as e:
            logger.error(f"存储数据失败: {e}")
            return False

    def check_incremental(self, url_hash: str) -> bool:
        """Decide whether a stored topic needs a refresh.

        Args:
            url_hash: URL hash of the topic.

        Returns:
            True when the topic is absent, stale (> 6 hours old), or the
            check itself fails; False when the stored copy is fresh.
        """
        if not self.mongodb_client:
            return True

        try:
            db = self.mongodb_client['crawler']
            collection = db['hot_topics']

            existing = collection.find_one(
                {'url_hash': url_hash},
                {'heat_value': 1, 'fetch_time': 1}
            )

            if not existing:
                return True

            # Refresh anything older than 6 hours.
            time_diff = datetime.now() - existing.get('fetch_time', datetime.min)
            if time_diff.total_seconds() > 6 * 3600:
                return True

            return False

        except Exception as e:
            logger.error(f"检查增量更新失败: {e}")
            return True


async def main():
    """Smoke-test entry point: crawl the hot list and print the first topic.

    Relies on BaseSpider's async context manager for session setup/teardown.
    Storage is left commented out because it needs a configured MongoDB client.
    """
    async with ZhihuHotSpider() as spider:
        # Crawl the hot list
        topics = await spider.crawl_hot_topics()
        logger.info(f"爬取完成，共 {len(topics)} 个话题")

        # Persist results (requires a MongoDB client passed to the spider)
        # await spider.store(topics)

        # Print the first topic's structure for manual inspection.
        # json is already imported at module level; default=str handles datetimes.
        if topics:
            print(json.dumps(topics[0], ensure_ascii=False, indent=2, default=str))

# Script entry point: enable INFO logging and run the async smoke test.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())