"""小红书热点爬虫实现"""
import asyncio
import hashlib
import json
import logging
import random
import re
import time
from datetime import datetime, timedelta
from typing import Dict, Any, List, Optional, Tuple
from urllib.parse import quote, unquote

import aiohttp
from bs4 import BeautifulSoup

from .base_spider import BaseSpider

logger = logging.getLogger(__name__)


class XiaohongshuHotSpider(BaseSpider):
    """Xiaohongshu (Little Red Book) trending-notes spider.

    Pulls hot notes from the explore feed (private API first, public web
    page as a fallback), anonymizes author data, auto-categorizes each
    note by keyword, and persists results to MongoDB with a Redis-based
    update gate.
    """

    # API endpoint configuration.
    # NOTE: Xiaohongshu's private API changes frequently; these are
    # commonly observed endpoint patterns, not a stable contract.
    XHS_EXPLORE_API = "https://edith.xiaohongshu.com/api/sns/v6/homefeed"
    XHS_NOTE_DETAIL_API = "https://edith.xiaohongshu.com/api/sns/v2/note"
    XHS_COMMENTS_API = "https://edith.xiaohongshu.com/api/sns/v5/note/comment/list"
    XHS_TOPIC_API = "https://edith.xiaohongshu.com/api/sns/v1/search/trending"

    # Web endpoint (fallback used when the API path fails).
    XHS_WEB_EXPLORE = "https://www.xiaohongshu.com/explore"

    # User-Agent pool (mobile clients), rotated once per request.
    USER_AGENTS = [
        'Mozilla/5.0 (iPhone; CPU iPhone OS 16_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.31(0x18001f37) NetType/WIFI Language/zh_CN',
        'Mozilla/5.0 (iPhone; CPU iPhone OS 15_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.0 Mobile/15E148 Safari/604.1',
        'Mozilla/5.0 (Android 12; Mobile; rv:109.0) Gecko/109.0 Firefox/109.0',
        'Mozilla/5.0 (Linux; Android 12; SM-G991B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Mobile Safari/537.36',
        'Dalvik/2.1.0 (Linux; U; Android 12; Redmi Note 11 Pro Build/SKQ1.211019.001)',
        'okhttp/4.9.1 Discovery/2.0 (Android; 12) Resolution/1080x2400',
        'XiaoHongShu/8.18.0 (Android; 12; SM-G991B) Build/8180400',
        'XiaoHongShu/8.18.0 (iPhone; iOS 16.0; Scale/3.0)',
        'Mozilla/5.0 (Linux; Android 11; Redmi K30 Pro) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/108.0.5359.128 Mobile Safari/537.36 XiaoHongShu/8.15.0',
        'Mozilla/5.0 (iPhone; CPU iPhone OS 15_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 XiaoHongShu/8.16.1'
    ]

    # Category name -> trigger keywords used by auto_categorize().
    CATEGORY_KEYWORDS = {
        "美妆": ["美妆", "化妆", "护肤", "口红", "粉底", "眼影", "面膜", "精华", "防晒", "彩妆"],
        "穿搭": ["穿搭", "搭配", "衣服", "裙子", "鞋子", "包包", "配饰", "时尚", "潮流", "OOTD"],
        "美食": ["美食", "美味", "好吃", "食谱", "烹饪", "餐厅", "甜品", "咖啡", "探店", "菜谱"],
        "旅行": ["旅行", "旅游", "景点", "打卡", "攻略", "酒店", "民宿", "行程", "游记", "度假"],
        "健身": ["健身", "运动", "瑜伽", "减肥", "塑形", "跑步", "撸铁", "有氧", "拉伸", "训练"],
        "家居": ["家居", "装修", "设计", "收纳", "家具", "软装", "改造", "布置", "装饰", "好物"],
        "母婴": ["母婴", "宝宝", "育儿", "孕期", "产后", "辅食", "早教", "亲子", "萌娃", "带娃"],
        "宠物": ["宠物", "猫咪", "狗狗", "铲屎官", "萌宠", "养宠", "宠粮", "训练", "医疗", "领养"],
        "学习": ["学习", "笔记", "考研", "英语", "考试", "自习", "背书", "学霸", "效率", "方法"],
        "职场": ["职场", "工作", "面试", "求职", "升职", "加薪", "办公", "技能", "简历", "跳槽"]
    }

    # Throttling configuration.
    RATE_LIMIT = 3000  # minimum gap between requests, in milliseconds (3 s)
    MAX_CONCURRENT_REQUESTS = 2  # cap on simultaneous in-flight requests
    UPDATE_INTERVAL = 1800  # minimum seconds between full crawls (30 min)
    def __init__(self, mongodb_client=None, redis_client=None):
        """Create the spider.

        Args:
            mongodb_client: async MongoDB client used for persistence.
            redis_client: Redis client used for rate gating and caching.
        """
        super().__init__(name="XiaohongshuHotSpider")

        self.mongodb_client = mongodb_client
        self.redis_client = redis_client

        # Throttling state: timestamp of the last outbound request plus a
        # semaphore capping concurrent requests.
        self.last_request_time = 0
        self.request_semaphore = asyncio.Semaphore(self.MAX_CONCURRENT_REQUESTS)

        # Extra headers that make requests resemble mobile-web traffic.
        mobile_headers = {
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Origin': 'https://www.xiaohongshu.com',
            'Referer': 'https://www.xiaohongshu.com/',
            'X-Requested-With': 'XMLHttpRequest'
        }
        self.headers.update(mobile_headers)
    
    def get_random_user_agent(self) -> str:
        """Return one User-Agent string drawn at random from the pool."""
        pool = self.USER_AGENTS
        return pool[random.randrange(len(pool))]
    
    def generate_signature(self, path: str, params: Dict) -> Tuple[str, str]:
        """Build the (X-S, X-T) header pair for an API request.

        Simplified stand-in: the real client computes X-S with a
        proprietary, obfuscated algorithm that would need reverse
        engineering. Here X-T is the millisecond timestamp and X-S is a
        truncated MD5 digest over path, sorted params and timestamp.

        Args:
            path: request path component.
            params: query/body parameters.

        Returns:
            (X-S, X-T) signature pair.
        """
        x_t = str(int(time.time() * 1000))

        # Canonicalize params so the digest is order-independent.
        canonical = json.dumps(params, sort_keys=True)
        payload = f"{path}:{canonical}:{x_t}"
        x_s = hashlib.md5(payload.encode()).hexdigest()[:16]

        return x_s, x_t
    
    def mask_nickname(self, nickname: str) -> str:
        """Anonymize a user nickname for storage.

        Args:
            nickname: original nickname.

        Returns:
            "***" for empty or very short (<= 2 chars) nicknames;
            otherwise first and last characters with "**" in between.
        """
        too_short = not nickname or len(nickname) <= 2
        return "***" if too_short else f"{nickname[0]}**{nickname[-1]}"
    
    def get_follower_level(self, follower_count: int) -> str:
        """Bucket a raw follower count into a coarse display level.

        Storing only the bucket (e.g. "3万+") instead of the exact count
        anonymizes the author while preserving rough reach.

        Fix vs. the original: the branches for [1万, 10万) and
        [10万, 100万) were byte-identical (both floor-divide by 10000),
        so they are merged into a single `< 1000000` branch; output is
        unchanged for every input.

        Args:
            follower_count: raw follower count.

        Returns:
            Label such as "1千以下", "5千+", "12万+" or "2百万+".
        """
        if follower_count < 1000:
            return "1千以下"
        if follower_count < 10000:
            return f"{follower_count // 1000}千+"
        if follower_count < 1000000:
            return f"{follower_count // 10000}万+"
        return f"{follower_count // 1000000}百万+"
    
    def auto_categorize(self, title: str, content: str, tags: List[str]) -> List[str]:
        """Assign up to three categories via keyword matching.

        Args:
            title: note title.
            content: note body text.
            tags: native tags attached to the note.

        Returns:
            At most three matching category names (in CATEGORY_KEYWORDS
            order), or ["其他"] when nothing matches.
        """
        haystack = f"{title} {content} {' '.join(tags)}".lower()

        # A category matches as soon as any of its keywords appears.
        matched = [
            category
            for category, keywords in self.CATEGORY_KEYWORDS.items()
            if any(keyword.lower() in haystack for keyword in keywords)
        ]

        return (matched or ["其他"])[:3]
    
    async def fetch_with_rate_limit(self, url: str, **kwargs) -> Optional[str]:
        """Fetch a URL while enforcing the spider's throttling rules.

        At most MAX_CONCURRENT_REQUESTS requests run at once, and each
        request is spaced at least RATE_LIMIT ms (plus random jitter)
        after the previous one.

        Args:
            url: target URL.
            **kwargs: extra arguments forwarded to BaseSpider.fetch
                (e.g. params=...).

        Returns:
            Response text, or None on failure (delegated to fetch()).
        """
        async with self.request_semaphore:
            # Spacing is measured in milliseconds on the event-loop clock.
            current_time = asyncio.get_event_loop().time()
            time_since_last = (current_time - self.last_request_time) * 1000
            
            if time_since_last < self.RATE_LIMIT:
                # Pad up to the minimum interval, plus 0-2 s of jitter so
                # the request cadence is not a detectable fixed rhythm.
                wait_time = (self.RATE_LIMIT - time_since_last) / 1000 + random.uniform(0, 2)
                await asyncio.sleep(wait_time)
            
            # NOTE(review): self.headers is shared instance state; with up
            # to two concurrent requests, one coroutine can overwrite the
            # UA/signature another is about to send — confirm that fetch()
            # snapshots headers, or pass per-request headers instead.
            self.headers['User-Agent'] = self.get_random_user_agent()
            
            # API hosts require the signed X-S/X-T header pair.
            if 'edith.xiaohongshu.com' in url:
                path = url.split('edith.xiaohongshu.com')[1].split('?')[0]
                x_s, x_t = self.generate_signature(path, kwargs.get('params', {}))
                self.headers['X-S'] = x_s
                self.headers['X-T'] = x_t
            
            self.last_request_time = asyncio.get_event_loop().time()
            
            return await self.fetch(url, **kwargs)
    
    async def parse_explore_notes(self, data: Dict) -> List[Dict[str, Any]]:
        """Parse note documents out of an explore-feed API response.

        Fixes vs. the original:
        - errors are caught per item, so one malformed card no longer
          aborts parsing of the remaining items;
        - an explicit null 'desc'/'title' in the payload no longer raises
          TypeError on slicing/formatting.

        Args:
            data: decoded JSON payload from the homefeed API.

        Returns:
            List of normalized note documents (author data anonymized).
        """
        notes = []
        items = data.get('data', {}).get('items', []) if isinstance(data, dict) else []

        for item in items:
            try:
                note_card = item.get('note_card', {})
                if not note_card:
                    continue

                # A note without an id cannot be deduplicated downstream.
                note_id = note_card.get('note_id', '')
                if not note_id:
                    continue

                # Author block — stored only in anonymized form.
                user = note_card.get('user', {})
                user_id = user.get('user_id', '')
                nickname = user.get('nickname', '')

                interact_info = note_card.get('interact_info', {})
                images_list = note_card.get('image_list', [])
                video_info = note_card.get('video', {})

                note_data = {
                    "note_id": note_id,
                    "title": note_card.get('title') or '',
                    # Body text capped at 500 chars; `or ''` guards a null
                    # desc, which the old `.get('desc', '')` did not.
                    "content": (note_card.get('desc') or '')[:500],
                    "source": "xiaohongshu",
                    "author": {
                        "user_id_hash": self.generate_hash(user_id),
                        "nickname_masked": self.mask_nickname(nickname),
                        "follower_level": self.get_follower_level(user.get('fans', 0))
                    },
                    "metrics": {
                        "likes": interact_info.get('liked_count', 0),
                        "collects": interact_info.get('collected_count', 0),
                        "comments": interact_info.get('comment_count', 0),
                        "shares": interact_info.get('share_count', 0)
                    },
                    "media": {
                        "images": [
                            {
                                "url": img.get('url', ''),
                                "width": img.get('width', 0),
                                "height": img.get('height', 0)
                            } for img in images_list
                        ],
                        "video": {
                            "cover_url": video_info.get('cover', {}).get('url', ''),
                            "duration": video_info.get('duration', 0)
                        } if video_info else None
                    },
                    "tags": note_card.get('tag_list', []),
                    "topic": note_card.get('topic', {}).get('name', ''),
                    "fetch_time": datetime.now(),
                    "update_time": datetime.now()
                }

                # Keyword-based classification, stored pipe-joined.
                categories = self.auto_categorize(
                    note_data['title'],
                    note_data['content'],
                    note_data['tags']
                )
                note_data['category'] = "|".join(categories)

                notes.append(note_data)

            except Exception as e:
                # Skip just this card and keep going.
                logger.error(f"解析笔记数据失败: {e}")

        return notes
    
    async def fetch_note_detail(self, note_id: str) -> Optional[Dict[str, Any]]:
        """Fetch the full detail payload for a single note.

        Args:
            note_id: target note id.

        Returns:
            The 'data' section of the detail response, or None on any
            failure (logged).
        """
        try:
            url = f"{self.XHS_NOTE_DETAIL_API}/{note_id}"
            request_params = {
                "source_note_id": note_id,
                "image_scenes": ["CRD_PRV_WEBP", "CRD_WM_WEBP"]
            }

            raw = await self.fetch_with_rate_limit(url, params=request_params)
            if raw:
                payload = json.loads(raw)
                return payload.get('data', {})

        except Exception as e:
            logger.error(f"获取笔记详情失败 {note_id}: {e}")

        return None
    
    async def fetch_note_comments(self, note_id: str, limit: int = 10) -> List[Dict[str, Any]]:
        """Fetch up to `limit` top-level hot comments for a note.

        Args:
            note_id: target note id.
            limit: maximum number of comments to return.

        Returns:
            List of {content, likes, user_hash} dicts with the commenter
            anonymized; whatever was collected before an error otherwise.
        """
        comments = []

        try:
            request_params = {
                "note_id": note_id,
                "cursor": "",
                "limit": limit,
                "level": 1,
                "sort": "hot"  # hottest comments first
            }

            raw = await self.fetch_with_rate_limit(self.XHS_COMMENTS_API, params=request_params)
            if raw:
                payload = json.loads(raw)
                for entry in payload.get('data', {}).get('comments', [])[:limit]:
                    user_id = entry.get('user_info', {}).get('user_id', '')
                    comments.append({
                        "content": entry.get('content', ''),
                        "likes": entry.get('like_count', 0),
                        "user_hash": self.generate_hash(user_id)
                    })

        except Exception as e:
            logger.error(f"获取笔记评论失败 {note_id}: {e}")

        return comments
    
    async def fetch_trending_topics(self) -> List[Dict[str, Any]]:
        """Fetch the current trending-topic list.

        Returns:
            List of topic summary dicts; whatever was collected before an
            error otherwise (possibly empty).
        """
        topics = []

        try:
            raw = await self.fetch_with_rate_limit(self.XHS_TOPIC_API)
            if raw:
                payload = json.loads(raw)
                for entry in payload.get('data', {}).get('topics', []):
                    topics.append({
                        "topic_name": entry.get('name', ''),
                        "topic_id": entry.get('id', ''),
                        "view_count": entry.get('view_count', 0),
                        "note_count": entry.get('note_count', 0),
                        "description": entry.get('desc', '')
                    })

        except Exception as e:
            logger.error(f"获取热门话题失败: {e}")

        return topics
    
    async def crawl_explore_notes(self, page_size: int = 50) -> List[Dict[str, Any]]:
        """Crawl hot notes from the explore feed via the API.

        Pages through the homefeed endpoint (max 20 notes per call)
        until `page_size` notes are collected, the cursor runs out, or a
        page fails to decode.

        Args:
            page_size: number of notes to collect.

        Returns:
            Up to `page_size` parsed note documents.
        """
        collected = []

        try:
            request_params = {
                "num": min(page_size, 20),  # the API caps one page at 20
                "cursor": "",
                "refresh_type": 1,
                "note_index": 0,
                "unread_begin_note_id": "",
                "unread_end_note_id": "",
                "unread_note_count": 0
            }

            # ceil(page_size / 20) pages are needed.
            for _ in range((page_size + 19) // 20):
                raw = await self.fetch_with_rate_limit(self.XHS_EXPLORE_API, params=request_params)

                if raw:
                    try:
                        payload = json.loads(raw)
                    except json.JSONDecodeError as e:
                        logger.error(f"JSON解析失败: {e}")
                        # Caller falls back to web scraping on empty result.
                        break

                    collected.extend(await self.parse_explore_notes(payload))

                    # Advance pagination; stop when exhausted or done.
                    next_cursor = payload.get('data', {}).get('cursor', '')
                    if not next_cursor or len(collected) >= page_size:
                        break
                    request_params['cursor'] = next_cursor

                # Random pause between pages to avoid anti-bot triggers.
                await asyncio.sleep(random.uniform(1, 2))

        except Exception as e:
            logger.error(f"爬取发现页笔记失败: {e}")

        # Trim any overshoot from the final page.
        return collected[:page_size]
    
    async def crawl_web_explore(self) -> List[Dict[str, Any]]:
        """Scrape the explore web page (fallback when the API fails).

        Fix vs. the original: each BeautifulSoup selector was queried
        twice per field (once for the None-check, once for the value);
        each is now queried once and reused.

        Returns:
            List of note documents. Web markup is sparser than the API:
            collect/comment/share counts and media are unavailable.
        """
        notes = []

        try:
            response = await self.fetch_with_rate_limit(self.XHS_WEB_EXPLORE)
            if response:
                soup = BeautifulSoup(response, 'html.parser')

                # Note cards — selector depends on current page markup.
                note_cards = soup.find_all('div', class_='note-item')

                for card in note_cards[:50]:  # cap at 50 cards
                    try:
                        # Query each element exactly once; any may be absent.
                        title_el = card.find('div', class_='title')
                        desc_el = card.find('div', class_='desc')
                        nick_el = card.find('span', class_='nickname')
                        likes_el = card.find('span', class_='like-count')

                        note_data = {
                            "note_id": card.get('data-note-id', ''),
                            "title": title_el.text.strip() if title_el else '',
                            "content": desc_el.text.strip()[:500] if desc_el else '',
                            "source": "xiaohongshu",
                            "author": {
                                "user_id_hash": self.generate_hash(card.get('data-user-id', '')),
                                "nickname_masked": self.mask_nickname(
                                    nick_el.text.strip() if nick_el else ''
                                ),
                                "follower_level": "未知"
                            },
                            "metrics": {
                                # A non-numeric count (e.g. "1.2万") raises
                                # here and skips the card — same as before.
                                "likes": int(likes_el.text.strip() or 0) if likes_el else 0,
                                "collects": 0,  # not shown on the web page
                                "comments": 0,  # not shown on the web page
                                "shares": 0     # not shown on the web page
                            },
                            "media": {
                                "images": [],
                                "video": None
                            },
                            "tags": [],
                            "topic": "",
                            "category": "其他",
                            "fetch_time": datetime.now(),
                            "update_time": datetime.now()
                        }

                        notes.append(note_data)

                    except Exception as e:
                        logger.debug(f"解析笔记卡片失败: {e}")
                        continue

        except Exception as e:
            logger.error(f"Web端爬取失败: {e}")

        return notes
    
    async def enrich_note_data(self, note: Dict[str, Any]) -> Dict[str, Any]:
        """Enrich a note in place with detail text and sample comments.

        Fix vs. the original: a detail payload containing an explicit
        null 'desc' no longer raises TypeError on `None[:500]` — the
        existing content is kept instead.

        Args:
            note: base note document (mutated and returned).

        Returns:
            The same dict, with refreshed 'content' and a
            'comments_sample' list when a note_id is present.
        """
        note_id = note.get('note_id', '')

        if note_id:
            # Pull fuller body text from the detail endpoint if available.
            detail = await self.fetch_note_detail(note_id)
            if detail:
                desc = detail.get('desc')
                if desc is None:
                    # Missing or null desc: keep what we already have.
                    desc = note.get('content', '')
                note['content'] = (desc or '')[:500]

            # Attach a small sample of hot comments.
            note['comments_sample'] = await self.fetch_note_comments(note_id)

        return note
    
    async def check_update_interval(self) -> bool:
        """Decide whether a new crawl is due (Redis-gated window).

        Fix vs. the original: the stored value was unconditionally
        `.decode()`d. Redis clients configured with
        decode_responses=True return str, so that raised AttributeError
        — silently swallowed, which defeated the gate and forced a crawl
        every time. The value is now decoded only when it is bytes.

        Returns:
            True when a crawl should run (or gating is unavailable),
            False when the last crawl was within UPDATE_INTERVAL.
        """
        if not self.redis_client:
            # No Redis — no gating; always crawl.
            return True

        try:
            last_update_key = "xiaohongshu_hot_spider:last_update"
            last_update = await self.redis_client.get(last_update_key)

            if last_update:
                if isinstance(last_update, bytes):
                    last_update = last_update.decode()
                last_time = datetime.fromisoformat(last_update)
                if datetime.now() - last_time < timedelta(seconds=self.UPDATE_INTERVAL):
                    logger.info("距离上次更新未满30分钟，跳过本次爬取")
                    return False

            # Record this crawl; the key expires with the interval so a
            # stale entry can never block crawling forever.
            await self.redis_client.set(
                last_update_key,
                datetime.now().isoformat(),
                ex=self.UPDATE_INTERVAL
            )
            return True

        except Exception as e:
            logger.error(f"检查更新间隔失败: {e}")
            # Fail open: better to crawl too often than never.
            return True
    
    async def update_with_history(self, note: Dict[str, Any]) -> Dict[str, Any]:
        """Upsert a note into MongoDB while preserving selected history.

        Keeps the previous update_time unless likes moved by more than
        10%, reuses the stored comment sample when the new crawl has
        none, and stamps a 30-day expiry marker.

        Args:
            note: freshly crawled note document (mutated in place).

        Returns:
            The (possibly mutated) note; unchanged when no Mongo client
            is configured.
        """
        if not self.mongodb_client:
            return note
        
        try:
            db = self.mongodb_client.aipaper
            collection = db.hot_topics
            
            # Look up the previously stored version of this note.
            existing = await collection.find_one({"note_id": note["note_id"]})
            
            if existing:
                # Compare old vs. new engagement numbers.
                old_metrics = existing.get("metrics", {})
                new_metrics = note.get("metrics", {})
                
                likes_change = new_metrics.get("likes", 0) - old_metrics.get("likes", 0)
                
                # Refresh update_time only on a significant (>10%) swing
                # in likes. NOTE(review): the original comment also
                # promised "or new comments", but only likes are checked
                # — confirm whether comment-count changes should count.
                if abs(likes_change) / max(old_metrics.get("likes", 1), 1) > 0.1:
                    note["update_time"] = datetime.now()
                else:
                    # Keep the previous update_time.
                    note["update_time"] = existing.get("update_time", datetime.now())
                
                # Keep the stored comment sample if the new crawl has none.
                if not note.get("comments_sample") and existing.get("comments_sample"):
                    note["comments_sample"] = existing["comments_sample"]
            
            # Expiry marker — presumably a TTL index exists on expire_at;
            # verify against the deployment's index definitions.
            note["expire_at"] = datetime.now() + timedelta(days=30)
            
            # Upsert the document keyed by note_id.
            await collection.replace_one(
                {"note_id": note["note_id"]},
                note,
                upsert=True
            )
            
        except Exception as e:
            logger.error(f"更新历史记录失败: {e}")
        
        return note
    
    async def crawl(self, enrich: bool = True) -> List[Dict[str, Any]]:
        """Run the full crawl pipeline.

        Order: Redis gate -> API crawl -> web fallback -> optional
        enrichment of the first 10 notes -> trending topics ->
        history-aware per-note upsert -> bulk store.

        Args:
            enrich: fetch details/comments for the first 10 notes.

        Returns:
            Crawled note documents, or [] when gated or nothing found.
        """
        # Respect the 30-minute update gate.
        if not await self.check_update_interval():
            return []
        
        logger.info("开始爬取小红书热门笔记")
        
        # Primary path: private API.
        notes = await self.crawl_explore_notes(50)
        
        # Fallback: scrape the public web page.
        if not notes:
            logger.warning("API爬取失败，尝试Web端爬取")
            notes = await self.crawl_web_explore()
        
        if notes:
            # Enrich only the first 10 notes to bound request volume.
            if enrich:
                for i in range(min(10, len(notes))):
                    notes[i] = await self.enrich_note_data(notes[i])
                    await asyncio.sleep(random.uniform(0.5, 1))  # pacing
            
            # NOTE(review): topics are fetched and logged but never
            # stored or returned — confirm whether that is intentional.
            topics = await self.fetch_trending_topics()
            
            # NOTE(review): each note is upserted here AND again by
            # store() below — the second write is redundant; consider
            # dropping one of the two paths.
            for note in notes:
                await self.update_with_history(note)
            
            logger.info(f"成功爬取{len(notes)}条笔记，{len(topics)}个话题")
            
            # Persist everything in one bulk write.
            if self.mongodb_client:
                await self.store(notes)
            
            return notes
        
        logger.warning("未能获取到任何笔记数据")
        return []
    
    async def store(self, data: List[Dict[str, Any]]) -> bool:
        """Store note documents into MongoDB, upserting by note_id.

        Fix vs. the original: pymongo/motor `bulk_write` requires driver
        operation objects (ReplaceOne, InsertOne, ...). The previous
        code passed raw `{"replaceOne": {...}}` dicts — that is the
        MongoDB *server command* syntax and makes the driver raise
        TypeError, so every bulk store failed. ReplaceOne objects are
        now built instead.

        Args:
            data: note documents to persist.

        Returns:
            True when the bulk write succeeded, False otherwise.
        """
        if not self.mongodb_client:
            logger.warning("MongoDB客户端未初始化")
            return False

        try:
            # Local import: pymongo is only required when a Mongo client
            # is actually configured, so keep the dependency lazy.
            from pymongo import ReplaceOne

            db = self.mongodb_client.aipaper
            collection = db.hot_topics

            # One upsert operation per note, keyed by note_id.
            operations = [
                ReplaceOne({"note_id": item["note_id"]}, item, upsert=True)
                for item in data
            ]

            if operations:
                result = await collection.bulk_write(operations)
                logger.info(f"存储成功: 插入{result.upserted_count}条，更新{result.modified_count}条")
                return True

            return False

        except Exception as e:
            logger.error(f"存储数据失败: {e}")
            return False
    
    async def parse(self, html: str, url: str = None) -> List[Dict[str, Any]]:
        """Parse page HTML into note documents (BaseSpider hook).

        Currently a stub: the real web-page parsing lives in
        crawl_web_explore(), so this always returns an empty list. The
        original additionally built a BeautifulSoup tree that was never
        read; that dead work is removed.

        Args:
            html: raw HTML content.
            url: page URL (unused).

        Returns:
            Empty list.
        """
        return []
    
    async def crawl_hot(self) -> List[Dict[str, Any]]:
        """Unified interface: run the full crawl and return its notes."""
        result = await self.crawl()
        return result