"""YouTube热门爬虫模块"""
import asyncio
import json
import logging
from typing import Dict, Any, List, Optional, Set
from datetime import datetime, timedelta
import re
from urllib.parse import urlencode

import aiohttp
import redis.asyncio as redis

from .base_spider import BaseSpider

logger = logging.getLogger(__name__)


class YouTubeHotSpider(BaseSpider):
    """Spider for YouTube trending ("mostPopular") videos via the Data API v3.

    Quota bookkeeping and daily-reset state are kept in Redis (see __init__);
    all network access goes through the inherited ``fetch`` of BaseSpider.
    """

    # YouTube Data API v3 endpoints.
    BASE_URL = "https://www.googleapis.com/youtube/v3"
    VIDEOS_ENDPOINT = f"{BASE_URL}/videos"
    COMMENTS_ENDPOINT = f"{BASE_URL}/commentThreads"
    CAPTIONS_ENDPOINT = f"{BASE_URL}/captions"
    CHANNELS_ENDPOINT = f"{BASE_URL}/channels"

    # Quota cost model used by _check_and_consume_quota, in units per unit of
    # work as indicated per entry.
    # NOTE(review): the official API bills list endpoints per *request*
    # (videos.list = 1 unit, search.list = 100, captions.list = 50); billing
    # per item here is deliberately conservative — confirm intent.
    QUOTA_COSTS = {
        'videos': 1,      # per video
        'search': 100,    # per search request
        'comments': 1,    # per comment
        'captions': 50,   # per caption list
        'channels': 1,    # per channel
    }

    # Daily quota budget (units) enforced locally before each request.
    DAILY_QUOTA_LIMIT = 10000
    
    def __init__(self, api_key: str, redis_url: str = "redis://localhost:6379"):
        """Create a YouTube trending-video spider.

        Args:
            api_key: YouTube Data API key.
            redis_url: Redis connection URL (quota tracking and deduplication).
        """
        super().__init__(name="YouTubeHotSpider")

        # Credentials and backing-store configuration; the Redis connection is
        # opened lazily in start().
        self.api_key = api_key
        self.redis_url = redis_url
        self.redis_client: Optional[redis.Redis] = None

        # Redis key names for daily-quota bookkeeping.
        self.quota_key = "youtube_api_quota"
        self.last_reset_key = "youtube_api_quota_reset"

        # Regions whose "mostPopular" charts we crawl by default.
        self.region_codes = ["US", "GB", "JP", "CN", "IN", "BR", "RU", "DE", "FR", "KR"]

        # YouTube's predefined video-category ids mapped to readable names.
        self.category_mapping = {
            "1": "Film & Animation",
            "2": "Autos & Vehicles", 
            "10": "Music",
            "15": "Pets & Animals",
            "17": "Sports",
            "19": "Travel & Events",
            "20": "Gaming",
            "22": "People & Blogs",
            "23": "Comedy",
            "24": "Entertainment",
            "25": "News & Politics",
            "26": "Howto & Style",
            "27": "Education",
            "28": "Science & Technology"
        }
        
    async def start(self):
        """Start the spider and lazily open the Redis connection."""
        await super().start()
        if self.redis_client is None:
            self.redis_client = redis.from_url(self.redis_url)
        logger.info("YouTube爬虫启动完成")
        
    async def close(self):
        """Release the Redis connection, then shut down the base spider."""
        client, self.redis_client = self.redis_client, None
        if client:
            await client.close()
        await super().close()
        
    async def _check_and_consume_quota(self, operation: str, count: int = 1) -> bool:
        """Atomically check and consume API quota units.

        The previous GET → compare → INCRBY sequence was racy: two concurrent
        callers could both pass the comparison and together overrun the daily
        limit.  We now reserve the units with a single atomic INCRBY and roll
        the reservation back with DECRBY when it exceeds the budget.

        Args:
            operation: Operation type key into QUOTA_COSTS (videos, search, ...).
            count: Number of operations to bill for.

        Returns:
            True when enough quota was available (and has now been consumed).
        """
        if not self.redis_client:
            await self.start()

        # Reset the counter once per calendar day.
        today = datetime.now().strftime("%Y-%m-%d")
        last_reset = await self.redis_client.get(self.last_reset_key)
        if not last_reset or last_reset.decode() != today:
            await self.redis_client.set(self.quota_key, 0)
            await self.redis_client.set(self.last_reset_key, today)
            logger.info("API配额已重置")

        # Unknown operations default to the cheapest cost of 1 unit.
        total_cost = self.QUOTA_COSTS.get(operation, 1) * count

        # Atomically reserve the units; INCRBY returns the post-increment total.
        new_usage = await self.redis_client.incrby(self.quota_key, total_cost)
        if new_usage > self.DAILY_QUOTA_LIMIT:
            # Over budget: release the reservation and refuse the request.
            await self.redis_client.decrby(self.quota_key, total_cost)
            logger.warning(f"API配额不足: 当前使用 {new_usage - total_cost}, 需要 {total_cost}, 限制 {self.DAILY_QUOTA_LIMIT}")
            return False

        logger.debug(f"消费配额 {total_cost} 单位，操作: {operation} x {count}")
        return True
        
    async def _api_request(self, endpoint: str, params: Dict[str, Any], 
                          quota_operation: str, quota_count: int = 1) -> Optional[Dict[str, Any]]:
        """Execute a YouTube Data API request with quota accounting.

        Args:
            endpoint: Full API endpoint URL.
            params: Query parameters (not mutated; the key is added to a copy).
            quota_operation: Operation type for quota billing.
            quota_count: Quota multiplier for this request.

        Returns:
            Decoded JSON response, or None on quota exhaustion / request failure.
        """
        # Refuse early when the daily quota budget would be exceeded.
        if not await self._check_and_consume_quota(quota_operation, quota_count):
            return None

        # Build the request from a copy: the previous version mutated the
        # caller's dict, leaking the API credential into caller-owned state.
        request_params = {**params, 'key': self.api_key}
        url = f"{endpoint}?{urlencode(request_params)}"

        try:
            body = await self.fetch(url)
            if body:
                return json.loads(body)
        except json.JSONDecodeError as e:
            logger.error(f"JSON解析失败: {e}")
        except Exception as e:
            logger.error(f"API请求失败: {e}")

        return None
        
    async def get_popular_videos(self, region_code: str = "US", 
                                category_id: Optional[str] = None, max_results: int = 50) -> List[Dict[str, Any]]:
        """Fetch the "mostPopular" chart for a region.

        Fixes: the implicit-Optional annotation on ``category_id`` is now
        explicit, and quota is billed for the capped request size instead of
        the raw ``max_results`` (the API serves at most 50 items per page).

        Args:
            region_code: ISO 3166-1 alpha-2 region code.
            category_id: Optional YouTube category id filter.
            max_results: Maximum number of videos (one page, capped at 50).

        Returns:
            List of normalized video dicts (empty on failure).
        """
        # YouTube limits a single videos.list page to 50 results.
        page_size = min(max_results, 50)
        params = {
            'part': 'snippet,statistics,contentDetails,status',
            'chart': 'mostPopular',
            'regionCode': region_code,
            'maxResults': page_size,
        }
        if category_id:
            params['videoCategoryId'] = category_id

        # Bill quota for what is actually requested, not the caller's raw cap.
        response = await self._api_request(
            self.VIDEOS_ENDPOINT, 
            params, 
            'videos', 
            page_size
        )

        if not response or 'items' not in response:
            logger.error(f"获取热门视频失败: {region_code}")
            return []

        videos = []
        for item in response['items']:
            try:
                video_data = await self._parse_video_item(item, region_code)
                if video_data:
                    videos.append(video_data)
            except Exception as e:
                # One malformed item must not abort the whole page.
                logger.error(f"解析视频数据失败: {e}")
                continue

        logger.info(f"获取到 {len(videos)} 个热门视频 (地区: {region_code})")
        return videos
        
    async def _parse_video_item(self, item: Dict[str, Any], region_code: str) -> Optional[Dict[str, Any]]:
        """Normalize one raw videos.list item into the spider's video schema.

        Args:
            item: Raw item from a YouTube Data API videos.list response.
            region_code: Region the chart was fetched for.

        Returns:
            Normalized video dict, or None when parsing fails.
        """
        try:
            snippet = item.get('snippet', {})
            stats = item.get('statistics', {})
            details = item.get('contentDetails', {})
            status_info = item.get('status', {})

            # Hoist the numeric stats so engagement ratios reuse them below.
            views = int(stats.get('viewCount', 0))
            likes = int(stats.get('likeCount', 0))
            comments = int(stats.get('commentCount', 0))
            category_id = snippet.get('categoryId', '')

            return {
                "video_id": item.get('id'),
                "title": snippet.get('title', ''),
                "description": snippet.get('description', ''),
                "channel_id": snippet.get('channelId', ''),
                "channel_title": snippet.get('channelTitle', ''),
                "published_at": self._parse_datetime(snippet.get('publishedAt')),
                "duration": details.get('duration', ''),
                "view_count": views,
                "like_count": likes,
                "comment_count": comments,
                "category_id": category_id,
                "category_name": self.category_mapping.get(category_id, 'Unknown'),
                "tags": snippet.get('tags', []),
                "default_language": snippet.get('defaultAudioLanguage') or snippet.get('defaultLanguage'),
                "region_code": region_code,
                "thumbnails": snippet.get('thumbnails', {}),
                "upload_status": status_info.get('uploadStatus', ''),
                "privacy_status": status_info.get('privacyStatus', ''),
                "made_for_kids": status_info.get('madeForKids', False),
                "comments": [],  # filled later by get_video_comments
                "captions": [],  # filled later by get_video_captions
                "crawled_at": datetime.utcnow(),
                "platform": "youtube",
                # Engagement ratios; plain 0 when the view count is zero.
                "like_rate": likes / views if views > 0 else 0,
                "comment_rate": comments / views if views > 0 else 0,
            }

        except Exception as e:
            logger.error(f"解析视频数据失败: {e}")
            return None
            
    async def get_video_comments(self, video_id: str, max_results: int = 20) -> List[Dict[str, Any]]:
        """Fetch top-level comment threads (with a few replies) for a video.

        Fix: quota was billed for the raw ``max_results`` while the request
        itself was capped at 100 — both now use the same capped page size.

        Args:
            video_id: Target video id.
            max_results: Maximum number of comment threads (one page, capped at 100).

        Returns:
            List of normalized comment dicts (possibly empty).
        """
        # commentThreads.list serves at most 100 threads per page.
        page_size = min(max_results, 100)
        params = {
            'part': 'snippet,replies',
            'videoId': video_id,
            'order': 'relevance',  # relevance ordering approximates "hottest"
            'maxResults': page_size,
            'textFormat': 'plainText'
        }

        response = await self._api_request(
            self.COMMENTS_ENDPOINT,
            params,
            'comments',
            page_size
        )

        if not response or 'items' not in response:
            logger.debug(f"获取评论失败或无评论: {video_id}")
            return []

        comments = []
        for item in response['items']:
            try:
                comment_data = self._parse_comment_item(item)
                if comment_data:
                    comments.append(comment_data)
            except Exception as e:
                # Skip malformed threads instead of aborting the page.
                logger.error(f"解析评论失败: {e}")
                continue

        logger.debug(f"获取到 {len(comments)} 条评论，视频: {video_id}")
        return comments
        
    def _parse_comment_item(self, item: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Normalize one commentThread item into the spider's comment schema.

        Args:
            item: Raw item from a commentThreads.list response.

        Returns:
            Normalized comment dict with up to 5 replies, or None on failure.
        """
        try:
            thread = item.get('snippet', {})
            top = thread.get('topLevelComment', {}).get('snippet', {})

            comment_data = {
                "comment_id": item.get('id'),
                "text": top.get('textDisplay', ''),
                "author": self._anonymize_author(top.get('authorDisplayName', '')),
                "like_count": int(top.get('likeCount', 0)),
                "published_at": self._parse_datetime(top.get('publishedAt')),
                "updated_at": self._parse_datetime(top.get('updatedAt')),
                "reply_count": int(thread.get('totalReplyCount', 0)),
                "replies": []
            }

            # Attach at most five replies when the thread has any.
            if 'replies' in item and thread.get('totalReplyCount', 0) > 0:
                for reply in item['replies'].get('comments', [])[:5]:
                    reply_body = reply.get('snippet', {})
                    comment_data['replies'].append({
                        "comment_id": reply.get('id'),
                        "text": reply_body.get('textDisplay', ''),
                        "author": self._anonymize_author(reply_body.get('authorDisplayName', '')),
                        "like_count": int(reply_body.get('likeCount', 0)),
                        "published_at": self._parse_datetime(reply_body.get('publishedAt'))
                    })

            return comment_data

        except Exception as e:
            logger.error(f"解析评论数据失败: {e}")
            return None
            
    async def get_video_captions(self, video_id: str) -> List[Dict[str, Any]]:
        """List caption-track metadata for a video.

        Args:
            video_id: Target video id.

        Returns:
            Caption-track metadata dicts (possibly empty).
        """
        response = await self._api_request(
            self.CAPTIONS_ENDPOINT,
            {'part': 'snippet', 'videoId': video_id},
            'captions'
        )

        if not response or 'items' not in response:
            logger.debug(f"获取字幕列表失败或无字幕: {video_id}")
            return []

        captions = []
        for item in response['items']:
            try:
                meta = item.get('snippet', {})
                captions.append({
                    "caption_id": item.get('id'),
                    "language": meta.get('language', ''),
                    "name": meta.get('name', ''),
                    "track_kind": meta.get('trackKind', ''),
                    "is_auto_synced": meta.get('isAutoSynced', False),
                    "is_large": meta.get('isLarge', False),
                    "is_easy_reader": meta.get('isEasyReader', False),
                    "is_draft": meta.get('isDraft', False),
                    "failure_reason": meta.get('failureReason', '')
                })
            except Exception as e:
                # Keep going; one bad track must not hide the others.
                logger.error(f"解析字幕数据失败: {e}")
                continue

        logger.debug(f"获取到 {len(captions)} 个字幕轨道，视频: {video_id}")
        return captions
        
    async def get_channel_info(self, channel_id: str) -> Optional[Dict[str, Any]]:
        """Fetch snippet, statistics, and branding info for a channel.

        Args:
            channel_id: Target channel id.

        Returns:
            Normalized channel dict, or None when the lookup or parsing fails.
        """
        response = await self._api_request(
            self.CHANNELS_ENDPOINT,
            {'part': 'snippet,statistics,brandingSettings', 'id': channel_id},
            'channels'
        )

        # Covers missing response, missing key, and an empty item list alike.
        if not response or not response.get('items'):
            logger.debug(f"获取频道信息失败: {channel_id}")
            return None

        try:
            entry = response['items'][0]
            snippet = entry.get('snippet', {})
            stats = entry.get('statistics', {})
            branding = entry.get('brandingSettings', {}).get('channel', {})

            return {
                "channel_id": channel_id,
                "title": snippet.get('title', ''),
                "description": snippet.get('description', ''),
                "published_at": self._parse_datetime(snippet.get('publishedAt')),
                "country": snippet.get('country', ''),
                "view_count": int(stats.get('viewCount', 0)),
                "subscriber_count": int(stats.get('subscriberCount', 0)),
                "video_count": int(stats.get('videoCount', 0)),
                "thumbnails": snippet.get('thumbnails', {}),
                "keywords": branding.get('keywords', ''),
                "custom_url": snippet.get('customUrl', ''),
                "crawled_at": datetime.utcnow()
            }

        except Exception as e:
            logger.error(f"解析频道数据失败: {e}")
            return None
            
    async def crawl_popular_videos_by_regions(self, region_codes: Optional[List[str]] = None,
                                            categories: Optional[List[str]] = None,
                                            max_videos_per_region: int = 50,
                                            include_comments: bool = True,
                                            include_captions: bool = False) -> List[Dict[str, Any]]:
        """Crawl trending videos per region, optionally enriched with comments/captions.

        Fixes: the per-category page size is floored at 1 — the previous
        integer division could request 0 items whenever more categories than
        ``max_videos_per_region`` were supplied — and the implicit-Optional
        annotations are now explicit.

        Args:
            region_codes: Region codes to crawl (default: first 3 configured regions).
            categories: Optional category-id filters.
            max_videos_per_region: Maximum videos fetched per region.
            include_comments: Whether to fetch comments for a bounded prefix.
            include_captions: Whether to fetch caption metadata (quota-heavy).

        Returns:
            De-duplicated list of crawled video dicts.
        """
        if not region_codes:
            region_codes = self.region_codes[:3]  # default to 3 regions to conserve quota

        all_videos: List[Dict[str, Any]] = []

        for region_code in region_codes:
            logger.info(f"开始爬取地区: {region_code}")

            if categories:
                # Floor at 1 so many categories never degenerate into 0-item requests.
                per_category = max(1, max_videos_per_region // len(categories))
                for category_id in categories:
                    videos = await self.get_popular_videos(
                        region_code=region_code,
                        category_id=category_id,
                        max_results=per_category
                    )
                    all_videos.extend(videos)
            else:
                videos = await self.get_popular_videos(
                    region_code=region_code,
                    max_results=max_videos_per_region
                )
                all_videos.extend(videos)

        # De-duplicate by video_id, preserving first-seen order.
        processed_video_ids: Set[str] = set()
        unique_videos: List[Dict[str, Any]] = []
        for video in all_videos:
            video_id = video.get('video_id')
            if video_id and video_id not in processed_video_ids:
                processed_video_ids.add(video_id)
                unique_videos.append(video)

        logger.info(f"去重后获得 {len(unique_videos)} 个唯一视频")

        # Enrich only a bounded prefix to cap quota spend.
        for i, video in enumerate(unique_videos):
            video_id = video.get('video_id')
            if not video_id:
                continue

            if include_comments and i < 20:  # comments capped at 20 videos
                video['comments'] = await self.get_video_comments(video_id, max_results=10)

            if include_captions and i < 10:  # captions.list is expensive; tighter cap
                video['captions'] = await self.get_video_captions(video_id)

            # Brief pause every 10 videos to avoid request bursts.
            if (i + 1) % 10 == 0:
                await asyncio.sleep(1)
                logger.info(f"已处理 {i + 1}/{len(unique_videos)} 个视频")

        return unique_videos
        
    async def parse(self, html: str, url: str = None) -> List[Dict[str, Any]]:
        """BaseSpider hook; unused here because data comes from the Data API.

        Args:
            html: Ignored.
            url: Ignored.

        Returns:
            Always an empty list.
        """
        logger.warning("parse方法在YouTube爬虫中不使用，请使用相应的API方法")
        return []
        
    async def store(self, data: List[Dict[str, Any]]) -> bool:
        """Validate and fingerprint video records ahead of MongoDB storage.

        Args:
            data: List of normalized video dicts.

        Returns:
            True when data was empty or at least one record passed validation.
        """
        if not data:
            return True

        try:
            # Actual persistence happens in the MongoDB pipeline; this method
            # only validates records and attaches a dedup fingerprint.
            logger.info(f"准备存储 {len(data)} 个视频到MongoDB")

            valid_data = []
            for video in data:
                if not self._validate_video_data(video):
                    continue
                # Content hash keyed on id / title / publish time, for dedup.
                fingerprint = f"{video['video_id']}_{video['title']}_{video['published_at']}"
                video['content_hash'] = self.generate_hash(fingerprint)
                valid_data.append(video)

            logger.info(f"验证后有效数据: {len(valid_data)} 个")
            return len(valid_data) > 0

        except Exception as e:
            logger.error(f"存储数据失败: {e}")
            return False
            
    def _validate_video_data(self, video: Dict[str, Any]) -> bool:
        """Check that a video dict carries all fields required for storage.

        Args:
            video: Normalized video dict.

        Returns:
            True when every required field is present and truthy.
        """
        # Report only the first missing field, mirroring a short-circuit scan.
        missing = next(
            (field for field in ('video_id', 'title', 'channel_id', 'published_at')
             if not video.get(field)),
            None,
        )
        if missing is not None:
            logger.warning(f"视频数据缺少必需字段: {missing}")
            return False
        return True
        
    def _parse_datetime(self, datetime_str: str) -> Optional[datetime]:
        """Parse an ISO-8601 timestamp as returned by the YouTube API.

        Args:
            datetime_str: Timestamp string, e.g. "2023-12-01T10:30:00Z".

        Returns:
            A datetime, or None when the input is empty or unparseable.
        """
        if not datetime_str:
            return None

        try:
            # fromisoformat (pre-3.11) rejects a trailing "Z"; spell it out
            # as an explicit UTC offset first.
            normalized = datetime_str.replace('Z', '+00:00')
            return datetime.fromisoformat(normalized)
        except Exception as e:
            logger.warning(f"日期时间解析失败: {datetime_str}, 错误: {e}")
            return None
            
    def _anonymize_author(self, author_name: str) -> str:
        """Replace an author display name with a stable pseudonym.

        Args:
            author_name: Original display name (may be empty).

        Returns:
            "User_<8-char hash prefix>", or "Anonymous" for empty names.
        """
        if not author_name:
            return "Anonymous"
        # Same name always hashes to the same pseudonym, so authors stay
        # distinguishable without storing PII.
        return "User_" + self.generate_hash(author_name)[:8]
        
    async def get_quota_status(self) -> Dict[str, Any]:
        """Report current daily API-quota consumption.

        Returns:
            Dict with current usage, daily limit, remaining units, usage
            percentage, and the date of the last reset ("Never" if none).
        """
        if not self.redis_client:
            await self.start()

        raw_usage = await self.redis_client.get(self.quota_key)
        used = int(raw_usage) if raw_usage else 0

        raw_reset = await self.redis_client.get(self.last_reset_key)

        return {
            "current_usage": used,
            "daily_limit": self.DAILY_QUOTA_LIMIT,
            "remaining": self.DAILY_QUOTA_LIMIT - used,
            "usage_percentage": (used / self.DAILY_QUOTA_LIMIT) * 100,
            "last_reset": raw_reset.decode() if raw_reset else "Never"
        }