"""Reddit热门爬虫实现"""
import asyncio
import hashlib
import json
import logging
from datetime import datetime, timedelta
from typing import Dict, Any, List, Optional
import base64
from urllib.parse import urljoin

import aiohttp
from aiohttp import ClientSession, ClientTimeout

from .base_spider import BaseSpider

logger = logging.getLogger(__name__)


class RedditHotSpider(BaseSpider):
    """Spider for Reddit hot topics via the official OAuth2 JSON API."""
    
    # Reddit API hosts: authenticated API calls go to oauth.reddit.com;
    # token issuance and public permalinks use www.reddit.com.
    REDDIT_BASE_URL = "https://oauth.reddit.com"
    REDDIT_WWW_URL = "https://www.reddit.com"
    TOKEN_URL = "https://www.reddit.com/api/v1/access_token"
    
    # Endpoint templates, filled in with .format(subreddit=..., post_id=...).
    SUBREDDIT_HOT_API = "/r/{subreddit}/hot"
    SUBREDDIT_NEW_API = "/r/{subreddit}/new"
    SUBREDDIT_RISING_API = "/r/{subreddit}/rising"
    SUBREDDIT_TOP_API = "/r/{subreddit}/top"
    COMMENTS_API = "/r/{subreddit}/comments/{post_id}"
    POPULAR_API = "/r/popular/hot"
    ALL_API = "/r/all/hot"
    
    # Client-side throttling configuration.
    RATE_LIMIT = 1000  # minimum gap between requests, in ms (Reddit allows ~60 req/min)
    MAX_CONCURRENT_REQUESTS = 3
    MAX_COMMENT_DEPTH = 5  # maximum comment-tree recursion depth
    
    # Subreddits crawled when the caller does not supply a list.
    DEFAULT_SUBREDDITS = [
        'all', 'popular', 'technology', 'worldnews', 'science',
        'programming', 'gaming', 'news', 'todayilearned', 'askreddit'
    ]
    
    # NSFW filter levels: level name -> predicate(post) returning True to keep.
    # NOTE(review): 'standard' is described as "record but do not filter" and,
    # like 'loose', currently keeps everything — only 'strict' actually drops posts.
    NSFW_FILTER_LEVELS = {
        'strict': lambda post: not post.get('over_18', False),
        'standard': lambda post: True,  # record but do not filter
        'loose': lambda post: True
    }
    
    def __init__(self, client_id: str = None, client_secret: str = None,
                 user_agent: str = None, mongodb_client=None, redis_client=None,
                 nsfw_filter_level: str = 'strict'):
        """Initialize the Reddit spider.

        Args:
            client_id: Reddit application client ID.
            client_secret: Reddit application client secret.
            user_agent: User-Agent string sent with every request.
            mongodb_client: MongoDB client used for persistence.
            redis_client: Redis client (rate limiting / deduplication).
            nsfw_filter_level: NSFW filtering level (strict/standard/loose).
        """
        super().__init__(name="RedditHotSpider")

        # Credentials and external service handles.
        self.client_id = client_id
        self.client_secret = client_secret
        self.mongodb_client = mongodb_client
        self.redis_client = redis_client
        self.user_agent = user_agent or "python:aipaper:v1.0 (by /u/aipaper_bot)"

        # Resolve the NSFW predicate; unknown level names fall back to 'strict'.
        self.nsfw_filter_level = nsfw_filter_level
        strict_filter = self.NSFW_FILTER_LEVELS['strict']
        self.nsfw_filter = self.NSFW_FILTER_LEVELS.get(nsfw_filter_level, strict_filter)

        # OAuth token state and client-side throttling primitives.
        self.access_token = None
        self.token_expires_at = None
        self.last_request_time = 0
        self.request_semaphore = asyncio.Semaphore(self.MAX_CONCURRENT_REQUESTS)

        # Advertise our User-Agent on the shared request headers.
        self.headers.update({'User-Agent': self.user_agent})
    
    async def authenticate(self) -> bool:
        """Obtain (or reuse) a Reddit OAuth2 access token.

        Uses the client-credentials ("application only") grant against
        TOKEN_URL. A cached token is reused until shortly before expiry.

        Returns:
            True on success; False when credentials are missing or the
            token request fails.
        """
        if not (self.client_id and self.client_secret):
            logger.error("Reddit API凭证未配置")
            return False

        # Reuse the cached token while it is still valid.
        if (self.access_token and self.token_expires_at
                and datetime.now() < self.token_expires_at):
            return True

        # Request a fresh token; the token endpoint authenticates the app
        # with HTTP Basic auth built from client_id:client_secret.
        credentials = base64.b64encode(
            f"{self.client_id}:{self.client_secret}".encode('ascii')
        ).decode('ascii')

        request_headers = {
            'Authorization': f'Basic {credentials}',
            'User-Agent': self.user_agent,
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        form = {
            'grant_type': 'client_credentials',
            'device_id': 'DO_NOT_TRACK_THIS_DEVICE'
        }

        try:
            if not self.session:
                await self.start()

            async with self.session.post(self.TOKEN_URL, headers=request_headers, data=form) as response:
                if response.status != 200:
                    logger.error(f"Reddit认证失败: {response.status}")
                    return False

                payload = await response.json()
                self.access_token = payload.get('access_token')
                # Refresh one minute early so we never use a nearly-expired token.
                lifetime = payload.get('expires_in', 3600)
                self.token_expires_at = datetime.now() + timedelta(seconds=lifetime - 60)

                # Subsequent API calls carry the bearer token.
                self.headers['Authorization'] = f'Bearer {self.access_token}'
                logger.info("Reddit认证成功")
                return True

        except Exception as e:
            logger.error(f"Reddit认证异常: {e}")
            return False
    
    async def fetch_with_rate_limit(self, url: str, **kwargs) -> Optional[Dict]:
        """GET a Reddit API URL with concurrency and rate limiting.

        At most MAX_CONCURRENT_REQUESTS requests run concurrently, and
        consecutive requests are spaced at least RATE_LIMIT milliseconds
        apart. On a 401 the cached token is discarded, authentication is
        retried once, and the request is replayed.

        Args:
            url: Target URL (an oauth.reddit.com endpoint).
            **kwargs: Extra arguments forwarded to ``session.get`` (e.g. params).

        Returns:
            Parsed JSON body, or None on any failure. NOTE: the comments
            endpoint actually returns a JSON list rather than a dict.
        """
        async with self.request_semaphore:
            # Throttle: keep at least RATE_LIMIT ms between consecutive requests.
            # get_running_loop() replaces the deprecated get_event_loop() call
            # inside a coroutine.
            loop = asyncio.get_running_loop()
            elapsed_ms = (loop.time() - self.last_request_time) * 1000
            if elapsed_ms < self.RATE_LIMIT:
                await asyncio.sleep((self.RATE_LIMIT - elapsed_ms) / 1000)

            self.last_request_time = loop.time()

            # Make sure we hold a valid bearer token before calling the API.
            if not await self.authenticate():
                return None

            try:
                if not self.session:
                    await self.start()

                async with self.session.get(url, headers=self.headers, **kwargs) as response:
                    if response.status == 200:
                        return await response.json()

                    if response.status == 401:
                        # Token expired: force re-authentication and replay once.
                        self.access_token = None
                        if await self.authenticate():
                            async with self.session.get(url, headers=self.headers, **kwargs) as retry_response:
                                if retry_response.status == 200:
                                    return await retry_response.json()
                                # Previously this path fell through silently.
                                logger.warning(f"Reddit API请求失败 {url}: {retry_response.status}")
                        return None

                    logger.warning(f"Reddit API请求失败 {url}: {response.status}")
                    return None

            except Exception as e:
                logger.error(f"Reddit API请求异常 {url}: {e}")
                return None
    
    async def fetch_subreddit_posts(self, subreddit: str, sort: str = 'hot',
                                   time_filter: str = 'day', limit: int = 50) -> List[Dict[str, Any]]:
        """Fetch and parse posts from one subreddit listing.

        Args:
            subreddit: Subreddit name without the "r/" prefix.
            sort: Listing order: 'hot', 'new', 'rising' or 'top'
                (unrecognized values fall back to 'hot').
            time_filter: Time window for the 'top' listing
                (hour/day/week/month/year/all); ignored for other sorts.
            limit: Maximum number of posts to request.

        Returns:
            Parsed post dicts (filtered per the configured NSFW level,
            author names anonymized by hashing).
        """
        # Table-driven endpoint selection instead of an if/elif ladder;
        # unknown sort values fall back to the hot listing, as before.
        endpoint_by_sort = {
            'hot': self.SUBREDDIT_HOT_API,
            'new': self.SUBREDDIT_NEW_API,
            'rising': self.SUBREDDIT_RISING_API,
            'top': self.SUBREDDIT_TOP_API,
        }
        endpoint = endpoint_by_sort.get(sort, self.SUBREDDIT_HOT_API).format(subreddit=subreddit)
        url = urljoin(self.REDDIT_BASE_URL, endpoint)

        params = {
            'limit': limit,
            'raw_json': 1  # request raw JSON rather than HTML-escaped text
        }
        if sort == 'top':
            params['t'] = time_filter  # 't' only applies to the 'top' listing

        response_data = await self.fetch_with_rate_limit(url, params=params)
        if not response_data:
            return []

        posts = []
        for child in response_data.get('data', {}).get('children', []):
            post_data = child.get('data', {})

            # Drop posts rejected by the configured NSFW predicate.
            if not self.nsfw_filter(post_data):
                logger.info(f"过滤NSFW内容: {post_data.get('title')}")
                continue

            posts.append({
                'post_id': post_data.get('id'),
                'subreddit': post_data.get('subreddit'),
                'title': post_data.get('title'),
                'author': self.generate_hash(post_data.get('author', 'deleted')),  # anonymized
                'content': post_data.get('selftext', ''),
                'url': post_data.get('url'),
                'permalink': f"{self.REDDIT_WWW_URL}{post_data.get('permalink')}",
                'score': post_data.get('score', 0),
                'upvote_ratio': post_data.get('upvote_ratio', 0),
                'num_comments': post_data.get('num_comments', 0),
                # NOTE(review): naive local-time datetime built from a UTC epoch;
                # consider a tz-aware datetime if consumers compare across hosts.
                'created_utc': datetime.fromtimestamp(post_data.get('created_utc', 0)),
                'is_nsfw': post_data.get('over_18', False),
                'is_video': post_data.get('is_video', False),
                'is_self': post_data.get('is_self', False),  # text ("self") post?
                'flair': post_data.get('link_flair_text'),
                'awards': len(post_data.get('all_awardings', [])),
                'is_crosspost': 'crosspost_parent_list' in post_data,
                'content_type': self._get_content_type(post_data),
                'thumbnail': post_data.get('thumbnail'),
                'preview_images': self._extract_preview_images(post_data),
                'hot_score': self._calculate_hot_score(post_data),
                'is_controversial': self._is_controversial(post_data)
            })

        return posts
    
    async def fetch_post_comments(self, subreddit: str, post_id: str,
                                 sort: str = 'best', limit: int = 20) -> List[Dict[str, Any]]:
        """Fetch and parse the comment tree of one post.

        Args:
            subreddit: Subreddit name.
            post_id: Reddit post id.
            sort: Comment ordering (best/top/new/controversial).
            limit: Maximum number of top-level comments.

        Returns:
            Parsed top-level comments, each carrying nested replies.
        """
        url = urljoin(
            self.REDDIT_BASE_URL,
            self.COMMENTS_API.format(subreddit=subreddit, post_id=post_id)
        )
        query = {
            'sort': sort,
            'limit': limit,
            'raw_json': 1,
            'depth': self.MAX_COMMENT_DEPTH
        }

        payload = await self.fetch_with_rate_limit(url, params=query)
        # The comments endpoint returns a two-element list:
        # [post listing, comment listing].
        if not payload or len(payload) < 2:
            return []

        children = payload[1].get('data', {}).get('children', [])

        # kind 't1' marks a comment node; anything else (e.g. 'more') is skipped,
        # and _parse_comment may return None for deleted/removed comments.
        parsed = (
            self._parse_comment(node.get('data', {}))
            for node in children if node.get('kind') == 't1'
        )
        return [comment for comment in parsed if comment]
    
    def _parse_comment(self, comment_data: Dict, depth: int = 0) -> Optional[Dict[str, Any]]:
        """Recursively parse one comment node and its reply subtree.

        Args:
            comment_data: Raw comment payload (the "data" of a t1 node).
            depth: Current nesting depth (0 for top-level comments).

        Returns:
            Parsed comment dict, or None for deleted/removed comments and
            for nodes at or beyond MAX_COMMENT_DEPTH.
        """
        if depth >= self.MAX_COMMENT_DEPTH:
            return None

        if comment_data.get('body') in ('[deleted]', '[removed]'):
            return None

        parsed = {
            'comment_id': comment_data.get('id'),
            'author': self.generate_hash(comment_data.get('author', 'deleted')),  # anonymized
            'body': comment_data.get('body', ''),
            'score': comment_data.get('score', 0),
            'created_utc': datetime.fromtimestamp(comment_data.get('created_utc', 0)),
            'is_submitter': comment_data.get('is_submitter', False),
            'depth': depth,
            'replies': []
        }

        # "replies" is a nested listing dict when present, or "" when absent.
        nested = comment_data.get('replies')
        if isinstance(nested, dict):
            candidates = (
                self._parse_comment(child.get('data', {}), depth + 1)
                for child in nested.get('data', {}).get('children', [])
                if child.get('kind') == 't1'
            )
            parsed['replies'] = [reply for reply in candidates if reply]

        return parsed
    
    def _get_content_type(self, post_data: Dict) -> str:
        """判断内容类型
        
        Args:
            post_data: 帖子数据
            
        Returns:
            内容类型
        """
        if post_data.get('is_self'):
            return 'text'
        elif post_data.get('is_video'):
            return 'video'
        elif post_data.get('post_hint') == 'image':
            return 'image'
        elif post_data.get('post_hint') == 'link':
            return 'link'
        elif 'gallery_data' in post_data:
            return 'gallery'
        else:
            return 'other'
    
    def _extract_preview_images(self, post_data: Dict) -> List[str]:
        """提取预览图片
        
        Args:
            post_data: 帖子数据
            
        Returns:
            图片URL列表
        """
        images = []
        preview = post_data.get('preview', {})
        
        if preview and 'images' in preview:
            for img in preview['images']:
                if 'source' in img:
                    # Reddit图片URL需要HTML解码
                    url = img['source'].get('url', '').replace('&amp;', '&')
                    if url:
                        images.append(url)
        
        return images[:5]  # 最多返回5张图片
    
    def _calculate_hot_score(self, post_data: Dict) -> float:
        """计算热度得分（基于Reddit热度算法）
        
        Args:
            post_data: 帖子数据
            
        Returns:
            热度得分
        """
        score = post_data.get('score', 0)
        num_comments = post_data.get('num_comments', 0)
        created_utc = post_data.get('created_utc', 0)
        
        # 时间衰减因子（小时）
        hours_old = (datetime.now().timestamp() - created_utc) / 3600
        time_decay = max(1, hours_old ** 1.5)
        
        # 综合得分
        hot_score = (score + num_comments * 2) / time_decay
        
        # 奖项加成
        awards_count = len(post_data.get('all_awardings', []))
        hot_score *= (1 + awards_count * 0.1)
        
        return round(hot_score, 2)
    
    def _is_controversial(self, post_data: Dict) -> bool:
        """判断是否为争议内容
        
        Args:
            post_data: 帖子数据
            
        Returns:
            是否争议
        """
        upvote_ratio = post_data.get('upvote_ratio', 0)
        score = post_data.get('score', 0)
        num_comments = post_data.get('num_comments', 0)
        
        # 高投票量但支持率接近50%，或评论数远超点赞数
        is_controversial = (
            (score > 100 and 0.45 <= upvote_ratio <= 0.55) or
            (num_comments > score * 3 and score > 50)
        )
        
        return is_controversial
    
    async def aggregate_cross_subreddit(self, subreddits: List[str] = None,
                                       sort: str = 'hot', limit_per_sub: int = 20) -> List[Dict[str, Any]]:
        """Aggregate posts across several subreddits, deduplicated and ranked.

        Args:
            subreddits: Subreddits to fetch; defaults to DEFAULT_SUBREDDITS.
            sort: Listing order forwarded to fetch_subreddit_posts.
            limit_per_sub: Post cap per subreddit.

        Returns:
            Unique posts sorted by hot_score, highest first.
        """
        targets = subreddits or self.DEFAULT_SUBREDDITS

        # Fetch all subreddits concurrently; per-task exceptions are captured
        # instead of cancelling the whole batch.
        results = await asyncio.gather(
            *(self.fetch_subreddit_posts(name, sort, limit=limit_per_sub) for name in targets),
            return_exceptions=True
        )

        # Deduplicate by post_id, keeping the first occurrence — r/all and
        # r/popular overlap heavily with the individual subreddits.
        unique = {}
        for name, outcome in zip(targets, results):
            if isinstance(outcome, Exception):
                logger.error(f"获取 r/{name} 失败: {outcome}")
                continue
            for post in outcome:
                unique.setdefault(post['post_id'], post)

        # Rank by hot score, highest first (sorted() is stable, matching the
        # original in-place sort on ties).
        return sorted(unique.values(), key=lambda post: post['hot_score'], reverse=True)
    
    async def parse(self, html: str, url: str = None) -> List[Dict[str, Any]]:
        """解析数据（实现基类抽象方法）
        
        Args:
            html: HTML内容（Reddit使用API，此参数未使用）
            url: 页面URL
            
        Returns:
            解析后的数据列表
        """
        # Reddit使用API而非HTML解析，此方法仅为兼容基类
        return []
    
    async def crawl_reddit_hot(self, include_comments: bool = True) -> List[Dict[str, Any]]:
        """Main entry point: aggregate hot posts, enrich with comments, store.

        Args:
            include_comments: Whether to fetch comment trees (only for the
                top 10 posts, to stay within the API rate budget).

        Returns:
            Storage-ready documents; also written to MongoDB when a client
            is configured.
        """
        logger.info("开始爬取Reddit热门内容")

        posts = await self.aggregate_cross_subreddit()
        if not posts:
            logger.warning("未获取到Reddit数据")
            return []

        # Enrich only the highest-ranked posts with comments to limit request volume.
        if include_comments:
            for post in posts[:10]:
                try:
                    post['comments'] = await self.fetch_post_comments(
                        post['subreddit'],
                        post['post_id'],
                        limit=10
                    )
                except Exception as e:
                    logger.error(f"获取评论失败 {post['post_id']}: {e}")
                    post['comments'] = []

        # Take one timestamp for the whole batch so fetch_time, update_time
        # and expire_at are mutually consistent (previously each field read
        # datetime.now() separately, yielding slightly different values).
        now = datetime.now()
        fetch_time = now.isoformat()
        expire_at = (now + timedelta(days=7)).isoformat()

        # Normalize into the shared hot-topics document schema.
        formatted_data = []
        for rank, post in enumerate(posts, start=1):
            formatted_data.append({
                'url_hash': self.generate_hash(post['permalink']),
                'title': post['title'],
                'url': post['permalink'],
                'rank': rank,
                'category': post.get('flair') or post['subreddit'],
                'source': 'reddit',
                'platform': 'reddit',
                'content': {
                    'post_id': post['post_id'],
                    'subreddit': post['subreddit'],
                    'body': post.get('content', ''),
                    'author_hash': post['author'],
                    'score': post['score'],
                    'upvote_ratio': post['upvote_ratio'],
                    'num_comments': post['num_comments'],
                    'content_type': post['content_type'],
                    'is_nsfw': post['is_nsfw'],
                    'is_controversial': post['is_controversial'],
                    'hot_score': post['hot_score'],
                    'awards': post['awards'],
                    'preview_images': post.get('preview_images', []),
                    'comments': post.get('comments', []),
                    'metadata': {
                        'is_video': post['is_video'],
                        'is_self': post['is_self'],
                        'is_crosspost': post['is_crosspost'],
                        'created_utc': post['created_utc'].isoformat()
                    }
                },
                'fetch_time': fetch_time,
                'update_time': fetch_time,
                'expire_at': expire_at
            })

        if self.mongodb_client:
            await self.store(formatted_data)

        logger.info(f"成功爬取Reddit热门内容 {len(formatted_data)} 条")
        return formatted_data
    
    async def store(self, data: List[Dict[str, Any]]) -> bool:
        """Persist documents into the ``hot_topics`` MongoDB collection.

        Each item is upserted by its ``url_hash``, so re-crawling the same
        post updates the existing document instead of duplicating it.

        Args:
            data: Storage-ready documents from crawl_reddit_hot.

        Returns:
            True when the writes completed, False on missing client, empty
            input, or any storage error.
        """
        if not self.mongodb_client:
            logger.warning("MongoDB客户端未配置，跳过存储")
            return False

        try:
            collection = self.mongodb_client.get_collection('hot_topics')
            if not data:
                return False

            # BUG FIX: the previous code passed plain dicts to bulk_write(),
            # but PyMongo/Motor require operation objects (pymongo.UpdateOne),
            # so every call raised TypeError. Upserting one-by-one keeps the
            # same semantics without importing pymongo here.
            # NOTE(review): assumes get_collection returns an async (Motor-style)
            # collection — update_one is awaited; confirm against the client wrapper.
            inserted = 0
            updated = 0
            for item in data:
                result = await collection.update_one(
                    {'url_hash': item['url_hash']},
                    {'$set': item},
                    upsert=True
                )
                if result.upserted_id is not None:
                    inserted += 1
                elif result.modified_count:
                    updated += 1

            logger.info(f"MongoDB存储完成: 插入{inserted}条，更新{updated}条")
            return True

        except Exception as e:
            logger.error(f"MongoDB存储失败: {e}")

        return False


# Usage example
async def main():
    """Smoke-test the Reddit spider from the command line."""
    # Real Reddit API credentials must be substituted here.
    spider = RedditHotSpider(
        client_id='your_client_id',
        client_secret='your_client_secret',
        user_agent='python:aipaper:v1.0 (by /u/your_username)'
    )
    
    # NOTE(review): assumes BaseSpider implements the async context-manager
    # protocol (session setup/teardown) — confirm against base_spider.
    async with spider:
        data = await spider.crawl_reddit_hot()
        print(f"爬取到 {len(data)} 条Reddit热门内容")
        
        # Print a sample record
        if data:
            print(f"\n示例数据:")
            print(f"标题: {data[0]['title']}")
            print(f"热度得分: {data[0]['content']['hot_score']}")
            print(f"评论数: {data[0]['content']['num_comments']}")


if __name__ == '__main__':
    asyncio.run(main())