"""Twitter(X)热点爬虫实现"""
import asyncio
import hashlib
import json
import logging
import re
from datetime import datetime, timedelta, timezone
from typing import Dict, Any, List, Optional, Tuple
from collections import Counter
import base64
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.backends import default_backend

import aiohttp
from aiohttp import ClientTimeout

from .base_spider import BaseSpider

logger = logging.getLogger(__name__)


class TwitterHotSpider(BaseSpider):
    """Spider that collects trending topics and hot tweets from Twitter (X).

    Uses the Twitter API v2 recent-search endpoint (v2 has no direct
    trends endpoint) and approximates trends by counting hashtags in a
    sample of recent tweets. Results can be cached in Redis and analysis
    output persisted to MongoDB.
    """
    
    # Twitter API v2 endpoint configuration.
    TWITTER_API_BASE = "https://api.twitter.com/2"
    TWITTER_TRENDS_ENDPOINT = f"{TWITTER_API_BASE}/trends/place"
    TWITTER_SEARCH_ENDPOINT = f"{TWITTER_API_BASE}/tweets/search/recent"
    TWITTER_TWEETS_ENDPOINT = f"{TWITTER_API_BASE}/tweets"
    
    # Throttling configuration (milliseconds between requests).
    RATE_LIMIT = 60000  # 60-second interval (hourly refresh cadence)
    MAX_CONCURRENT_REQUESTS = 5  # bound on simultaneous API calls
    
    # Engagement thresholds used to classify a tweet as "hot".
    DEFAULT_MIN_LIKES = 100
    DEFAULT_MIN_RETWEETS = 50
    
    def __init__(self, mongodb_client=None, redis_client=None, api_key: str = None):
        """Create a Twitter spider.

        Args:
            mongodb_client: MongoDB client used to persist analysis results.
            redis_client: Redis client used for rate limiting and caching.
            api_key: Twitter API Bearer Token.
        """
        super().__init__(name="TwitterHotSpider")

        # External service handles.
        self.mongodb_client = mongodb_client
        self.redis_client = redis_client
        self.api_key = api_key

        # Throttling state: timestamp (ms) of the last request plus a
        # semaphore bounding concurrent API calls.
        self.last_request_time = 0
        self.request_semaphore = asyncio.Semaphore(self.MAX_CONCURRENT_REQUESTS)

        # Content-moderation state (populated via load_sensitive_words /
        # add_nsfw_pattern).
        self.sensitive_words = set()
        self.nsfw_patterns = []

        # API usage bookkeeping.
        self.api_call_count = 0
        self.api_quota_remaining = 450  # Twitter API v2 rate limit

        # Attach auth headers immediately when a key was supplied.
        if api_key:
            self.headers.update({
                'Authorization': f'Bearer {api_key}',
                'Content-Type': 'application/json',
                'User-Agent': 'AI-Writing-Platform/1.0'
            })
    
    def set_api_key(self, api_key: str):
        """设置API密钥
        
        Args:
            api_key: Bearer Token
        """
        self.api_key = api_key
        self.headers['Authorization'] = f'Bearer {api_key}'
    
    @staticmethod
    def encrypt_api_key(api_key: str, password: str) -> str:
        """Encrypt an API key with a password-derived Fernet key.

        NOTE(review): the PBKDF2 salt is hard-coded, so identical passwords
        always derive identical keys — acceptable only as obfuscation-level
        protection. The Fernet token is itself base64, so the trailing
        b64encode is a second (redundant but harmless) encoding layer,
        kept for compatibility with decrypt_api_key.

        Args:
            api_key: Plaintext API key.
            password: Password used to derive the encryption key.

        Returns:
            Base64-encoded encrypted key.
        """
        # Derive a 32-byte Fernet key from the password via PBKDF2-SHA256.
        derived = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=b'aipaper_salt_2024',
            iterations=100000,
            backend=default_backend()
        ).derive(password.encode())
        token = Fernet(base64.urlsafe_b64encode(derived)).encrypt(api_key.encode())
        return base64.b64encode(token).decode()
    
    @staticmethod
    def decrypt_api_key(encrypted_key: str, password: str) -> str:
        """Decrypt a key previously produced by encrypt_api_key.

        Args:
            encrypted_key: Base64-encoded encrypted key.
            password: Password used to derive the decryption key.

        Returns:
            Plaintext API key.
        """
        # KDF parameters (including the salt) must mirror encrypt_api_key.
        derived = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=b'aipaper_salt_2024',
            iterations=100000,
            backend=default_backend()
        ).derive(password.encode())
        fernet = Fernet(base64.urlsafe_b64encode(derived))
        token = base64.b64decode(encrypted_key.encode())
        return fernet.decrypt(token).decode()
    
    async def verify_api_key(self) -> bool:
        """Check that the configured Bearer token is accepted by the API.

        Performs a lightweight GET against /users/me.

        Returns:
            True if the key is valid, False otherwise (missing key,
            401, unexpected status, or a network error).
        """
        if not self.api_key:
            logger.error("No API key provided")
            return False

        url = f"{self.TWITTER_API_BASE}/users/me"
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url, headers=self.headers) as response:
                    status = response.status
        except Exception as e:
            logger.error(f"Error verifying API key: {e}")
            return False

        if status == 200:
            logger.info("API key verified successfully")
            return True
        if status == 401:
            logger.error("Invalid API key")
            return False
        logger.error(f"API verification failed: {status}")
        return False
    
    async def _rate_limit_check(self):
        """检查并执行限流"""
        current_time = asyncio.get_event_loop().time() * 1000
        time_since_last = current_time - self.last_request_time
        
        if time_since_last < self.RATE_LIMIT:
            wait_time = (self.RATE_LIMIT - time_since_last) / 1000
            logger.info(f"Rate limiting: waiting {wait_time:.2f} seconds")
            await asyncio.sleep(wait_time)
        
        self.last_request_time = asyncio.get_event_loop().time() * 1000
    
    async def _exponential_backoff_retry(self, func, *args, max_retries: int = 3, **kwargs):
        """指数退避重试机制
        
        Args:
            func: 要执行的函数
            max_retries: 最大重试次数
            
        Returns:
            函数执行结果
        """
        for attempt in range(max_retries):
            try:
                return await func(*args, **kwargs)
            except aiohttp.ClientResponseError as e:
                if e.status == 429:  # Too Many Requests
                    wait_time = 2 ** attempt * 60  # 指数退避
                    logger.warning(f"Rate limit hit, waiting {wait_time} seconds")
                    await asyncio.sleep(wait_time)
                elif e.status == 503:  # Service Unavailable
                    wait_time = 2 ** attempt * 10
                    logger.warning(f"Service unavailable, retrying in {wait_time} seconds")
                    await asyncio.sleep(wait_time)
                else:
                    raise
            except Exception as e:
                if attempt == max_retries - 1:
                    raise
                logger.warning(f"Retry attempt {attempt + 1}: {e}")
                await asyncio.sleep(2 ** attempt)
        
        return None
    
    async def fetch_global_trends(self, woeid: int = 1) -> List[Dict[str, Any]]:
        """Fetch trending topics (global by default).

        Twitter API v2 has no direct trends endpoint, so trends are
        approximated by tallying hashtags over a recent-search sample.

        Args:
            woeid: Where On Earth ID (1 = worldwide). Kept for interface
                compatibility; the v2 search workaround does not use it.

        Returns:
            List of trend dicts (name, estimated volume, rank, url, ...),
            or cached trends if the live fetch fails and a cache exists.
        """
        await self._rate_limit_check()

        trends = []
        try:
            params = {
                'query': 'lang:en OR lang:zh -is:retweet',
                'max_results': 100,
                'sort_order': 'relevancy',
                'tweet.fields': 'created_at,public_metrics,lang,entities',
                'expansions': 'author_id'
            }

            # Bound concurrency and retry transient failures with backoff.
            async with self.request_semaphore:
                response = await self._exponential_backoff_retry(
                    self._make_api_request, self.TWITTER_SEARCH_ENDPOINT, params=params
                )

            if response and 'data' in response:
                # Tally hashtag occurrences across the sampled tweets.
                tag_counts = Counter()
                for tweet in response['data']:
                    if 'entities' in tweet and 'hashtags' in tweet['entities']:
                        for entry in tweet['entities']['hashtags']:
                            tag_counts[entry['tag']] += 1

                # Convert the most frequent tags into the trends format.
                for rank, (tag, freq) in enumerate(tag_counts.most_common(20), 1):
                    trends.append({
                        'name': f"#{tag}",
                        'tweet_volume': freq * 1000,  # Estimated volume
                        'rank': rank,
                        'url': f"https://twitter.com/search?q=%23{tag}",
                        'promoted': False,
                        'timestamp': datetime.now(timezone.utc).isoformat()
                    })

                logger.info(f"Fetched {len(trends)} trending topics")

        except Exception as e:
            logger.error(f"Error fetching trends: {e}")
            # Fall back to cached data when the live fetch fails.
            if self.redis_client:
                cached = await self._get_cached_trends()
                if cached:
                    return cached

        return trends
    
    async def fetch_hot_tweets(self, query: str = None, min_likes: int = None, 
                              min_retweets: int = None) -> List[Dict[str, Any]]:
        """Fetch hot tweets via the recent-search endpoint.

        A tweet qualifies as "hot" when it clears EITHER the like threshold
        OR the retweet threshold. Results are enriched with author, media
        and hashtag data, sorted by weighted engagement (likes + 2x
        retweets) and truncated to the top 50.

        Args:
            query: Search query; defaults to media tweets excluding retweets.
            min_likes: Minimum like count (defaults to DEFAULT_MIN_LIKES).
            min_retweets: Minimum retweet count (defaults to DEFAULT_MIN_RETWEETS).

        Returns:
            Up to 50 hot-tweet dicts, most engaged first (empty on error).
        """
        await self._rate_limit_check()
        
        # NOTE(review): `or` makes an explicit 0 fall back to the default
        # threshold; presumably intentional (0 would disable filtering).
        min_likes = min_likes or self.DEFAULT_MIN_LIKES
        min_retweets = min_retweets or self.DEFAULT_MIN_RETWEETS
        
        hot_tweets = []
        
        try:
            url = f"{self.TWITTER_SEARCH_ENDPOINT}"
            params = {
                'query': query or '-is:retweet has:images OR has:media',
                'max_results': 100,
                'sort_order': 'relevancy',
                'tweet.fields': 'created_at,public_metrics,lang,entities,possibly_sensitive',
                'expansions': 'author_id,attachments.media_keys',
                'media.fields': 'type,url,preview_image_url',
                'user.fields': 'name,username,verified,public_metrics'
            }
            
            # Bound concurrency and retry transient failures with backoff.
            async with self.request_semaphore:
                response = await self._exponential_backoff_retry(
                    self._make_api_request, url, params=params
                )
            
            if response and 'data' in response:
                tweets = response['data']
                # Index the expanded objects by id/key for O(1) joins below.
                users = {u['id']: u for u in response.get('includes', {}).get('users', [])}
                media = {m['media_key']: m for m in response.get('includes', {}).get('media', [])}
                
                for tweet in tweets:
                    metrics = tweet.get('public_metrics', {})
                    
                    # Filter by engagement thresholds (either one suffices).
                    if (metrics.get('like_count', 0) >= min_likes or 
                        metrics.get('retweet_count', 0) >= min_retweets):
                        
                        # Join the author from the expanded users index.
                        author = users.get(tweet.get('author_id'), {})
                        
                        # Resolve attached media keys against the media index.
                        media_items = []
                        if 'attachments' in tweet and 'media_keys' in tweet['attachments']:
                            for key in tweet['attachments']['media_keys']:
                                if key in media:
                                    media_items.append(media[key])
                        
                        hot_tweet = {
                            'id': tweet['id'],
                            'text': tweet['text'],
                            'created_at': tweet.get('created_at'),
                            'lang': tweet.get('lang'),
                            'author': {
                                'id': author.get('id'),
                                'name': author.get('name'),
                                'username': author.get('username'),
                                'verified': author.get('verified', False),
                                'followers_count': author.get('public_metrics', {}).get('followers_count', 0)
                            },
                            'metrics': metrics,
                            'engagement_rate': self._calculate_engagement_rate(metrics, author.get('public_metrics', {})),
                            'media': media_items,
                            'hashtags': self._extract_hashtags(tweet),
                            'possibly_sensitive': tweet.get('possibly_sensitive', False),
                            'url': f"https://twitter.com/{author.get('username')}/status/{tweet['id']}"
                        }
                        
                        hot_tweets.append(hot_tweet)
                
                # Sort by weighted engagement: retweets count double.
                hot_tweets.sort(key=lambda x: x['metrics'].get('like_count', 0) + 
                                             x['metrics'].get('retweet_count', 0) * 2, 
                               reverse=True)
                
                logger.info(f"Fetched {len(hot_tweets)} hot tweets")
            
        except Exception as e:
            logger.error(f"Error fetching hot tweets: {e}")
        
        return hot_tweets[:50]  # Return top 50 hot tweets
    
    def _extract_hashtags(self, tweet: Dict[str, Any]) -> List[str]:
        """提取推文中的Hashtag
        
        Args:
            tweet: 推文数据
            
        Returns:
            Hashtag列表
        """
        hashtags = []
        
        if 'entities' in tweet and 'hashtags' in tweet['entities']:
            hashtags = [h['tag'] for h in tweet['entities']['hashtags']]
        
        # Also extract from text using regex
        text_hashtags = re.findall(r'#(\w+)', tweet.get('text', ''))
        hashtags.extend([tag for tag in text_hashtags if tag not in hashtags])
        
        return hashtags
    
    def _calculate_engagement_rate(self, tweet_metrics: Dict[str, Any], 
                                  author_metrics: Dict[str, Any]) -> float:
        """计算互动率
        
        Args:
            tweet_metrics: 推文统计数据
            author_metrics: 作者统计数据
            
        Returns:
            互动率（百分比）
        """
        total_engagement = (
            tweet_metrics.get('like_count', 0) +
            tweet_metrics.get('retweet_count', 0) +
            tweet_metrics.get('reply_count', 0) +
            tweet_metrics.get('quote_count', 0)
        )
        
        followers = author_metrics.get('followers_count', 1)
        if followers > 0:
            return (total_engagement / followers) * 100
        return 0
    
    async def analyze_hashtags(self, tweets: List[Dict[str, Any]]) -> Dict[str, Any]:
        """分析Hashtag趋势
        
        Args:
            tweets: 推文列表
            
        Returns:
            Hashtag分析结果
        """
        hashtag_stats = Counter()
        hashtag_first_seen = {}
        hashtag_associations = {}
        
        for tweet in tweets:
            hashtags = tweet.get('hashtags', [])
            created_at = tweet.get('created_at', '')
            
            for tag in hashtags:
                hashtag_stats[tag] += 1
                
                # Track first appearance
                if tag not in hashtag_first_seen:
                    hashtag_first_seen[tag] = created_at
                
                # Track associations
                if tag not in hashtag_associations:
                    hashtag_associations[tag] = Counter()
                
                for other_tag in hashtags:
                    if other_tag != tag:
                        hashtag_associations[tag][other_tag] += 1
        
        # Generate word cloud data
        word_cloud_data = [
            {'text': tag, 'value': count}
            for tag, count in hashtag_stats.most_common(50)
        ]
        
        return {
            'top_hashtags': hashtag_stats.most_common(20),
            'total_unique': len(hashtag_stats),
            'word_cloud_data': word_cloud_data,
            'first_seen': hashtag_first_seen,
            'associations': {
                tag: assoc.most_common(5)
                for tag, assoc in list(hashtag_associations.items())[:10]
            },
            'trending_score': self._calculate_trending_score(hashtag_stats, hashtag_first_seen)
        }
    
    def _calculate_trending_score(self, hashtag_stats: Counter, 
                                 first_seen: Dict[str, str]) -> List[Dict[str, Any]]:
        """计算Hashtag趋势分数
        
        Args:
            hashtag_stats: Hashtag统计
            first_seen: 首次出现时间
            
        Returns:
            趋势分数列表
        """
        trending = []
        now = datetime.now(timezone.utc)
        
        for tag, count in hashtag_stats.most_common(10):
            first_time = datetime.fromisoformat(first_seen[tag].replace('Z', '+00:00'))
            hours_ago = (now - first_time).total_seconds() / 3600
            
            # Calculate trending score (newer + more mentions = higher score)
            if hours_ago > 0:
                score = count / (hours_ago ** 0.5)
            else:
                score = count * 10  # Very new, high score
            
            trending.append({
                'hashtag': tag,
                'count': count,
                'hours_ago': round(hours_ago, 1),
                'trending_score': round(score, 2)
            })
        
        trending.sort(key=lambda x: x['trending_score'], reverse=True)
        return trending
    
    async def collect_interaction_stats(self, tweets: List[Dict[str, Any]]) -> Dict[str, Any]:
        """收集用户互动统计
        
        Args:
            tweets: 推文列表
            
        Returns:
            互动统计数据
        """
        total_likes = 0
        total_retweets = 0
        total_replies = 0
        total_quotes = 0
        engagement_rates = []
        
        high_impact_tweets = []
        
        for tweet in tweets:
            metrics = tweet.get('metrics', {})
            
            likes = metrics.get('like_count', 0)
            retweets = metrics.get('retweet_count', 0)
            replies = metrics.get('reply_count', 0)
            quotes = metrics.get('quote_count', 0)
            
            total_likes += likes
            total_retweets += retweets
            total_replies += replies
            total_quotes += quotes
            
            if 'engagement_rate' in tweet:
                engagement_rates.append(tweet['engagement_rate'])
            
            # Identify high impact tweets
            total_engagement = likes + retweets * 2 + replies + quotes * 1.5
            if total_engagement > 10000:  # High impact threshold
                high_impact_tweets.append({
                    'id': tweet['id'],
                    'text': tweet['text'][:100] + '...',
                    'total_engagement': total_engagement,
                    'url': tweet.get('url')
                })
        
        tweet_count = len(tweets)
        
        return {
            'total_tweets': tweet_count,
            'total_interactions': {
                'likes': total_likes,
                'retweets': total_retweets,
                'replies': total_replies,
                'quotes': total_quotes,
                'total': total_likes + total_retweets + total_replies + total_quotes
            },
            'average_interactions': {
                'likes': total_likes / tweet_count if tweet_count > 0 else 0,
                'retweets': total_retweets / tweet_count if tweet_count > 0 else 0,
                'replies': total_replies / tweet_count if tweet_count > 0 else 0,
                'quotes': total_quotes / tweet_count if tweet_count > 0 else 0
            },
            'engagement_rate': {
                'average': sum(engagement_rates) / len(engagement_rates) if engagement_rates else 0,
                'max': max(engagement_rates) if engagement_rates else 0,
                'min': min(engagement_rates) if engagement_rates else 0
            },
            'high_impact_tweets': sorted(high_impact_tweets, 
                                        key=lambda x: x['total_engagement'], 
                                        reverse=True)[:10],
            'virality_score': self._calculate_virality_score(tweets)
        }
    
    def _calculate_virality_score(self, tweets: List[Dict[str, Any]]) -> float:
        """计算病毒传播度分数
        
        Args:
            tweets: 推文列表
            
        Returns:
            病毒传播度分数（0-100）
        """
        if not tweets:
            return 0
        
        scores = []
        for tweet in tweets:
            metrics = tweet.get('metrics', {})
            
            # Virality factors
            retweet_ratio = metrics.get('retweet_count', 0) / (metrics.get('like_count', 1))
            quote_ratio = metrics.get('quote_count', 0) / (metrics.get('like_count', 1))
            
            # Higher retweet and quote ratios indicate virality
            score = min(100, (retweet_ratio * 50 + quote_ratio * 30) * 10)
            scores.append(score)
        
        return sum(scores) / len(scores)
    
    def convert_to_local_time(self, utc_time_str: str, timezone_offset: int = 8) -> str:
        """转换UTC时间到本地时间
        
        Args:
            utc_time_str: UTC时间字符串
            timezone_offset: 时区偏移（小时），默认东八区
            
        Returns:
            本地时间字符串
        """
        try:
            utc_time = datetime.fromisoformat(utc_time_str.replace('Z', '+00:00'))
            local_time = utc_time + timedelta(hours=timezone_offset)
            
            # Format as relative time
            now = datetime.now(timezone.utc) + timedelta(hours=timezone_offset)
            delta = now - local_time
            
            if delta.days > 0:
                if delta.days == 1:
                    return "1天前"
                else:
                    return f"{delta.days}天前"
            elif delta.seconds > 3600:
                hours = delta.seconds // 3600
                if hours == 1:
                    return "1小时前"
                else:
                    return f"{hours}小时前"
            elif delta.seconds > 60:
                minutes = delta.seconds // 60
                return f"{minutes}分钟前"
            else:
                return "刚刚"
        except Exception as e:
            logger.error(f"Error converting time: {e}")
            return utc_time_str
    
    def filter_sensitive_content(self, tweets: List[Dict[str, Any]]) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
        """Partition tweets into safe and filtered-out lists.

        'blocked' tweets go to the filtered list with a filter_reason;
        'warning' tweets stay in the safe list but gain
        content_warning=True; everything else passes through annotated
        only with its content_level.

        Args:
            tweets: Raw tweet dicts.

        Returns:
            Tuple of (safe tweets, filtered-out tweets).
        """
        kept = []
        dropped = []

        for tweet in tweets:
            level = self._classify_content(tweet)

            if level == 'blocked':
                dropped.append({
                    **tweet,
                    'filter_reason': 'sensitive_content',
                    'content_level': level
                })
            elif level == 'warning':
                kept.append({
                    **tweet,
                    'content_warning': True,
                    'content_level': level
                })
            else:
                kept.append({
                    **tweet,
                    'content_level': level
                })

        logger.info(f"Filtered {len(dropped)} sensitive tweets")
        return kept, dropped
    
    def _classify_content(self, tweet: Dict[str, Any]) -> str:
        """分类内容安全级别
        
        Args:
            tweet: 推文数据
            
        Returns:
            'safe' | 'warning' | 'blocked'
        """
        # Check Twitter's own sensitive flag
        if tweet.get('possibly_sensitive', False):
            return 'warning'
        
        text = tweet.get('text', '').lower()
        
        # Check against sensitive words
        for word in self.sensitive_words:
            if word in text:
                return 'blocked'
        
        # Check NSFW patterns
        for pattern in self.nsfw_patterns:
            if re.search(pattern, text, re.IGNORECASE):
                return 'warning'
        
        return 'safe'
    
    def load_sensitive_words(self, words: List[str]):
        """Replace the sensitive-word blocklist used by content filtering.

        Args:
            words: Sensitive words; duplicates collapse into the set.
        """
        self.sensitive_words = set(words)
        logger.info(f"Loaded {len(self.sensitive_words)} sensitive words")
    
    def add_nsfw_pattern(self, pattern: str):
        """添加NSFW检测正则表达式
        
        Args:
            pattern: 正则表达式模式
        """
        try:
            compiled = re.compile(pattern, re.IGNORECASE)
            self.nsfw_patterns.append(compiled)
        except re.error as e:
            logger.error(f"Invalid regex pattern: {e}")
    
    async def _make_api_request(self, url: str, params: Dict[str, Any] = None) -> Dict[str, Any]:
        """发起API请求
        
        Args:
            url: API端点
            params: 请求参数
            
        Returns:
            响应数据
        """
        if not self.api_key:
            raise ValueError("API key not set")
        
        self.api_call_count += 1
        
        async with aiohttp.ClientSession() as session:
            async with session.get(url, headers=self.headers, params=params) as response:
                if response.status == 401:
                    raise aiohttp.ClientResponseError(
                        request_info=response.request_info,
                        history=response.history,
                        status=401,
                        message="Unauthorized: Check API key"
                    )
                
                response.raise_for_status()
                
                # Update rate limit info from headers
                self.api_quota_remaining = int(response.headers.get('x-rate-limit-remaining', 450))
                
                return await response.json()
    
    async def _get_cached_trends(self) -> Optional[List[Dict[str, Any]]]:
        """获取缓存的趋势数据
        
        Returns:
            缓存的趋势数据
        """
        if not self.redis_client:
            return None
        
        try:
            cached = await self.redis_client.get('twitter:trends:global')
            if cached:
                return json.loads(cached)
        except Exception as e:
            logger.error(f"Error getting cached trends: {e}")
        
        return None
    
    async def _cache_trends(self, trends: List[Dict[str, Any]], ttl: int = 3600):
        """缓存趋势数据
        
        Args:
            trends: 趋势数据
            ttl: 缓存时间（秒）
        """
        if not self.redis_client:
            return
        
        try:
            await self.redis_client.setex(
                'twitter:trends:global',
                ttl,
                json.dumps(trends)
            )
        except Exception as e:
            logger.error(f"Error caching trends: {e}")
    
    async def fetch_hot_topics(self) -> List[Dict[str, Any]]:
        """Fetch hot topics (implements BaseSpider's abstract method).

        Pipeline: verify the API key, fetch trends and hot tweets, drop
        sensitive tweets, run hashtag/interaction analysis, merge trends
        (top 20) and hot tweets (top 10) into one ranked topic list, then
        cache the list in Redis and persist the analysis to MongoDB.

        Returns:
            Combined list of topic dicts (empty if key verification fails).
        """
        if not await self.verify_api_key():
            logger.error("API key verification failed")
            return []
        
        # Fetch trends
        trends = await self.fetch_global_trends()
        
        # Fetch hot tweets
        hot_tweets = await self.fetch_hot_tweets()
        
        # Drop blocked tweets; the filtered-out list is discarded here.
        safe_tweets, _ = self.filter_sensitive_content(hot_tweets)
        
        # Analyze hashtags
        hashtag_analysis = await self.analyze_hashtags(safe_tweets)
        
        # Collect interaction stats
        interaction_stats = await self.collect_interaction_stats(safe_tweets)
        
        # Combine all data
        hot_topics = []
        
        for idx, trend in enumerate(trends[:20], 1):
            hot_topics.append({
                'platform': 'twitter',
                'rank': idx,
                'title': trend['name'],
                'heat_score': trend.get('tweet_volume', 0),
                'url': trend['url'],
                'category': 'trending',
                'created_at': datetime.now(timezone.utc).isoformat(),
                'extra_data': {
                    'promoted': trend.get('promoted', False),
                    'tweet_count': trend.get('tweet_volume', 0)
                }
            })
        
        # Append hot tweets after the trends, continuing the rank sequence.
        for idx, tweet in enumerate(safe_tweets[:10], len(trends) + 1):
            hot_topics.append({
                'platform': 'twitter',
                'rank': idx,
                'title': tweet['text'][:100],
                'heat_score': tweet['metrics'].get('like_count', 0),
                'url': tweet['url'],
                'category': 'hot_tweet',
                'created_at': tweet['created_at'],
                'extra_data': {
                    'author': tweet['author']['username'],
                    'metrics': tweet['metrics'],
                    'language': tweet.get('lang'),
                    'hashtags': tweet.get('hashtags', [])
                }
            })
        
        # Cache the results
        if self.redis_client:
            await self._cache_trends(hot_topics)
        
        # Persist aggregate analysis alongside API usage bookkeeping.
        if self.mongodb_client:
            await self._store_analysis_results({
                'timestamp': datetime.now(timezone.utc).isoformat(),
                'hashtag_analysis': hashtag_analysis,
                'interaction_stats': interaction_stats,
                'api_calls': self.api_call_count,
                'api_quota_remaining': self.api_quota_remaining
            })
        
        return hot_topics
    
    async def _store_analysis_results(self, analysis: Dict[str, Any]):
        """存储分析结果
        
        Args:
            analysis: 分析数据
        """
        if not self.mongodb_client:
            return
        
        try:
            db = self.mongodb_client['crawler_db']
            collection = db['twitter_analysis']
            await collection.insert_one(analysis)
            logger.info("Stored analysis results to MongoDB")
        except Exception as e:
            logger.error(f"Error storing analysis results: {e}")
    
    async def parse(self, html: str, url: str = None) -> List[Dict[str, Any]]:
        """解析页面内容（实现BaseSpider的抽象方法）
        
        Args:
            html: HTML内容
            url: 页面URL
            
        Returns:
            解析后的数据列表
        """
        # Twitter API returns JSON, not HTML
        try:
            data = json.loads(html)
            return data.get('data', [])
        except json.JSONDecodeError:
            logger.error("Failed to parse response as JSON")
            return []