"""Comment processing and analysis implementation."""

import logging
import math
import re
from collections import Counter, defaultdict
from datetime import datetime
from typing import Dict, Any, List, Optional, Tuple

import jieba
import jieba.analyse
from textstat import flesch_reading_ease

logger = logging.getLogger(__name__)


class CommentProcessor:
    """Process and analyze comments for hot topic content generation."""
    
    def __init__(self):
        """Set up jieba and the keyword lexicons used by the analyzers."""
        # Pre-load jieba's dictionary so the first analysis call is not slow.
        jieba.initialize()

        # Polarity lexicons for the rule-based sentiment classifier.
        self.positive_keywords = set(
            "好 棒 赞 支持 喜欢 优秀 厉害 不错 满意 开心 "
            "高兴 激动 感动 温暖 美好 正确 同意 认同 佩服".split()
        )
        self.negative_keywords = set(
            "差 烂 垃圾 讨厌 反对 失望 愤怒 生气 难过 悲伤 "
            "糟糕 恶心 无语 郁闷 不满 批评 谴责 抗议 抵制".split()
        )

        # Function words ignored during keyword extraction.
        self.stop_words = set(
            "的 了 在 是 我 有 和 就 不 人 都 一 一个 "
            "上 也 很 到 说 要 去 你 会 着 没有 看 好 "
            "自己 这 那 里 为 把 被 从 跟 与 向 往 给".split()
        )
    
    async def process_comments(self, comments: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Process and analyze comments comprehensively.
        
        Args:
            comments: List of comment dictionaries
            
        Returns:
            Processed comments analysis
        """
        if not comments:
            return {
                "total_count": 0,
                "sentiment_distribution": {"positive": 0, "negative": 0, "neutral": 0},
                "key_viewpoints": [],
                "top_comments": [],
                "keyword_frequency": {},
                "quality_score": 0.0
            }
        
        logger.info(f"Processing {len(comments)} comments")
        
        # Basic statistics
        total_count = len(comments)
        
        # Sentiment analysis
        sentiment_results = [self._analyze_sentiment(comment.get("content", "")) for comment in comments]
        sentiment_distribution = self._calculate_sentiment_distribution(sentiment_results)
        
        # Extract key viewpoints
        key_viewpoints = await self._extract_key_viewpoints(comments)
        
        # Select top quality comments
        top_comments = self._select_top_comments(comments, limit=5)
        
        # Keyword frequency analysis
        keyword_frequency = self._analyze_keyword_frequency(comments)
        
        # Calculate overall quality score
        quality_score = self._calculate_quality_score(comments, sentiment_results)
        
        # Detect discussion themes
        themes = self._detect_themes(comments)
        
        return {
            "total_count": total_count,
            "sentiment_distribution": sentiment_distribution,
            "key_viewpoints": key_viewpoints,
            "top_comments": top_comments,
            "keyword_frequency": keyword_frequency,
            "quality_score": quality_score,
            "themes": themes,
            "processed_at": datetime.utcnow().isoformat()
        }
    
    def _analyze_sentiment(self, text: str) -> str:
        """Classify a text as 'positive', 'negative' or 'neutral'.

        A simple keyword-presence vote: each lexicon word found in the
        (lower-cased) text counts once; the polarity with more hits wins
        and ties fall back to neutral.

        Args:
            text: Text to analyze.

        Returns:
            Sentiment label: 'positive', 'negative', or 'neutral'.
        """
        if not text:
            return "neutral"

        lowered = text.lower()

        pos_hits = sum(kw in lowered for kw in self.positive_keywords)
        neg_hits = sum(kw in lowered for kw in self.negative_keywords)

        if pos_hits == neg_hits:
            return "neutral"
        return "positive" if pos_hits > neg_hits else "negative"
    
    def _calculate_sentiment_distribution(self, sentiment_results: List[str]) -> Dict[str, float]:
        """Convert a list of sentiment labels into percentage shares.

        Args:
            sentiment_results: List of sentiment labels.

        Returns:
            Mapping of 'positive'/'negative'/'neutral' to a percentage
            rounded to one decimal place (all zeros for empty input).
        """
        if not sentiment_results:
            return {"positive": 0.0, "negative": 0.0, "neutral": 0.0}

        tally = Counter(sentiment_results)
        total = len(sentiment_results)

        return {
            label: round(tally.get(label, 0) / total * 100, 1)
            for label in ("positive", "negative", "neutral")
        }
    
    async def _extract_key_viewpoints(self, comments: List[Dict[str, Any]], limit: int = 10) -> List[Dict[str, Any]]:
        """Extract key viewpoints from comments.
        
        Args:
            comments: List of comment dictionaries
            limit: Maximum number of viewpoints to extract
            
        Returns:
            List of key viewpoints with metadata
        """
        viewpoints = []
        
        # Group similar comments by keywords
        keyword_groups = defaultdict(list)
        
        for comment in comments:
            content = comment.get("content", "")
            if not content or len(content) < 10:  # Skip very short comments
                continue
            
            # Extract keywords
            keywords = jieba.analyse.extract_tags(content, topK=3, withWeight=False)
            keywords = [kw for kw in keywords if kw not in self.stop_words]
            
            if keywords:
                # Use the most important keyword as group key
                group_key = keywords[0]
                keyword_groups[group_key].append({
                    "content": content,
                    "likes": comment.get("likes", 0),
                    "replies": comment.get("replies", 0),
                    "keywords": keywords
                })
        
        # Select representative viewpoints from each group
        for keyword, group_comments in keyword_groups.items():
            if len(group_comments) < 2:  # Skip groups with too few comments
                continue
            
            # Sort by engagement (likes + replies)
            group_comments.sort(
                key=lambda x: x["likes"] + x["replies"], 
                reverse=True
            )
            
            # Take the most engaged comment as representative
            representative = group_comments[0]
            
            viewpoints.append({
                "keyword": keyword,
                "representative_comment": representative["content"],
                "support_count": len(group_comments),
                "engagement_score": representative["likes"] + representative["replies"],
                "related_keywords": representative["keywords"]
            })
        
        # Sort by support count and engagement
        viewpoints.sort(
            key=lambda x: (x["support_count"], x["engagement_score"]), 
            reverse=True
        )
        
        return viewpoints[:limit]
    
    def _select_top_comments(self, comments: List[Dict[str, Any]], limit: int = 5) -> List[Dict[str, Any]]:
        """Rank comments by a heuristic quality score and return the best few.

        Args:
            comments: List of comment dictionaries.
            limit: Maximum number of comments to select.

        Returns:
            Up to `limit` comments (copies with an added "quality_score"
            key), sorted by score descending.
        """
        if not comments:
            return []

        ranked: List[Dict[str, Any]] = []

        for comment in comments:
            text = comment.get("content", "")
            # Drop empty or trivially short comments outright.
            if not text or len(text) < 5:
                continue

            likes = comment.get("likes", 0)
            replies = comment.get("replies", 0)
            n_chars = len(text)

            # Replies indicate discussion, so they count double.
            engagement = likes + replies * 2
            # Length contribution saturates at 2.0 (i.e. 200 characters).
            length_component = min(n_chars / 100, 2.0)

            # Penalize one-liners and walls of text.
            penalty = 0.5 if n_chars < 10 else 0.8 if n_chars > 500 else 1.0

            score = (engagement * 0.7 + length_component * 0.3) * penalty
            ranked.append({**comment, "quality_score": score})

        ranked.sort(key=lambda c: c["quality_score"], reverse=True)
        return ranked[:limit]
    
    def _analyze_keyword_frequency(self, comments: List[Dict[str, Any]], top_k: int = 20) -> Dict[str, int]:
        """Extract the most significant keywords across all comment text.

        Args:
            comments: List of comment dictionaries.
            top_k: Number of top keywords to return.

        Returns:
            Mapping of keyword to an approximate frequency (the jieba
            TF-IDF weight scaled by 100 and truncated to int).
        """
        corpus = " ".join(
            comment.get("content", "")
            for comment in comments
            if comment.get("content")
        )

        if not corpus:
            return {}

        # Over-fetch (2x) so stop-word filtering still leaves enough tags.
        weighted_tags = jieba.analyse.extract_tags(corpus, topK=top_k * 2, withWeight=True)

        frequencies = {
            tag: int(weight * 100)
            for tag, weight in weighted_tags
            if tag not in self.stop_words and len(tag) > 1
        }

        ordered = sorted(frequencies.items(), key=lambda item: item[1], reverse=True)
        return dict(ordered[:top_k])
    
    def _calculate_quality_score(self, comments: List[Dict[str, Any]], sentiment_results: List[str]) -> float:
        """Score the overall quality of a comment set in [0, 1].

        Combines normalized engagement, average length, and sentiment
        diversity (entropy) with fixed weights of 0.4 / 0.3 / 0.3.

        Args:
            comments: List of comment dictionaries.
            sentiment_results: List of sentiment labels for the comments.

        Returns:
            Quality score rounded to three decimals (0.0 for no comments).
        """
        if not comments:
            return 0.0

        n = len(comments)

        total_engagement = sum(
            c.get("likes", 0) + c.get("replies", 0)
            for c in comments
        )
        mean_length = sum(len(c.get("content", "")) for c in comments) / n

        # A balanced mix of sentiments suggests genuine discussion.
        label_counts = Counter(sentiment_results)
        diversity = self._calculate_entropy(list(label_counts.values()))

        # Normalize each factor into [0, 1].
        engagement_norm = min(total_engagement / (n * 10), 1.0)  # ~10 interactions/comment expected
        length_norm = min(mean_length / 100, 1.0)  # ~100 chars expected
        diversity_norm = diversity / 1.585  # log2(3) ≈ 1.585 is the 3-class maximum

        weighted = engagement_norm * 0.4 + length_norm * 0.3 + diversity_norm * 0.3
        return round(weighted, 3)
    
    def _calculate_entropy(self, values: List[int]) -> float:
        """Compute the Shannon entropy (base 2) of a count distribution.

        Args:
            values: Non-negative counts per category.

        Returns:
            Entropy in bits; 0.0 for an empty or all-zero distribution.
        """
        if not values or sum(values) == 0:
            return 0.0

        total = sum(values)
        probabilities = [v / total for v in values if v > 0]

        # Shannon entropy: -sum(p * log2(p)). The previous implementation
        # called p.bit_length() on floats, which does not exist and raised
        # AttributeError for every non-empty input.
        return -sum(p * math.log2(p) for p in probabilities)
    
    def _detect_themes(self, comments: List[Dict[str, Any]], min_support: int = 3) -> List[Dict[str, Any]]:
        """Detect recurring discussion themes from comment keywords.

        Args:
            comments: List of comment dictionaries.
            min_support: Minimum number of comments to form a theme.

        Returns:
            Themes sorted by keyword frequency descending, each with the
            keyword, its frequency, support count, and up to three sample
            comments (truncated to 100 chars).
        """
        if len(comments) < min_support:
            return []

        # Harvest filtered keywords from every comment, in encounter order.
        harvested: List[str] = []
        for comment in comments:
            text = comment.get("content", "")
            if not text:
                continue
            harvested.extend(
                kw for kw in jieba.analyse.extract_tags(text, topK=5, withWeight=False)
                if kw not in self.stop_words and len(kw) > 1
            )

        counts = Counter(harvested)
        # First-seen order is preserved here, matching the original slice below.
        frequent = [kw for kw, c in counts.items() if c >= min_support]

        themes: List[Dict[str, Any]] = []
        for kw in frequent[:10]:  # consider only the first ten candidates
            matching = [c for c in comments if kw in c.get("content", "")]
            if len(matching) < min_support:
                continue

            samples = []
            for c in matching[:3]:
                text = c.get("content", "")
                samples.append(text[:100] + "..." if len(text) > 100 else text)

            themes.append({
                "theme_keyword": kw,
                "frequency": counts[kw],
                "support_comments": len(matching),
                "sample_comments": samples,
            })

        themes.sort(key=lambda t: t["frequency"], reverse=True)
        return themes
    
    def filter_high_quality_comments(
        self,
        comments: List[Dict[str, Any]], 
        min_length: int = 10,
        min_engagement: int = 1
    ) -> List[Dict[str, Any]]:
        """Keep only comments that pass length, engagement and spam checks.

        Args:
            comments: List of comment dictionaries.
            min_length: Minimum content length.
            min_engagement: Minimum likes + replies.

        Returns:
            The surviving comments, in their original order.
        """
        kept: List[Dict[str, Any]] = []

        for comment in comments:
            text = comment.get("content", "")
            engagement = comment.get("likes", 0) + comment.get("replies", 0)

            # Guard clauses: too short, too quiet, or spam-like.
            if len(text) < min_length or engagement < min_engagement:
                continue
            if self._is_spam_comment(text):
                continue

            kept.append(comment)

        return kept
    
    def _is_spam_comment(self, content: str) -> bool:
        """Heuristically decide whether a comment looks like spam.

        Args:
            content: Comment content.

        Returns:
            True for empty text, long identical-character runs, characters
            outside the expected Chinese/word/punctuation set, contact-info
            keywords, or highly repetitive text; False otherwise.
        """
        if not content:
            return True

        # Rule patterns (byte-identical to the established heuristics):
        # 5+ identical characters in a row; unexpected symbols; contact info.
        rules = (
            r'(.)\1{4,}',
            r'[^\u4e00-\u9fff\w\s.,!?，。！？]',
            r'(微信|QQ|电话|联系|加我)',
        )
        if any(re.search(rule, content) for rule in rules):
            return True

        # Highly repetitive content: fewer than 30% distinct characters.
        return len(set(content)) < len(content) * 0.3