"""Quality scoring system for content evaluation"""

import logging
import re
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional

from .base_cleaner import BaseCleaner

logger = logging.getLogger(__name__)


class QualityScorer(BaseCleaner):
    """Score content quality based on multiple dimensions.

    Evaluates four dimensions -- completeness, readability, information
    density, and originality -- each on a 0-100 scale, then combines them
    into a weighted total score.  Optionally attaches a detailed quality
    report to the cleaned data.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize quality scorer.

        Args:
            config: Configuration with options:
                - weights: Dictionary of dimension weights; a partial dict
                  is merged over the defaults (0.25 per dimension)
                - min_content_length: Minimum content length for quality
                  content (default: 100)
                - max_content_length: Maximum optimal content length
                  (default: 10000)
                - generate_report: Generate detailed quality report
                  (default: True)
        """
        super().__init__(config)

        # Default weights for quality dimensions (sum to 1.0)
        default_weights = {
            'completeness': 0.25,
            'readability': 0.25,
            'information_density': 0.25,
            'originality': 0.25
        }
        # Merge user weights over the defaults so a partially specified
        # config still assigns every dimension an explicit weight (this
        # matches the .get(dim, 0.25) fallback used when totalling).
        self.weights = {**default_weights, **self.config.get('weights', {})}

        self.min_content_length = self.config.get('min_content_length', 100)
        self.max_content_length = self.config.get('max_content_length', 10000)
        self.generate_report = self.config.get('generate_report', True)

        # Running statistics across every item scored by this instance
        self.stats = {
            'scored': 0,
            'total_score': 0,
            'high_quality': 0,  # Score >= 70
            'medium_quality': 0,  # Score 40-69
            'low_quality': 0  # Score < 40
        }

    def clean(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Score content quality and attach the results to the data.

        Args:
            data: Input data dictionary

        Returns:
            The same dictionary with ``cleaned_data.quality_score``,
            ``cleaned_data.quality_dimensions`` and (when configured)
            ``cleaned_data.quality_report`` populated.
        """
        # Calculate individual dimension scores, then the weighted total
        scores = self._calculate_quality_scores(data)
        total_score = self._calculate_total_score(scores)

        # Generate quality report if configured
        report = None
        if self.generate_report:
            report = self._generate_quality_report(scores, total_score, data)

        # Attach results under the shared 'cleaned_data' namespace
        if 'cleaned_data' not in data:
            data['cleaned_data'] = {}

        data['cleaned_data']['quality_score'] = round(total_score, 2)
        data['cleaned_data']['quality_dimensions'] = scores
        if report:
            data['cleaned_data']['quality_report'] = report

        # Update statistics
        self._update_statistics(total_score)

        return data

    def _calculate_quality_scores(self, data: Dict[str, Any]) -> Dict[str, float]:
        """Calculate individual quality dimension scores.

        Args:
            data: Data dictionary

        Returns:
            Dictionary of dimension scores (each 0-100), keyed by the same
            dimension names used in ``self.weights``.
        """
        return {
            'completeness': self._score_completeness(data),
            'readability': self._score_readability(data),
            'information_density': self._score_information_density(data),
            'originality': self._score_originality(data),
        }

    def _score_completeness(self, data: Dict[str, Any]) -> float:
        """Score content completeness.

        Points are awarded for the presence and size of essential fields
        (title, content, description, excerpt), structured content, and
        previously extracted keywords.

        Args:
            data: Data dictionary

        Returns:
            Completeness score (0-100)
        """
        score = 0
        max_points = 100

        # Title: full credit for a non-trivial title, partial otherwise
        if 'title' in data and data['title']:
            title_len = len(str(data['title']))
            if title_len > 5:
                score += 15
            elif title_len > 0:
                score += 5

        # Content length: base credit at min_content_length, bonuses at
        # 500 and 1000 chars, small penalty past max_content_length
        content_text = self._extract_content_text(data)
        if content_text:
            content_len = len(content_text)

            if content_len >= self.min_content_length:
                score += 20

                if content_len >= 500:
                    score += 10
                if content_len >= 1000:
                    score += 10

                if content_len > self.max_content_length:
                    score -= 5

        # Metadata fields
        if 'description' in data and data['description']:
            score += 10

        if 'excerpt' in data and data['excerpt']:
            score += 5

        # Structured content, plus engagement signal (comments)
        if 'content' in data and isinstance(data['content'], dict):
            score += 10

            if 'comments' in data['content']:
                score += 10

        # Keywords extracted by an upstream cleaner
        if 'cleaned_data' in data and 'keywords' in data['cleaned_data']:
            keywords = data['cleaned_data']['keywords']
            if len(keywords) >= 3:
                score += 10

        return min(score, max_points)

    def _score_readability(self, data: Dict[str, Any]) -> float:
        """Score content readability.

        Starts from 100 and deducts for overly long or fragmented
        sentences, missing paragraph breaks, excessive special characters,
        ALL-CAPS abuse, and repeated punctuation.

        Args:
            data: Data dictionary

        Returns:
            Readability score (0-100)
        """
        content_text = self._extract_content_text(data)
        if not content_text:
            return 0

        score = 100  # Start with perfect score and deduct

        # Average sentence length (in characters); supports both Latin and
        # CJK sentence terminators
        sentences = [
            s.strip()
            for s in re.split(r'[.!?。！？]', content_text)
            if s.strip()
        ]

        if sentences:
            avg_sentence_length = sum(len(s) for s in sentences) / len(sentences)

            if avg_sentence_length > 100:
                score -= 20  # Very long sentences
            elif avg_sentence_length > 50:
                score -= 10  # Long sentences
            elif avg_sentence_length < 5:
                score -= 15  # Too short/fragmented

        # Penalize content with no paragraph breaks at all
        if len(content_text.split('\n\n')) <= 1:
            score -= 10

        # Special characters beyond word chars, whitespace, CJK, and
        # common Latin/CJK punctuation
        special_char_ratio = len(
            re.findall(r'[^\w\s\u4e00-\u9fff,.!?;:()（）。，！？；：]', content_text)
        ) / max(len(content_text), 1)
        if special_char_ratio > 0.1:
            score -= 15  # Too many special characters

        # ALL CAPS abuse (only affects scripts with a case distinction)
        uppercase_ratio = sum(1 for c in content_text if c.isupper()) / max(len(content_text), 1)
        if uppercase_ratio > 0.3:
            score -= 10

        # Repeated punctuation like "!!!" or "..."
        if re.search(r'[.!?]{3,}', content_text):
            score -= 5

        return max(0, score)

    def _score_information_density(self, data: Dict[str, Any]) -> float:
        """Score information density.

        Rewards vocabulary diversity, concrete data points (numbers),
        structural markers (lists), quotations, keyword coverage, and
        substantial length.

        Args:
            data: Data dictionary

        Returns:
            Information density score (0-100)
        """
        content_text = self._extract_content_text(data)
        if not content_text:
            return 0

        score = 0

        # Word diversity: unique/total ratio, up to 30 points.  Under
        # Python 3's Unicode semantics \w already matches CJK characters,
        # so no separate CJK alternative is needed.
        words = re.findall(r'\w+', content_text.lower())
        if words:
            unique_ratio = len(set(words)) / len(words)
            score += unique_ratio * 30

        # Numbers suggest concrete data points: up to 20 points
        numbers = re.findall(r'\d+', content_text)
        if numbers:
            score += min(len(numbers) * 2, 20)

        # Lists / bullet points
        if re.search(r'^\s*[-*•]\s+', content_text, re.MULTILINE):
            score += 10

        # Quotes or citations (straight, curly, or CJK corner quotes)
        if any(q in content_text for q in ('"', '\u201c', '\u201d', '「')):
            score += 10

        # Keyword coverage (if an upstream cleaner extracted keywords):
        # up to 20 points for keywords actually present in the text
        if 'cleaned_data' in data and 'keywords' in data['cleaned_data']:
            keywords = data['cleaned_data']['keywords']
            if keywords:
                lowered = content_text.lower()
                keyword_count = sum(1 for kw in keywords if kw.lower() in lowered)
                score += min(keyword_count * 3, 20)

        # Length bonus for substantial content
        if len(content_text) > 500:
            score += 10

        return min(score, 100)

    def _score_originality(self, data: Dict[str, Any]) -> float:
        """Score content originality.

        Starts from a neutral base and adjusts using upstream duplicate
        detection and sentiment-variation results, then penalizes common
        boilerplate/template phrases.

        Args:
            data: Data dictionary

        Returns:
            Originality score (0-100)
        """
        score = 70  # Base score (assume original unless proven otherwise)

        if 'cleaned_data' in data:
            # Duplicate detection results from an upstream cleaner
            if 'is_duplicate' in data['cleaned_data']:
                if data['cleaned_data']['is_duplicate']:
                    similarity = data['cleaned_data'].get('similarity', 0.5)
                    # Heavily penalize duplicates: the more similar, the lower
                    score = max(0, (1 - similarity) * 50)
                else:
                    score = 90  # Confirmed original

            # Varied sentiment across sentences suggests original thought
            if 'sentiment' in data['cleaned_data']:
                sentiment = data['cleaned_data']['sentiment']
                if 'sentences' in sentiment:
                    sentences = sentiment['sentences']
                    if len(sentences) > 3:
                        labels = [s['label'] for s in sentences]
                        if len(set(labels)) > 1:
                            score += 10  # Sentiment variation bonus

        # Penalize common template/boilerplate phrases
        content_text = self._extract_content_text(data)
        if content_text:
            template_phrases = [
                '点击查看', '扫码关注', '转发本文', '原文链接',
                'click here', 'subscribe', 'follow us', 'read more'
            ]

            # Lowercase once; phrase list is already lowercase
            lowered = content_text.lower()
            for phrase in template_phrases:
                if phrase in lowered:
                    score -= 5

        return max(0, min(score, 100))

    def _calculate_total_score(self, scores: Dict[str, float]) -> float:
        """Calculate weighted total score.

        Args:
            scores: Individual dimension scores

        Returns:
            Total quality score (0-100, assuming weights sum to 1.0)
        """
        # Unknown dimensions fall back to the default weight of 0.25
        return sum(
            score * self.weights.get(dimension, 0.25)
            for dimension, score in scores.items()
        )

    def _generate_quality_report(self, scores: Dict[str, float], total_score: float,
                                 data: Dict[str, Any]) -> Dict[str, Any]:
        """Generate detailed quality report.

        Args:
            scores: Dimension scores
            total_score: Overall quality score
            data: Original data (currently unused, kept for extensibility)

        Returns:
            Quality report dictionary with level, weak areas, suggestions,
            and a UTC evaluation timestamp.
        """
        # Determine quality level (thresholds match _update_statistics)
        if total_score >= 70:
            level = 'high'
            recommendation = 'Content is of high quality and ready for use'
        elif total_score >= 40:
            level = 'medium'
            recommendation = 'Content is acceptable but could be improved'
        else:
            level = 'low'
            recommendation = 'Content needs significant improvement'

        # Any dimension under 50 counts as a weak area
        weak_areas = [
            f"{dimension} ({score:.1f})"
            for dimension, score in scores.items()
            if score < 50
        ]

        # Per-dimension improvement suggestions
        suggestions = []

        if scores['completeness'] < 50:
            suggestions.append("Add more detailed content and metadata")

        if scores['readability'] < 50:
            suggestions.append("Improve sentence structure and formatting")

        if scores['information_density'] < 50:
            suggestions.append("Include more specific information and data points")

        if scores['originality'] < 50:
            suggestions.append("Ensure content is unique and not duplicated")

        return {
            'total_score': round(total_score, 2),
            'quality_level': level,
            'dimension_scores': {k: round(v, 2) for k, v in scores.items()},
            'recommendation': recommendation,
            'weak_areas': weak_areas,
            'improvement_suggestions': suggestions,
            # timezone-aware replacement for the deprecated datetime.utcnow()
            'evaluated_at': datetime.now(timezone.utc).isoformat()
        }

    def _extract_content_text(self, data: Dict[str, Any]) -> str:
        """Extract main text content from title, content, and description.

        Args:
            data: Data dictionary

        Returns:
            Combined text content (space-joined; empty string if none)
        """
        text_parts = []

        if 'title' in data and data['title']:
            text_parts.append(str(data['title']))

        if 'content' in data and data['content']:
            if isinstance(data['content'], str):
                text_parts.append(data['content'])
            elif isinstance(data['content'], dict):
                # Pull known text-bearing keys from structured content
                for key in ['description', 'text', 'body']:
                    if key in data['content'] and data['content'][key]:
                        text_parts.append(str(data['content'][key]))

        if 'description' in data and data['description']:
            text_parts.append(str(data['description']))

        return ' '.join(text_parts)

    def _update_statistics(self, score: float) -> None:
        """Update quality statistics.

        Args:
            score: Quality score (0-100)
        """
        self.stats['scored'] += 1
        self.stats['total_score'] += score

        # Buckets match the thresholds documented on self.stats
        if score >= 70:
            self.stats['high_quality'] += 1
        elif score >= 40:
            self.stats['medium_quality'] += 1
        else:
            self.stats['low_quality'] += 1

    def get_stats(self) -> Dict[str, Any]:
        """Get quality scoring statistics.

        Returns:
            Base cleaner statistics extended with a 'quality' section,
            including the running average score when anything was scored.
        """
        stats = super().get_stats()
        stats['quality'] = self.stats.copy()

        if self.stats['scored'] > 0:
            stats['quality']['average_score'] = round(
                self.stats['total_score'] / self.stats['scored'], 2
            )

        return stats