"""
Text structure analysis module.
Analyzes linguistic structure, frequency statistics, and readability metrics.
"""

import math
import re
import string
from collections import Counter, defaultdict
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple

@dataclass
class TextStatistics:
    """Container for text structure statistics.

    All fields are populated by TextStructureAnalyzer.analyze(); a freshly
    constructed instance holds neutral zero/empty values.
    """
    total_characters: int = 0        # len() of the raw input text
    total_words: int = 0             # count of filtered word tokens
    total_sentences: int = 0         # count of length-filtered sentences
    total_lines: int = 0             # newline-split line count
    avg_sentence_length: float = 0.0  # mean words per sentence
    avg_word_length: float = 0.0      # mean characters per word
    vocabulary_size: int = 0          # number of distinct words
    lexical_diversity: float = 0.0    # type/token ratio in [0, 1]
    # Dict fields use default_factory so each instance gets its own empty
    # dict (a plain mutable default would be shared; the previous None
    # default contradicted the Dict[...] annotations).
    sentence_length_distribution: Dict[str, float] = field(default_factory=dict)
    word_frequency: Dict[str, int] = field(default_factory=dict)
    pos_distribution: Dict[str, int] = field(default_factory=dict)
    complexity_score: float = 0.0     # heuristic score in [0, 1]
    readability_metrics: Dict[str, float] = field(default_factory=dict)

class TextStructureAnalyzer:
    """Analyzes the structural and linguistic properties of text.

    Produces raw counts, vocabulary statistics, a sentence-length
    distribution, a heuristic complexity score, approximate readability
    metrics, and a pattern-based part-of-speech breakdown.
    """

    def __init__(self, config):
        # config is expected to expose min_sentence_length and
        # max_sentence_length (word-count bounds used to filter sentences).
        self.config = config
        self.stats = TextStatistics()

    def analyze(self, text: str) -> Dict[str, Any]:
        """Perform comprehensive text structure analysis.

        Args:
            text: Raw input text; may be empty.

        Returns:
            Nested dict of results as assembled by _compile_results().
        """
        # Start from a fresh statistics object so repeated analyze() calls
        # cannot leak values from a previous run (previously the avg_*
        # fields kept stale values when the new text had no words/sentences,
        # because the guards below skip reassignment).
        self.stats = TextStatistics()

        # Basic text processing
        sentences = self._extract_sentences(text)
        words = self._extract_words(text)
        lines = text.split('\n')

        # Raw counts
        self.stats.total_characters = len(text)
        self.stats.total_words = len(words)
        self.stats.total_sentences = len(sentences)
        self.stats.total_lines = len(lines)

        # Averages, guarded against empty input
        if sentences:
            sentence_lengths = [len(s.split()) for s in sentences if s.strip()]
            self.stats.avg_sentence_length = (
                sum(sentence_lengths) / len(sentence_lengths) if sentence_lengths else 0
            )

        if words:
            self.stats.avg_word_length = sum(len(w) for w in words) / len(words)

        # Vocabulary analysis: distinct words and type/token ratio
        word_counter = Counter(words)
        self.stats.vocabulary_size = len(word_counter)
        self.stats.lexical_diversity = len(word_counter) / len(words) if words else 0
        self.stats.word_frequency = dict(word_counter.most_common(50))

        # Sentence length distribution
        self.stats.sentence_length_distribution = self._analyze_sentence_length_distribution(sentences)

        # Heuristic complexity score in [0, 1]
        self.stats.complexity_score = self._calculate_complexity_score(text, words, sentences)

        # Approximate readability metrics (Flesch, ARI)
        self.stats.readability_metrics = self._calculate_readability_metrics(text, words, sentences)

        # Pattern-based part-of-speech breakdown (heuristic, not a tagger)
        self.stats.pos_distribution = self._analyze_pos_distribution(words)

        return self._compile_results()

    def _extract_sentences(self, text: str) -> List[str]:
        """Extract sentences and keep only those within the configured
        word-count bounds.

        Splitting is regex-based (runs of .!? followed by whitespace), so
        the final sentence retains its trailing punctuation; abbreviations
        are not handled — can be enhanced with nltk/spacy.
        """
        sentence_pattern = r'[.!?]+\s+'
        candidates = re.split(sentence_pattern, text)

        filtered_sentences = []
        for candidate in candidates:
            word_count = len(candidate.split())
            # Length filter uses word counts, inclusive on both ends.
            if self.config.min_sentence_length <= word_count <= self.config.max_sentence_length:
                filtered_sentences.append(candidate.strip())

        return filtered_sentences

    def _extract_words(self, text: str) -> List[str]:
        """Extract lowercase word tokens with basic preprocessing.

        Strips all ASCII punctuation, lowercases, then drops tokens that
        are shorter than 2 characters or purely numeric.
        """
        text_clean = text.translate(str.maketrans('', '', string.punctuation))
        tokens = text_clean.lower().split()
        return [t for t in tokens if len(t) >= 2 and not t.isdigit()]

    def _analyze_sentence_length_distribution(self, sentences: List[str]) -> Dict[str, float]:
        """Analyze the distribution of sentence lengths (in words).

        Returns an empty dict for empty input; otherwise mean, median,
        population std deviation, min/max, and short/medium/long
        percentage buckets (<=10, 11-20, >20 words).
        """
        if not sentences:
            return {}

        lengths = [len(s.split()) for s in sentences]
        n = len(lengths)
        # Hoist the mean: it was previously recomputed inside the std
        # summation for every element (accidental O(n^2)).
        mean = sum(lengths) / n

        return {
            'mean': mean,
            # Upper median — no interpolation for even-sized samples,
            # matching the original behavior.
            'median': sorted(lengths)[n // 2],
            # Population (not sample) standard deviation.
            'std': math.sqrt(sum((x - mean) ** 2 for x in lengths) / n),
            'min': min(lengths),
            'max': max(lengths),
            'short_sentences_pct': sum(1 for l in lengths if l <= 10) / n * 100,
            'medium_sentences_pct': sum(1 for l in lengths if 10 < l <= 20) / n * 100,
            'long_sentences_pct': sum(1 for l in lengths if l > 20) / n * 100,
        }

    def _calculate_complexity_score(self, text: str, words: List[str], sentences: List[str]) -> float:
        """Calculate a heuristic complexity score in [0, 1].

        Weighted blend of normalized average word length (0.3), normalized
        average sentence length (0.4), and vocabulary diversity (0.3).
        Returns 0.0 when there are no words or no sentences.
        """
        if not words or not sentences:
            return 0.0

        # Average word length, normalized against an 8-char ceiling.
        avg_word_length = sum(len(w) for w in words) / len(words)
        word_length_score = min(avg_word_length / 8.0, 1.0)

        # Average sentence length, normalized against a 25-word ceiling.
        avg_sentence_length = sum(len(s.split()) for s in sentences) / len(sentences)
        sentence_length_score = min(avg_sentence_length / 25.0, 1.0)

        # Type/token ratio (already in [0, 1]).
        vocab_diversity = len(set(words)) / len(words)

        return (word_length_score * 0.3 +
                sentence_length_score * 0.4 +
                vocab_diversity * 0.3)

    def _calculate_readability_metrics(self, text: str, words: List[str], sentences: List[str]) -> Dict[str, float]:
        """Calculate approximate readability metrics.

        Returns Flesch Reading Ease (clamped to 0-100) and the Automated
        Readability Index, plus the intermediate averages. Empty dict when
        there are no words or sentences.
        """
        if not words or not sentences:
            return {}

        avg_sentence_length = len(words) / len(sentences)
        avg_syllables_per_word = sum(self._count_syllables(word) for word in words) / len(words)

        # Flesch Reading Ease: 206.835 - 1.015*(words/sentence) - 84.6*(syllables/word)
        flesch_score = 206.835 - (1.015 * avg_sentence_length) - (84.6 * avg_syllables_per_word)
        flesch_score = max(0, min(100, flesch_score))  # clamp to 0-100

        # ARI: 4.71*(chars/word) + 0.5*(words/sentence) - 21.43 (not clamped)
        characters_per_word = sum(len(word) for word in words) / len(words)
        ari_score = (4.71 * characters_per_word) + (0.5 * avg_sentence_length) - 21.43

        return {
            'flesch_reading_ease': flesch_score,
            'automated_readability_index': ari_score,
            'avg_sentence_length': avg_sentence_length,
            'avg_syllables_per_word': avg_syllables_per_word
        }

    def _count_syllables(self, word: str) -> int:
        """Estimate syllable count by counting vowel groups.

        Heuristic: words of <=3 chars count as one syllable; a trailing
        'e' is treated as silent (over-corrects words like 'table');
        result is floored at 1.
        """
        word = word.lower()
        if len(word) <= 3:
            return 1

        vowels = 'aeiouy'
        syllable_count = 0
        previous_was_vowel = False

        # Each run of consecutive vowels counts as one syllable.
        for char in word:
            is_vowel = char in vowels
            if is_vowel and not previous_was_vowel:
                syllable_count += 1
            previous_was_vowel = is_vowel

        # Silent-e correction.
        if word.endswith('e'):
            syllable_count -= 1

        return max(1, syllable_count)

    def _analyze_pos_distribution(self, words: List[str]) -> Dict[str, int]:
        """Very rough part-of-speech bucketing from suffix patterns.

        Not a real tagger: suffix rules are checked in order, then a small
        closed set of function words, with everything else counted as a
        content word.
        """
        # Set for O(1) membership (was a list scanned per word).
        function_words = {'the', 'a', 'an', 'and', 'or', 'but',
                          'in', 'on', 'at', 'to', 'for'}

        pos_counts = defaultdict(int)
        for word in words:
            if word.endswith(('ing', 'ed', 'er', 'est')):
                pos_counts['verb/adjective'] += 1
            elif word.endswith('ly'):  # was a 1-element "tuple" that was really a str
                pos_counts['adverb'] += 1
            elif word.endswith(('tion', 'sion', 'ness', 'ment')):
                pos_counts['noun'] += 1
            elif word in function_words:
                pos_counts['function_word'] += 1
            else:
                pos_counts['content_word'] += 1

        return dict(pos_counts)

    def _compile_results(self) -> Dict[str, Any]:
        """Compile the accumulated statistics into a structured dict."""
        return {
            'basic_statistics': {
                'total_characters': self.stats.total_characters,
                'total_words': self.stats.total_words,
                'total_sentences': self.stats.total_sentences,
                'total_lines': self.stats.total_lines,
                'avg_sentence_length': round(self.stats.avg_sentence_length, 2),
                'avg_word_length': round(self.stats.avg_word_length, 2),
                'vocabulary_size': self.stats.vocabulary_size,
                'lexical_diversity': round(self.stats.lexical_diversity, 4)
            },
            'sentence_analysis': self.stats.sentence_length_distribution,
            'word_frequency': self.stats.word_frequency,
            'pos_distribution': self.stats.pos_distribution,
            'complexity_metrics': {
                'complexity_score': round(self.stats.complexity_score, 4),
                'readability_metrics': self.stats.readability_metrics
            }
        }
