"""
Enhanced Text Processing Utilities

Advanced text processing utilities for AI classification engine including
text cleaning, normalization, and feature extraction.
"""

import logging
import re
import string
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Tuple


class EnhancedTextProcessor:
    """
    Enhanced text processor for academic paper content analysis.
    
    Features:
    - Academic text cleaning (LaTeX, references, etc.)
    - Keyword extraction and normalization
    - Text similarity computation
    - Academic-specific preprocessing
    - Multi-language support basics
    """
    
    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Set up the processor with optional configuration overrides.

        Args:
            config: Optional configuration dictionary
        """
        self.config = config or {}
        self.logger = logging.getLogger(__name__)

        # Tunable text-processing thresholds (fall back to defaults)
        cfg = self.config
        self.min_word_length = cfg.get('min_word_length', 2)
        self.max_word_length = cfg.get('max_word_length', 50)
        self.remove_stopwords = cfg.get('remove_stopwords', True)

        # Compile the regexes used for academic-text handling
        self._init_academic_patterns()

        # Basic English + academic stopword set
        self.stopwords = self._load_stopwords()

        self.logger.info("Enhanced Text Processor initialized")
    
    def _init_academic_patterns(self):
        """Compile the regular expressions used for academic text processing."""

        # LaTeX constructs: (key, raw pattern, compile flags)
        latex_specs = [
            ('commands', r'\\[a-zA-Z]+\{[^}]*\}', 0),
            ('simple_commands', r'\\[a-zA-Z]+', 0),
            ('math_inline', r'\$[^$]+\$', 0),
            ('math_display', r'\$\$[^$]+\$\$', 0),
            ('comments', r'%.*$', re.MULTILINE),
            ('citations', r'\\cite\{[^}]+\}', 0),
            ('references', r'\\ref\{[^}]+\}', 0),
            ('labels', r'\\label\{[^}]+\}', 0),
            ('sections', r'\\(?:sub)*section\*?\{([^}]+)\}', 0),
            ('environments', r'\\begin\{[^}]+\}.*?\\end\{[^}]+\}', re.DOTALL),
        ]
        self.latex_patterns = {
            key: re.compile(raw, flags) for key, raw, flags in latex_specs
        }

        # Common scholarly abbreviations and their plain-English expansions
        self.academic_abbreviations = {
            'e.g.': 'for example',
            'i.e.': 'that is',
            'et al.': 'and others',
            'cf.': 'compare',
            'vs.': 'versus',
            'etc.': 'and so on',
        }

        # Boilerplate academic phrases that carry little topical meaning
        self.academic_stop_phrases = {
            'in this paper', 'we show', 'we present', 'we propose',
            'it is shown', 'it can be seen', 'as shown in', 'figure shows',
            'table shows', 'results show', 'we observe', 'we find',
        }

        # Numbered cross-reference patterns (e.g. "Figure 3", "Theorem 2")
        ref_kinds = {
            'equations': 'equation',
            'figures': 'figure',
            'tables': 'table',
            'algorithms': 'algorithm',
            'theorems': 'theorem',
            'lemmas': 'lemma',
        }
        self.technical_patterns = {
            plural: re.compile(word + r'\s*\(?(\d+)\)?', re.IGNORECASE)
            for plural, word in ref_kinds.items()
        }
    
    def _load_stopwords(self) -> Set[str]:
        """Return the combined English + academic stopword set."""
        # Frequent English function/filler words
        basic = set(
            'a an and are as at be by for from has he in is it its of on '
            'that the to was were will with this but they have had what said '
            'each which she do how their if up out many then them these so '
            'some her would make like into him time two more go no way could '
            'my than first been call who oil sit now find down day did get '
            'come made may part'.split()
        )

        # Words so common in scholarly prose they carry no topical signal
        academic = set(
            'paper study research method approach result conclusion work '
            'analysis data show present propose demonstrate investigate '
            'examine consider discuss describe explain report provide '
            'suggest indicate observe find obtain achieve perform'.split()
        )

        return basic | academic
    
    def clean_academic_text(self, text: str,
                          remove_latex: bool = True,
                          remove_citations: bool = True,
                          remove_references: bool = True) -> str:
        """Strip LaTeX markup, citation and reference markers from text.

        Args:
            text: Raw text content
            remove_latex: Remove LaTeX commands and environments
            remove_citations: Remove citation markers
            remove_references: Remove reference markers

        Returns:
            Cleaned text
        """
        if not text:
            return ""

        patterns = self.latex_patterns
        result = text

        if remove_latex:
            # Environments first: they can span the simpler constructs
            for key in ('environments', 'commands', 'simple_commands'):
                result = patterns[key].sub(' ', result)
            # Display math becomes a placeholder; inline math is dropped
            result = patterns['math_display'].sub(' [MATH] ', result)
            result = patterns['math_inline'].sub(' ', result)
            # Strip LaTeX % comments to end of line
            result = patterns['comments'].sub('', result)

        if remove_citations:
            result = patterns['citations'].sub('', result)

        if remove_references:
            result = patterns['references'].sub('', result)
            result = patterns['labels'].sub('', result)

        # Collapse runs of whitespace into single spaces
        return re.sub(r'\s+', ' ', result).strip()
    
    def extract_keywords(self, text: str,
                        max_keywords: int = 20,
                        min_frequency: int = 2) -> List[Tuple[str, int]]:
        """
        Extract keywords from text using frequency analysis.

        Note: the return annotation requires ``Tuple`` from ``typing``; the
        module previously omitted that import, which made the class fail to
        define with a NameError.

        Args:
            text: Input text
            max_keywords: Maximum number of keywords to return
            min_frequency: Minimum frequency threshold

        Returns:
            List of (keyword, frequency) tuples, highest frequency first
        """
        if not text:
            return []

        # Clean and tokenize text
        words = self._tokenize(self.clean_academic_text(text))

        # Keep only tokens that look like meaningful keywords
        filtered_words = []
        for raw in words:
            word = raw.lower().strip()

            # Length bounds
            if not (self.min_word_length <= len(word) <= self.max_word_length):
                continue
            # Stopword removal (configurable)
            if self.remove_stopwords and word in self.stopwords:
                continue
            # Pure numbers carry no topical meaning
            if word.isdigit():
                continue
            # Disallow special characters (hyphens are fine)
            if re.search(r'[^\w\-]', word):
                continue

            filtered_words.append(word)

        # Counter.most_common sorts by count descending and is stable on
        # ties, matching the original stable sort over insertion order.
        counts = Counter(filtered_words)
        return [(word, count) for word, count in counts.most_common()
                if count >= min_frequency][:max_keywords]
    
    def extract_noun_phrases(self, text: str, max_phrases: int = 15) -> List[str]:
        """Extract candidate noun phrases using a simple capitalization pattern.

        Args:
            text: Input text
            max_phrases: Maximum number of phrases to return

        Returns:
            List of noun phrases, first occurrence first
        """
        if not text:
            return []

        cleaned = self.clean_academic_text(text)

        # Capitalized run optionally followed by lowercase words
        candidate_pattern = re.compile(
            r'\b(?:[A-Z][a-z]*\s+)*[A-Z][a-z]*(?:\s+[a-z]+)*\b'
        )

        kept = []
        for candidate in candidate_pattern.findall(cleaned):
            candidate = candidate.strip()

            # Keep phrases of 2-5 words only
            word_count = len(candidate.split())
            if not 2 <= word_count <= 5:
                continue

            # Skip boilerplate academic phrasing
            lowered = candidate.lower()
            if any(stop in lowered for stop in self.academic_stop_phrases):
                continue

            kept.append(candidate)

        # Deduplicate while preserving first-seen order, then cap the count
        return list(dict.fromkeys(kept))[:max_phrases]
    
    def compute_text_similarity(self, text1: str, text2: str) -> float:
        """Jaccard similarity over the keyword sets of two texts.

        Args:
            text1: First text
            text2: Second text

        Returns:
            Similarity score in [0, 1]
        """
        if not (text1 and text2):
            return 0.0

        # Compare the top keywords extracted from each text
        words_a = {w for w, _ in self.extract_keywords(text1, max_keywords=50)}
        words_b = {w for w, _ in self.extract_keywords(text2, max_keywords=50)}

        if not (words_a and words_b):
            return 0.0

        # Jaccard index: |A ∩ B| / |A ∪ B|
        overlap = words_a & words_b
        combined = words_a | words_b
        return len(overlap) / len(combined) if combined else 0.0
    
    def normalize_text(self, text: str) -> str:
        """Expand abbreviations and tidy whitespace and punctuation.

        Args:
            text: Input text

        Returns:
            Normalized text
        """
        if not text:
            return ""

        normalized = text.strip()

        # Replace scholarly abbreviations with plain-English equivalents
        for short_form, long_form in self.academic_abbreviations.items():
            normalized = normalized.replace(short_form, long_form)

        # Single spaces only
        normalized = re.sub(r'\s+', ' ', normalized)

        # Collapse repeated terminal punctuation to a single mark
        for mark in '.!?':
            normalized = re.sub('[' + re.escape(mark) + ']{2,}', mark, normalized)

        return normalized.strip()
    
    def extract_technical_terms(self, text: str) -> Dict[str, List[str]]:
        """
        Extract numbered technical references (figures, tables, etc.).

        Args:
            text: Input text

        Returns:
            Dictionary mapping term types ('equations', 'figures', 'tables',
            'algorithms', 'theorems', 'lemmas') to lists of reference numbers,
            deduplicated in first-occurrence order.
        """
        # dict.fromkeys deduplicates while keeping first-occurrence order;
        # the previous list(set(...)) produced a nondeterministic ordering
        # across interpreter runs due to string hash randomization.
        return {
            term_type: list(dict.fromkeys(pattern.findall(text)))
            for term_type, pattern in self.technical_patterns.items()
        }
    
    def _tokenize(self, text: str) -> List[str]:
        """Split text on runs of whitespace, hyphens and underscores.

        Args:
            text: Input text

        Returns:
            List of non-empty tokens
        """
        if not text:
            return []
        # Delimiters: any run of whitespace, '-' or '_'
        return [tok for tok in re.split(r'[\s\-_]+', text) if tok.strip()]
    
    def clean_title(self, title: str) -> str:
        """Clean LaTeX markup and stray punctuation from a paper title.

        Args:
            title: Raw title

        Returns:
            Cleaned title
        """
        if not title:
            return ""

        # Strip LaTeX but leave citation markers for the citation pass
        result = self.clean_academic_text(title, remove_citations=False)

        # Standard normalization (abbreviations, whitespace, punctuation)
        result = self.normalize_text(result)

        # Drop trailing '.', ',', ';' or ':' — keep '?' and '!'
        return re.sub(r'[.,;:]+$', '', result).strip()
    
    def clean_abstract(self, abstract: str) -> str:
        """Clean a paper abstract and strip a leading 'Abstract:'-style label.

        Args:
            abstract: Raw abstract

        Returns:
            Cleaned abstract
        """
        if not abstract:
            return ""

        # LaTeX/academic cleanup followed by standard normalization
        result = self.normalize_text(self.clean_academic_text(abstract))

        # Drop a leading "abstract"/"summary" label, if present
        label_prefixes = (
            'abstract:', 'abstract.', 'abstract -', 'summary:',
            'summary.', 'summary -',
        )
        lowered = result.lower()
        for label in label_prefixes:
            if lowered.startswith(label):
                result = result[len(label):].strip()
                break

        return result
    
    def clean_author_name(self, author: str) -> str:
        """Normalize an author name: drop emails, affiliations and titles.

        Args:
            author: Raw author name

        Returns:
            Cleaned author name
        """
        if not author:
            return ""

        # Collapse whitespace, then strip any LaTeX formatting
        result = re.sub(r'\s+', ' ', author.strip())
        result = self.clean_academic_text(result)

        # Drop email addresses and parenthesized affiliations
        result = re.sub(r'\S+@\S+\.\S+', '', result)
        result = re.sub(r'\([^)]*\)', '', result)

        # Strip honorifics and degree suffixes
        for title in ('Dr.', 'Prof.', 'Mr.', 'Ms.', 'Mrs.', 'PhD', 'Ph.D.'):
            result = result.replace(title, '')

        # Final whitespace tidy-up
        return re.sub(r'\s+', ' ', result).strip()
    
    def extract_section_content(self, text: str, section_title: str) -> Optional[str]:
        """
        Extract content from a specific section in academic text.

        Tries LaTeX (\\section{...}), Markdown-style ('#Title' / '# Title')
        and bare plain-text heading conventions in turn.

        Args:
            text: Full text content
            section_title: Title of section to extract

        Returns:
            Cleaned section content, or None if not found
        """
        if not text or not section_title:
            return None

        # Escape the title in every pattern: the Markdown pattern previously
        # interpolated it unescaped, so a title like "C++" raised re.error.
        escaped = re.escape(section_title)

        section_patterns = [
            # LaTeX: \section{Title} or \section*{Title}
            rf'\\section\*?\{{{escaped}\}}(.*?)(?=\\section|\Z)',
            # Markdown-style heading (optional whitespace after '#')
            rf'#\s*{escaped}(.*?)(?=^#|\Z)',
            # Bare heading on a line of its own
            rf'^{escaped}\.?\s*$(.*?)(?=^\w+\.?\s*$|\Z)',
        ]

        for pattern in section_patterns:
            match = re.search(pattern, text, re.MULTILINE | re.DOTALL | re.IGNORECASE)
            if match:
                content = match.group(1).strip()
                return self.clean_academic_text(content) if content else None

        return None
    
    def get_text_statistics(self, text: str) -> Dict[str, Any]:
        """
        Get statistics about text content.

        Args:
            text: Input text

        Returns:
            Dictionary of text statistics, or {'error': ...} for empty input
        """
        if not text:
            return {'error': 'Empty text'}

        cleaned = self.clean_academic_text(text)
        words = self._tokenize(cleaned)

        # Count only non-empty sentence segments: re.split leaves an empty
        # trailing piece when text ends with punctuation, which previously
        # inflated the sentence count by one.
        sentences = [s for s in re.split(r'[.!?]+', cleaned) if s.strip()]
        paragraphs = [p for p in text.split('\n\n') if p.strip()]

        # Basic statistics
        stats = {
            'character_count': len(text),
            'word_count': len(words),
            'sentence_count': len(sentences),
            'paragraph_count': len(paragraphs),
            'average_word_length': (
                sum(len(word) for word in words) / len(words) if words else 0
            ),
        }

        # Vocabulary statistics
        unique_words = {word.lower() for word in words}
        stats['unique_words'] = len(unique_words)
        stats['vocabulary_richness'] = len(unique_words) / len(words) if words else 0

        # Technical content indicators
        technical_terms = self.extract_technical_terms(text)
        stats['technical_references'] = sum(len(refs) for refs in technical_terms.values())

        # Keywords
        keywords = self.extract_keywords(text, max_keywords=10)
        stats['top_keywords'] = [word for word, _ in keywords[:5]]

        return stats