"""
Text Processing Utilities for ArXiv Scraper Service

Text cleaning, normalization, and preprocessing utilities for paper metadata.
"""

import re
import html
import unicodedata
from typing import List, Optional, Dict, Any


class TextProcessor:
    """
    Text processing utilities for paper metadata normalization.

    Features:
    - HTML entity decoding
    - Unicode normalization (NFKC)
    - Control/special character removal
    - Title and abstract cleaning (paragraph breaks preserved in abstracts)
    - Author name normalization
    - Lightweight keyword / URL / DOI extraction and metadata validation
    """

    def __init__(self):
        """Initialize text processor and pre-compile all regex patterns."""
        self._init_patterns()

    def _init_patterns(self):
        """Compile the regex patterns shared by the processing methods."""
        # HTML tags such as <b>...</b>; also removes <...>-bracketed fragments.
        self.html_tag_pattern = re.compile(r'<[^>]+>')

        # Runs of any whitespace (spaces, tabs, newlines).
        self.whitespace_pattern = re.compile(r'\s+')

        # Basic LaTeX commands, with or without one braced argument.
        # NOTE(review): \cmd{arg} is removed together with its argument, so
        # markup like \textbf{word} also drops "word" -- confirm intended.
        self.latex_pattern = re.compile(r'\\[a-zA-Z]+\{[^}]*\}|\\[a-zA-Z]+')

        # C0/C1 control characters plus DEL.
        self.special_chars_pattern = re.compile(r'[\x00-\x1f\x7f-\x9f]')

        # Leading "Title:" label; IGNORECASE already covers all case
        # variants, so a single alternative suffices.
        self.title_prefix_pattern = re.compile(r'^Title:\s*', re.IGNORECASE)
        # Embedded "arXiv:YYMM.NNNNN[vN]" identifiers inside titles.
        self.arxiv_prefix_pattern = re.compile(r'arXiv:\d+\.\d+v?\d*\s*', re.IGNORECASE)

        # Leading "Abstract"/"Abstract:" label.  The \b prevents chopping the
        # start of a longer word such as "Abstractions".
        self.abstract_prefix_pattern = re.compile(r'^Abstract\b:?\s*', re.IGNORECASE)
        # Blank-line separators between paragraphs.
        self.paragraph_break_pattern = re.compile(r'\n\s*\n')

        # Trailing "(affiliation)" on author names.
        self.author_suffix_pattern = re.compile(r'\s*\([^)]*\)\s*$')
        # "<email@host>" fragments in author strings.
        self.author_email_pattern = re.compile(r'\s*<[^>]*>\s*')
        # Honorific prefixes (Dr., Prof., ...).
        self.honorific_pattern = re.compile(r'\b(Dr\.?|Prof\.?|Mr\.?|Ms\.?|Mrs\.?)\s+', re.IGNORECASE)

        # "subject:" / "category:" labels (matched against lower-cased input).
        self.category_prefix_pattern = re.compile(r'^(subject:?\s*|category:?\s*)')
        # Old-style arXiv paper number ("YYMMnnn", optional version).
        self.old_paper_number_pattern = re.compile(r'\d{7}(v\d+)?')

        # Hoisted from the extraction/validation helpers so they are compiled
        # once per instance instead of on every call.
        self.url_pattern = re.compile(
            r'https?://(?:[-\w.])+(?:[:\d]+)?(?:/(?:[\w/_.])*(?:\?(?:[\w&=%.])*)?(?:#(?:[\w.])*)?)?',
            re.IGNORECASE
        )
        self.doi_pattern = re.compile(r'10\.\d{4,}/[^\s,;]+', re.IGNORECASE)
        self.strict_doi_pattern = re.compile(r'^10\.\d{4,}/[^\s,;]+$')
        self.email_pattern = re.compile(r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$')
        self.arxiv_id_pattern = re.compile(r'^\d{4}\.\d{4,5}(v\d+)?$|^[a-z-]+(\.[A-Z]{2})?/\d{7}(v\d+)?$')

    def clean_text(self, text: Optional[str]) -> str:
        """
        Basic text cleaning and normalization.

        Decodes HTML entities, strips tags, control characters and simple
        LaTeX commands, applies NFKC unicode normalization and collapses
        all whitespace runs to single spaces.

        Args:
            text: Input text (may be None or empty).

        Returns:
            Cleaned single-line text ("" for falsy input).
        """
        if not text:
            return ""

        # Order matters: entities decode first (so "&lt;b&gt;" becomes a tag
        # that the tag pattern can then remove), whitespace collapses last.
        text = html.unescape(text)
        text = self.html_tag_pattern.sub(' ', text)
        text = self.special_chars_pattern.sub(' ', text)
        text = unicodedata.normalize('NFKC', text)
        text = self.latex_pattern.sub(' ', text)
        text = self.whitespace_pattern.sub(' ', text)
        return text.strip()

    def clean_title(self, title: Optional[str]) -> str:
        """
        Clean and normalize a paper title.

        Args:
            title: Paper title.

        Returns:
            Cleaned title with "Title:"/arXiv prefixes removed, trailing
            punctuation stripped, and the first letter upper-cased.
        """
        if not title:
            return ""

        title = self.clean_text(title)

        # Remove common prefixes.
        title = self.title_prefix_pattern.sub('', title)
        title = self.arxiv_prefix_pattern.sub('', title)

        # Drop stray trailing punctuation.
        title = title.rstrip('.,;:')

        # Capitalize the first letter if needed.
        if title and title[0].islower():
            title = title[0].upper() + title[1:]

        return title.strip()

    def clean_abstract(self, abstract: Optional[str]) -> str:
        """
        Clean and normalize a paper abstract.

        Paragraphs separated by blank lines are preserved (joined with a
        double newline); newlines inside a paragraph are collapsed.

        FIX: the previous implementation ran clean_text() first, which
        collapsed every newline, so the paragraph-preserving substitutions
        that followed were dead code.  Paragraphs are now split off before
        the whitespace-collapsing cleanup.

        Args:
            abstract: Paper abstract.

        Returns:
            Cleaned abstract with any leading "Abstract:" label removed.
        """
        if not abstract:
            return ""

        # Clean each paragraph independently so blank-line breaks survive.
        paragraphs = [self.clean_text(p) for p in self.paragraph_break_pattern.split(abstract)]
        abstract = '\n\n'.join(p for p in paragraphs if p)

        # Remove a leading "Abstract"/"Abstract:" label.
        abstract = self.abstract_prefix_pattern.sub('', abstract)

        return abstract.strip()

    def clean_author_name(self, name: Optional[str]) -> str:
        """
        Clean and normalize an author name.

        Removes email addresses, parenthesised affiliations and honorifics
        (Dr., Prof., ...), then normalizes capitalization.

        FIX: mixed-case name parts keep their internal capitalization
        ("McDonald" stays "McDonald"; str.capitalize() previously forced
        "Mcdonald").  All-lower/all-upper parts are still title-cased.

        Args:
            name: Author name.

        Returns:
            Cleaned author name.
        """
        if not name:
            return ""

        name = self.clean_text(name)

        # Strip "<email@host>" fragments and "(affiliation)" suffixes.
        name = self.author_email_pattern.sub(' ', name)
        name = self.author_suffix_pattern.sub('', name)

        # Strip honorific prefixes.
        name = self.honorific_pattern.sub('', name)

        # Normalize capitalization only for multi-part names, matching the
        # original behavior (single tokens pass through untouched).
        parts = name.split()
        if len(parts) >= 2:
            normalized = []
            for part in parts:
                part = part.strip('.,')
                if not part:
                    continue
                if part.islower() or part.isupper():
                    # "jane" / "JANE" -> "Jane"
                    normalized.append(part.capitalize())
                else:
                    # Preserve internal capitals ("McDonald", "DiCaprio").
                    normalized.append(part[0].upper() + part[1:])
            name = ' '.join(normalized)

        return name.strip()

    def extract_keywords(self, text: str, min_length: int = 3) -> List[str]:
        """
        Extract simple keywords from text.

        Args:
            text: Input text.
            min_length: Minimum keyword length.

        Returns:
            Up to 50 unique, lower-cased alphabetic keywords in
            first-seen order.
        """
        if not text:
            return []

        text = self.clean_text(text.lower())

        # Split on punctuation/whitespace and keep alphabetic tokens only.
        words = re.split(r'[,;.\s\-_]+', text)
        keywords = [w for w in (word.strip() for word in words)
                    if len(w) >= min_length and w.isalpha()]

        # Order-preserving de-duplication (dicts keep insertion order),
        # capped at 50 keywords.
        return list(dict.fromkeys(keywords))[:50]

    def normalize_subject_category(self, category: str) -> str:
        """
        Normalize an ArXiv subject category.

        Args:
            category: Subject category, possibly an old-style identifier
                such as "hep-th/9901001" or "math.GT/0309136".

        Returns:
            Lower-cased category with label prefixes and old-style paper
            numbers removed.
        """
        if not category:
            return ""

        category = category.strip().lower()

        # Remove "subject:" / "category:" labels.
        category = self.category_prefix_pattern.sub('', category)

        # Old format: subject-class/YYMMnnn -> subject-class.
        # FIX: dotted sub-classes ("math.gt/0309136") were previously skipped
        # because of a '.'-in-whole-string check; now the tail is inspected.
        if '/' in category:
            head, _, tail = category.partition('/')
            if self.old_paper_number_pattern.fullmatch(tail) or '.' not in category:
                category = head

        return category

    def extract_urls(self, text: str) -> List[str]:
        """
        Extract HTTP/HTTPS URLs from text.

        FIX: previously returned list(set(urls)), whose ordering depends on
        the interpreter's hash seed; de-duplication is now order-preserving
        and therefore deterministic.

        Args:
            text: Input text.

        Returns:
            Unique URLs in first-seen order.
        """
        if not text:
            return []

        return list(dict.fromkeys(self.url_pattern.findall(text)))

    def extract_doi(self, text: str) -> Optional[str]:
        """
        Extract the first DOI from text.

        Args:
            text: Input text.

        Returns:
            DOI string if found, None otherwise.
        """
        if not text:
            return None

        match = self.doi_pattern.search(text)
        return match.group(0) if match else None

    def is_valid_email(self, email: str) -> bool:
        """
        Check whether a string looks like a valid email address.

        Args:
            email: Email string.

        Returns:
            True if the stripped string matches a simple email pattern.
        """
        if not email:
            return False

        return bool(self.email_pattern.match(email.strip()))

    def truncate_text(self, text: str, max_length: int, suffix: str = "...") -> str:
        """
        Truncate text to a maximum length, preferring a word boundary.

        Args:
            text: Input text.
            max_length: Maximum length of the returned string.
            suffix: Suffix appended when truncation happens.

        Returns:
            The original text if it fits, otherwise a truncated version
            ending in `suffix` and no longer than `max_length`.
        """
        if not text:
            return ""

        if len(text) <= max_length:
            return text

        truncate_at = max_length - len(suffix)
        if truncate_at <= 0:
            # Not even the suffix fits; return as much of it as allowed.
            return suffix[:max_length]

        truncated = text[:truncate_at]

        # Back up to the last space, but only if that keeps at least 80% of
        # the available budget (avoid losing most of the text to one word).
        last_space = truncated.rfind(' ')
        if last_space > 0 and last_space > truncate_at * 0.8:
            truncated = truncated[:last_space]

        return truncated + suffix

    def validate_paper_data(self, paper_data: Dict[str, Any]) -> Dict[str, List[str]]:
        """
        Validate paper data and return validation errors.

        FIX: field values are coerced with `or ''` so a key present with a
        None value (common for optional metadata) yields a validation error
        instead of raising AttributeError; non-string author entries are
        reported rather than crashed on.

        Args:
            paper_data: Paper data dictionary.

        Returns:
            Dictionary mapping field name to a list of error messages;
            empty when everything validates.
        """
        errors: Dict[str, List[str]] = {}

        # Title: required, 10..500 characters.
        title = (paper_data.get('title') or '').strip()
        if not title:
            errors['title'] = ['Title is required']
        elif len(title) < 10:
            errors['title'] = ['Title is too short (minimum 10 characters)']
        elif len(title) > 500:
            errors['title'] = ['Title is too long (maximum 500 characters)']

        # Authors: at least one non-empty string, each >= 2 characters.
        authors = paper_data.get('authors') or []
        if not authors:
            errors['authors'] = ['At least one author is required']
        else:
            author_errors = []
            for i, author in enumerate(authors):
                if not isinstance(author, str) or not author.strip():
                    author_errors.append(f'Author {i+1} is empty')
                elif len(author.strip()) < 2:
                    author_errors.append(f'Author {i+1} name is too short')

            if author_errors:
                errors['authors'] = author_errors

        # Abstract: optional, but 50..10000 characters when present.
        abstract = (paper_data.get('abstract') or '').strip()
        if abstract and len(abstract) < 50:
            errors['abstract'] = ['Abstract is too short (minimum 50 characters if provided)']
        elif abstract and len(abstract) > 10000:
            errors['abstract'] = ['Abstract is too long (maximum 10,000 characters)']

        # ArXiv ID: optional; new ("YYMM.NNNNN[vN]") or old ("cls/YYMMnnn").
        arxiv_id = (paper_data.get('arxiv_id') or '').strip()
        if arxiv_id and not self.arxiv_id_pattern.match(arxiv_id):
            errors['arxiv_id'] = ['Invalid ArXiv ID format']

        # DOI: optional; must look like "10.NNNN/suffix".
        doi = (paper_data.get('doi') or '').strip()
        if doi and not self.strict_doi_pattern.match(doi):
            errors['doi'] = ['Invalid DOI format']

        return errors