"""
Paper Processor for ArXiv Scraper Service

Processes and normalizes paper metadata from OAI-PMH records
for storage and classification preparation.
"""

import re
import logging
from datetime import datetime, timezone
from typing import Dict, List, Any, Optional
from urllib.parse import urlparse
import xml.etree.ElementTree as ET

from utils.text_processing import TextProcessor


class PaperProcessor:
    """
    Paper metadata processor and normalizer.
    
    Features:
    - OAI-PMH record parsing
    - Metadata normalization
    - Subject classification
    - URL generation
    - Text cleaning and processing
    """
    
    def __init__(self, config: Dict[str, Any]):
        """Initialize paper processor.
        
        Args:
            config: Processing configuration
        """
        self.config = config
        self.logger = logging.getLogger(__name__)
        
        # Text processor for cleaning titles, abstracts and author names
        self.text_processor = TextProcessor()
        
        # Human-readable names for ArXiv subject categories
        self.subject_mappings = self._load_subject_mappings()
        
        # Compiled regex patterns (compiled once, reused per record)
        self._init_regex_patterns()
        
        self.logger.info("Paper processor initialized")
    
    def _init_regex_patterns(self):
        """Initialize compiled regex patterns used during record parsing."""
        # ArXiv ID patterns: new format (YYMM.NNNNN, optional version suffix)
        # and pre-2007 format (archive[.SC]/YYMMNNN, optional version suffix).
        self.arxiv_id_pattern = re.compile(r'(?:arXiv:)?(\d{4}\.\d{4,5})(v\d+)?')
        self.arxiv_old_id_pattern = re.compile(r'(?:arXiv:)?([a-z-]+(?:\.[A-Z]{2})?/\d{7})(v\d+)?')
        
        # DOI pattern: "10.<registrant>/<suffix>" up to the next whitespace
        self.doi_pattern = re.compile(r'10\.\d{4,}(?:\.\d+)?/[^\s]+')
        
        # Version suffix at the end of an ArXiv ID, e.g. "...v2"
        self.version_pattern = re.compile(r'v(\d+)$')
        
        # Subject category pattern. The secondary part of an ArXiv category
        # is not always two uppercase letters: "physics.flu-dyn",
        # "physics.acc-ph" etc. use lowercase/hyphenated subparts (see
        # _load_subject_mappings), so accept those as well as "cs.LG"-style.
        self.subject_pattern = re.compile(r'^[a-z-]+(?:\.[A-Za-z-]{2,})?$')
    
    def _load_subject_mappings(self) -> Dict[str, str]:
        """Return the mapping of ArXiv category codes to display names."""
        # Standard ArXiv subject categories
        return {
            # Mathematics
            'math.AG': 'Algebraic Geometry',
            'math.AT': 'Algebraic Topology',
            'math.AP': 'Analysis of PDEs',
            'math.CT': 'Category Theory',
            'math.CA': 'Classical Analysis and ODEs',
            'math.CO': 'Combinatorics',
            'math.AC': 'Commutative Algebra',
            'math.CV': 'Complex Variables',
            'math.DG': 'Differential Geometry',
            'math.DS': 'Dynamical Systems',
            'math.FA': 'Functional Analysis',
            'math.GM': 'General Mathematics',
            'math.GN': 'General Topology',
            'math.GT': 'Geometric Topology',
            'math.GR': 'Group Theory',
            'math.HO': 'History and Overview',
            'math.IT': 'Information Theory',
            'math.KT': 'K-Theory and Homology',
            'math.LO': 'Logic',
            'math.MP': 'Mathematical Physics',
            'math.MG': 'Metric Geometry',
            'math.NT': 'Number Theory',
            'math.NA': 'Numerical Analysis',
            'math.OA': 'Operator Algebras',
            'math.OC': 'Optimization and Control',
            'math.PR': 'Probability',
            'math.QA': 'Quantum Algebra',
            'math.RT': 'Representation Theory',
            'math.RA': 'Rings and Algebras',
            'math.SP': 'Spectral Theory',
            'math.ST': 'Statistics Theory',
            'math.SG': 'Symplectic Geometry',
            
            # Physics
            'physics.acc-ph': 'Accelerator Physics',
            'physics.ao-ph': 'Atmospheric and Oceanic Physics',
            'physics.atom-ph': 'Atomic Physics',
            'physics.atm-clus': 'Atomic and Molecular Clusters',
            'physics.bio-ph': 'Biological Physics',
            'physics.chem-ph': 'Chemical Physics',
            'physics.class-ph': 'Classical Physics',
            'physics.comp-ph': 'Computational Physics',
            'physics.data-an': 'Data Analysis',
            'physics.flu-dyn': 'Fluid Dynamics',
            'physics.gen-ph': 'General Physics',
            'physics.geo-ph': 'Geophysics',
            'physics.hist-ph': 'History and Philosophy of Physics',
            'physics.ins-det': 'Instrumentation and Detectors',
            'physics.med-ph': 'Medical Physics',
            'physics.optics': 'Optics',
            'physics.ed-ph': 'Physics Education',
            'physics.soc-ph': 'Physics and Society',
            'physics.plasm-ph': 'Plasma Physics',
            'physics.pop-ph': 'Popular Physics',
            'physics.space-ph': 'Space Physics',
            
            # Computer Science  
            'cs.AI': 'Artificial Intelligence',
            'cs.CL': 'Computation and Language',
            'cs.CC': 'Computational Complexity',
            'cs.CE': 'Computational Engineering',
            'cs.CG': 'Computational Geometry',
            'cs.GT': 'Computer Science and Game Theory',
            'cs.CV': 'Computer Vision and Pattern Recognition',
            'cs.CY': 'Computers and Society',
            'cs.CR': 'Cryptography and Security',
            'cs.DS': 'Data Structures and Algorithms',
            'cs.DB': 'Databases',
            'cs.DL': 'Digital Libraries',
            'cs.DM': 'Discrete Mathematics',
            'cs.DC': 'Distributed Computing',
            'cs.ET': 'Emerging Technologies',
            'cs.FL': 'Formal Languages and Automata Theory',
            'cs.GL': 'General Literature',
            'cs.GR': 'Graphics',
            'cs.AR': 'Hardware Architecture',
            'cs.HC': 'Human-Computer Interaction',
            'cs.IR': 'Information Retrieval',
            'cs.IT': 'Information Theory',
            'cs.LG': 'Machine Learning',
            'cs.LO': 'Logic in Computer Science',
            'cs.MS': 'Mathematical Software',
            'cs.MA': 'Multiagent Systems',
            'cs.MM': 'Multimedia',
            'cs.NI': 'Networking and Internet Architecture',
            'cs.NE': 'Neural and Evolutionary Computing',
            'cs.NA': 'Numerical Analysis',
            'cs.OS': 'Operating Systems',
            'cs.OH': 'Other Computer Science',
            'cs.PF': 'Performance',
            'cs.PL': 'Programming Languages',
            'cs.RO': 'Robotics',
            'cs.SI': 'Social and Information Networks',
            'cs.SE': 'Software Engineering',
            'cs.SD': 'Sound',
            'cs.SC': 'Symbolic Computation',
            'cs.SY': 'Systems and Control'
        }
    
    def process_record(self, record) -> Optional[Dict[str, Any]]:
        """
        Process OAI-PMH record into normalized paper data.
        
        Args:
            record: OAI-PMH record object
            
        Returns:
            Normalized paper data dictionary or None if processing fails
        """
        try:
            # Extract basic Dublin Core metadata
            paper_data = self._extract_basic_metadata(record)
            
            if not paper_data:
                return None
            
            # Extract ArXiv-specific metadata (ID, version, DOI, ...)
            arxiv_metadata = self._extract_arxiv_metadata(record)
            paper_data.update(arxiv_metadata)
            
            # Clean and normalize title/authors/abstract, set dates
            paper_data = self._normalize_paper_data(paper_data)
            
            # Generate PDF and abstract-page URLs
            paper_data.update(self._generate_urls(paper_data))
            
            # Extract valid subject categories and pick a primary one
            paper_data.update(self._process_subject_categories(paper_data))
            
            # Reject records missing required fields
            if not self._validate_paper_data(paper_data):
                return None
            
            return paper_data
            
        except Exception as e:
            self.logger.error(f"Error processing record: {e}")
            return None
    
    def _extract_basic_metadata(self, record) -> Optional[Dict[str, Any]]:
        """Extract basic Dublin Core metadata.
        
        Returns:
            Dict with title/authors/abstract/date/subjects/identifiers,
            or None if extraction fails.
        """
        try:
            metadata = record.metadata
            
            # Extract from Dublin Core elements
            paper_data = {
                'oai_identifier': getattr(record, 'identifier', ''),
                'title': self._get_dc_element(metadata, 'title'),
                'authors': self._get_dc_elements(metadata, 'creator'),
                'abstract': self._get_dc_element(metadata, 'description'),
                'date_submitted': self._parse_date(self._get_dc_element(metadata, 'date')),
                'subjects': self._get_dc_elements(metadata, 'subject')
            }
            
            # Extract identifier (should contain ArXiv ID)
            identifiers = self._get_dc_elements(metadata, 'identifier')
            paper_data['identifiers'] = identifiers
            
            return paper_data
            
        except Exception as e:
            self.logger.error(f"Error extracting basic metadata: {e}")
            return None
    
    def _extract_arxiv_metadata(self, record) -> Dict[str, Any]:
        """Extract ArXiv-specific metadata (ID, version, DOI, journal ref,
        comments) from a record. Missing items are simply omitted."""
        arxiv_data = {}
        
        try:
            # ArXiv ID (includes the version suffix when present)
            arxiv_id = self._extract_arxiv_id(record)
            if arxiv_id:
                arxiv_data['arxiv_id'] = arxiv_id
                
                # Derive version number from the "vN" suffix; default to 1
                version_match = self.version_pattern.search(arxiv_id)
                arxiv_data['version'] = int(version_match.group(1)) if version_match else 1
            
            # Extract DOI if present
            doi = self._extract_doi(record)
            if doi:
                arxiv_data['doi'] = doi
            
            # Extract journal reference
            journal_ref = self._extract_journal_reference(record)
            if journal_ref:
                arxiv_data['journal_ref'] = journal_ref
            
            # Extract comments
            comments = self._extract_comments(record)
            if comments:
                arxiv_data['comments'] = comments
            
        except Exception as e:
            self.logger.error(f"Error extracting ArXiv metadata: {e}")
        
        return arxiv_data
    
    def _extract_arxiv_id(self, record) -> Optional[str]:
        """Extract the ArXiv ID from a record.
        
        Returns:
            The ID including its version suffix when present (e.g.
            "2101.12345v2"), or None if no ID can be found.
        """
        # Candidate strings that may contain the ID
        sources = []
        
        # From OAI identifier
        if hasattr(record, 'identifier'):
            sources.append(record.identifier)
        
        if hasattr(record, 'metadata'):
            # From identifiers in metadata
            sources.extend(self._get_dc_elements(record.metadata, 'identifier'))
            # From subjects (sometimes contains ID)
            sources.extend(self._get_dc_elements(record.metadata, 'subject'))
        
        # Try to match ArXiv ID patterns
        for source in sources:
            if not source:
                continue
            
            # New format (YYMM.NNNNN). Keep the version suffix so callers
            # (e.g. _extract_arxiv_metadata) can derive the paper version;
            # previously only group(1) was returned, which excludes the
            # "vN" suffix, so the version was always reported as 1.
            match = self.arxiv_id_pattern.search(source)
            if match:
                return match.group(1) + (match.group(2) or '')
            
            # Old format (subject-class/YYMMnnn)
            match = self.arxiv_old_id_pattern.search(source)
            if match:
                return match.group(1) + (match.group(2) or '')
        
        return None
    
    def _extract_doi(self, record) -> Optional[str]:
        """Extract a DOI from the record's identifier elements, if any."""
        sources = []
        
        if hasattr(record, 'metadata'):
            identifiers = self._get_dc_elements(record.metadata, 'identifier')
            sources.extend(identifiers)
        
        for source in sources:
            if source:
                match = self.doi_pattern.search(source)
                if match:
                    # The greedy [^\s]+ suffix can swallow trailing sentence
                    # punctuation; trim it off.
                    return match.group(0).rstrip('.,;')
        
        return None
    
    def _extract_journal_reference(self, record) -> Optional[str]:
        """Extract a journal reference from the record, if present."""
        # Journal reference is usually in relation or source field
        if hasattr(record, 'metadata'):
            relations = self._get_dc_elements(record.metadata, 'relation')
            for relation in relations:
                if relation and 'journal' in relation.lower():
                    return relation
            
            sources = self._get_dc_elements(record.metadata, 'source')
            for source in sources:
                # Heuristic: short source strings are unlikely to be
                # journal references
                if source and len(source) > 10:
                    return source
        
        return None
    
    def _extract_comments(self, record) -> Optional[str]:
        """Extract author comments from the record, if present."""
        # Comments might be in description or other fields
        if hasattr(record, 'metadata'):
            descriptions = self._get_dc_elements(record.metadata, 'description')
            # The first description is treated as the abstract elsewhere;
            # a second one often contains the author comments.
            if len(descriptions) > 1:
                return descriptions[1]
        
        return None
    
    def _get_dc_element(self, metadata, element_name: str) -> Optional[str]:
        """Get the first value of a Dublin Core element, or None."""
        elements = self._get_dc_elements(metadata, element_name)
        return elements[0] if elements else None
    
    def _get_dc_elements(self, metadata, element_name: str) -> List[str]:
        """Get all values of a Dublin Core element as stripped strings.
        
        Supports both attribute-style metadata objects and ones exposing
        a getMap() accessor. Returns an empty list on any failure.
        """
        try:
            # Attribute-style access
            if hasattr(metadata, element_name):
                values = getattr(metadata, element_name)
                if isinstance(values, list):
                    return [str(v).strip() for v in values if v]
                elif values:
                    return [str(values).strip()]
            
            # Map-style access (e.g. pyoai Header/Metadata objects)
            if hasattr(metadata, 'getMap'):
                values = metadata.getMap().get(element_name, [])
                if isinstance(values, list):
                    return [str(v).strip() for v in values if v]
                elif values:
                    return [str(values).strip()]
            
            return []
            
        except Exception as e:
            self.logger.debug(f"Error getting DC element {element_name}: {e}")
            return []
    
    def _parse_date(self, date_str: Optional[str]) -> Optional[datetime]:
        """Parse a date string into a timezone-aware datetime.
        
        Tries several common formats; naive results are assumed to be UTC.
        Returns None (with a warning) when no format matches.
        """
        if not date_str:
            return None
        
        # Try different date formats
        date_formats = [
            '%Y-%m-%d',
            '%Y-%m-%dT%H:%M:%SZ',
            '%Y-%m-%d %H:%M:%S',
            '%a, %d %b %Y %H:%M:%S %Z',
            '%Y/%m/%d',
            '%d %b %Y'
        ]
        
        for fmt in date_formats:
            try:
                dt = datetime.strptime(date_str.strip(), fmt)
                # Ensure timezone aware; OAI-PMH dates are UTC
                if dt.tzinfo is None:
                    dt = dt.replace(tzinfo=timezone.utc)
                return dt
            except ValueError:
                continue
        
        self.logger.warning(f"Could not parse date: {date_str}")
        return None
    
    def _normalize_paper_data(self, paper_data: Dict[str, Any]) -> Dict[str, Any]:
        """Clean title/authors/abstract and set creation/update dates."""
        # Clean title
        if paper_data.get('title'):
            paper_data['title'] = self.text_processor.clean_title(paper_data['title'])
        
        # Clean and normalize authors
        if paper_data.get('authors'):
            paper_data['authors'] = self._normalize_authors(paper_data['authors'])
        
        # Clean abstract
        if paper_data.get('abstract'):
            paper_data['abstract'] = self.text_processor.clean_abstract(paper_data['abstract'])
        
        # Set creation and update dates
        paper_data['date_created'] = paper_data.get('date_submitted')
        paper_data['date_updated'] = None  # Will be set if this is an update
        
        return paper_data
    
    def _normalize_authors(self, authors: List[str]) -> List[str]:
        """Clean author names, dropping any that come back empty."""
        normalized = []
        
        for author in authors:
            if not author:
                continue
            
            # Clean author name
            author = self.text_processor.clean_author_name(author)
            
            if author:
                normalized.append(author)
        
        return normalized
    
    def _generate_urls(self, paper_data: Dict[str, Any]) -> Dict[str, str]:
        """Generate PDF and abstract-page URLs from the ArXiv ID.
        
        Returns an empty dict when no ArXiv ID is available.
        """
        urls = {}
        
        arxiv_id = paper_data.get('arxiv_id')
        if arxiv_id:
            # Remove version suffix so URLs point at the latest version
            clean_id = re.sub(r'v\d+$', '', arxiv_id)
            
            urls['pdf_url'] = f"https://arxiv.org/pdf/{clean_id}.pdf"
            urls['source_url'] = f"https://arxiv.org/abs/{clean_id}"
        
        return urls
    
    def _process_subject_categories(self, paper_data: Dict[str, Any]) -> Dict[str, Any]:
        """Filter subjects down to valid ArXiv categories.
        
        Returns:
            Dict with 'subject_categories' (all valid categories, in order)
            and 'primary_subject' (the first valid one), or an empty dict
            when none are found.
        """
        subject_data = {}
        
        subjects = paper_data.get('subjects', [])
        if not subjects:
            return subject_data
        
        # Extract valid subject categories
        valid_categories = []
        primary_subject = None
        
        for subject in subjects:
            if not subject:
                continue
            
            # Check if it's a valid ArXiv subject category
            if self.subject_pattern.match(subject):
                valid_categories.append(subject)
                
                # First valid category becomes primary
                if primary_subject is None:
                    primary_subject = subject
        
        if valid_categories:
            subject_data['subject_categories'] = valid_categories
            subject_data['primary_subject'] = primary_subject
        
        return subject_data
    
    def _validate_paper_data(self, paper_data: Dict[str, Any]) -> bool:
        """Validate that paper data has required fields.
        
        Requires title, authors, date_submitted, and at least one of
        arxiv_id / oai_identifier.
        """
        required_fields = ['title', 'authors', 'date_submitted']
        
        for field in required_fields:
            if not paper_data.get(field):
                self.logger.warning(f"Paper missing required field: {field}")
                return False
        
        # Check for valid ArXiv ID or at least some identifier
        if not paper_data.get('arxiv_id') and not paper_data.get('oai_identifier'):
            self.logger.warning("Paper missing both ArXiv ID and OAI identifier")
            return False
        
        return True