"""
ArXiv Scraper Service - Main Service Implementation

Implements the core scraping functionality using OAI-PMH protocol
with the sickle library for efficient paper harvesting.
"""

import time
import logging
from datetime import datetime, timedelta, timezone
from typing import Dict, List, Optional, Tuple, Any
import json

from sickle import Sickle
from sickle.oaiexceptions import (
    OAIError, BadArgument, BadResumptionToken, 
    BadVerb, CannotDisseminateFormat, IdDoesNotExist,
    NoMetadataFormats, NoRecordsMatch, NoSetHierarchy
)

from .oai_client import OAIClient
from .database_manager import DatabaseManager
from .state_manager import StateManager
from .paper_processor import PaperProcessor
from utils.metrics import MetricsCollector
from utils.exceptions import (
    ScrapingError, DatabaseError, ConfigurationError,
    RateLimitError, NetworkError
)


class ArxivScraperService:
    """
    Main ArXiv scraper service implementing robust daily paper harvesting.

    Features:
    - OAI-PMH client using sickle library
    - Incremental updates with timestamp filtering
    - Rate limiting and error handling
    - Database integration and state persistence
    - Comprehensive monitoring and metrics
    """

    # Version string reported by get_status().
    VERSION = '1.0.0'

    # Fallback used when config['processing']['batch_size'] is absent, keeping
    # _scrape_papers consistent with the defensive config.get('processing', {})
    # access already used in _initialize_components.
    DEFAULT_BATCH_SIZE = 100

    def __init__(self, config: Dict[str, Any], dry_run: bool = False):
        """Initialize the scraper service.

        Args:
            config: Configuration dictionary.
            dry_run: If True, don't write to database.

        Raises:
            ConfigurationError: If any component fails to initialize.
        """
        self.config = config
        self.dry_run = dry_run
        self.logger = logging.getLogger(__name__)

        # Initialize components (OAI client, database, state, processor).
        self._initialize_components()

        # Metrics collection
        self.metrics = MetricsCollector()

        self.logger.info(f"ArXiv Scraper Service initialized (dry_run={dry_run})")

    def _initialize_components(self) -> None:
        """Initialize all service components.

        Raises:
            ConfigurationError: If any component cannot be constructed
                (e.g. a required configuration key is missing).
        """
        try:
            # OAI-PMH client
            self.oai_client = OAIClient(
                base_url=self.config['arxiv']['oai_base_url'],
                rate_limit_delay=self.config['scraping']['rate_limit_delay'],
                max_retries=self.config['scraping']['max_retries'],
                timeout=self.config['scraping']['request_timeout']
            )

            # Database manager (not created at all in dry-run mode).
            if not self.dry_run:
                self.db_manager = DatabaseManager(
                    connection_string=self.config['database']['connection_string'],
                    pool_size=self.config['database']['pool_size'],
                    max_overflow=self.config['database']['max_overflow']
                )
            else:
                self.db_manager = None
                self.logger.info("Running in dry-run mode - database writes disabled")

            # State manager for persistence
            self.state_manager = StateManager(
                state_file_path=self.config['state']['file_path']
            )

            # Paper processor for metadata normalization
            self.paper_processor = PaperProcessor(
                config=self.config.get('processing', {})
            )

        except Exception as e:
            # Chain the cause so the original traceback stays visible.
            raise ConfigurationError(f"Failed to initialize components: {e}") from e

    def run_incremental_scrape(self) -> Dict[str, Any]:
        """
        Run incremental scraping from last successful run.

        On success the state watermark is set to the time this run *started*,
        not the time it finished, so papers announced while the harvest was
        in flight are re-covered by the next incremental run.

        Returns:
            Dictionary with scraping results and metrics

        Raises:
            ScrapingError: If the scrape fails for any reason.
        """
        start_time = time.time()
        # Capture before harvesting begins; used as the next run's watermark.
        run_started_at = datetime.now(timezone.utc)
        self.logger.info("Starting incremental scrape")

        try:
            # Get last successful run timestamp
            last_run = self.state_manager.get_last_successful_run()

            if last_run:
                date_from = last_run.isoformat()
                self.logger.info(f"Resuming from last successful run: {date_from}")
            else:
                # Default to yesterday if no previous run
                date_from = (datetime.now(timezone.utc) - timedelta(days=1)).isoformat()
                self.logger.info(f"No previous run found, starting from: {date_from}")

            # Perform scraping
            results = self._scrape_papers(date_from=date_from)

            # Only advance the watermark on a clean run; a failed window
            # gets retried on the next invocation.
            if results['errors'] == 0:
                self.state_manager.update_last_successful_run(run_started_at)
                self.logger.info("State updated - incremental scrape completed successfully")

            results['execution_time'] = time.time() - start_time
            return results

        except ScrapingError:
            # Already logged and wrapped by _scrape_papers; don't double-wrap.
            raise
        except Exception as e:
            self.logger.error(f"Incremental scrape failed: {e}", exc_info=True)
            raise ScrapingError(f"Incremental scrape failed: {e}") from e

    def full_resync(self) -> Dict[str, Any]:
        """
        Perform full resynchronization from the earliest available date.

        Clears any persisted state first; on success the watermark is set to
        the time this run started (see run_incremental_scrape for rationale).

        Returns:
            Dictionary with scraping results and metrics

        Raises:
            ScrapingError: If the resync fails for any reason.
        """
        start_time = time.time()
        run_started_at = datetime.now(timezone.utc)
        self.logger.info("Starting full resynchronization")

        try:
            # Start from ArXiv's inception (approximately)
            earliest_date = self.config['arxiv'].get('earliest_date', '2007-01-01')
            self.logger.info(f"Full resync starting from: {earliest_date}")

            # Clear existing state
            self.state_manager.clear_state()

            # Perform scraping
            results = self._scrape_papers(date_from=earliest_date)

            # Update state on success
            if results['errors'] == 0:
                self.state_manager.update_last_successful_run(run_started_at)
                self.logger.info("State updated - full resync completed successfully")

            results['execution_time'] = time.time() - start_time
            return results

        except ScrapingError:
            # Already logged and wrapped by _scrape_papers; don't double-wrap.
            raise
        except Exception as e:
            self.logger.error(f"Full resync failed: {e}", exc_info=True)
            raise ScrapingError(f"Full resync failed: {e}") from e

    def scrape_date_range(self, date_from: Optional[str] = None,
                         date_until: Optional[str] = None) -> Dict[str, Any]:
        """
        Scrape papers within a specific date range.

        Does not touch the persisted state watermark.

        Args:
            date_from: Start date (ISO format)
            date_until: End date (ISO format)

        Returns:
            Dictionary with scraping results and metrics

        Raises:
            ScrapingError: If the scrape fails for any reason.
        """
        start_time = time.time()
        self.logger.info(f"Starting date range scrape: {date_from} to {date_until}")

        try:
            results = self._scrape_papers(
                date_from=date_from,
                date_until=date_until
            )

            results['execution_time'] = time.time() - start_time
            return results

        except ScrapingError:
            raise
        except Exception as e:
            self.logger.error(f"Date range scrape failed: {e}", exc_info=True)
            raise ScrapingError(f"Date range scrape failed: {e}") from e

    def _scrape_papers(self, date_from: Optional[str] = None,
                      date_until: Optional[str] = None) -> Dict[str, Any]:
        """
        Core scraping implementation.

        Streams records from the OAI-PMH endpoint, normalizes them via the
        paper processor, and persists them in batches. Per-record failures
        are counted and skipped; batch/stream failures abort the run.

        Args:
            date_from: Start date for scraping
            date_until: End date for scraping

        Returns:
            Dictionary with keys 'papers_processed', 'papers_stored',
            'papers_skipped', 'errors'.

        Raises:
            ScrapingError: If the harvest itself fails.
        """
        papers_processed = 0
        papers_stored = 0
        papers_skipped = 0
        errors = 0

        try:
            # Get papers from OAI-PMH endpoint
            self.logger.info("Fetching papers from ArXiv OAI-PMH endpoint")

            papers_generator = self.oai_client.list_records(
                metadata_prefix='oai_dc',
                set_spec='physics:math-ph',  # Focus on mathematical physics
                from_date=date_from,
                until_date=date_until
            )

            # Process papers in batches for efficiency. Defensive .get() so a
            # missing 'processing' section falls back to a sane default rather
            # than raising KeyError mid-run.
            batch_size = self.config.get('processing', {}).get(
                'batch_size', self.DEFAULT_BATCH_SIZE)
            batch = []

            for paper_record in papers_generator:
                try:
                    # Process paper metadata; None means the record was filtered.
                    paper_data = self.paper_processor.process_record(paper_record)

                    if paper_data:
                        batch.append(paper_data)
                        papers_processed += 1

                        # Log progress every 100 papers. Checked per paper,
                        # not per batch flush, so it fires regardless of how
                        # batch_size divides the count.
                        if papers_processed % 100 == 0:
                            self.logger.info(f"Processed {papers_processed} papers...")

                        # Flush batch when full
                        if len(batch) >= batch_size:
                            stored, skipped = self._process_batch(batch)
                            papers_stored += stored
                            papers_skipped += skipped
                            batch = []

                except Exception as e:
                    # A single bad record shouldn't kill the whole harvest.
                    self.logger.error(f"Error processing paper: {e}")
                    errors += 1
                    continue

            # Flush the final partial batch
            if batch:
                stored, skipped = self._process_batch(batch)
                papers_stored += stored
                papers_skipped += skipped

            self.logger.info(f"Scraping completed: {papers_processed} processed, "
                           f"{papers_stored} stored, {papers_skipped} skipped, "
                           f"{errors} errors")

            return {
                'papers_processed': papers_processed,
                'papers_stored': papers_stored,
                'papers_skipped': papers_skipped,
                'errors': errors
            }

        except Exception as e:
            self.logger.error(f"Scraping failed: {e}", exc_info=True)
            raise ScrapingError(f"Scraping failed: {e}") from e

    def _process_batch(self, batch: List[Dict[str, Any]]) -> Tuple[int, int]:
        """
        Persist a batch of papers.

        Args:
            batch: List of paper data dictionaries

        Returns:
            Tuple of (papers_stored, papers_skipped). In dry-run mode the
            whole batch is reported as stored without touching the database.

        Raises:
            DatabaseError: If the database write fails.
        """
        if self.dry_run:
            self.logger.info(f"DRY RUN: Would process batch of {len(batch)} papers")
            return len(batch), 0

        try:
            return self.db_manager.store_papers_batch(batch)
        except Exception as e:
            self.logger.error(f"Error processing batch: {e}")
            raise DatabaseError(f"Batch processing failed: {e}") from e

    def get_status(self) -> Dict[str, Any]:
        """
        Get current service status and metrics.

        Returns:
            Dictionary with status information, or {'error': ...} if status
            collection itself fails (this method never raises).
        """
        try:
            last_run = self.state_manager.get_last_successful_run()

            status = {
                'service': 'ArXiv Scraper Service',
                'version': self.VERSION,
                'last_successful_run': last_run.isoformat() if last_run else None,
                'dry_run_mode': self.dry_run,
                # No database connection exists at all in dry-run mode.
                'database_connected': False if self.dry_run else self.db_manager.is_connected(),
                'metrics': self.metrics.get_current_metrics()
            }

            return status

        except Exception as e:
            self.logger.error(f"Error getting status: {e}")
            return {'error': str(e)}

    def health_check(self) -> bool:
        """
        Perform health check of all components.

        Returns:
            True if all components are healthy; False on any component
            failure or unexpected exception (this method never raises).
        """
        try:
            # Check OAI client
            if not self.oai_client.health_check():
                return False

            # Check database (if not in dry-run mode)
            if not self.dry_run and not self.db_manager.health_check():
                return False

            # Check state manager
            if not self.state_manager.health_check():
                return False

            return True

        except Exception as e:
            self.logger.error(f"Health check failed: {e}")
            return False