"""
Scheduler for ArXiv Scraper Service

Daily scheduling system with cron-like functionality for automated paper harvesting.
"""

import logging
import signal
import threading
import time
from datetime import datetime, timedelta, timezone
from typing import Any, Callable, Dict, List, Optional

import schedule

from utils.exceptions import SchedulingError

from .state_manager import StateManager


class ScraperScheduler:
    """
    Scheduler for automated scraper execution.
    
    Features:
    - Daily scheduling with configurable time
    - Timezone support
    - Graceful shutdown handling
    - Run duration monitoring
    - Failure recovery
    """
    
    def __init__(self, config: Dict[str, Any], state_manager: StateManager,
                 scraper_function: Callable[[], Dict[str, Any]]):
        """Initialize scheduler.
        
        Args:
            config: Scheduling configuration. Recognized keys:
                'daily_run_time' (HH:MM string, default '02:00'),
                'max_run_duration' (seconds, default 7200),
                'timezone' (reported by get_status, default 'UTC').
            state_manager: State manager instance used to persist run
                metrics and failure information.
            scraper_function: Zero-argument callable executed for each
                scraping run; expected to return a result dict.
        
        Raises:
            SchedulingError: If 'daily_run_time' is not a valid HH:MM time.
        """
        self.config = config
        self.state_manager = state_manager
        self.scraper_function = scraper_function
        
        self.logger = logging.getLogger(__name__)
        
        # Scheduling state
        self.is_running = False              # True while the start() loop is active
        self.current_job = None              # Thread of the in-flight job, if any
        self.shutdown_event = threading.Event()  # Set to request shutdown
        
        # Configure schedule
        self._setup_schedule()
        
        # Setup signal handlers
        self._setup_signal_handlers()
        
        self.logger.info(f"Scheduler initialized: daily run at {config.get('daily_run_time', '02:00')}")
    
    @staticmethod
    def _parse_run_time(value: str) -> None:
        """Validate an HH:MM time string.
        
        Shared by _setup_schedule and reschedule so both apply the same
        format AND range checks (previously only reschedule checked ranges).
        
        Raises:
            ValueError: If the string is malformed or hour/minute are out
                of range (hour 0-23, minute 0-59).
        """
        hour, minute = map(int, value.split(':'))
        if not (0 <= hour <= 23 and 0 <= minute <= 59):
            raise ValueError("Invalid time values")
    
    def _setup_schedule(self):
        """Setup the daily schedule.
        
        Raises:
            SchedulingError: If the configured daily run time is invalid.
        """
        daily_run_time = self.config.get('daily_run_time', '02:00')
        
        try:
            # Validate format and ranges before registering, so a bad config
            # surfaces as SchedulingError instead of an unexpected error
            # from the `schedule` library.
            self._parse_run_time(daily_run_time)
        except ValueError as e:
            # Chain the parse error so the root cause stays visible.
            raise SchedulingError(f"Invalid daily run time format: {daily_run_time}. Use HH:MM format.") from e
        
        # Schedule daily job
        schedule.every().day.at(daily_run_time).do(self._execute_scheduled_job)
        
        self.logger.info(f"Scheduled daily run at {daily_run_time}")
    
    def _setup_signal_handlers(self):
        """Setup signal handlers for graceful shutdown (SIGINT/SIGTERM)."""
        def signal_handler(signum, frame):
            self.logger.info(f"Received signal {signum}, initiating graceful shutdown...")
            self.shutdown_event.set()
        
        signal.signal(signal.SIGINT, signal_handler)
        signal.signal(signal.SIGTERM, signal_handler)
    
    def _execute_scheduled_job(self) -> Dict[str, Any]:
        """Execute the scheduled scraping job.
        
        Runs the scraper in a worker thread and waits up to
        config['max_run_duration'] seconds for it to finish.
        
        Returns:
            Status dict with one of: 'skipped' (previous job still running),
            'timeout' (duration exceeded; worker thread keeps running),
            'completed' (with duration and start/end times), or 'error'.
        """
        job_start_time = datetime.now(timezone.utc)
        
        self.logger.info("Starting scheduled scraping job")
        
        try:
            # Check if another job is already running
            if self.current_job and self.current_job.is_alive():
                self.logger.warning("Previous job still running, skipping this execution")
                return {'status': 'skipped', 'reason': 'previous_job_running'}
            
            # Create job thread with timeout
            job_thread = threading.Thread(
                target=self._run_scraper_with_timeout,
                name='ScraperJob'
            )
            
            self.current_job = job_thread
            job_thread.start()
            
            # Wait for completion or timeout
            max_duration = self.config.get('max_run_duration', 7200)  # 2 hours default
            job_thread.join(timeout=max_duration)
            
            if job_thread.is_alive():
                self.logger.error(f"Job exceeded maximum duration ({max_duration}s), terminating")
                # Note: In production, you might want to implement more sophisticated job termination
                return {'status': 'timeout', 'duration': max_duration}
            
            job_end_time = datetime.now(timezone.utc)
            duration = (job_end_time - job_start_time).total_seconds()
            
            self.logger.info(f"Scheduled job completed in {duration:.2f} seconds")
            
            return {
                'status': 'completed',
                'duration': duration,
                'start_time': job_start_time.isoformat(),
                'end_time': job_end_time.isoformat()
            }
            
        except Exception as e:
            self.logger.error(f"Error in scheduled job: {e}", exc_info=True)
            
            # Update state with failure info
            self.state_manager.update_last_failed_run(
                datetime.now(timezone.utc),
                str(e)
            )
            
            return {'status': 'error', 'error': str(e)}
    
    def _run_scraper_with_timeout(self):
        """Run scraper function with proper error handling.
        
        Executed on the worker thread; stores results via the state
        manager on success, logs and re-raises on failure.
        """
        try:
            results = self.scraper_function()
            
            # Store results in state
            self.state_manager.update_metrics({
                'last_scheduled_run': datetime.now(timezone.utc).isoformat(),
                'last_scheduled_results': results
            })
            
            self.logger.info(f"Scheduled scraper completed: {results}")
            
        except Exception as e:
            self.logger.error(f"Error in scraper execution: {e}", exc_info=True)
            raise
    
    def start(self):
        """Start the scheduler.
        
        Blocks the calling thread in the scheduling loop until
        shutdown_event is set (via stop() or a signal) or a
        KeyboardInterrupt is received.
        """
        if self.is_running:
            self.logger.warning("Scheduler is already running")
            return
        
        self.is_running = True
        self.shutdown_event.clear()
        
        self.logger.info("Starting scheduler...")
        
        # Log next scheduled run
        next_run = schedule.next_run()
        if next_run:
            self.logger.info(f"Next scheduled run: {next_run}")
        
        # Main scheduling loop
        while not self.shutdown_event.is_set():
            try:
                # Run pending scheduled jobs
                schedule.run_pending()
                
                # Sleep for a short interval
                time.sleep(60)  # Check every minute
                
            except KeyboardInterrupt:
                self.logger.info("Keyboard interrupt received")
                break
            except Exception as e:
                self.logger.error(f"Error in scheduler loop: {e}", exc_info=True)
                time.sleep(60)  # Continue after error
        
        self.is_running = False
        self.logger.info("Scheduler stopped")
    
    def stop(self):
        """Stop the scheduler gracefully.
        
        Signals the scheduling loop to exit and waits up to 30 seconds
        for any in-flight job to complete.
        """
        self.logger.info("Stopping scheduler...")
        self.shutdown_event.set()
        
        # Wait for current job to complete if running
        if self.current_job and self.current_job.is_alive():
            self.logger.info("Waiting for current job to complete...")
            self.current_job.join(timeout=30)  # Wait up to 30 seconds
            
            if self.current_job.is_alive():
                self.logger.warning("Current job did not complete within timeout")
    
    def get_status(self) -> Dict[str, Any]:
        """Get scheduler status.
        
        Returns:
            Dict with run state, active-job flag, next scheduled run
            (ISO string or None), job count, and schedule configuration.
        """
        next_run = schedule.next_run()
        
        return {
            'is_running': self.is_running,
            'has_current_job': self.current_job is not None and self.current_job.is_alive(),
            'next_scheduled_run': next_run.isoformat() if next_run else None,
            'scheduled_jobs_count': len(schedule.jobs),
            'daily_run_time': self.config.get('daily_run_time'),
            'max_run_duration': self.config.get('max_run_duration'),
            'timezone': self.config.get('timezone', 'UTC')
        }
    
    def run_now(self) -> Dict[str, Any]:
        """Execute scraper job immediately (outside of schedule).
        
        Returns:
            The job result dict, or an error dict if a job is already running.
        """
        if self.current_job and self.current_job.is_alive():
            return {'status': 'error', 'error': 'Another job is already running'}
        
        self.logger.info("Executing immediate scraper run")
        
        return self._execute_scheduled_job()
    
    def get_next_run_time(self) -> Optional[datetime]:
        """Get the next scheduled run time, or None if nothing is scheduled."""
        return schedule.next_run()
    
    def reschedule(self, new_time: str):
        """Reschedule the daily run time.
        
        Args:
            new_time: New daily run time in HH:MM format.
        
        Raises:
            SchedulingError: If new_time is malformed or out of range.
        """
        try:
            # Validate before touching the existing schedule.
            self._parse_run_time(new_time)
        except ValueError as e:
            raise SchedulingError(f"Invalid time format: {new_time}. Use HH:MM format with valid values.") from e
        
        # Clear existing schedule
        # NOTE(review): schedule.clear() empties the module-global job
        # registry, so any jobs registered elsewhere via `schedule` are
        # also removed — confirm this is intended.
        schedule.clear()
        
        # Update config
        self.config['daily_run_time'] = new_time
        
        # Setup new schedule
        self._setup_schedule()
        
        self.logger.info(f"Rescheduled daily run to {new_time}")
    
    def get_job_history(self) -> List[Dict[str, Any]]:
        """Get history of scheduled job executions from state.
        
        Returns:
            Up to 10 most recent run records (newest first, when a
            'timestamp' key is present) extracted from state metrics
            whose keys start with 'scheduled_run_'.
        """
        metrics = self.state_manager.get_metrics()
        
        # Extract job history from metrics
        history = []
        
        # Get last few scheduled runs from metrics
        for key, value in metrics.items():
            if key.startswith('scheduled_run_'):
                # Non-dict values are wrapped so every entry is a dict.
                run_data = value if isinstance(value, dict) else {'result': value}
                history.append(run_data)
        
        # Sort by timestamp if available
        history.sort(key=lambda x: x.get('timestamp', ''), reverse=True)
        
        return history[:10]  # Return last 10 runs


class SchedulerManager:
    """
    Registry that fans operations out over a set of named schedulers.
    """
    
    def __init__(self):
        """Initialize an empty scheduler registry."""
        self.schedulers = {}
        self.logger = logging.getLogger(__name__)
    
    def add_scheduler(self, name: str, scheduler: ScraperScheduler):
        """Register a scheduler under the given name (replaces any existing entry)."""
        self.schedulers[name] = scheduler
        self.logger.info(f"Added scheduler: {name}")
    
    def start_all(self):
        """Start every registered scheduler, logging per-scheduler failures.
        
        NOTE(review): ScraperScheduler.start() blocks in its scheduling
        loop, so any scheduler after the first will not start until the
        previous one stops — confirm whether threaded startup is expected.
        """
        for scheduler_name, instance in self.schedulers.items():
            try:
                instance.start()
            except Exception as exc:
                self.logger.error(f"Error starting scheduler {scheduler_name}: {exc}")
            else:
                self.logger.info(f"Started scheduler: {scheduler_name}")
    
    def stop_all(self):
        """Stop every registered scheduler, logging per-scheduler failures."""
        for scheduler_name, instance in self.schedulers.items():
            try:
                instance.stop()
            except Exception as exc:
                self.logger.error(f"Error stopping scheduler {scheduler_name}: {exc}")
            else:
                self.logger.info(f"Stopped scheduler: {scheduler_name}")
    
    def get_status_all(self) -> Dict[str, Dict[str, Any]]:
        """Collect status from each scheduler; failures become error entries."""
        status = {}
        for scheduler_name, instance in self.schedulers.items():
            try:
                scheduler_status = instance.get_status()
            except Exception as exc:
                scheduler_status = {'error': str(exc)}
            status[scheduler_name] = scheduler_status
        return status