#!/usr/bin/env python3
"""
ArXiv Scraper Service CLI

Command-line interface for managing and monitoring the ArXiv scraper service.
"""

import click
import json
import sys
from pathlib import Path
from datetime import datetime, timezone

# Add service modules to path
sys.path.insert(0, str(Path(__file__).parent))

from core.config_manager import ConfigManager
from core.scraper_service import ArxivScraperService
from core.scheduler import ScraperScheduler
from utils.logger import setup_logging


@click.group()
@click.option('--config', '-c', default='config.yaml', help='Configuration file path')
@click.option('--log-level', '-l', default='INFO', help='Logging level')
@click.pass_context
def cli(ctx, config, log_level):
    """ArXiv Scraper Service CLI"""
    ctx.ensure_object(dict)

    # Configure logging first so any configuration failure below is logged
    # at the requested level.
    setup_logging(level=log_level)

    # Load the configuration once and stash both the manager and the parsed
    # config on the click context for every subcommand to reuse.
    try:
        manager = ConfigManager(config)
        ctx.obj['config_manager'] = manager
        ctx.obj['config'] = manager.get_config()
    except Exception as e:
        click.echo(f"Error loading configuration: {e}", err=True)
        sys.exit(1)


def _validate_date_option(value, option_name):
    """Return *value* unchanged after checking it parses as YYYY-MM-DD.

    ``None`` (option not supplied) passes through untouched; any other value
    that does not parse causes an error message and exit code 1, so bad
    dates fail fast instead of surfacing deep inside the scraper service.
    """
    if value is None:
        return None
    try:
        datetime.strptime(value, '%Y-%m-%d')
    except ValueError:
        click.echo(f"Invalid value for {option_name}: '{value}' (expected YYYY-MM-DD)", err=True)
        sys.exit(1)
    return value


@cli.command()
@click.option('--dry-run', is_flag=True, help='Run without database writes')
@click.option('--full-resync', is_flag=True, help='Full resync from earliest date')
@click.option('--date-from', help='Start date (YYYY-MM-DD)')
@click.option('--date-until', help='End date (YYYY-MM-DD)')
@click.pass_context
def run(ctx, dry_run, full_resync, date_from, date_until):
    """Run the scraper service"""
    config = ctx.obj['config']

    # Validate date arguments up front (see _validate_date_option).
    date_from = _validate_date_option(date_from, '--date-from')
    date_until = _validate_date_option(date_until, '--date-until')

    try:
        scraper_service = ArxivScraperService(config=config, dry_run=dry_run)

        # Run-mode precedence: full resync > explicit date range > incremental.
        if full_resync:
            click.echo("Starting full resync...")
            results = scraper_service.full_resync()
        elif date_from or date_until:
            click.echo(f"Running date range scrape: {date_from} to {date_until}")
            results = scraper_service.scrape_date_range(date_from=date_from, date_until=date_until)
        else:
            click.echo("Running incremental scrape...")
            results = scraper_service.run_incremental_scrape()

        # Summarize the run for the operator.
        click.echo("\n" + "="*50)
        click.echo("SCRAPING RESULTS")
        click.echo("="*50)
        click.echo(f"Papers processed: {results.get('papers_processed', 0)}")
        click.echo(f"Papers stored: {results.get('papers_stored', 0)}")
        click.echo(f"Papers skipped: {results.get('papers_skipped', 0)}")
        click.echo(f"Errors: {results.get('errors', 0)}")
        click.echo(f"Execution time: {results.get('execution_time', 0):.2f} seconds")

        # Non-zero exit when anything failed, so cron/CI notices.
        if results.get('errors', 0) > 0:
            sys.exit(1)

    except Exception as e:
        click.echo(f"Scraping failed: {e}", err=True)
        sys.exit(1)


@cli.command()
@click.pass_context
def schedule(ctx):
    """Start the scheduler for daily runs"""
    config = ctx.obj['config']

    # Refuse to start unless scheduling is explicitly switched on in config.
    if not config.get('scheduling', {}).get('enabled', False):
        click.echo("Scheduling is not enabled in configuration", err=True)
        sys.exit(1)

    try:
        service = ArxivScraperService(config=config, dry_run=False)

        # Wire the scheduler to the service's incremental-scrape entry point.
        scheduler = ScraperScheduler(
            config=config['scheduling'],
            state_manager=service.state_manager,
            scraper_function=service.run_incremental_scrape
        )

        click.echo("Starting scheduler...")
        click.echo(f"Daily run time: {config['scheduling'].get('daily_run_time', 'Not set')}")

        # Blocks here until stopped or interrupted.
        scheduler.start()

    except KeyboardInterrupt:
        click.echo("\nScheduler interrupted")
    except Exception as e:
        click.echo(f"Scheduler failed: {e}", err=True)
        sys.exit(1)


@cli.command()
@click.pass_context
def status(ctx):
    """Show service status"""
    config = ctx.obj['config']

    try:
        service = ArxivScraperService(config=config, dry_run=True)
        info = service.get_status()

        click.echo("ArXiv Scraper Service Status")
        click.echo("="*40)

        # One line per entry; dict-valued entries are expanded one level deep
        # with a two-space indent.
        for name, entry in info.items():
            if not isinstance(entry, dict):
                click.echo(f"{name}: {entry}")
                continue
            click.echo(f"{name}:")
            for sub_name, sub_value in entry.items():
                click.echo(f"  {sub_name}: {sub_value}")

    except Exception as e:
        click.echo(f"Error getting status: {e}", err=True)
        sys.exit(1)


@cli.command()
@click.pass_context
def health(ctx):
    """Check service health"""
    config = ctx.obj['config']

    try:
        # Instantiated dry-run: the health probe itself must not write.
        service = ArxivScraperService(config=config, dry_run=True)

        if service.health_check():
            click.echo("✓ Service is healthy")
        else:
            # Unhealthy is reported on stderr and mapped to exit code 1.
            click.echo("✗ Service has health issues", err=True)
            sys.exit(1)

    except Exception as e:
        click.echo(f"Health check failed: {e}", err=True)
        sys.exit(1)


@cli.command()
@click.option('--output', '-o', default='metrics.json', help='Output file')
@click.option('--format', 'output_format', default='json', type=click.Choice(['json']))
@click.pass_context
def metrics(ctx, output, output_format):
    """Export metrics"""
    config = ctx.obj['config']

    try:
        service = ArxivScraperService(config=config, dry_run=True)
        snapshot = service.metrics.get_current_metrics()

        # default=str stringifies anything json can't serialize natively
        # (timestamps and the like), so the export never fails on a value.
        with open(output, 'w') as fh:
            json.dump(snapshot, fh, indent=2, default=str)

        click.echo(f"Metrics exported to {output}")

    except Exception as e:
        click.echo(f"Error exporting metrics: {e}", err=True)
        sys.exit(1)


@cli.command()
@click.pass_context
def config_info(ctx):
    """Show configuration information"""
    manager = ctx.obj['config_manager']

    try:
        # Ask the manager for the masked view so secrets never hit stdout.
        masked = manager.get_sensitive_config_mask()

        click.echo("Configuration Information")
        click.echo("="*40)
        click.echo(json.dumps(masked, indent=2))

    except Exception as e:
        click.echo(f"Error displaying configuration: {e}", err=True)
        sys.exit(1)


@cli.command()
@click.option('--output', '-o', default='config.template.yaml', help='Output file')
@click.pass_context
def create_config(ctx, output):
    """Create configuration template"""
    manager = ctx.obj['config_manager']

    # Delegate template generation to the config manager; any failure is
    # reported on stderr with exit code 1.
    try:
        manager.save_config_template(output)
        click.echo(f"Configuration template saved to {output}")
    except Exception as e:
        click.echo(f"Error creating config template: {e}", err=True)
        sys.exit(1)


@cli.command()
@click.option('--backup-path', help='Path to backup file')
@click.pass_context
def backup_state(ctx, backup_path):
    """Backup scraper state"""
    config = ctx.obj['config']

    try:
        # dry_run=True: only the state manager is needed here.
        service = ArxivScraperService(config=config, dry_run=True)

        # The state manager picks a destination when backup_path is None.
        destination = service.state_manager.backup_state(backup_path)
        click.echo(f"State backed up to {destination}")

    except Exception as e:
        click.echo(f"Error backing up state: {e}", err=True)
        sys.exit(1)


@cli.command()
@click.argument('backup_path')
@click.pass_context
def restore_state(ctx, backup_path):
    """Restore scraper state from backup"""
    config = ctx.obj['config']

    try:
        # dry_run=True: only the state manager is needed here.
        service = ArxivScraperService(config=config, dry_run=True)

        service.state_manager.restore_state(backup_path)
        click.echo(f"State restored from {backup_path}")

    except Exception as e:
        click.echo(f"Error restoring state: {e}", err=True)
        sys.exit(1)


@cli.command()
@click.pass_context
def clear_state(ctx):
    """Clear all scraper state (use with caution)"""
    config = ctx.obj['config']

    # Destructive and irreversible: require interactive confirmation first.
    if not click.confirm("This will clear all scraper state. Continue?"):
        click.echo("Cancelled")
        return

    try:
        service = ArxivScraperService(config=config, dry_run=True)
        service.state_manager.clear_state()
        click.echo("State cleared successfully")

    except Exception as e:
        click.echo(f"Error clearing state: {e}", err=True)
        sys.exit(1)


@cli.command()
@click.option('--subject', help='Filter by subject category')
@click.option('--limit', default=10, help='Number of papers to show')
@click.pass_context
def list_papers(ctx, subject, limit):
    """List recent papers"""
    config = ctx.obj['config']

    try:
        scraper_service = ArxivScraperService(config=config, dry_run=True)

        if scraper_service.db_manager is None:
            click.echo("Database not available in dry-run mode", err=True)
            sys.exit(1)

        # Subject filter takes precedence over the plain "latest" listing.
        if subject:
            papers = scraper_service.db_manager.get_papers_by_subject(subject, limit)
        else:
            papers = scraper_service.db_manager.get_latest_papers(limit)

        if not papers:
            click.echo("No papers found")
            return

        click.echo(f"Recent Papers ({len(papers)} found)")
        click.echo("="*60)

        for paper in papers:
            # `or []` guards against an explicit None stored under 'authors'
            # (dict.get only falls back when the key is *missing*), and str()
            # guards non-string entries — either would make join() raise
            # TypeError and abort the whole listing.
            authors = paper.get('authors') or []
            click.echo(f"ArXiv ID: {paper.get('arxiv_id', 'N/A')}")
            click.echo(f"Title: {paper.get('title', 'N/A')}")
            click.echo(f"Authors: {', '.join(str(a) for a in authors)}")
            click.echo(f"Date: {paper.get('date_submitted', 'N/A')}")
            if paper.get('primary_subject'):
                click.echo(f"Subject: {paper.get('primary_subject')}")
            click.echo("-" * 60)

    except Exception as e:
        click.echo(f"Error listing papers: {e}", err=True)
        sys.exit(1)


@cli.command()
@click.pass_context
def db_stats(ctx):
    """Show database statistics"""
    config = ctx.obj['config']

    try:
        scraper_service = ArxivScraperService(config=config, dry_run=True)

        if scraper_service.db_manager is None:
            click.echo("Database not available in dry-run mode", err=True)
            sys.exit(1)

        stats = scraper_service.db_manager.get_database_stats()

        click.echo("Database Statistics")
        click.echo("="*40)
        click.echo(f"Total papers: {stats.get('total_papers', 0)}")
        click.echo(f"Recent papers (30 days): {stats.get('recent_papers', 0)}")

        click.echo("\nPapers by status:")
        # `status_name` instead of `status`: avoids shadowing the module-level
        # `status` command function.
        for status_name, count in stats.get('by_status', {}).items():
            click.echo(f"  {status_name}: {count}")

        click.echo("\nTop subjects:")
        for subject_info in stats.get('top_subjects', [])[:10]:
            # .get() keeps a partially-populated row from aborting the whole
            # report with a KeyError.
            subject = subject_info.get('primary_subject', 'N/A')
            count = subject_info.get('count', 0)
            click.echo(f"  {subject}: {count}")

    except Exception as e:
        click.echo(f"Error getting database stats: {e}", err=True)
        sys.exit(1)


# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    cli()