"""
Simplified CLI for GCR Solver Manager.

Basic command-line interface using Click without complex dependencies.
"""

import click
import json
import logging
from pathlib import Path
from typing import Optional

from ..core.simple_config import get_config, create_default_config
from ..database.simple_db import SimpleDatabase
from ..test.simple_runner import SimpleTestRunner
from ..logs.manager import LogManager
from ..batch.batch_runner import BatchRunner

# Setup logging: INFO by default with a minimal "LEVEL: message" format.
# The CLI group's --verbose flag raises the root logger to DEBUG at runtime.
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
logger = logging.getLogger(__name__)

@click.group()
@click.option('--config', type=click.Path(), help='Configuration file path')
@click.option('--verbose', '-v', is_flag=True, help='Enable verbose output')
@click.pass_context
def cli(ctx, config, verbose):
    """GCR Solver Manager - Build, test, and analyze GCR solvers."""
    # Shared context object for subcommands (Click convention).
    ctx.ensure_object(dict)
    
    # -v/--verbose promotes the root logger to DEBUG for the whole process,
    # affecting every subcommand that runs after this group callback.
    if verbose:
        logging.getLogger().setLevel(logging.DEBUG)
    
    # An explicitly supplied configuration file replaces the default lookup.
    # Imported lazily so the default path avoids the extra import.
    if config:
        from ..core.simple_config import load_config
        load_config(config)


@cli.command()
@click.option('--solver', required=True, type=click.Choice(['gcr', 'ca-gcr', 'gmres', 'bca-gmres']))
@click.option('--gpu', type=click.Choice(['cuda', 'hip', 'cpu']), help='GPU type to build for')
@click.option('--debug', is_flag=True, help='Build with debug flags')
@click.option('--nccl', is_flag=True, help='Enable NCCL communication')
@click.option('--parallel', '-j', default=4, help='Number of parallel jobs')
@click.option('--tag', multiple=True, help='Tags for this build')
def build(solver, gpu, debug, nccl, parallel, tag):
    """Build a solver configuration."""
    # Build functionality has been removed from this tool (the comment in the
    # old body said "use binary execution only"), yet the old code still
    # called the now-undefined name `builder`, so every invocation failed
    # with a NameError disguised as "Build error: name 'builder' is not
    # defined". Keep the command for backward compatibility but report the
    # removal explicitly so users get an actionable message.
    click.echo("❌ Build functionality has been removed - run pre-built binaries via the 'test' command instead")


@cli.command()
@click.option('--solver', required=True, type=click.Choice(['gcr', 'ca-gcr', 'gmres', 'bca-gmres']))
@click.option('--gpu', type=click.Choice(['cuda', 'hip', 'cpu']), help='GPU type to test')
@click.option('--nproc', default=4, help='Number of MPI processes')
@click.option('--resolution', default=1.0, type=float, help='Grid resolution')
@click.option('--maxit', type=int, help='Maximum iterations')
@click.option('--debug', is_flag=True, help='Run in debug mode')
@click.option('--both', is_flag=True, help='Run both debug and release modes')
@click.option('--remote', is_flag=True, help='Run test on remote server (HIP tests use SLURM)')
@click.option('--sync', is_flag=True, help='Automatically sync results after remote execution')
@click.option('--tag', multiple=True, help='Tags for this test')
def test(solver, gpu, nproc, resolution, maxit, debug, both, remote, sync, tag):
    """Run a solver test."""
    try:
        runner = SimpleTestRunner()
        
        # Parameters forwarded to the test runner.
        params = {
            'nproc': nproc,
            'resolution': resolution,
            'debug': debug,
            'both': both
        }
        
        # Explicit None check so an explicit `--maxit 0` is not silently
        # dropped (the previous truthiness test treated 0 as "not given").
        if maxit is not None:
            params['maxit'] = maxit
        
        if remote:
            # Remote execution requires SSH credentials in the configuration.
            config = get_config()
            if not config.get('remote.enabled'):
                click.echo("❌ Remote execution not configured. Please create a .env file with SSH credentials.")
                return
            
            # HIP tests go through the SLURM-based remote path; everything
            # else uses the generic remote test runner.
            if gpu == 'hip':
                success, run_id = runner.run_remote_hip_test(solver, params, list(tag))
            else:
                success, run_id = runner.run_remote_test(solver, gpu, params, {}, list(tag))
            
            if success:
                click.echo(f"✅ Remote test completed successfully (run_id: {run_id})")
                
                # Optionally pull results back after a successful remote run.
                if sync:
                    click.echo("🔄 Syncing results from remote server...")
                    if runner.sync_remote_results():
                        click.echo("✅ Results synced successfully")
                    else:
                        click.echo("⚠ Sync completed with warnings")
            else:
                click.echo(f"❌ Remote test failed (run_id: {run_id})")
        else:
            # Local execution
            success, run_id = runner.run_test(solver, gpu, params, list(tag))
            
            if success:
                click.echo(f"✅ Test completed successfully (run_id: {run_id})")
            else:
                click.echo(f"❌ Test failed (run_id: {run_id})")
            
    except Exception as e:
        click.echo(f"❌ Test error: {e}")


@cli.command()
@click.option('--count', '-n', default=10, help='Number of recent runs to show')
@click.option('--type', type=click.Choice(['build', 'test']), help='Filter by run type')
@click.option('--solver', help='Filter by solver')
@click.option('--gpu', type=click.Choice(['cuda', 'hip', 'cpu']), help='Filter by GPU type')
def list_runs(count, type, solver, gpu):
    """List recent test/build runs."""
    try:
        cfg = get_config()
        database = SimpleDatabase(cfg.get('database.path'))

        # Fetch the newest runs, applying any of the optional filters.
        matched = database.get_latest(count, type, solver, gpu)
        if not matched:
            click.echo("No runs found.")
            return

        # One fixed-width row per run for easy scanning.
        click.echo(f"Recent runs ({len(matched)}):")
        for entry in matched:
            click.echo(f"  {entry['id']:4d} | {entry['type']:5s} | {entry['solver']:8s} | {entry['gpu'] or 'cpu':4s} | {entry['status']:7s} | {entry['created_at']}")

    except Exception as e:
        click.echo(f"❌ Error listing runs: {e}")


@cli.command()
@click.argument('run_id', type=int)
def show_log(run_id):
    """Show log content for a specific run."""
    try:
        cfg = get_config()
        database = SimpleDatabase(cfg.get('database.path'))

        # Verify the run exists before looking for its log file.
        if not database.get_by_id(run_id):
            click.echo(f"❌ Run {run_id} not found")
            return

        # The LogManager resolves the run id to its stored log text.
        content = LogManager().get_log_content(run_id)
        if content:
            click.echo(content)
        else:
            click.echo("❌ Log content not found")

    except Exception as e:
        click.echo(f"❌ Error showing log: {e}")


@cli.command()
@click.argument('run_id', type=int)
@click.option('--output', '-o', help='Output file for analysis plots')
@click.option('--title', help='Custom title for analysis')
def compare(run_id, output, title):
    """Analyze test run using compare_norm.py for detailed solver comparison."""
    try:
        import subprocess
        cfg = get_config()
        database = SimpleDatabase(cfg.get('database.path'))

        # Guard clauses: the run must exist, be a test run, and have a log.
        record = database.get_by_id(run_id)
        if not record:
            click.echo(f"❌ Run ID {run_id} not found")
            return

        if record['type'] != 'test':
            click.echo(f"❌ Run ID {run_id} is not a test run (type: {record['type']})")
            return

        log_path = Path(record['log_path'])
        if not log_path.exists():
            click.echo(f"❌ Log file not found: {log_path}")
            return

        # compare_norm.py lives five directories up, under scripts/python.
        script_path = Path(__file__).parent.parent.parent.parent.parent / "scripts" / "python" / "compare_norm.py"
        if not script_path.exists():
            click.echo(f"❌ compare_norm.py not found at: {script_path}")
            return

        click.echo(f"📊 Analyzing run {run_id} ({record['solver']}/{record['gpu']}) using compare_norm.py...")
        click.echo(f"📄 Log file: {log_path}")

        # Assemble the command: optional flags first, the log file last.
        cmd = ['python3', str(script_path)]
        if output:
            cmd += ['--output', output]
        if title:
            cmd += ['--title', title]
        cmd.append(str(log_path))

        # Run the analysis with the log's directory as the working directory.
        result = subprocess.run(cmd, capture_output=True, text=True, cwd=log_path.parent)

        if result.returncode != 0:
            click.echo(f"❌ Analysis failed with return code {result.returncode}")
            click.echo(f"Error output: {result.stderr}")
            if result.stdout:
                click.echo(f"Standard output: {result.stdout}")
            return

        click.echo("✅ Analysis completed successfully!")
        click.echo("\n" + "="*50)
        click.echo("ANALYSIS RESULTS:")
        click.echo("="*50)
        click.echo(result.stdout)

        # Surface any diagnostic text the script printed to stderr.
        if result.stderr.strip():
            click.echo("\n" + "="*50)
            click.echo("ADDITIONAL INFO:")
            click.echo("="*50)
            click.echo(result.stderr)

    except Exception as e:
        click.echo(f"❌ Error: {e}")
        return


@cli.command()
def stats():
    """Show system statistics."""
    try:
        cfg = get_config()
        database = SimpleDatabase(cfg.get('database.path'))
        # Renamed from `stats` to avoid shadowing this function's own name.
        summary = database.get_statistics()

        # Overall counters first...
        overview = summary['overview']
        click.echo("📊 System Statistics:")
        click.echo(f"  Total runs: {overview['total_runs']}")
        click.echo(f"  Build runs: {overview['build_runs']}")
        click.echo(f"  Test runs: {overview['test_runs']}")
        click.echo(f"  Successful runs: {overview['successful_runs']}")
        click.echo(f"  Failed runs: {overview['failed_runs']}")
        click.echo()

        # ...then the per-solver run counts.
        click.echo("📈 Solver Breakdown:")
        for entry in summary['solvers']:
            click.echo(f"  {entry['solver']}: {entry['count']} runs")

    except Exception as e:
        click.echo(f"❌ Error getting statistics: {e}")


@cli.command()
def init_config():
    """Initialize default configuration file."""
    try:
        config_path = Path("config.yaml")

        # Refuse to clobber an existing file unless the user confirms.
        if config_path.exists() and not click.confirm("config.yaml already exists. Overwrite?"):
            return

        create_default_config(config_path)
        click.echo(f"✅ Configuration created at {config_path}")

    except Exception as e:
        click.echo(f"❌ Error creating config: {e}")


@cli.command()
@click.argument('query')
def search(query):
    """Search logs by text."""
    # Fix: the function takes `query` but the original had no
    # @click.argument declaring it, so Click invoked search() without the
    # parameter and every call failed with a TypeError.
    try:
        config = get_config()
        db = SimpleDatabase(config.get('database.path'))
        results = db.search_runs(query)
        
        if not results:
            click.echo("No matching runs found.")
            return
        
        # Same fixed-width row format as `list-runs` for consistency.
        click.echo(f"Found {len(results)} matching runs:")
        for run in results:
            click.echo(f"  {run['id']:4d} | {run['type']:5s} | {run['solver']:8s} | {run['gpu'] or 'cpu':4s} | {run['status']:7s} | {run['created_at']}")
            
    except Exception as e:
        click.echo(f"❌ Error searching: {e}")


@cli.command()
@click.argument('days', type=int, default=90)
def cleanup(days):
    """Clean up old runs and logs."""
    try:
        # LogManager removes database rows and log files older than `days`
        # and reports how many of each it deleted.
        removed = LogManager().cleanup_old_logs(days)

        click.echo(f"✅ Cleanup completed:")
        click.echo(f"  Database entries deleted: {removed['database_entries_deleted']}")
        click.echo(f"  Log files deleted: {removed['log_files_deleted']}")

    except Exception as e:
        click.echo(f"❌ Error during cleanup: {e}")


@cli.command()
@click.option('--solver', required=True, type=click.Choice(['gcr', 'ca-gcr', 'gmres', 'bca-gmres']))
@click.option('--gpu', type=click.Choice(['cuda', 'hip', 'cpu']), help='GPU type to test')
@click.option('--nproc', default=4, help='Number of MPI processes')
@click.option('--resolution', default=1.0, type=float, help='Grid resolution')
@click.option('--maxit', type=int, help='Maximum iterations')
@click.option('--debug', is_flag=True, help='Run in debug mode')
@click.option('--partition', default='gpu', help='SLURM partition')
@click.option('--time', default='01:00:00', help='Time limit')
@click.option('--account', help='SLURM account')
@click.option('--tag', multiple=True, help='Tags for this test')
def submit_slurm(solver, gpu, nproc, resolution, maxit, debug, partition, time, account, tag):
    """Submit test as SLURM job."""
    # NOTE(review): --tag is accepted but never forwarded to
    # submit_slurm_job — confirm whether that is intentional.
    try:
        runner = SimpleTestRunner()
        
        # Solver parameters embedded in the generated job.
        params = {
            'nproc': nproc,
            'resolution': resolution,
            'debug': debug
        }
        
        # Explicit None check so an explicit `--maxit 0` is not silently
        # dropped (the previous truthiness test treated 0 as "not given").
        if maxit is not None:
            params['maxit'] = maxit
        
        # Scheduler options; account is optional and omitted when unset.
        slurm_params = {
            'partition': partition,
            'time': time
        }
        
        if account:
            slurm_params['account'] = account
        
        job_id, run_id = runner.submit_slurm_job(solver, gpu, params, slurm_params)
        
        click.echo(f"✅ SLURM job submitted: {job_id} (run_id: {run_id})")
        click.echo(f"   Monitor with: gsm slurm-status {job_id}")
        
    except Exception as e:
        click.echo(f"❌ SLURM submission error: {e}")


@cli.command()
@click.argument('job_id', required=False)
def slurm_status(job_id):
    """Check status of SLURM jobs."""
    try:
        runner = SimpleTestRunner()

        if not job_id:
            # No job given: summarize all SLURM jobs from recent runs.
            overview = runner.monitor_slurm_jobs()

            if overview['total_jobs'] == 0:
                click.echo("No SLURM jobs found in recent runs.")
                return

            click.echo(f"SLURM Jobs ({overview['total_jobs']}):")
            for job in overview['jobs']:
                click.echo(f"  Job {job['job_id']} | Run {job['run_id']} | {job['solver']} | {job['status']} | {job['created_at']}")
            return

        # A specific job id was supplied: report its detailed status.
        status_info = runner.check_slurm_job_status(job_id)
        click.echo(f"Job {job_id} status: {status_info.get('status', 'UNKNOWN')}")
        if 'reason' in status_info:
            click.echo(f"  Reason: {status_info['reason']}")
        if 'exit_code' in status_info:
            click.echo(f"  Exit code: {status_info['exit_code']}")

    except Exception as e:
        click.echo(f"❌ SLURM status error: {e}")


@cli.command()
@click.option('--solver', required=True, type=click.Choice(['gcr', 'ca-gcr', 'gmres', 'bca-gmres']))
@click.option('--gpu', type=click.Choice(['cuda', 'hip', 'cpu']), help='GPU type to test')
@click.option('--nproc', default=4, help='Number of MPI processes')
@click.option('--resolution', default=1.0, type=float, help='Grid resolution')
@click.option('--maxit', type=int, help='Maximum iterations')
@click.option('--debug', is_flag=True, help='Run in debug mode')
@click.option('--hostname', help='SSH hostname from SSH config (overrides .env)')
@click.option('--workdir', help='Remote working directory (overrides config)')
@click.option('--watch', is_flag=True, help='Show real-time build output and SLURM progress')
@click.option('--tag', multiple=True, help='Tags for this test')
def test_remote(solver, gpu, nproc, resolution, maxit, debug, hostname, workdir, watch, tag):
    """Build and run test on remote system via SSH."""
    try:
        runner = SimpleTestRunner()
        
        # Solver parameters forwarded to the remote run.
        params = {
            'nproc': nproc,
            'resolution': resolution,
            'debug': debug
        }
        
        # Explicit None check so an explicit `--maxit 0` is not silently
        # dropped (the previous truthiness test treated 0 as "not given").
        if maxit is not None:
            params['maxit'] = maxit
        
        # CLI overrides take precedence over values from .env/config.
        remote_config = {}
        if hostname:
            remote_config['hostname'] = hostname
        if workdir:
            remote_config['workdir'] = workdir
        
        success, run_id = runner.run_remote_test(solver, gpu, params, remote_config, list(tag), watch)
        
        if success:
            click.echo(f"✅ Remote test completed successfully (run_id: {run_id})")
        else:
            click.echo(f"❌ Remote test failed (run_id: {run_id})")
            
    except Exception as e:
        click.echo(f"❌ Remote test error: {e}")


@cli.command()
@click.option('--format', type=click.Choice(['json', 'csv']), default='json', help='Export format')
@click.option('--type', type=click.Choice(['build', 'test']), help='Filter by run type')
@click.option('--solver', help='Filter by solver')
@click.option('--days', type=int, default=30, help='Export runs from last N days')
@click.option('--output', '-o', help='Output file path')
def export(format, type, solver, days, output):
    """Export run data to JSON or CSV."""
    try:
        from ..analysis.simple_analyzer import SimpleAnalyzer
        analyzer = SimpleAnalyzer()

        # Without an explicit output path, derive a descriptive filename
        # from the active filters plus a timestamp.
        if not output:
            from datetime import datetime
            stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            parts = [p for p in (type, solver) if p]
            suffix = "_" + "_".join(parts) if parts else ""
            output = f"gsm_export{suffix}_{stamp}.{format}"

        if analyzer.export_runs(output, format, type, solver, days):
            click.echo(f"✅ Data exported to: {output}")
        else:
            click.echo("❌ Export failed")

    except Exception as e:
        click.echo(f"❌ Export error: {e}")


@cli.command()
@click.option('--config', '-c', required=True, help='Batch configuration file')
@click.option('--parallel', '-p', is_flag=True, help='Run jobs in parallel')
@click.option('--workers', '-w', default=4, help='Number of parallel workers')
def batch(config, parallel, workers):
    """Run batch operations from configuration file."""
    try:
        results = BatchRunner().run_batch_from_config(config, parallel, workers)

        # Failure path first: show the error if the runner reported one,
        # otherwise summarize how many jobs failed.
        if not results['success']:
            click.echo(f"❌ Batch execution failed")
            if 'error' in results:
                click.echo(f"   Error: {results['error']}")
            else:
                click.echo(f"   {results['failed_jobs']} out of {results['total_jobs']} jobs failed")
            return

        click.echo(f"✅ Batch completed successfully")
        click.echo(f"   Total jobs: {results['total_jobs']}")
        click.echo(f"   Successful: {results['successful_jobs']}")
        click.echo(f"   Failed: {results['failed_jobs']}")

    except Exception as e:
        click.echo(f"❌ Batch error: {e}")


@cli.command()
@click.option('--solvers', '-s', default='gcr,gmres', help='Comma-separated list of solvers')
@click.option('--gpus', '-g', default='cuda,hip,cpu', help='Comma-separated list of GPU types')
@click.option('--operation', type=click.Choice(['build', 'test']), default='test', help='Operation type')
@click.option('--parallel', '-p', is_flag=True, help='Run jobs in parallel')
@click.option('--workers', '-w', default=4, help='Number of parallel workers')
@click.option('--nproc', default=4, help='Number of MPI processes (for test)')
@click.option('--resolution', default=1.0, type=float, help='Grid resolution (for test)')
@click.option('--debug', is_flag=True, help='Run in debug mode')
def batch_matrix(solvers, gpus, operation, parallel, workers, nproc, resolution, debug):
    """Run batch matrix of solver/GPU combinations."""
    try:
        # Expand the comma-separated option strings into clean lists.
        solver_names = [token.strip() for token in solvers.split(',')]
        gpu_names = [token.strip() for token in gpus.split(',')]

        # Parameters shared by every combination in the matrix.
        shared_params = {
            'nproc': nproc,
            'resolution': resolution,
            'debug': debug
        }

        results = BatchRunner().run_batch_matrix(
            solvers=solver_names,
            gpu_types=gpu_names,
            operation=operation,
            params=shared_params,
            parallel=parallel,
            max_workers=workers
        )

        if not results['success']:
            click.echo(f"❌ Batch matrix execution failed")
            click.echo(f"   {results['failed_jobs']} out of {results['total_jobs']} jobs failed")
            return

        click.echo(f"✅ Batch matrix completed successfully")
        click.echo(f"   Total combinations: {results['total_jobs']}")
        click.echo(f"   Successful: {results['successful_jobs']}")
        click.echo(f"   Failed: {results['failed_jobs']}")

    except Exception as e:
        click.echo(f"❌ Batch matrix error: {e}")


@cli.command()
@click.option('--output', '-o', default='batch_config_template.yaml', help='Output file path')
def create_batch_template(output):
    """Create a batch configuration template file."""
    try:
        # BatchRunner writes the template; it reports success as a boolean.
        created = BatchRunner().create_batch_config_template(output)

        if created:
            click.echo(f"✅ Batch configuration template created: {output}")
            click.echo(f"   Edit the template and run with: gsm batch -c {output}")
        else:
            click.echo("❌ Failed to create batch template")

    except Exception as e:
        click.echo(f"❌ Template creation error: {e}")


@cli.command()
@click.option('--days', default=7, help='Number of days to show')
def batch_history(days):
    """Show batch execution history."""
    try:
        history = BatchRunner().get_batch_history(days)

        if not history:
            click.echo("No batch runs found in the specified time period.")
            return

        # One summary line per batch day; `record` avoids shadowing the
        # `batch` command defined above.
        click.echo(f"Batch History (last {days} days):")
        for record in history:
            click.echo(f"  {record['date']}: {record['total_jobs']} jobs "
                      f"({record['successful_jobs']} success, {record['failed_jobs']} failed)")

    except Exception as e:
        click.echo(f"❌ Batch history error: {e}")


@cli.command()
@click.option('--runs', '-r', required=True, help='Comma-separated list of run IDs to compare')
@click.option('--output', '-o', help='Output file path')
@click.option('--title', help='Plot title')
def plot_convergence(runs, output, title):
    """Plot convergence comparison between test runs."""
    try:
        from ..plotting.simple_plotter import SimplePlotter

        # Parse the comma-separated run id list into integers.
        ids = [int(token.strip()) for token in runs.split(',')]

        # Default to a timestamped PNG when no output path is supplied.
        if not output:
            from datetime import datetime
            stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            output = f"convergence_comparison_{stamp}.png"

        if SimplePlotter().plot_convergence_comparison(ids, output, title):
            click.echo(f"✅ Convergence plot saved: {output}")
        else:
            click.echo("❌ Failed to create convergence plot")

    except ImportError:
        click.echo("❌ matplotlib is required for plotting. Install with: pip install matplotlib")
    except Exception as e:
        click.echo(f"❌ Plot error: {e}")


@cli.command()
@click.option('--solver', required=True, type=click.Choice(['gcr', 'ca-gcr', 'gmres', 'bca-gmres']))
@click.option('--gpu', type=click.Choice(['cuda', 'hip', 'cpu']), help='GPU type')
@click.option('--days', default=30, help='Number of days to analyze')
@click.option('--output', '-o', help='Output file path')
def plot_trends(solver, gpu, days, output):
    """Plot performance trends over time."""
    try:
        from ..plotting.simple_plotter import SimplePlotter

        if not SimplePlotter().plot_performance_trends(solver, gpu, days, output):
            click.echo("❌ Failed to create performance trends plot")
            return

        # Mention the output path only when one was explicitly given.
        if output:
            click.echo(f"✅ Performance trends plot saved: {output}")
        else:
            click.echo("✅ Performance trends plot saved")

    except ImportError:
        click.echo("❌ matplotlib is required for plotting. Install with: pip install matplotlib")
    except Exception as e:
        click.echo(f"❌ Plot error: {e}")


@cli.command()
@click.option('--solvers', '-s', default='gcr,gmres', help='Comma-separated list of solvers')
@click.option('--gpu', type=click.Choice(['cuda', 'hip', 'cpu']), help='GPU type')
@click.option('--metric', type=click.Choice(['final_norm', 'convergence_rate']), 
              default='final_norm', help='Metric to compare')
@click.option('--output', '-o', help='Output file path')
def plot_comparison(solvers, gpu, metric, output):
    """Plot comparison between different solvers."""
    try:
        from ..plotting.simple_plotter import SimplePlotter

        # Expand the comma-separated solver names.
        names = [token.strip() for token in solvers.split(',')]

        if not SimplePlotter().plot_solver_comparison(names, gpu, metric, output):
            click.echo("❌ Failed to create solver comparison plot")
            return

        # Mention the output path only when one was explicitly given.
        if output:
            click.echo(f"✅ Solver comparison plot saved: {output}")
        else:
            click.echo("✅ Solver comparison plot saved")

    except ImportError:
        click.echo("❌ matplotlib is required for plotting. Install with: pip install matplotlib")
    except Exception as e:
        click.echo(f"❌ Plot error: {e}")


@cli.command()
@click.option('--days', default=30, help='Number of days to analyze')
@click.option('--output', '-o', help='Output file path')
def plot_builds(days, output):
    """Plot build success rate over time."""
    try:
        from ..plotting.simple_plotter import SimplePlotter

        if not SimplePlotter().plot_build_success_rate(days, output):
            click.echo("❌ Failed to create build success rate plot")
            return

        # Mention the output path only when one was explicitly given.
        if output:
            click.echo(f"✅ Build success rate plot saved: {output}")
        else:
            click.echo("✅ Build success rate plot saved")

    except ImportError:
        click.echo("❌ matplotlib is required for plotting. Install with: pip install matplotlib")
    except Exception as e:
        click.echo(f"❌ Plot error: {e}")


@cli.command()
@click.option('--solver', required=True, type=click.Choice(['gcr', 'ca-gcr', 'gmres', 'bca-gmres']))
@click.option('--implementations', '-i', required=True, help='Comma-separated list of implementations (cpu,cuda,hip)')
@click.option('--days', default=7, help='Number of days to analyze')
@click.option('--metric', type=click.Choice(['final_norm', 'convergence_rate', 'performance']), 
              default='final_norm', help='Metric to compare')
@click.option('--output', '-o', help='Output file path (JSON format)')
@click.option('--tolerance', default=1e-10, type=float, help='Tolerance for norm comparison')
def compare_solvers(solver, implementations, days, metric, output, tolerance):
    """Compare solver results across different implementations."""
    try:
        # Imported lazily so the analysis dependency is only loaded on use.
        from ..analysis.solver_comparison import SolverComparison
        
        # Split the comma-separated implementation names (e.g. "cpu,cuda").
        impl_list = [impl.strip() for impl in implementations.split(',')]
        
        comparator = SolverComparison()
        results = comparator.compare_implementations(
            solver=solver,
            implementations=impl_list,
            days=days,
            metric=metric,
            tolerance=tolerance
        )
        
        if not results:
            click.echo("❌ No data found for comparison")
            return
        
        # Display results
        click.echo(f"Solver Comparison: {solver}")
        click.echo(f"Implementations: {', '.join(impl_list)}")
        click.echo(f"Metric: {metric}")
        click.echo(f"Time period: {days} days")
        click.echo("")
        
        # Show comparison summary (keys are optional; .get() guards below
        # tolerate partial summaries).
        if 'summary' in results:
            summary = results['summary']
            click.echo("Summary:")
            click.echo(f"  Total comparisons: {summary.get('total_comparisons', 0)}")
            click.echo(f"  Consistent results: {summary.get('consistent_results', 0)}")
            click.echo(f"  Inconsistent results: {summary.get('inconsistent_results', 0)}")
            if 'max_difference' in summary:
                click.echo(f"  Max difference: {summary['max_difference']:.2e}")
            if 'avg_difference' in summary:
                click.echo(f"  Average difference: {summary['avg_difference']:.2e}")
            click.echo("")
        
        # Show detailed comparisons; the console listing is capped at five
        # entries, with a "... and N more" line for the rest.
        if 'comparisons' in results:
            click.echo("Detailed Comparisons:")
            for comp in results['comparisons'][:5]:  # Show first 5
                impl1, impl2 = comp['implementations']
                diff = comp.get('difference', 'N/A')
                status = comp.get('status', 'unknown')
                
                click.echo(f"  {impl1} vs {impl2}: {status}")
                if diff != 'N/A':
                    click.echo(f"    Difference: {diff:.2e}")
            
            if len(results['comparisons']) > 5:
                click.echo(f"  ... and {len(results['comparisons']) - 5} more")
        
        # Save the full (untruncated) results as JSON if requested;
        # default=str makes non-JSON-native values serializable as strings.
        if output:
            import json
            from pathlib import Path
            
            output_path = Path(output)
            output_path.parent.mkdir(parents=True, exist_ok=True)
            
            with open(output_path, 'w') as f:
                json.dump(results, f, indent=2, default=str)
            
            click.echo(f"\n✅ Detailed results saved to: {output}")
        
    except Exception as e:
        click.echo(f"❌ Solver comparison error: {e}")


@cli.command()
@click.option('--logs-only', is_flag=True, help='Sync only log files, skip git operations')
@click.option('--verbose', '-v', is_flag=True, help='Show detailed sync output')
def sync(logs_only, verbose):
    """Synchronize repository and logs with remote server."""
    try:
        from ..core.simple_config import get_config

        # Remote sync requires SSH credentials from the configuration.
        cfg = get_config()
        if not cfg.get('remote.enabled'):
            click.echo("❌ Remote synchronization not configured. Please create a .env file with SSH credentials.")
            return

        runner = SimpleTestRunner()

        # --logs-only skips the git side of the synchronization entirely.
        if logs_only:
            click.echo("🔄 Syncing logs from remote server...")
            ok = runner.sync_remote_logs_only(verbose)
        else:
            click.echo("🔄 Running full synchronization (git + logs)...")
            ok = runner.sync_remote_results(verbose)

        if ok:
            click.echo("✅ Synchronization completed successfully")
        else:
            click.echo("⚠ Synchronization completed with warnings")

    except Exception as e:
        click.echo(f"❌ Sync error: {e}")


# Allow running this module directly as a script (normally the `gsm`
# entry point invokes cli()).
if __name__ == '__main__':
    cli()