# main.py
"""
PRISM Cascade Detection System - Command Line Interface

Complete unified entry point for all PRISM operations including
training, data preparation, evaluation, analysis, and deployment.
"""
import argparse
import logging
import sys

# Configure root logging once at import time so every command handler
# below shares the same INFO threshold and timestamped message format.
logging.basicConfig(
    level=logging.INFO,
    format='[%(asctime)s] %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
# Module-level logger used by all handlers in this file.
logger = logging.getLogger(__name__)


def setup_stage1_parser(subparsers):
    """Register the 'train-stage1' subcommand for Stage 1 YOLO training."""
    p = subparsers.add_parser('train-stage1',
                              help='Train Stage 1 YOLO proposal network')
    p.add_argument('--epochs', type=int, help='Number of training epochs')
    p.add_argument('--batch-size', type=int, help='Training batch size')
    p.add_argument('--img-size', type=int, default=640, help='Image size')
    return p


def setup_proposals_parser(subparsers):
    """Register the 'gen-proposals' subcommand for proposal generation."""
    p = subparsers.add_parser('gen-proposals',
                              help='Generate region proposals for Stage 2 training')
    p.add_argument('--conf-thresh', type=float, default=0.05,
                   help='Confidence threshold for proposals')
    return p


def setup_stage2_parser(subparsers):
    """Register the 'train-stage2' subcommand for ROI refinement training."""
    p = subparsers.add_parser('train-stage2',
                              help='Train Stage 2 ROI refinement network')
    p.add_argument('--basic', action='store_true',
                   help='Use basic training mode without advanced optimizations')
    p.add_argument('--no-validation', action='store_true',
                   help='Disable validation during training')
    p.add_argument('--epochs', type=int, help='Number of training epochs')
    p.add_argument('--batch-size', type=int, help='Training batch size')
    p.add_argument('--lr', type=float, help='Learning rate')
    return p


def setup_stage2_ultimate_parser(subparsers):
    """Register the 'train-stage2-ultimate' subcommand (advanced-mode alias)."""
    p = subparsers.add_parser('train-stage2-ultimate',
                              help='Train Stage 2 with all optimizations (same as train-stage2 without --basic)')
    p.add_argument('--epochs', type=int, help='Number of training epochs')
    p.add_argument('--batch-size', type=int, help='Training batch size')
    p.add_argument('--lr', type=float, help='Learning rate')
    p.add_argument('--no-validation', action='store_true',
                   help='Disable validation during training')
    return p


def setup_high_performance_parser(subparsers):
    """Register the 'train-hp' subcommand for the automated HP workflow."""
    p = subparsers.add_parser('train-hp',
                              help='High-performance training workflow (automated, optimized for maximum mAP)')
    p.add_argument('--mode', type=str, default='full',
                   choices=['full', 'stage1', 'stage2', 'optimize', 'evaluate'],
                   help='Training mode: full (all 8 stages), stage1 (Stage 1 only), '
                        'stage2 (Stage 2 only), optimize (hyperparameter search only), '
                        'evaluate (evaluation only)')
    p.add_argument('--config', type=str, default='config.yaml',
                   help='Path to config file (default: config.yaml)')
    p.add_argument('--n-trials', type=int,
                   help='Number of optimization trials (overrides config, default: 30)')
    p.add_argument('--no-validation', action='store_true',
                   help='Disable validation during training (use when validation set quality is poor)')
    p.add_argument('--use-train-for-threshold', action='store_true',
                   help='Use training set for Stage 1 threshold optimization instead of validation set')
    p.add_argument('--fast-threshold-search', action='store_true',
                   help='Use fast threshold search (8 values instead of 18, faster but less accurate)')
    p.add_argument('--use-cache', action='store_true',
                   help='Use cached results from previous stages if available (skip completed stages). '
                        'Without this flag, all stages will be retrained regardless of existing checkpoints.')
    p.add_argument('--resume-from', type=str,
                   choices=['stage1', 'stage2', 'stage3', 'stage4', 'stage5'],
                   help='Resume training from a specific stage (implies --use-cache)')
    p.add_argument('--skip-stage1-hyperparam', action='store_true',
                   help='Skip Stage 1 threshold optimization (use default thresholds). '
                        'Saves 10-30 minutes but may result in suboptimal proposals.')
    p.add_argument('--skip-stage2-hyperparam', action='store_true',
                   help='Skip Stage 2 hyperparameter optimization (use default hyperparameters). '
                        'Saves 2-6 hours but may result in suboptimal Stage 2 performance.')
    return p


def setup_eval_parser(subparsers):
    """Register the 'evaluate' subcommand for test-set evaluation."""
    p = subparsers.add_parser('evaluate',
                              help='Evaluate trained model on test set')
    p.add_argument('--use-ema', action='store_true',
                   help='Use EMA model for evaluation')
    p.add_argument('--output-dir', type=str, default='evaluation_results',
                   help='Directory to save evaluation results')
    p.add_argument('--conf-thresh', type=float, default=0.25,
                   help='Confidence threshold for Stage 2 (default: 0.25)')
    return p


def setup_serve_parser(subparsers):
    """Register the 'serve' subcommand for the deployment API server."""
    p = subparsers.add_parser('serve',
                              help='Start API server for deployment')
    p.add_argument('--host', type=str, default='0.0.0.0',
                   help='Server host')
    p.add_argument('--port', type=int, default=8000,
                   help='Server port')
    p.add_argument('--use-ema', action='store_true',
                   help='Use EMA model for inference')
    return p


def setup_inference_parser(subparsers):
    """Register the 'infer' subcommand for local inference on images."""
    p = subparsers.add_parser('infer',
                              help='Run local inference on images')
    p.add_argument('--image', type=str, help='Path to single image')
    p.add_argument('--dir', type=str, help='Path to directory containing images')
    p.add_argument('--conf-thresh', type=float, default=0.25,
                   help='Confidence threshold (default: 0.25)')
    p.add_argument('--use-ema', action='store_true',
                   help='Use EMA model for inference')
    p.add_argument('--use-tta', action='store_true',
                   help='Use test-time augmentation')
    p.add_argument('--output', type=str, default='results.json',
                   help='Output JSON file for batch inference')
    return p


def setup_gradio_parser(subparsers):
    """Register the 'gradio' subcommand for the web UI."""
    p = subparsers.add_parser('gradio',
                              help='Launch Gradio web interface')
    p.add_argument('--host', type=str, default='0.0.0.0',
                   help='Server host')
    p.add_argument('--port', type=int, default=7860,
                   help='Server port')
    p.add_argument('--share', action='store_true',
                   help='Create public link')
    return p


def setup_ensemble_parser(subparsers):
    """Register the 'ensemble' subcommand for multi-model inference."""
    p = subparsers.add_parser('ensemble',
                              help='Run ensemble inference with multiple models')
    p.add_argument('--image', type=str, help='Path to single image')
    p.add_argument('--dir', type=str, help='Path to directory')
    p.add_argument('--models', type=str, nargs='+',
                   help='List of model paths')
    p.add_argument('--strategy', type=str, default='weighted_average',
                   choices=['weighted_average', 'voting', 'wbf'],
                   help='Ensemble strategy')
    p.add_argument('--weights', type=float, nargs='+',
                   help='Model weights for ensemble')
    p.add_argument('--output', type=str, default='ensemble_results.json',
                   help='Output file')
    return p


def setup_data_check_parser(subparsers):
    """Register the 'data-check' subcommand for dataset quality checks."""
    p = subparsers.add_parser('data-check',
                              help='Check dataset quality and statistics')
    p.add_argument('--data-yaml', type=str,
                   help='Path to data.yaml (default: from config)')
    p.add_argument('--output-dir', type=str, default='data_quality_results',
                   help='Output directory')
    return p


def setup_error_analysis_parser(subparsers):
    """Register the 'error-analysis' subcommand for mining hard cases."""
    p = subparsers.add_parser('error-analysis',
                              help='Analyze model errors and find hard cases')
    p.add_argument('--predictions', type=str, required=True,
                   help='Path to predictions JSON file')
    p.add_argument('--output-dir', type=str, default='error_analysis',
                   help='Output directory')
    return p


def setup_gradcam_parser(subparsers):
    """Register the 'gradcam' subcommand for Grad-CAM visualizations."""
    p = subparsers.add_parser('gradcam',
                              help='Generate Grad-CAM visualizations')
    p.add_argument('--image', type=str,
                   help='Path to input image (for single image)')
    p.add_argument('--dir', type=str,
                   help='Path to directory (for batch processing)')
    p.add_argument('--output', type=str, default='gradcam_output.png',
                   help='Output path for single image')
    p.add_argument('--output-dir', type=str, default='gradcam_results',
                   help='Output directory for batch processing')
    p.add_argument('--target-layer', type=str,
                   help='Target layer name (auto-detect if not specified)')
    return p


def setup_hp_search_parser(subparsers):
    """Register the 'hp-search' subcommand for Optuna hyperparameter search."""
    p = subparsers.add_parser('hp-search',
                              help='Run hyperparameter search using Optuna')
    p.add_argument('--n-trials', type=int, default=50,
                   help='Number of trials')
    p.add_argument('--timeout', type=int, default=86400,
                   help='Timeout in seconds (default: 24h)')
    p.add_argument('--study-name', type=str, default='prism-hp-search',
                   help='Study name')
    return p


def setup_export_parser(subparsers):
    """Register the 'export' subcommand for model format conversion."""
    p = subparsers.add_parser('export',
                              help='Export model to ONNX, TorchScript, or other formats')
    p.add_argument('--format', type=str, default='onnx',
                   choices=['onnx', 'torchscript', 'tensorrt'],
                   help='Export format')
    p.add_argument('--model', type=str,
                   help='Model checkpoint path')
    p.add_argument('--output', type=str, default='exported_model',
                   help='Output path')
    p.add_argument('--simplify', action='store_true',
                   help='Simplify ONNX model')
    return p


def setup_optimize_threshold_parser(subparsers):
    """Register the 'optimize-threshold' subcommand for Stage 1 tuning."""
    p = subparsers.add_parser('optimize-threshold',
                              help='Automatically optimize Stage 1 proposal threshold')
    p.add_argument('--data-yaml', type=str,
                   help='Path to data.yaml (default: from config)')
    p.add_argument('--stage1-weights', type=str,
                   help='Path to Stage 1 weights (default: from config)')
    p.add_argument('--threshold-min', type=float, default=0.001,
                   help='Minimum threshold to test (default: 0.001)')
    p.add_argument('--threshold-max', type=float, default=0.1,
                   help='Maximum threshold to test (default: 0.1)')
    p.add_argument('--num-points', type=int, default=15,
                   help='Number of test points (default: 15)')
    p.add_argument('--sample-size', type=int,
                   help='Validation sample size (default: all)')
    p.add_argument('--output-dir', type=str, default='proposal_threshold_optimization',
                   help='Output directory')
    p.add_argument('--target-min', type=int, default=20,
                   help='Target minimum proposals per image (default: 20)')
    p.add_argument('--target-max', type=int, default=100,
                   help='Target maximum proposals per image (default: 100)')
    return p


# Diagnostic parsers

def setup_diag_stage1_parser(subparsers):
    """Register the 'diag-stage1' subcommand for Stage 1 diagnostics."""
    p = subparsers.add_parser('diag-stage1',
                              help='Run Stage 1 comprehensive diagnostics')
    p.add_argument('--data-yaml', type=str, default='dataset/data.yaml',
                   help='Path to data.yaml')
    p.add_argument('--weights', type=str, default='weights/stage1_proposer.pt',
                   help='Path to Stage 1 weights')
    p.add_argument('--output-dir', type=str, default='.',
                   help='Output directory for diagnostic results')
    return p


def setup_diag_stage1_test_parser(subparsers):
    """Register the 'diag-stage1-test' subcommand for test-set diagnostics."""
    p = subparsers.add_parser('diag-stage1-test',
                              help='Diagnose Stage 1 performance on test set')
    p.add_argument('--data-yaml', type=str, default='dataset/data.yaml',
                   help='Path to data.yaml')
    p.add_argument('--weights', type=str, default='weights/stage1_proposer.pt',
                   help='Path to Stage 1 weights')
    p.add_argument('--device', type=str, default='cuda',
                   help='Device to use (cuda or cpu)')
    return p


def setup_diag_stage2_parser(subparsers):
    """Register the 'diag-stage2' subcommand for Stage 2 diagnostics."""
    p = subparsers.add_parser('diag-stage2',
                              help='Run Stage 2 comprehensive diagnostics')
    p.add_argument('--data-yaml', type=str, default='dataset/data.yaml',
                   help='Path to data.yaml')
    p.add_argument('--proposals', type=str,
                   help='Path to proposals JSON')
    p.add_argument('--weights', type=str,
                   help='Path to Stage 2 weights')
    return p


def setup_diag_stage2_check_parser(subparsers):
    """Register the 'diag-stage2-check' subcommand for Stage 2 logic checks."""
    p = subparsers.add_parser('diag-stage2-check',
                              help='Comprehensive Stage 2 logic check')
    p.add_argument('--data-yaml', type=str, default='dataset/data.yaml',
                   help='Path to data.yaml')
    p.add_argument('--proposals', type=str,
                   help='Path to proposals JSON')
    return p


def setup_diag_collapse_parser(subparsers):
    """Register the 'diag-collapse' subcommand for class-collapse diagnosis."""
    p = subparsers.add_parser('diag-collapse',
                              help='Diagnose model collapse (always predicting same class)')
    p.add_argument('--data-yaml', type=str, default='dataset/data.yaml',
                   help='Path to data.yaml')
    p.add_argument('--weights', type=str,
                   help='Path to Stage 2 weights')
    p.add_argument('--device', type=str, default='cuda',
                   help='Device to use')
    p.add_argument('--num-images', type=int, default=100,
                   help='Number of test images to analyze')
    return p


def setup_diag_bbox_parser(subparsers):
    """Register the 'diag-bbox' subcommand for bbox-regression diagnostics."""
    p = subparsers.add_parser('diag-bbox',
                              help='Diagnose bounding box regression issues')
    p.add_argument('--proposals', type=str, default='proposals/proposals.json',
                   help='Path to proposals JSON')
    p.add_argument('--num-images', type=int, default=500,
                   help='Number of images to analyze')
    return p


def setup_check_proposals_parser(subparsers):
    """Register the 'check-proposals' subcommand for proposal statistics."""
    p = subparsers.add_parser('check-proposals',
                              help='Check proposals quality and statistics')
    p.add_argument('--proposals', type=str, default='proposals/proposals.json',
                   help='Path to proposals JSON')
    p.add_argument('--sample-size', type=int, default=100,
                   help='Number of images to sample')
    return p


def setup_analyze_iou_parser(subparsers):
    """Register the 'analyze-iou' subcommand for IoU distribution analysis."""
    p = subparsers.add_parser('analyze-iou',
                              help='Analyze IoU distribution across classes and images')
    p.add_argument('--data-yaml', type=str, default='dataset/data.yaml',
                   help='Path to data.yaml')
    p.add_argument('--proposals', type=str, default='proposals/proposals.json',
                   help='Path to proposals JSON')
    p.add_argument('--output', type=str, default='iou_analysis.png',
                   help='Output visualization file')
    return p


def setup_vis_stage1_parser(subparsers):
    """Register the 'vis-stage1' subcommand for Stage 1 quality plots."""
    p = subparsers.add_parser('vis-stage1',
                              help='Visualize Stage 1 prediction quality')
    p.add_argument('--data-yaml', type=str, default='dataset/data.yaml',
                   help='Path to data.yaml')
    p.add_argument('--weights', type=str, default='weights/stage1_proposer.pt',
                   help='Path to Stage 1 weights')
    p.add_argument('--device', type=str, default='cuda',
                   help='Device to use')
    p.add_argument('--num-images', type=int, default=5,
                   help='Number of images to visualize')
    p.add_argument('--output-prefix', type=str, default='stage1_quality',
                   help='Output file prefix')
    p.add_argument('--conf-thresh', type=float, default=0.01,
                   help='Confidence threshold')
    return p


def setup_vis_predictions_parser(subparsers):
    """Register the 'vis-predictions' subcommand for end-to-end visualization."""
    p = subparsers.add_parser('vis-predictions',
                              help='Visualize end-to-end predictions')
    p.add_argument('--data-yaml', type=str, default='dataset/data.yaml',
                   help='Path to data.yaml')
    p.add_argument('--stage1-weights', type=str, default='weights/stage1_proposer.pt',
                   help='Path to Stage 1 weights')
    p.add_argument('--stage2-weights', type=str,
                   help='Path to Stage 2 weights')
    p.add_argument('--device', type=str, default='cuda',
                   help='Device to use')
    p.add_argument('--num-images', type=int, default=5,
                   help='Number of images to visualize')
    p.add_argument('--output-prefix', type=str, default='vis_pred',
                   help='Output file prefix')
    return p


# Handler functions

def handle_train_stage1(args):
    """Handle Stage 1 training.

    Applies the CLI overrides (--epochs, --batch-size, --img-size) onto the
    global STAGE1_CONFIG dict, then launches the Stage 1 training run.
    """
    from src.training.train_stage1 import run_training_stage1
    from src.config import STAGE1_CONFIG

    logger.info("=" * 80)
    logger.info("Stage 1: Training YOLO Proposal Network")
    logger.info("=" * 80)

    # Explicit None checks: the previous truthiness tests conflated
    # "flag not given" with a falsy value such as 0.
    if args.epochs is not None:
        STAGE1_CONFIG['epochs'] = args.epochs
    if args.batch_size is not None:
        STAGE1_CONFIG['batch_size'] = args.batch_size
    # img_size may be absent when this handler is invoked with a namespace
    # from another subcommand, hence the getattr guard.
    img_size = getattr(args, 'img_size', None)
    if img_size is not None:
        STAGE1_CONFIG['img_size'] = img_size

    run_training_stage1()


def handle_gen_proposals(args):
    """Handle proposal generation."""
    from src.utils.generate_proposals import run_proposal_generation
    from src.config import STAGE1_CONFIG

    logger.info("=" * 80)
    logger.info("Stage 2 Preparation: Generating Region Proposals")
    logger.info("=" * 80)

    # Temporarily apply the CLI confidence threshold for this run only;
    # the previous value is restored even if generation fails.
    saved_thresh = STAGE1_CONFIG.get('confidence_threshold', 0.1)
    STAGE1_CONFIG['confidence_threshold'] = args.conf_thresh
    try:
        run_proposal_generation()
    finally:
        STAGE1_CONFIG['confidence_threshold'] = saved_thresh


def handle_train_stage2(args):
    """Handle Stage 2 training.

    Applies CLI overrides (--epochs, --batch-size, --lr) onto STAGE2_CONFIG,
    logs the selected training mode, and dispatches to the unified Stage 2
    training entry point.
    """
    from src.training.train_stage2 import run_training_stage2_unified
    from src.config import STAGE2_CONFIG

    logger.info("=" * 80)
    logger.info("Stage 2: Training ROI Refinement Network")
    logger.info("=" * 80)

    # Explicit None checks: the previous truthiness tests conflated
    # "flag not given" with a falsy value such as 0.
    if args.epochs is not None:
        STAGE2_CONFIG['epochs'] = args.epochs
    if args.batch_size is not None:
        STAGE2_CONFIG['batch_size'] = args.batch_size
    lr = getattr(args, 'lr', None)
    if lr is not None:
        STAGE2_CONFIG['learning_rate'] = lr

    # 'basic' is absent on the train-stage2-ultimate namespace; default to
    # advanced mode in that case.
    use_basic = getattr(args, 'basic', False)
    mode = "Basic" if use_basic else "Advanced"
    val_status = "Disabled" if args.no_validation else "Enabled"
    logger.info(f"Training mode: {mode}")
    logger.info(f"Validation: {val_status}")

    run_training_stage2_unified(
        use_advanced=not use_basic,
        use_validation=not args.no_validation
    )


def handle_train_stage2_ultimate(args):
    """Handle Stage 2 ultimate training (alias for advanced mode)."""
    # Force advanced mode, then delegate to the standard Stage 2 handler.
    args.basic = False
    handle_train_stage2(args)


def handle_high_performance_training(args):
    """Handle high-performance training workflow.

    Builds a HighPerformanceTrainer from the CLI flags and runs the
    requested mode: 'full' (complete 8-stage pipeline), 'stage1',
    'stage2' (with its prerequisite stages), 'optimize' (hyperparameter
    search only), or 'evaluate' (final evaluation only).

    Exits with status 0 on user interrupt and 1 on any other failure.
    """
    from train_high_performance import HighPerformanceTrainer

    def _fmt_metric(value):
        # Bug fix: the old code applied ':.4f' to the 'N/A' fallback string,
        # which raised ValueError whenever a metric key was missing.
        return f"{value:.4f}" if isinstance(value, (int, float)) else 'N/A'

    logger.info("=" * 80)
    logger.info("PRISM High-Performance Training System")
    logger.info("=" * 80)
    logger.info(f"Mode: {args.mode}")
    logger.info(f"Configuration file: {args.config}")
    if args.n_trials:
        logger.info(f"Optimization trials: {args.n_trials}")
    logger.info("")

    # Initialize trainer
    trainer = HighPerformanceTrainer(config_path=args.config)

    # Override n_trials if specified
    if args.n_trials:
        trainer.config['optimization']['n_trials'] = args.n_trials

    # Override validation settings
    use_validation = not args.no_validation
    use_train_for_threshold = args.use_train_for_threshold
    fast_threshold_search = args.fast_threshold_search

    # --resume-from implies --use-cache so earlier stages can be skipped.
    use_cache = args.use_cache or (args.resume_from is not None)
    resume_from = args.resume_from

    # Override hyperparameter optimization settings
    skip_stage1_hyperparam = args.skip_stage1_hyperparam
    skip_stage2_hyperparam = args.skip_stage2_hyperparam

    if args.no_validation:
        logger.info("⚠️  Validation disabled - all training will proceed without validation set")
    if args.use_train_for_threshold:
        logger.info("⚠️  Using training set for threshold optimization (instead of validation set)")
    if fast_threshold_search:
        logger.info("⚠️  Using fast threshold search (8 values instead of 18)")
    if skip_stage1_hyperparam:
        logger.info("⏭️  Skipping Stage 1 threshold optimization (saves 10-30 minutes)")
    if skip_stage2_hyperparam:
        logger.info("⏭️  Skipping Stage 2 hyperparameter optimization (saves 2-6 hours)")
    if use_cache:
        logger.info("✓ Cache enabled - will skip completed stages if checkpoints exist")
    if resume_from:
        logger.info(f"✓ Resuming from {resume_from} (skipping all previous stages)")

    # Store resolved flags on the trainer so the pipeline stages can read them.
    trainer.use_validation = use_validation
    trainer.use_train_for_threshold = use_train_for_threshold
    trainer.fast_threshold_search = fast_threshold_search
    trainer.use_cache = use_cache
    trainer.resume_from = resume_from
    trainer.skip_stage1_hyperparam = skip_stage1_hyperparam
    trainer.skip_stage2_hyperparam = skip_stage2_hyperparam

    try:
        if args.mode == 'full':
            logger.info("Running complete 8-stage training pipeline...")
            logger.info("Estimated time: 12-24 hours")
            logger.info("")
            final_results = trainer.run_full_pipeline()

            logger.info("")
            logger.info("=" * 80)
            logger.info("Training Complete!")
            logger.info("=" * 80)
            logger.info(f"Final mAP: {final_results.get('map', 'N/A')}")
            logger.info(f"Results saved to: {trainer.results_dir}/")

        elif args.mode == 'stage1':
            logger.info("Running Stage 1 training...")
            logger.info("Estimated time: 2-4 hours")
            logger.info("")
            trainer.stage1_train_yolo()

        elif args.mode == 'stage2':
            logger.info("Running Stage 2 training...")
            logger.info("Estimated time: 3-6 hours")
            logger.info("")
            # Stage 2 depends on optimized thresholds, generated proposals
            # and tuned hyperparameters, so run those stages first.
            logger.info("Checking prerequisites...")
            trainer.stage2_optimize_stage1_threshold()
            trainer.stage3_generate_proposals(threshold=None)  # Use optimized threshold
            trainer.stage4_optimize_stage2_hyperparams()
            trainer.stage5_train_stage2_full(best_hyperparams=None)  # Use optimized params

        elif args.mode == 'optimize':
            logger.info("Running hyperparameter optimization...")
            n_trials = args.n_trials or trainer.config['optimization'].get('n_trials', 30)
            logger.info(f"Optimization trials: {n_trials}")
            logger.info(f"Estimated time: {n_trials * 0.3:.1f}-{n_trials * 0.5:.1f} hours")
            logger.info("")
            best_params = trainer.stage4_optimize_stage2_hyperparams()

            logger.info("")
            logger.info("=" * 80)
            logger.info("Optimization Complete!")
            logger.info("=" * 80)
            logger.info("Best hyperparameters:")
            for key, value in best_params.items():
                logger.info(f"  {key}: {value}")
            logger.info("")
            logger.info("View visualization results:")
            logger.info("  optimization_results/*_history.html")
            logger.info("  optimization_results/*_importance.html")

        elif args.mode == 'evaluate':
            logger.info("Running final evaluation...")
            logger.info("")
            final_results = trainer.stage8_final_evaluation(deploy_thresholds=None)

            logger.info("")
            logger.info("=" * 80)
            logger.info("Evaluation Complete!")
            logger.info("=" * 80)
            # Use the safe formatter: metrics may legitimately be absent.
            logger.info(f"mAP: {_fmt_metric(final_results.get('map'))}")
            logger.info(f"Precision: {_fmt_metric(final_results.get('precision'))}")
            logger.info(f"Recall: {_fmt_metric(final_results.get('recall'))}")
            logger.info(f"Detailed report: {trainer.results_dir}/final_evaluation/")

    except KeyboardInterrupt:
        logger.info("\nTraining interrupted by user")
        logger.info(f"Partial results may be saved at: {trainer.results_dir}/")
        sys.exit(0)
    except Exception as e:
        logger.error(f"Training failed: {e}", exc_info=True)
        sys.exit(1)


def handle_evaluate(args):
    """Handle model evaluation."""
    from src.evaluation.evaluate_model import run_evaluation

    logger.info("=" * 80)
    logger.info("Model Evaluation on Test Set")
    logger.info("=" * 80)

    # Forward the CLI options straight to the evaluation entry point.
    run_evaluation(
        use_ema=args.use_ema,
        output_dir=args.output_dir,
        conf_thresh=args.conf_thresh,
    )


def handle_serve(args):
    """Handle API server deployment.

    Boots the FastAPI app under uvicorn on the requested host and port.
    Blocks until the server is stopped (CTRL+C).
    """
    # Removed the unused `from src.config import SERVER_CONFIG` import:
    # nothing in this handler referenced it.
    import uvicorn
    from src.server import app

    logger.info("=" * 80)
    logger.info("Starting PRISM API Server")
    logger.info("=" * 80)

    host = args.host
    port = args.port

    logger.info(f"Server address: http://{host}:{port}")
    logger.info(f"EMA model: {'Enabled' if args.use_ema else 'Disabled'}")
    # NOTE(review): --use-ema is only logged here; model selection presumably
    # happens inside src.server — confirm against that module.
    logger.info("Press CTRL+C to stop the server")

    uvicorn.run(app, host=host, port=port)


def handle_inference(args):
    """Handle local inference."""
    from src.inference.local_inference import LocalInference

    logger.info("=" * 80)
    logger.info("Local Inference")
    logger.info("=" * 80)

    # At least one input source is required; --image takes precedence
    # over --dir when both are supplied.
    if not (args.image or args.dir):
        logger.error("Error: Please specify either --image or --dir")
        sys.exit(1)

    engine = LocalInference(use_ema=args.use_ema, use_tta=args.use_tta)

    if args.image:
        logger.info(f"Processing single image: {args.image}")
        detections = engine.predict_single(args.image, conf_thresh=args.conf_thresh)

        import json
        logger.info("\nDetection results:")
        logger.info(json.dumps(detections, indent=2, ensure_ascii=False))
    elif args.dir:
        logger.info(f"Processing directory: {args.dir}")
        engine.predict_batch(args.dir, output_json=args.output,
                             conf_thresh=args.conf_thresh)


def handle_gradio(args):
    """Handle Gradio web UI."""
    logger.info("=" * 80)
    logger.info("Starting Gradio Web Interface")
    logger.info("=" * 80)

    from src.inference.gradio_app import initialize_system, create_ui

    # Initialize the inference system before building the UI.
    initialize_system()

    # Build the interface, enable request queuing, and launch the server.
    demo = create_ui()
    demo.queue(max_size=20)
    demo.launch(
        server_name=args.host,
        server_port=args.port,
        share=args.share,
        show_error=True
    )


def handle_ensemble(args):
    """Handle ensemble inference."""
    from src.inference.ensemble import EnsembleInference
    from pathlib import Path

    logger.info("=" * 80)
    logger.info("Ensemble Inference")
    logger.info("=" * 80)

    if not (args.image or args.dir):
        logger.error("Error: Please specify either --image or --dir")
        sys.exit(1)

    # Fall back to the default checkpoint trio (base / EMA / SWA) when no
    # explicit model paths were given; keep only the ones present on disk.
    if not args.models:
        from src.config import STAGE2_CONFIG
        base_path = STAGE2_CONFIG['weights_path']
        candidates = [
            base_path,
            base_path.replace('.pth', '_ema.pth'),
            base_path.replace('.pth', '_swa.pth'),
        ]
        args.models = [p for p in candidates if Path(p).exists()]

    logger.info(f"Using {len(args.models)} models for ensemble")

    ensemble = EnsembleInference(
        model_paths=args.models,
        weights=args.weights,
        strategy=args.strategy
    )

    # Run inference
    if args.image:
        logger.info(f"Processing image: {args.image}")
        # Implementation depends on your ensemble module
    elif args.dir:
        logger.info(f"Processing directory: {args.dir}")
        # Implementation depends on your ensemble module

def handle_data_check(args):
    """Handle data quality check."""
    from src.utils.data_quality_check import DataQualityChecker
    from src.config import DATA_YAML

    logger.info("=" * 80)
    logger.info("Data Quality Check")
    logger.info("=" * 80)

    # Prefer an explicit --data-yaml path, else fall back to the config value.
    data_yaml = args.data_yaml if args.data_yaml else DATA_YAML
    logger.info(f"Checking dataset: {data_yaml}")

    checker = DataQualityChecker(data_yaml)
    checker.run_checks()


def handle_error_analysis(args):
    """Run error analysis over a saved predictions file.

    Reads class names from the project's data YAML ('names' entry),
    loads the predictions JSON given via --predictions, and writes the
    analyzer's report into --output-dir.
    """
    import json
    from analysis.error_analysis import ErrorAnalyzer
    from src.config import DATA_YAML
    import yaml

    banner = "=" * 80
    for line in (banner, "Error Analysis", banner):
        logger.info(line)

    # Class names come from the dataset YAML's 'names' entry.
    with open(DATA_YAML) as fh:
        class_names = yaml.safe_load(fh)['names']

    # Load predictions
    with open(args.predictions) as fh:
        predictions = json.load(fh)

    analyzer = ErrorAnalyzer(class_names)

    # Process predictions
    # (Implementation depends on your prediction format)

    # Generate report
    analyzer.generate_report(output_dir=args.output_dir)


def handle_gradcam(args):
    """Generate Grad-CAM visualizations for the Stage 2 model.

    Supports a single image (--image, written to --output) or a whole
    directory (--dir, written under --output-dir). Loads the Stage 2
    weights, preferring the EMA checkpoint when present, and optionally
    focuses on a specific named layer via --target-layer.
    """
    from src.analysis.gradcam import generate_gradcam_for_image, batch_generate_gradcam
    from src.models.refiner import ROIRefinerModel
    from src.config import DEVICE, STAGE2_CONFIG
    import torch
    from pathlib import Path

    logger.info("=" * 80)
    logger.info("Grad-CAM Visualization")
    logger.info("=" * 80)

    if not args.image and not args.dir:
        logger.error("Error: Please specify either --image or --dir")
        sys.exit(1)

    # Load model
    logger.info("Loading model")
    model = ROIRefinerModel(device=DEVICE)

    # Load weights (prefer the EMA checkpoint when one exists)
    model_path = STAGE2_CONFIG['weights_path']
    ema_path = STAGE2_CONFIG['weights_path'].replace('.pth', '_ema.pth')
    if Path(ema_path).exists():
        model_path = ema_path
        logger.info("Using EMA model")

    # Checkpoints may be a full training dict or a bare state dict.
    checkpoint = torch.load(model_path, map_location=DEVICE)
    if 'model_state_dict' in checkpoint:
        model.load_state_dict(checkpoint['model_state_dict'])
    else:
        model.load_state_dict(checkpoint)

    model.eval()
    logger.info(f"Model loaded from: {model_path}")

    # Resolve the requested target layer by name; on a miss, list the
    # available Conv2d layers to help the user pick a valid one.
    target_layer = None
    if args.target_layer:
        try:
            target_layer = dict(model.named_modules())[args.target_layer]
            logger.info(f"Using specified target layer: {args.target_layer}")
        except KeyError:
            logger.error(f"Layer '{args.target_layer}' not found in model")
            logger.info("Available convolutional layers:")
            for name, module in model.named_modules():
                if isinstance(module, torch.nn.Conv2d):
                    logger.info(f"  - {name}")
            sys.exit(1)

    # Generate Grad-CAM
    try:
        if args.image:
            # Single image mode. The helper receives the output path, so
            # its return value is not needed here (previously bound to an
            # unused local).
            logger.info(f"Processing single image: {args.image}")
            generate_gradcam_for_image(
                model=model,
                image_path=args.image,
                output_path=args.output,
                target_layer=target_layer
            )
            logger.info(f"Output saved to: {args.output}")

        elif args.dir:
            # Batch mode
            logger.info(f"Processing directory: {args.dir}")
            batch_generate_gradcam(
                model=model,
                image_dir=args.dir,
                output_dir=args.output_dir,
                target_layer=target_layer
            )
            logger.info(f"Outputs saved to: {args.output_dir}")

        logger.info("=" * 80)
        logger.info("Grad-CAM Generation Complete")
        logger.info("=" * 80)

    except Exception as e:
        # Log for the CLI transcript, then re-raise so main() reports
        # the failure with a traceback and a nonzero exit code.
        logger.error(f"Grad-CAM generation failed: {e}")
        raise


def handle_hp_search(args):
    """Run hyperparameter search for Stage 2 training.

    Launches --n-trials short training trials (optionally bounded by
    --timeout seconds) and prints follow-up instructions built from the
    best parameters found.
    """
    from src.training.hyperparameter_search import search_hyperparameters

    logger.info("=" * 80)
    logger.info("Hyperparameter Search")
    logger.info("=" * 80)
    logger.info(f"Number of trials: {args.n_trials}")
    if args.timeout:
        logger.info(f"Timeout: {args.timeout} seconds ({args.timeout / 3600:.1f} hours)")
    logger.info(f"Study name: {args.study_name}")
    logger.info("")
    logger.info("This will run multiple short training trials to find optimal hyperparameters.")
    logger.info("Results will be saved to: hyperparameter_search_results.json")
    logger.info("")

    try:
        best_params = search_hyperparameters(
            n_trials=args.n_trials,
            timeout=args.timeout
        )

        # Bug fix: the previous code applied the ':.2e' format spec to
        # the 'N/A' string fallback, which raises ValueError whenever
        # 'learning_rate' is absent. Format only numeric values.
        lr = best_params.get('learning_rate')
        lr_text = f"{lr:.2e}" if isinstance(lr, (int, float)) else 'N/A'
        batch_size = best_params.get('batch_size', 'N/A')

        logger.info("")
        logger.info("=" * 80)
        logger.info("Search Complete - Next Steps")
        logger.info("=" * 80)
        logger.info("1. Review results in: hyperparameter_search_results.json")
        logger.info("2. Check visualizations: hp_search_history.png, hp_param_importance.png")
        logger.info("3. Update config with best parameters")
        logger.info("4. Retrain with full epochs:")
        logger.info(
            f"   python main.py train-stage2 --lr {lr_text} --batch-size {batch_size}")

    except KeyboardInterrupt:
        logger.info("\nSearch interrupted by user")
        logger.info("Partial results may be available in hyperparameter_search_results.json")
    except Exception as e:
        logger.error(f"Search failed: {e}")
        raise


def handle_export(args):
    """Export the Stage 2 model to a deployable format.

    Supported formats: 'onnx' (optionally simplified via onnxsim) and
    'torchscript' (traced). 'tensorrt' is not implemented and exits with
    an error. Loads weights from --model, falling back to the configured
    Stage 2 weights path. Tracing uses a fixed 1x3x224x224 dummy input;
    the ONNX export marks the batch dimension as dynamic.
    """
    from src.models.refiner import ROIRefinerModel
    from src.config import DEVICE, STAGE2_CONFIG
    import torch
    from pathlib import Path

    logger.info("=" * 80)
    logger.info(f"Exporting Model to {args.format.upper()}")
    logger.info("=" * 80)

    # Load model (checkpoint may be a full training dict or a bare state dict)
    model = ROIRefinerModel(device=DEVICE)
    model_path = args.model or STAGE2_CONFIG['weights_path']

    checkpoint = torch.load(model_path, map_location=DEVICE)
    if 'model_state_dict' in checkpoint:
        model.load_state_dict(checkpoint['model_state_dict'])
    else:
        model.load_state_dict(checkpoint)

    model.eval()

    # Create dummy input
    dummy_input = torch.randn(1, 3, 224, 224).to(DEVICE)

    output_path = Path(args.output)

    if args.format == 'onnx':
        output_file = output_path.with_suffix('.onnx')
        torch.onnx.export(
            model,
            dummy_input,
            output_file,
            export_params=True,
            opset_version=11,
            do_constant_folding=True,
            input_names=['input'],
            output_names=['class_logits', 'bbox_deltas'],
            dynamic_axes={'input': {0: 'batch_size'}}
        )

        if args.simplify:
            try:
                import onnx
                from onnxsim import simplify
                # str() for compatibility with onnx versions that do not
                # accept pathlib.Path objects.
                model_onnx = onnx.load(str(output_file))
                model_simp, check = simplify(model_onnx)
                # Bug fix: 'check' reports whether the simplified model
                # matched the original; the old code ignored it and could
                # silently save an invalid simplification.
                if check:
                    onnx.save(model_simp, str(output_file))
                    logger.info("ONNX model simplified")
                else:
                    logger.warning("onnxsim verification failed, keeping unsimplified model")
            except ImportError:
                logger.warning("onnxsim not installed, skipping simplification")

        logger.info(f"Model exported to: {output_file}")

    elif args.format == 'torchscript':
        output_file = output_path.with_suffix('.pt')
        traced_model = torch.jit.trace(model, dummy_input)
        traced_model.save(output_file)
        logger.info(f"Model exported to: {output_file}")

    elif args.format == 'tensorrt':
        logger.error("TensorRT export not implemented yet")
        sys.exit(1)


def handle_optimize_threshold(args):
    """Search for the best Stage 1 proposal confidence threshold.

    Sweeps --num-points thresholds between --threshold-min and
    --threshold-max, scoring each against the desired proposals-per-image
    range, then prints follow-up instructions for the best threshold.
    """
    from src.optimization.optimize_proposal_threshold import run_optimization

    banner = "=" * 80
    logger.info(banner)
    logger.info("Automatic Proposal Threshold Optimization")
    logger.info(banner)
    logger.info(f"Threshold range: {args.threshold_min} - {args.threshold_max}")
    logger.info(f"Number of test points: {args.num_points}")
    logger.info(f"Target proposals/image: {args.target_min} - {args.target_max}")
    if args.sample_size:
        logger.info(f"Validation sample size: {args.sample_size}")
    else:
        logger.info("Using full validation set")
    logger.info("")
    logger.info("This will test multiple thresholds to find the optimal one.")
    logger.info("Estimated time: 20-60 minutes (depends on dataset size)")
    logger.info("")

    try:
        opt_kwargs = {
            "data_yaml": args.data_yaml,
            "stage1_weights": args.stage1_weights,
            "threshold_range": (args.threshold_min, args.threshold_max),
            "num_points": args.num_points,
            "sample_size": args.sample_size,
            "output_dir": args.output_dir,
            "target_min": args.target_min,
            "target_max": args.target_max,
        }
        results = run_optimization(**opt_kwargs)
        best_threshold = results['best_threshold']

        # Summarize the outcome and the recommended follow-up commands.
        next_steps = (
            "",
            banner,
            "Optimization Complete - Next Steps",
            banner,
            f"1. Best threshold found: {best_threshold:.4f}",
            f"2. Review results in: {args.output_dir}/",
            f"3. Check visualization: {args.output_dir}/optimization_curves.png",
            f"4. Read recommendations: {args.output_dir}/RECOMMENDATIONS.md",
            "5. Apply the best threshold:",
            f"   python main.py gen-proposals --conf-thresh {best_threshold:.4f}",
            "6. Retrain Stage 2 with new proposals:",
            "   python main.py train-stage2 --epochs 80",
        )
        for line in next_steps:
            logger.info(line)

    except KeyboardInterrupt:
        logger.info("\nOptimization interrupted by user")
        logger.info(f"Partial results may be available in {args.output_dir}/")
    except Exception as e:
        logger.error(f"Optimization failed: {e}")
        raise


# Diagnostic handlers

def handle_diag_stage1(args):
    """Run the comprehensive Stage 1 diagnostics suite."""
    from src.diagnostics.stage1_diag import diagnose_stage1

    banner = "=" * 80
    for line in (banner, "Stage 1 Diagnostics", banner):
        logger.info(line)

    diag_kwargs = {
        "data_yaml": args.data_yaml,
        "weights_path": args.weights,
        "output_dir": args.output_dir,
    }
    diagnose_stage1(**diag_kwargs)


def handle_diag_stage1_test(args):
    """Run Stage 1 diagnostics against the test split."""
    from src.diagnostics.stage1_diag import diagnose_stage1_on_test

    banner = "=" * 80
    for line in (banner, "Stage 1 Test Set Diagnostics", banner):
        logger.info(line)

    diag_kwargs = {
        "data_yaml": args.data_yaml,
        "weights_path": args.weights,
        "device": args.device,
    }
    diagnose_stage1_on_test(**diag_kwargs)


def handle_diag_stage2(args):
    """Run the comprehensive Stage 2 diagnostics suite."""
    from src.diagnostics.stage2_diag import diagnose_stage2

    banner = "=" * 80
    for line in (banner, "Stage 2 Diagnostics", banner):
        logger.info(line)

    diag_kwargs = {
        "proposals_json": args.proposals,
        "weights_path": args.weights,
        "data_yaml": args.data_yaml,
    }
    diagnose_stage2(**diag_kwargs)


def handle_diag_stage2_check(args):
    """Run the comprehensive Stage 2 logic check.

    Exits the process with the status code returned by the check, so
    the result is visible to shell scripts and CI.
    """
    from src.diagnostics.stage2_diag import comprehensive_stage2_check

    banner = "=" * 80
    for line in (banner, "Comprehensive Stage 2 Check", banner):
        logger.info(line)

    status = comprehensive_stage2_check(
        data_yaml=args.data_yaml,
        proposals_json=args.proposals
    )
    sys.exit(status)


def handle_diag_collapse(args):
    """Run model-collapse diagnostics on a sample of test images."""
    from src.diagnostics.stage2_diag import diagnose_model_collapse

    banner = "=" * 80
    for line in (banner, "Model Collapse Diagnostics", banner):
        logger.info(line)

    diag_kwargs = {
        "data_yaml": args.data_yaml,
        "weights_path": args.weights,
        "device": args.device,
        "num_test_images": args.num_images,
    }
    diagnose_model_collapse(**diag_kwargs)


def handle_diag_bbox(args):
    """Run bounding-box regression diagnostics over stored proposals."""
    from src.diagnostics.stage2_diag import diagnose_bbox_regression

    banner = "=" * 80
    for line in (banner, "Bbox Regression Diagnostics", banner):
        logger.info(line)

    diag_kwargs = {
        "proposals_json": args.proposals,
        "num_images": args.num_images,
    }
    diagnose_bbox_regression(**diag_kwargs)


def handle_check_proposals(args):
    """Run a quality check over a generated proposals file."""
    from src.diagnostics.quality_checks import check_proposals

    banner = "=" * 80
    for line in (banner, "Proposals Quality Check", banner):
        logger.info(line)

    check_kwargs = {
        "proposals_json": args.proposals,
        "sample_size": args.sample_size,
    }
    check_proposals(**check_kwargs)


def handle_analyze_iou(args):
    """Analyze the IoU distribution of proposals against ground truth."""
    from src.diagnostics.quality_checks import analyze_iou_distribution

    banner = "=" * 80
    for line in (banner, "IoU Distribution Analysis", banner):
        logger.info(line)

    analysis_kwargs = {
        "data_yaml": args.data_yaml,
        "proposals_json": args.proposals,
        "output_file": args.output,
    }
    analyze_iou_distribution(**analysis_kwargs)


def handle_vis_stage1(args):
    """Render visualizations of Stage 1 proposal quality."""
    from src.diagnostics.visualization import visualize_stage1_quality

    banner = "=" * 80
    for line in (banner, "Stage 1 Quality Visualization", banner):
        logger.info(line)

    vis_kwargs = {
        "data_yaml": args.data_yaml,
        "weights_path": args.weights,
        "device": args.device,
        "num_images": args.num_images,
        "output_prefix": args.output_prefix,
        "conf_thresh": args.conf_thresh,
    }
    visualize_stage1_quality(**vis_kwargs)


def handle_vis_predictions(args):
    """Render end-to-end (Stage 1 + Stage 2) prediction visualizations."""
    from src.diagnostics.visualization import visualize_predictions

    banner = "=" * 80
    for line in (banner, "Predictions Visualization", banner):
        logger.info(line)

    vis_kwargs = {
        "data_yaml": args.data_yaml,
        "stage1_weights": args.stage1_weights,
        "stage2_weights": args.stage2_weights,
        "device": args.device,
        "num_images": args.num_images,
        "output_prefix": args.output_prefix,
    }
    visualize_predictions(**vis_kwargs)


def main():
    """Main entry point.

    Builds the argument parser with one subcommand per operation,
    dispatches the parsed command to its handler, and converts Ctrl-C
    and unexpected errors into clean process exit codes.
    """
    parser = argparse.ArgumentParser(
        description='PRISM Cascade Detection System',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  High-Performance Training (Recommended):
    python main.py train-hp --mode full         # Complete 8-stage workflow (12-24h)
    python main.py train-hp --mode stage1       # Stage 1 only (2-4h)
    python main.py train-hp --mode stage2       # Stage 2 only (3-6h)
    python main.py train-hp --mode optimize     # Hyperparameter search only (6-12h)
    python main.py train-hp --mode evaluate     # Evaluation only (10min)
    python main.py train-hp --mode optimize --n-trials 50  # Custom trials
    python main.py train-hp --mode full --skip-stage1-hyperparam  # Skip Stage 1 threshold optimization
    python main.py train-hp --mode full --skip-stage2-hyperparam  # Skip Stage 2 hyperparameter search (saves 2-6h)
    python main.py train-hp --use-cache --skip-stage2-hyperparam  # Fast testing mode

  Basic Training:
    python main.py train-stage1
    python main.py gen-proposals
    python main.py train-stage2
    python main.py train-stage2-ultimate  # Same as train-stage2 (advanced mode)
    python main.py train-stage2 --basic   # Basic mode

  Evaluation:
    python main.py evaluate --use-ema
    python main.py error-analysis --predictions results.json

  Inference:
    python main.py infer --image test.jpg
    python main.py infer --dir images/ --use-tta
    python main.py ensemble --dir images/ --strategy voting

  Deployment:
    python main.py serve --port 8000
    python main.py gradio --share

  Analysis & Tools:
    python main.py data-check
    python main.py gradcam --image test.jpg
    python main.py hp-search --n-trials 100
    python main.py export --format onnx
    python main.py optimize-threshold  # Auto-optimize proposal threshold

  Diagnostics:
    python main.py diag-stage1                # Stage 1 comprehensive diagnostics
    python main.py diag-stage1-test           # Stage 1 test set performance
    python main.py diag-stage2                # Stage 2 comprehensive diagnostics
    python main.py diag-stage2-check          # Stage 2 logic check
    python main.py diag-collapse              # Model collapse diagnostics
    python main.py diag-bbox                  # Bbox regression diagnostics
    python main.py check-proposals            # Proposals quality check
    python main.py analyze-iou                # IoU distribution analysis
    python main.py vis-stage1                 # Visualize Stage 1 quality
    python main.py vis-predictions            # Visualize predictions

For more information, visit: https://github.com/yourrepo/prism
        """
    )

    subparsers = parser.add_subparsers(dest='command', help='Available commands')

    # Register every subcommand parser, grouped by area.
    setup_functions = (
        # Training commands
        setup_stage1_parser,
        setup_proposals_parser,
        setup_stage2_parser,
        setup_stage2_ultimate_parser,
        setup_high_performance_parser,
        # Evaluation commands
        setup_eval_parser,
        setup_error_analysis_parser,
        # Inference commands
        setup_inference_parser,
        setup_ensemble_parser,
        # Deployment commands
        setup_serve_parser,
        setup_gradio_parser,
        # Analysis & visualization
        setup_gradcam_parser,
        setup_data_check_parser,
        # Advanced tools
        setup_hp_search_parser,
        setup_export_parser,
        setup_optimize_threshold_parser,
        # Diagnostic tools
        setup_diag_stage1_parser,
        setup_diag_stage1_test_parser,
        setup_diag_stage2_parser,
        setup_diag_stage2_check_parser,
        setup_diag_collapse_parser,
        setup_diag_bbox_parser,
        setup_check_proposals_parser,
        setup_analyze_iou_parser,
        setup_vis_stage1_parser,
        setup_vis_predictions_parser,
    )
    for setup in setup_functions:
        setup(subparsers)

    args = parser.parse_args()

    if not args.command:
        parser.print_help()
        sys.exit(1)

    # Route to appropriate handler
    handlers = {
        'train-stage1': handle_train_stage1,
        'gen-proposals': handle_gen_proposals,
        'train-stage2': handle_train_stage2,
        'train-stage2-ultimate': handle_train_stage2_ultimate,
        'train-hp': handle_high_performance_training,
        'evaluate': handle_evaluate,
        'serve': handle_serve,
        'infer': handle_inference,
        'gradio': handle_gradio,
        'ensemble': handle_ensemble,
        'data-check': handle_data_check,
        'error-analysis': handle_error_analysis,
        'gradcam': handle_gradcam,
        'hp-search': handle_hp_search,
        'export': handle_export,
        'optimize-threshold': handle_optimize_threshold,
        # Diagnostic commands
        'diag-stage1': handle_diag_stage1,
        'diag-stage1-test': handle_diag_stage1_test,
        'diag-stage2': handle_diag_stage2,
        'diag-stage2-check': handle_diag_stage2_check,
        'diag-collapse': handle_diag_collapse,
        'diag-bbox': handle_diag_bbox,
        'check-proposals': handle_check_proposals,
        'analyze-iou': handle_analyze_iou,
        'vis-stage1': handle_vis_stage1,
        'vis-predictions': handle_vis_predictions,
    }

    # Robustness fix: look the handler up before the try block so a
    # command registered by a setup parser but missing from the dict
    # produces a clear message instead of a KeyError swallowed by the
    # generic except below (which logged it as a cryptic "Error: 'cmd'").
    handler = handlers.get(args.command)
    if handler is None:
        logger.error(f"No handler registered for command: {args.command}")
        sys.exit(1)

    try:
        handler(args)
    except KeyboardInterrupt:
        logger.info("\nOperation cancelled by user")
        sys.exit(0)
    except Exception as e:
        logger.error(f"Error: {e}", exc_info=True)
        sys.exit(1)


if __name__ == "__main__":
    main()