#!/usr/bin/env python
"""
PRISM High-Performance Training Pipeline
Objective: Maximize mAP performance

Complete workflow:
1. Data quality check and preprocessing
2. Stage 1 YOLOv10 training and threshold optimization
3. Proposal generation and quality analysis
4. Stage 2 hyperparameter optimization (Optuna)
5. Stage 2 full training (with best hyperparameters)
6. Model ensembling and test-time augmentation
7. Final evaluation

Usage:
    python train_high_performance.py --mode full          # Complete pipeline
    python train_high_performance.py --mode stage1        # Stage 1 only
    python train_high_performance.py --mode stage2        # Stage 2 only
    python train_high_performance.py --mode optimize      # Hyperparameter optimization only
    python train_high_performance.py --mode evaluate      # Evaluation only
"""

import argparse
import logging
import sys
from pathlib import Path
from datetime import datetime
import json
import yaml
import torch
import numpy as np

# Setup logging: mirror every message to a per-run timestamped file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='[%(asctime)s] %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    handlers=[
        # One log file per invocation, named by start time (e.g. training_20240101_120000.log).
        logging.FileHandler(f'training_{datetime.now().strftime("%Y%m%d_%H%M%S")}.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class HighPerformanceTrainer:
    """High-performance training pipeline manager."""

    def __init__(self, config_path='config.yaml'):
        """
        Initialize the trainer with its configuration and default switches.

        Args:
            config_path: Path to a YAML configuration file; built-in
                defaults are used when the file does not exist.
        """
        self.config = self._load_config(config_path)

        # All pipeline artifacts are written under this directory.
        self.results_dir = Path('high_performance_results')
        self.results_dir.mkdir(exist_ok=True)

        # Validation set control (callers may flip these after construction).
        self.use_validation = True            # validate during Stage 2 training
        self.use_train_for_threshold = False  # threshold search uses the validation set
        self.fast_threshold_search = False    # detailed (slower) threshold search

        # Cache control (callers may flip these after construction).
        self.use_cache = False   # retrain every stage unless enabled
        self.resume_from = None  # name of the stage to resume from (None = from scratch)

        # Hyperparameter optimization control (callers may flip these after construction).
        self.skip_stage1_hyperparam = False  # run Stage 1 threshold optimization
        self.skip_stage2_hyperparam = False  # run Stage 2 hyperparameter optimization

        # Per-stage records accumulated as the pipeline runs.
        self.training_history = {
            'start_time': datetime.now().isoformat(),
            'stages': {},
        }

    def _load_config(self, config_path):
        """Load the YAML configuration file, or fall back to built-in defaults."""
        cfg_file = Path(config_path)
        if not cfg_file.exists():
            # No file on disk: return the baked-in default configuration.
            return {
                'stage1': {
                    'epochs': 100,
                    'batch_size': 16,
                    'img_size': 640,
                    'patience': 15,
                },
                'stage2': {
                    'epochs': 100,
                    'batch_size': 32,
                    'learning_rate': 1e-4,
                    'patience': 20,
                },
                'optimization': {
                    'n_trials': 30,
                    'timeout': None,
                },
            }
        with cfg_file.open() as f:
            return yaml.safe_load(f)

    def _check_stage_cache(self, stage_name):
        """
        Check whether a usable cached artifact exists for a pipeline stage.

        Args:
            stage_name: Name of the stage ('stage1', 'stage2', 'proposals', etc.)

        Returns:
            bool: True if cache exists and is valid
        """
        from src.config import STAGE1_CONFIG, STAGE2_CONFIG
        import os

        # Map each cacheable stage to (config dict, path key, log-message prefix).
        cache_lookup = {
            'stage1': (STAGE1_CONFIG, 'weights_path', ' Found cached Stage 1 weights: '),
            'proposals': (STAGE2_CONFIG, 'proposals_json', ' Found cached proposals: '),
            'stage2': (STAGE2_CONFIG, 'weights_path', ' Found cached Stage 2 weights: '),
        }

        entry = cache_lookup.get(stage_name)
        if entry is not None:
            cfg, key, prefix = entry
            artifact = cfg.get(key)
            # A hit requires both a configured path and an existing file.
            if artifact and os.path.exists(artifact):
                logger.info(f"{prefix}{artifact}")
                return True

        return False

    def _should_skip_stage(self, stage_name):
        """
        Determine if a stage should be skipped based on cache settings.

        Args:
            stage_name: Name of the stage

        Returns:
            bool: True if stage should be skipped
        """
        # If cache is disabled, never skip
        if not self.use_cache:
            return False

        # If resuming from a specific stage, skip all earlier stages.
        # BUGFIX: the order now matches the actual pipeline execution order
        # (proposals are generated BEFORE Stage 2 training -- see
        # run_full_pipeline). Previously 'proposals' was listed after
        # 'stage2', so resuming from 'stage2' did not skip proposal
        # generation, and resuming from 'proposals' wrongly skipped Stage 2.
        if self.resume_from:
            stage_order = ['stage1', 'proposals', 'stage2', 'stage3', 'stage4', 'stage5']
            if stage_name in stage_order and self.resume_from in stage_order:
                resume_idx = stage_order.index(self.resume_from)
                current_idx = stage_order.index(stage_name)
                if current_idx < resume_idx:
                    logger.info(f"⏭  Skipping {stage_name} (resuming from {self.resume_from})")
                    return True

        # Otherwise skip only when a valid cached artifact exists.
        if self._check_stage_cache(stage_name):
            return True

        return False

    def run_full_pipeline(self):
        """Run the complete 8-step training pipeline.

        Returns:
            dict: final metrics with keys 'map', 'precision', 'recall', 'f1'.

        NOTE(review): many banner/log strings below look truncated
        (non-ASCII text appears to have been stripped) -- restore if needed.
        """
        logger.info("=" * 80)
        logger.info("PRISM ")
        logger.info("=" * 80)

        # Step 0: dataset quality check (logs warnings, never aborts).
        logger.info("\n" + "=" * 80)
        logger.info("0: ")
        logger.info("=" * 80)
        self.stage0_data_quality_check()

        # Step 1: train the Stage 1 YOLO detector.
        logger.info("\n" + "=" * 80)
        logger.info("1: Stage 1 YOLO")
        logger.info("=" * 80)
        stage1_results = self.stage1_train_yolo()

        # Step 2: optimize Stage 1 confidence/NMS thresholds.
        logger.info("\n" + "=" * 80)
        logger.info("2: Stage 1")
        logger.info("=" * 80)
        stage1_thresh = self.stage2_optimize_stage1_threshold()

        # Step 3: generate proposals using the tuned thresholds.
        logger.info("\n" + "=" * 80)
        logger.info("3: Proposals")
        logger.info("=" * 80)
        self.stage3_generate_proposals(stage1_thresh)

        # Step 4: Optuna search over Stage 2 hyperparameters.
        logger.info("\n" + "=" * 80)
        logger.info("4: Stage 2")
        logger.info("=" * 80)
        best_hyperparams = self.stage4_optimize_stage2_hyperparams()

        # Step 5: full Stage 2 training with the best hyperparameters.
        logger.info("\n" + "=" * 80)
        logger.info("5: Stage 2")
        logger.info("=" * 80)
        stage2_results = self.stage5_train_stage2_full(best_hyperparams)

        # Step 6: optimize deployment-time thresholds.
        logger.info("\n" + "=" * 80)
        logger.info("6: ")
        logger.info("=" * 80)
        deploy_thresholds = self.stage6_optimize_deployment_thresholds()

        # Step 7: collect ensemble checkpoints and TTA settings.
        logger.info("\n" + "=" * 80)
        logger.info("7: ")
        logger.info("=" * 80)
        ensemble_results = self.stage7_model_ensemble_tta()

        # Step 8: final evaluation with the deployment thresholds.
        logger.info("\n" + "=" * 80)
        logger.info("8: ")
        logger.info("=" * 80)
        final_results = self.stage8_final_evaluation(deploy_thresholds)

        # Persist the accumulated per-stage history to disk.
        self._save_training_history(final_results)

        logger.info("\n" + "=" * 80)
        logger.info(" !")
        logger.info("=" * 80)
        logger.info(f" mAP: {final_results['map']:.4f}")
        logger.info(f" Precision: {final_results['precision']:.4f}")
        logger.info(f" Recall: {final_results['recall']:.4f}")
        logger.info(f": {self.results_dir}/")

        return final_results

    def stage0_data_quality_check(self):
        """Step 0: run a dataset quality check and record any issues found.

        Failures here are logged but deliberately never abort the pipeline.
        """
        from src.utils.data_quality_check import check_dataset_quality
        from src.config import DATA_YAML

        logger.info("...")

        try:
            # Allow the user config to override the default data YAML path.
            data_yaml = self.config.get('data', {}).get('yaml', DATA_YAML)

            quality_report = check_dataset_quality(data_yaml, output_dir='.')

            # Pull the stats/issues sections out of the report.
            stats = quality_report.get('stats', {})
            issues_dict = quality_report.get('issues', {})

            # Collect human-readable issue summaries for logging/history.
            issues = []

            total_images = stats.get('total_images', 0)
            if total_images < 1000:
                issues.append(f"   ({total_images} < 1000)")

            # Class imbalance flagged by the checker.
            if issues_dict.get('class_imbalance'):
                issues.append("  ")

            # Corrupted image files detected.
            corrupted_count = len(issues_dict.get('corrupted_images', []))
            if corrupted_count > 0:
                issues.append(f"   {corrupted_count} ")

            # Images missing label files.
            missing_labels_count = len(issues_dict.get('missing_labels', []))
            if missing_labels_count > 0:
                issues.append(f"   {missing_labels_count} ")

            if issues:
                logger.warning(":")
                for issue in issues:
                    logger.warning(f"  {issue}")
                logger.warning(": ")
            else:
                logger.info(" ")

            self.training_history['stages']['data_quality'] = {
                'status': 'completed',
                'report': quality_report,
                'issues': issues
            }

        except Exception as e:
            # Best-effort check: log the failure and keep the pipeline going.
            logger.error(f": {e}")
            logger.warning("...")

    def stage1_train_yolo(self):
        """Stage 1: YOLO training.

        Trains the Stage 1 detector, validates it, and copies the best
        checkpoint to the unified weights location used for caching.

        Returns:
            dict: {'map50', 'map50_95', 'best_model'} after training, or
            None when cached weights are reused (callers currently ignore
            the return value, so the mismatch is benign).

        NOTE(review): docs mention YOLOv10 but 'yolo11n.pt' is loaded
        below -- confirm which model family is intended.
        """
        from ultralytics import YOLO
        from src.config import STAGE1_CONFIG, DATASET_DIR

        # Check cache
        if self._should_skip_stage('stage1'):
            logger.info("⏭  Skipping Stage 1 training (using cached weights)")
            # Update config paths from existing weights
            from src.config import SERVER_CONFIG
            SERVER_CONFIG['stage1_weights'] = STAGE1_CONFIG['weights_path']
            return

        logger.info("Starting YOLOv10 training (Stage 1)...")

        # Load the pretrained checkpoint to fine-tune from (see NOTE above).
        model = YOLO('yolo11n.pt')

        # Full training configuration passed straight to Ultralytics.
        train_config = {
            'data': f'{DATASET_DIR}/data.yaml',
            'epochs': self.config['stage1']['epochs'],
            'batch': self.config['stage1']['batch_size'],
            'imgsz': self.config['stage1']['img_size'],
            'patience': self.config['stage1']['patience'],
            'device': 'cuda' if torch.cuda.is_available() else 'cpu',

            # Optimizer settings
            'optimizer': 'AdamW',
            'lr0': 0.001,
            'lrf': 0.01,
            'momentum': 0.9,
            'weight_decay': 0.0005,

            # Data augmentation
            'hsv_h': 0.015,
            'hsv_s': 0.7,
            'hsv_v': 0.4,
            'degrees': 10.0,
            'translate': 0.1,
            'scale': 0.5,
            'shear': 0.0,
            'perspective': 0.0,
            'flipud': 0.0,
            'fliplr': 0.5,
            'mosaic': 1.0,
            'mixup': 0.1,
            'copy_paste': 0.1,

            # Warmup and schedule
            'warmup_epochs': 3.0,
            'warmup_momentum': 0.8,
            'warmup_bias_lr': 0.1,
            'close_mosaic': 10,

            # Checkpointing and run management
            'save': True,
            'save_period': 10,
            'project': str(self.results_dir / 'stage1'),
            'name': 'yolo_train',
            'exist_ok': True,
            'pretrained': True,
            'verbose': True,
        }

        # Launch training.
        results = model.train(**train_config)

        # Validate the trained model.
        val_results = model.val()

        logger.info(f" Stage 1")
        logger.info(f"  mAP50: {val_results.box.map50:.4f}")
        logger.info(f"  mAP50-95: {val_results.box.map:.4f}")

        # Copy the best checkpoint so later stages can find it.
        import shutil
        best_model_path = self.results_dir / 'stage1' / 'yolo_train' / 'weights' / 'best.pt'

        # Copy to unified weights directory for caching
        unified_weights_path = Path(STAGE1_CONFIG['weights_path'])
        unified_weights_path.parent.mkdir(parents=True, exist_ok=True)

        if best_model_path.exists():
            shutil.copy2(best_model_path, unified_weights_path)
            logger.info(f" Saved model to unified location: {unified_weights_path}")

        # Update SERVER_CONFIG for immediate use in current run
        from src.config import SERVER_CONFIG
        SERVER_CONFIG['stage1_weights'] = str(unified_weights_path)

        self.training_history['stages']['stage1_yolo'] = {
            'status': 'completed',
            'map50': float(val_results.box.map50),
            'map50_95': float(val_results.box.map),
            'best_model': str(unified_weights_path)  # Save unified path for reference
        }

        return {
            'map50': val_results.box.map50,
            'map50_95': val_results.box.map,
            'best_model': unified_weights_path  # Return unified path
        }

    def stage2_optimize_stage1_threshold(self):
        """Stage 2: Stage 1 threshold optimization (confidence + NMS IoU).

        Returns:
            dict: {'conf_threshold': float, 'iou_threshold': float} -- either
            defaults (when skipped) or the optimizer's best configuration.
        """
        from src.config import STAGE1_CONFIG

        # Check if we should skip optimization and use default thresholds
        if self.skip_stage1_hyperparam:
            logger.info("⏭  Skipping Stage 1 threshold optimization (using default thresholds)")

            # Use default thresholds from config
            default_conf_thresh = STAGE1_CONFIG.get('confidence_threshold', 0.01)
            default_iou_thresh = 0.7  # Standard NMS IoU threshold

            logger.info(f"  Using default thresholds:")
            logger.info(f"    Confidence threshold: {default_conf_thresh:.4f}")
            logger.info(f"    NMS IoU threshold: {default_iou_thresh:.2f}")

            self.training_history['stages']['stage1_threshold_opt'] = {
                'status': 'skipped',
                'best_conf_threshold': default_conf_thresh,
                'best_iou_threshold': default_iou_thresh,
                'note': 'Used default thresholds (optimization skipped)'
            }

            return {
                'conf_threshold': default_conf_thresh,
                'iou_threshold': default_iou_thresh
            }

        # Normal optimization path
        from src.optimization.hyperparameter_search import HyperparameterOptimizer

        logger.info("Optimizing Stage 1 proposal thresholds (confidence + NMS IoU)...")
        if self.use_train_for_threshold:
            logger.info("  Using training set for threshold optimization")

        optimizer = HyperparameterOptimizer(
            optimization_phase='stage1',
            use_train_set=self.use_train_for_threshold,
            fast_search=self.fast_threshold_search
        )
        results = optimizer.optimize()

        best_conf_thresh = results['best_config']['conf_threshold']
        best_iou_thresh = results['best_config']['iou_threshold']

        logger.info(f" Best configuration found:")
        logger.info(f"  Confidence threshold: {best_conf_thresh:.4f}")
        logger.info(f"  NMS IoU threshold: {best_iou_thresh:.2f}")
        logger.info(f"  Expected recall: {results['best_config']['recall']:.3f}")
        logger.info(f"  Avg proposals/image: {results['best_config']['avg_proposals']:.1f}")

        self.training_history['stages']['stage1_threshold_opt'] = {
            'status': 'completed',
            'best_conf_threshold': best_conf_thresh,
            'best_iou_threshold': best_iou_thresh,
            'recall': results['best_config']['recall'],
            'avg_proposals': results['best_config']['avg_proposals']
        }

        # Return both thresholds as a dict
        return {
            'conf_threshold': best_conf_thresh,
            'iou_threshold': best_iou_thresh
        }

    def stage3_generate_proposals(self, thresholds):
        """
        Stage 3: Generate proposals.

        Temporarily patches the global STAGE1_CONFIG/STAGE2_CONFIG so the
        proposal generator picks up the tuned thresholds and a custom
        output path, then restores the confidence threshold afterwards.

        Args:
            thresholds: Dict with 'conf_threshold' and 'iou_threshold' keys
                (a bare float is also accepted for backward compatibility,
                interpreted as the confidence threshold).
        """
        from src.utils.generate_proposals import run_proposal_generation
        from src.config import STAGE1_CONFIG, STAGE2_CONFIG

        # Check cache
        if self._should_skip_stage('proposals'):
            logger.info("⏭  Skipping proposal generation (using cached proposals)")
            return

        # Handle both dict and float for backward compatibility
        if isinstance(thresholds, dict):
            conf_thresh = thresholds['conf_threshold']
            iou_thresh = thresholds.get('iou_threshold', 0.7)  # Default to 0.7 if not specified
        else:
            # Backward compatibility: if just a float is passed
            conf_thresh = thresholds
            iou_thresh = 0.7

        logger.info(f"Generating proposals with:")
        logger.info(f"  Confidence threshold: {conf_thresh:.4f}")
        logger.info(f"  NMS IoU threshold: {iou_thresh:.2f}")

        # Generate proposals
        proposals_file = self.results_dir / 'proposals.json'

        # Update config to use custom output path and thresholds
        original_conf_threshold = STAGE1_CONFIG.get('confidence_threshold', 0.01)
        original_proposals_json = STAGE2_CONFIG['proposals_json']

        STAGE1_CONFIG['confidence_threshold'] = conf_thresh
        # Store iou_threshold for generate_proposals to use
        STAGE1_CONFIG['nms_iou_threshold'] = iou_thresh
        STAGE2_CONFIG['proposals_json'] = str(proposals_file)

        try:
            run_proposal_generation()
        finally:
            # Restore threshold configuration
            STAGE1_CONFIG['confidence_threshold'] = original_conf_threshold
            # Clean up temporary nms_iou_threshold
            if 'nms_iou_threshold' in STAGE1_CONFIG:
                del STAGE1_CONFIG['nms_iou_threshold']
            # Note: Don't restore proposals_json, as subsequent stages need the newly generated file

        # Statistics
        with open(proposals_file) as f:
            proposals_data = json.load(f)

        # proposals_data is a list of dicts: [{"img_path": ..., "rois": [...], "labels": [...]}, ...]
        total_proposals = sum(len(item['rois']) for item in proposals_data)
        avg_proposals = total_proposals / max(len(proposals_data), 1)

        logger.info(f" Proposal generation complete")
        logger.info(f"  Total: {total_proposals}")
        logger.info(f"  Average per image: {avg_proposals:.1f}")

        self.training_history['stages']['proposals_generation'] = {
            'status': 'completed',
            'total_proposals': total_proposals,
            'avg_proposals_per_image': avg_proposals,
            'proposals_file': str(proposals_file)
        }

    def stage4_optimize_stage2_hyperparams(self):
        """Step 4: search Stage 2 hyperparameters with Optuna.

        Returns:
            dict: best hyperparameter values keyed by name (or the config
            defaults when optimization is skipped or every trial failed).
        """
        from src.config import STAGE2_CONFIG, IMPROVED_MODEL_CONFIG

        # Check if we should skip optimization and use default hyperparameters
        if self.skip_stage2_hyperparam:
            logger.info("⏭  Skipping Stage 2 hyperparameter optimization (using default hyperparameters)")

            # Use default hyperparameters from config
            default_params = {
                'lr': STAGE2_CONFIG.get('learning_rate', 1e-4),
                'pos_iou': STAGE2_CONFIG.get('positive_iou_thresh', 0.5),
                'neg_iou': STAGE2_CONFIG.get('negative_iou_thresh', 0.3),
                'bbox_weight': STAGE2_CONFIG.get('bbox_loss_weight', 2.0),
                'num_heads': IMPROVED_MODEL_CONFIG.get('cross_roi_num_heads', 8),
                'pos_embed_dim': IMPROVED_MODEL_CONFIG.get('cross_roi_position_embed_dim', 128),
                'reduction_ratio': IMPROVED_MODEL_CONFIG.get('fusion_reduction_ratio', 16),
                'diversity_weight': IMPROVED_MODEL_CONFIG.get('diversity_loss_weight', 0.0)
            }

            logger.info("  Using default hyperparameters:")
            for key, value in default_params.items():
                logger.info(f"    {key}: {value}")

            self.training_history['stages']['stage2_hyperopt'] = {
                'status': 'skipped',
                'best_params': default_params,
                'note': 'Used default hyperparameters (optimization skipped)'
            }

            return default_params

        # Normal optimization path
        from src.optimization.hyperparameter_search import HyperparameterOptimizer

        logger.info(" Stage 2 ...")
        logger.info(f"   {self.config['optimization']['n_trials']} ")

        optimizer = HyperparameterOptimizer(optimization_phase='stage2')
        results = optimizer.optimize(
            n_trials=self.config['optimization']['n_trials'],
            timeout=self.config['optimization'].get('timeout')
        )

        best_params = results['best_params']
        best_value = results['best_value']

        # The optimizer signals total failure via 'all_failed'; in that case
        # best_params already contains the fallback defaults.
        if results.get('all_failed', False):
            logger.warning("  All hyperparameter optimization trials failed!")
            logger.warning("    Using default configuration from STAGE2_CONFIG")
            logger.info(f"  Default parameters:")
            for key, value in best_params.items():
                logger.info(f"    {key}: {value}")
        else:
            logger.info(f" ")
            logger.info(f"  mAP: {best_value:.4f}")
            logger.info(f"  :")
            for key, value in best_params.items():
                logger.info(f"    {key}: {value}")

        # Visualize results (only if trials succeeded)
        if not results.get('all_failed', False):
            try:
                optimizer.visualize_results()
                logger.info(f"  : optimization_results/")
            except Exception as e:
                logger.warning(f": {e}")

        self.training_history['stages']['stage2_hyperopt'] = {
            'status': 'completed',
            'best_value': best_value,
            'best_params': best_params,
            'n_trials': self.config['optimization']['n_trials']
        }

        return best_params

    def stage5_train_stage2_full(self, best_hyperparams):
        """Step 5: full Stage 2 training with the chosen hyperparameters.

        Args:
            best_hyperparams: dict from stage4; any missing key falls back
                to the defaults used below.

        Returns:
            dict with 'best_epoch', 'best_val_acc', 'best_model'.
        """
        from src.training.train_stage2 import ImprovedStage2Trainer
        from src.config import STAGE2_CONFIG, IMPROVED_MODEL_CONFIG

        logger.info(" Stage 2...")

        # Merge the base config with the pipeline-level training settings.
        train_config = {**STAGE2_CONFIG}
        train_config['epochs'] = self.config['stage2']['epochs']
        train_config['batch_size'] = self.config['stage2']['batch_size']
        train_config['patience'] = self.config['stage2']['patience']

        # Apply the tuned training hyperparameters.
        train_config['positive_iou_thresh'] = best_hyperparams.get('pos_iou', 0.5)
        train_config['negative_iou_thresh'] = best_hyperparams.get('neg_iou', 0.3)
        train_config['learning_rate'] = best_hyperparams.get('lr', 1e-4)
        train_config['bbox_loss_weight'] = best_hyperparams.get('bbox_weight', 2.0)

        # Apply the tuned model-architecture hyperparameters.
        model_config = {**IMPROVED_MODEL_CONFIG}
        model_config['cross_roi_num_heads'] = best_hyperparams.get('num_heads', 8)
        model_config['cross_roi_position_embed_dim'] = best_hyperparams.get('pos_embed_dim', 128)
        model_config['fusion_reduction_ratio'] = best_hyperparams.get('reduction_ratio', 16)
        model_config['diversity_loss_weight'] = best_hyperparams.get('diversity_weight', 0.0)

        # Always enable the attention/fusion components for the full run.
        model_config['use_dynamic_fusion'] = True
        model_config['use_cross_roi_attention'] = True
        model_config['fusion_use_channel_attention'] = True
        model_config['fusion_use_spatial_attention'] = True
        model_config['fusion_use_cross_attention'] = True

        # Build the trainer (validation use is controlled by the pipeline flag).
        logger.info(f": {'' if self.use_validation else ''}")
        trainer = ImprovedStage2Trainer(
            config=train_config,  # Fixed: parameter name is 'config', not 'train_config'
            model_config=model_config,
            use_validation=self.use_validation
        )

        # Run training to completion.
        trainer.train()

        # Collect the best-epoch stats tracked by the trainer.
        best_epoch = trainer.best_epoch
        best_val_acc = trainer.best_val_acc if self.use_validation else 0.0

        logger.info(f" Stage 2")
        logger.info(f"  epoch: {best_epoch}")
        logger.info(f"  : {best_val_acc:.2f}%")

        # Point the global config at the best checkpoint, if it was written.
        # NOTE(review): Path is re-imported here although it is already
        # imported at module level -- the local import is redundant.
        from pathlib import Path
        best_model_path = Path(STAGE2_CONFIG['weights_path']).parent / 'stage2_best_val_acc.pth'
        if best_model_path.exists():
            STAGE2_CONFIG['weights_path'] = str(best_model_path)
            logger.info(f"  : {best_model_path}")

        self.training_history['stages']['stage2_full_training'] = {
            'status': 'completed',
            'best_epoch': best_epoch,
            'best_val_acc': best_val_acc,
            'hyperparams': best_hyperparams,
            'best_model': str(best_model_path) if best_model_path.exists() else None
        }

        return {
            'best_epoch': best_epoch,
            'best_val_acc': best_val_acc,
            'best_model': str(best_model_path) if best_model_path.exists() else None
        }

    def stage6_optimize_deployment_thresholds(self):
        """Step 6: optimize the deployment-time confidence thresholds.

        Returns:
            dict: the optimizer's best configuration, including
            'stage1_threshold', 'stage2_threshold' and 'map'.
        """
        from src.optimization.hyperparameter_search import HyperparameterOptimizer

        logger.info("...")

        optimizer = HyperparameterOptimizer(optimization_phase='deployment')
        results = optimizer.optimize()

        best_config = results['best_config']

        logger.info(f" ")
        logger.info(f"  Stage 1: {best_config['stage1_threshold']:.4f}")
        logger.info(f"  Stage 2: {best_config['stage2_threshold']:.4f}")
        logger.info(f"  mAP: {best_config['map']:.4f}")

        self.training_history['stages']['deployment_threshold_opt'] = {
            'status': 'completed',
            'stage1_threshold': best_config['stage1_threshold'],
            'stage2_threshold': best_config['stage2_threshold'],
            'map': best_config['map']
        }

        return best_config

    def stage7_model_ensemble_tta(self):
        """Step 7: collect available ensemble checkpoints and record the TTA recipe.

        Returns:
            dict with 'checkpoints' (list of existing checkpoint paths under
            weights/) and 'tta_config' (the augmentation settings).
        """
        logger.info("TTA...")

        # Candidate Stage 2 checkpoints, listed in preference order.
        candidate_names = [
            'stage2_best_val_acc.pth',
            'stage2_best_val_acc_ema.pth',
            'stage2_best_val_loss.pth',
            'stage2_best_val_loss_ema.pth',
            'stage2_final.pth',
            'stage2_final_ema.pth',
            'stage2_final_swa.pth',
        ]

        # Keep only the checkpoints actually present on disk.
        weights_dir = Path('weights')
        available_checkpoints = [
            str(weights_dir / name)
            for name in candidate_names
            if (weights_dir / name).exists()
        ]

        logger.info(f"  {len(available_checkpoints)} checkpoint")
        for found in available_checkpoints:
            logger.info(f"  - {found}")

        # Test-time augmentation: horizontal flip plus mild multi-scale.
        tta_config = {
            'enabled': True,
            'flips': ['none', 'horizontal'],
            'scales': [0.9, 1.0, 1.1],
        }

        logger.info("TTA:")
        logger.info(f"  : {tta_config['flips']}")
        logger.info(f"  : {tta_config['scales']}")

        self.training_history['stages']['model_ensemble_tta'] = {
            'status': 'completed',
            'num_checkpoints': len(available_checkpoints),
            'checkpoints': available_checkpoints,
            'tta_config': tta_config,
        }

        return {
            'checkpoints': available_checkpoints,
            'tta_config': tta_config,
        }

    def stage8_final_evaluation(self, deploy_thresholds):
        """Step 8: run the final evaluation with the deployment thresholds.

        Args:
            deploy_thresholds: dict from stage6; only 'stage2_threshold'
                is consumed here (as the evaluation confidence threshold).

        Returns:
            dict with 'map', 'precision', 'recall', 'f1'.
        """
        from src.evaluation.evaluate_model import run_evaluation

        logger.info("...")

        # Evaluate with EMA weights at the tuned Stage 2 threshold.
        metrics = run_evaluation(
            use_ema=True,
            output_dir=str(self.results_dir / 'final_evaluation'),
            conf_thresh=deploy_thresholds['stage2_threshold'],
            return_metrics=True
        )

        logger.info(f" ")
        logger.info(f"  mAP@0.5: {metrics['mAP']:.4f}")
        logger.info(f"  Precision: {metrics['overall']['precision']:.4f}")
        logger.info(f"  Recall: {metrics['overall']['recall']:.4f}")
        logger.info(f"  F1-Score: {metrics['overall']['f1']:.4f}")

        # Log per-class breakdown.
        logger.info("\n:")
        for cls_name, stats in metrics['per_class'].items():
            logger.info(f"  {cls_name}:")
            logger.info(f"    AP: {stats.get('ap', 0.0):.4f}")
            logger.info(f"    Precision: {stats['precision']:.4f}")
            logger.info(f"    Recall: {stats['recall']:.4f}")

        self.training_history['stages']['final_evaluation'] = {
            'status': 'completed',
            'metrics': {
                'map': metrics['mAP'],
                'precision': metrics['overall']['precision'],
                'recall': metrics['overall']['recall'],
                'f1': metrics['overall']['f1']
            },
            'per_class': metrics['per_class']
        }

        return {
            'map': metrics['mAP'],
            'precision': metrics['overall']['precision'],
            'recall': metrics['overall']['recall'],
            'f1': metrics['overall']['f1']
        }

    def _save_training_history(self, final_results):
        """Persist the accumulated training history (plus final metrics) as JSON."""
        self.training_history['end_time'] = datetime.now().isoformat()
        self.training_history['final_results'] = final_results

        # Serialize the whole history in one shot.
        output_file = self.results_dir / 'training_history.json'
        output_file.write_text(json.dumps(self.training_history, indent=2))

        logger.info(f"\n: {output_file}")

    # ===== Single-mode entry points =====

    def run_stage1_only(self):
        """Run only the Stage 1 portion: quality check, YOLO training,
        threshold optimization, and proposal generation."""
        logger.info(" Stage 1 ...")
        self.stage0_data_quality_check()
        results = self.stage1_train_yolo()
        threshold = self.stage2_optimize_stage1_threshold()
        self.stage3_generate_proposals(threshold)
        return results

    def run_stage2_only(self):
        """Run only the full Stage 2 training, reusing previously saved
        hyperparameters (or defaults when none exist)."""
        logger.info(" Stage 2 ...")
        # Load hyperparameters from a previous optimization run, if any.
        best_hyperparams = self._load_best_hyperparams()
        results = self.stage5_train_stage2_full(best_hyperparams)
        return results

    def run_optimize_only(self):
        """Run only the Stage 2 hyperparameter optimization and return
        the best parameters found."""
        logger.info("...")
        best_hyperparams = self.stage4_optimize_stage2_hyperparams()
        return best_hyperparams

    def run_evaluate_only(self):
        """Run only the evaluation path: deployment-threshold optimization
        followed by the final evaluation."""
        logger.info("...")
        # Tune the deployment thresholds first.
        deploy_thresholds = self.stage6_optimize_deployment_thresholds()
        # Then evaluate with them.
        results = self.stage8_final_evaluation(deploy_thresholds)
        return results

    def _load_best_hyperparams(self):
        """"""
        # 
        history_file = self.results_dir / 'training_history.json'
        if history_file.exists():
            with open(history_file) as f:
                history = json.load(f)
                if 'stage2_hyperopt' in history.get('stages', {}):
                    return history['stages']['stage2_hyperopt']['best_params']

        # 
        logger.warning("")
        return {
            'pos_iou': 0.5,
            'neg_iou': 0.3,
            'lr': 1e-4,
            'bbox_weight': 2.0,
            'num_heads': 8,
            'pos_embed_dim': 128,
            'reduction_ratio': 16,
            'diversity_weight': 0.0
        }


def main():
    """Command-line entry point: parse arguments and dispatch to the
    selected pipeline mode.

    Returns:
        int: 0 on success, 1 on interrupt or failure.

    NOTE(review): several help/log strings appear truncated (non-ASCII
    text seems to have been stripped) -- restore if needed.
    """
    parser = argparse.ArgumentParser(
        description='PRISM ',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
:
  # 
  python train_high_performance.py --mode full

  # Stage 1
  python train_high_performance.py --mode stage1

  # Stage 2
  python train_high_performance.py --mode stage2

  # 
  python train_high_performance.py --mode optimize --n-trials 50

  # 
  python train_high_performance.py --mode evaluate

  # 
  python train_high_performance.py --mode full --config my_config.yaml
        """
    )

    parser.add_argument(
        '--mode',
        type=str,
        required=True,
        choices=['full', 'stage1', 'stage2', 'optimize', 'evaluate'],
        help=''
    )
    parser.add_argument(
        '--config',
        type=str,
        default='config.yaml',
        help=''
    )
    parser.add_argument(
        '--n-trials',
        type=int,
        default=30,
        help='optimize'
    )

    args = parser.parse_args()

    # Build the trainer from the chosen config file.
    trainer = HighPerformanceTrainer(config_path=args.config)

    # CLI override for the number of Optuna trials (optimize mode only).
    if args.mode == 'optimize':
        trainer.config['optimization']['n_trials'] = args.n_trials

    # Dispatch on mode; any uncaught error maps to exit code 1.
    try:
        if args.mode == 'full':
            results = trainer.run_full_pipeline()
        elif args.mode == 'stage1':
            results = trainer.run_stage1_only()
        elif args.mode == 'stage2':
            results = trainer.run_stage2_only()
        elif args.mode == 'optimize':
            results = trainer.run_optimize_only()
        elif args.mode == 'evaluate':
            results = trainer.run_evaluate_only()

        logger.info("\n" + "=" * 80)
        logger.info(" !")
        logger.info("=" * 80)

        return 0

    except KeyboardInterrupt:
        logger.warning("\n")
        return 1

    except Exception as e:
        logger.error(f"\n: {e}")
        import traceback
        traceback.print_exc()
        return 1


# Script entry point: exit code 0 on success, 1 on interrupt or error.
if __name__ == '__main__':
    sys.exit(main())
