"""
PRISM 超参数优化模块

实现分阶段超参数优化：
1. Stage 1 proposals阈值优化（快速）
2. Stage 2 训练超参优化（Optuna）
3. 部署阈值优化（网格搜索）
4. 端到端联合优化（高级）

使用方法:
    python -m src.optimization.hyperparameter_search --phase stage1
    python -m src.optimization.hyperparameter_search --phase stage2 --n-trials 30
    python -m src.optimization.hyperparameter_search --phase deployment
"""

import optuna
from optuna.pruners import MedianPruner, HyperbandPruner
from optuna.samplers import TPESampler
import numpy as np
import json
import logging
from pathlib import Path
from datetime import datetime
from typing import Dict, List, Tuple, Optional
import matplotlib.pyplot as plt
import seaborn as sns

from src.config import STAGE1_CONFIG, STAGE2_CONFIG, IMPROVED_MODEL_CONFIG
from src.models.proposer import YOLOProposer
from src.training.train_stage2 import ImprovedStage2Trainer
from src.evaluation.evaluate_model import evaluate_model

# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class HyperparameterOptimizer:
    """超参数优化主类"""

    def __init__(self, optimization_phase='stage2', use_train_set=False, fast_search=False):
        """
        Initialize the optimizer.

        Args:
            optimization_phase: One of 'stage1', 'stage2', 'deployment', 'end_to_end'.
            use_train_set: Use the training set instead of the validation set
                for Stage 1 threshold optimization.
            fast_search: Use fast search (fewer threshold grid points).
        """
        self.phase = optimization_phase
        self.use_train_set = use_train_set
        self.fast_search = fast_search

        # All optimization artifacts (Optuna DBs, result summaries) land here.
        self.results_dir = Path('optimization_results')
        self.results_dir.mkdir(exist_ok=True)

        # Timestamped study name keeps repeated runs of the same phase distinct.
        self.study_name = f"{optimization_phase}_{datetime.now():%Y%m%d_%H%M%S}"

    def optimize(self, n_trials=20, timeout=None):
        """
        Run hyperparameter optimization for the configured phase.

        Args:
            n_trials: Number of optimization trials (Optuna-based phases only).
            timeout: Optional time budget in seconds.

        Returns:
            Dict with the phase name and its best configuration / study results.

        Raises:
            ValueError: If the configured phase is unknown.
        """
        logger.info(f"=" * 80)
        logger.info(f"Starting Hyperparameter Optimization - Phase: {self.phase}")
        logger.info(f"=" * 80)

        # Phase name -> runner; deferred via lambdas so nothing runs until
        # the phase has been validated.
        runners = {
            'stage1': lambda: self._optimize_stage1(),
            'stage2': lambda: self._optimize_stage2(n_trials, timeout),
            'deployment': lambda: self._optimize_deployment(),
            'end_to_end': lambda: self._optimize_end_to_end(n_trials, timeout),
        }

        if self.phase not in runners:
            raise ValueError(f"Unknown phase: {self.phase}")

        results = runners[self.phase]()

        # Persist results to disk.
        self._save_results(results)

        return results

    def _optimize_stage1(self) -> Dict:
        """
        Optimize Stage 1 proposal-generation thresholds (confidence + NMS IoU).

        Fast method: grid search + statistical analysis — no training required.

        Returns:
            Dict with the phase name, per-combination statistics for the whole
            grid, and the selected best configuration.
        """
        logger.info("\n" + "=" * 80)
        logger.info("Phase 1: Stage 1 Proposals Threshold Optimization")
        logger.info("=" * 80)

        # Build the search grid.  The fast grid trades resolution for speed;
        # the detailed grid is fine-grained in the low-confidence range where
        # recall changes fastest.
        if self.fast_search:
            conf_grid = [0.001, 0.003, 0.005, 0.01, 0.015, 0.02, 0.03, 0.05]
            nms_grid = [0.5, 0.7]
            logger.info(f"Using fast search mode")
        else:
            conf_grid = [
                0.0001, 0.0003, 0.0005,             # very low thresholds
                0.001, 0.002, 0.003, 0.004, 0.005,  # low thresholds (fine-grained)
                0.007, 0.01,                        # medium-low thresholds
                0.015, 0.02, 0.025, 0.03,           # medium thresholds
                0.04, 0.05, 0.07, 0.1,              # high thresholds
            ]
            nms_grid = [0.5, 0.6, 0.7, 0.8]
            logger.info(f"Using detailed search mode (recommended)")

        logger.info(f"Testing {len(conf_grid)} confidence × {len(nms_grid)} NMS IoU values")
        logger.info(f"Confidence range: {min(conf_grid):.4f} - {max(conf_grid):.4f}")
        logger.info(f"NMS IoU range: {min(nms_grid):.2f} - {max(nms_grid):.2f}")
        logger.info(f"Total combinations: {len(conf_grid) * len(nms_grid)}")

        grid_results = []

        # Exhaustive sweep: outer loop per NMS IoU, inner loop per confidence.
        for nms_iou in nms_grid:
            logger.info(f"\n{'='*60}")
            logger.info(f"Testing NMS IoU threshold: {nms_iou}")
            logger.info(f"{'='*60}")

            for conf in conf_grid:
                logger.info(f"\n  Confidence: {conf:.4f}, NMS IoU: {nms_iou:.2f}")

                # Proposal-quality statistics for this grid point (no training).
                stats = self._analyze_proposals_quality(conf, nms_iou)

                grid_results.append({
                    'conf_threshold': conf,
                    'iou_threshold': nms_iou,
                    **stats
                })

                logger.info(f"    Recall: {stats['recall']:.3f}")
                logger.info(f"    Avg proposals/image: {stats['avg_proposals']:.1f}")
                logger.info(f"    Background ratio: {stats['bg_ratio']:.2%}")

        # Pick the winner according to the recall/efficiency trade-off.
        best = self._select_best_stage1_threshold(grid_results)

        logger.info("\n" + "=" * 80)
        logger.info("Stage 1 Optimization Results")
        logger.info("=" * 80)
        logger.info(f"Best confidence threshold: {best['conf_threshold']:.4f}")
        logger.info(f"Best NMS IoU threshold: {best['iou_threshold']:.2f}")
        logger.info(f"Expected recall: {best['recall']:.3f}")
        logger.info(f"Avg proposals/image: {best['avg_proposals']:.1f}")
        logger.info(f"Background ratio: {best['bg_ratio']:.2%}")

        return {
            'phase': 'stage1',
            'all_results': grid_results,
            'best_config': best
        }

    def _optimize_stage2(self, n_trials=20, timeout=None) -> Dict:
        """
        Optimize Stage 2 training hyperparameters with Optuna + pruning.

        Args:
            n_trials: Number of Optuna trials to run.
            timeout: Optional time budget (seconds) for the whole study.

        Returns:
            Dict with the study, the best mAP and the best parameters.  If
            every trial failed, ``best_value`` is None, ``best_params`` falls
            back to the STAGE2_CONFIG defaults and ``all_failed`` is True.
        """
        logger.info("\n" + "=" * 80)
        logger.info("Phase 2: Stage 2 Training Hyperparameters Optimization")
        logger.info("=" * 80)

        # Persist the study to SQLite so interrupted runs can be resumed
        # and inspected later.
        study = optuna.create_study(
            direction='maximize',
            study_name=self.study_name,
            storage=f'sqlite:///{self.results_dir}/{self.study_name}.db',
            pruner=MedianPruner(n_startup_trials=5, n_warmup_steps=10),
            sampler=TPESampler(seed=42)  # fixed seed for reproducible sampling
        )

        # Define objective function
        def objective(trial):
            logger.info(f"\n{'='*60}")
            logger.info(f"Trial {trial.number + 1}/{n_trials}")
            logger.info(f"{'='*60}")

            # Sample hyperparameters (prunes invalid IoU combinations itself).
            config = self._sample_stage2_hyperparams(trial)

            logger.info("Sampled configuration:")
            for key, value in config.items():
                logger.info(f"  {key}: {value}")

            # Train and evaluate (with early stopping / pruning).
            try:
                val_map = self._train_and_evaluate_stage2(config, trial, max_epochs=30)
                logger.info(f"Trial {trial.number} completed: mAP = {val_map:.4f}")
                return val_map
            except optuna.TrialPruned:
                # Re-raise so Optuna records the trial as pruned.
                # (Fixed: dropped the unused `as e` binding.)
                logger.warning(f"Trial {trial.number} pruned (performance too low)")
                raise
            except RuntimeError as e:
                # GPU OOM is the most common failure mode; give an actionable hint.
                if "out of memory" in str(e).lower():
                    logger.error(f"Trial {trial.number} failed: GPU OOM")
                    logger.error("  Try reducing batch_size or roi_size in config")
                else:
                    logger.error(f"Trial {trial.number} failed: {e}")
                raise optuna.TrialPruned()
            except Exception as e:
                # Treat unexpected failures as pruned so the study keeps going.
                logger.error(f"Trial {trial.number} failed with unexpected error:")
                logger.error(f"  Error type: {type(e).__name__}")
                logger.error(f"  Error message: {str(e)}")
                import traceback
                logger.debug(traceback.format_exc())
                raise optuna.TrialPruned()

        # Run optimization
        study.optimize(objective, n_trials=n_trials, timeout=timeout)

        # Check if any trials completed successfully
        completed_trials = [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE]

        if not completed_trials:
            logger.error("=" * 80)
            logger.error("All trials failed! No successful hyperparameter configurations found.")
            logger.error("=" * 80)
            logger.error("Possible reasons:")
            logger.error("  1. Stage 1 proposals quality is too poor")
            logger.error("  2. GPU out of memory (try smaller batch size)")
            logger.error("  3. Data issues (check proposals.json)")
            logger.error("")
            logger.error("Falling back to default configuration...")

            # Fall back to the defaults from the module-level STAGE2_CONFIG.
            # (Fixed: removed a redundant local re-import of STAGE2_CONFIG
            # that shadowed the module-level import.)
            default_config = {
                'positive_iou_thresh': STAGE2_CONFIG.get('positive_iou_thresh', 0.5),
                'negative_iou_thresh': STAGE2_CONFIG.get('negative_iou_thresh', 0.3),
                'learning_rate': STAGE2_CONFIG.get('learning_rate', 1e-4),
                'bbox_loss_weight': STAGE2_CONFIG.get('bbox_loss_weight', 2.0),
                'unfreeze_layers': STAGE2_CONFIG.get('unfreeze_layers', 2),
            }

            return {
                'phase': 'stage2',
                'study': study,
                'best_value': None,
                'best_params': default_config,
                'all_failed': True
            }

        # Results analysis
        logger.info("\n" + "=" * 80)
        logger.info("Stage 2 Optimization Results")
        logger.info("=" * 80)
        logger.info(f"Completed trials: {len(completed_trials)}/{len(study.trials)}")
        logger.info(f"Best mAP: {study.best_value:.4f}")
        logger.info(f"Best parameters:")
        for key, value in study.best_params.items():
            logger.info(f"  {key}: {value}")

        return {
            'phase': 'stage2',
            'study': study,
            'best_value': study.best_value,
            'best_params': study.best_params
        }

    def _optimize_deployment(self) -> Dict:
        """
        Optimize deployment-time inference thresholds.

        Fast grid search over (Stage 1 confidence, Stage 2 confidence) using
        an already-trained model — no training involved.

        Returns:
            Dict with all grid results and the best configuration.
        """
        logger.info("\n" + "=" * 80)
        logger.info("Phase 3: Deployment Thresholds Optimization")
        logger.info("=" * 80)

        # Search space for the two confidence thresholds.
        stage1_thresholds = [0.005, 0.01, 0.015, 0.02]
        stage2_thresholds = [0.15, 0.20, 0.25, 0.30, 0.35]

        results = []
        best_map = 0
        best_config = None

        for s1_thresh in stage1_thresholds:
            for s2_thresh in stage2_thresholds:
                logger.info(f"\nTesting S1={s1_thresh}, S2={s2_thresh}")

                # Evaluate the pretrained model with this threshold pair.
                metrics = self._evaluate_with_thresholds(s1_thresh, s2_thresh)

                result = {
                    'stage1_threshold': s1_thresh,
                    'stage2_threshold': s2_thresh,
                    **metrics
                }
                results.append(result)

                logger.info(f"  mAP: {metrics['map']:.3f}, "
                          f"P: {metrics['precision']:.3f}, "
                          f"R: {metrics['recall']:.3f}")

                # Fixed: accept the first result even when every mAP is 0.0.
                # Previously `best_config` could remain None (e.g. an untrained
                # refiner yielding mAP == 0 everywhere), crashing the summary
                # logging below with a TypeError.
                if best_config is None or metrics['map'] > best_map:
                    best_map = metrics['map']
                    best_config = result

        logger.info("\n" + "=" * 80)
        logger.info("Deployment Optimization Results")
        logger.info("=" * 80)
        logger.info(f"Best mAP: {best_map:.4f}")
        logger.info(f"Best Stage 1 threshold: {best_config['stage1_threshold']}")
        logger.info(f"Best Stage 2 threshold: {best_config['stage2_threshold']}")
        logger.info(f"Precision: {best_config['precision']:.3f}")
        logger.info(f"Recall: {best_config['recall']:.3f}")

        return {
            'phase': 'deployment',
            'all_results': results,
            'best_config': best_config
        }

    def _optimize_end_to_end(self, n_trials=15, timeout=None) -> Dict:
        """
        End-to-end joint optimization (advanced).

        Jointly optimizes Stage 1, Stage 2 and deployment thresholds within a
        single Optuna study.  Computationally expensive.
        """
        logger.info("\n" + "=" * 80)
        logger.info("Phase 4: End-to-End Joint Optimization")
        logger.info("=" * 80)
        logger.warning("This phase is computationally expensive!")

        study = optuna.create_study(
            direction='maximize',
            study_name=self.study_name,
            storage=f'sqlite:///{self.results_dir}/{self.study_name}.db',
            pruner=HyperbandPruner(),
            sampler=TPESampler(seed=42),
        )

        def objective(trial):
            # Sample every key hyperparameter across all pipeline stages.
            # NOTE: the suggest-call order is deliberate and must stay stable
            # for reproducible TPE sampling.
            config = {}
            # Stage 1
            config['stage1_conf_train'] = trial.suggest_float('stage1_conf_train', 0.001, 0.05, log=True)
            # Stage 2
            config['pos_iou'] = trial.suggest_float('pos_iou', 0.3, 0.7)
            config['neg_iou'] = trial.suggest_float('neg_iou', 0.1, 0.5)
            config['lr'] = trial.suggest_float('lr', 1e-5, 5e-4, log=True)
            config['bbox_weight'] = trial.suggest_float('bbox_weight', 0.5, 5.0)
            config['num_heads'] = trial.suggest_categorical('num_heads', [4, 8, 16])
            # Deployment
            config['stage1_conf_deploy'] = trial.suggest_float('stage1_conf_deploy', 0.001, 0.05, log=True)
            config['stage2_conf_deploy'] = trial.suggest_float('stage2_conf_deploy', 0.1, 0.5)

            # Constraint: negative IoU must stay strictly below positive IoU.
            if config['neg_iou'] >= config['pos_iou']:
                raise optuna.TrialPruned()

            # Run the full train + evaluate pipeline for this configuration.
            try:
                return self._run_full_pipeline(config, trial)
            except Exception as e:
                logger.error(f"Trial {trial.number} failed: {e}")
                raise optuna.TrialPruned()

        study.optimize(objective, n_trials=n_trials, timeout=timeout)

        # Did any trial finish successfully?
        finished = [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE]

        if not finished:
            logger.error("=" * 80)
            logger.error("All end-to-end optimization trials failed!")
            logger.error("=" * 80)
            logger.error("End-to-end optimization is very demanding. Possible reasons:")
            logger.error("  1. Insufficient GPU memory")
            logger.error("  2. Poor initial Stage 1/Stage 2 configuration")
            logger.error("  3. Training instability")
            logger.error("")
            logger.error("Recommendation: Use separate stage optimizations instead")

            return {
                'phase': 'end_to_end',
                'study': study,
                'best_value': None,
                'best_params': None,
                'all_failed': True,
            }

        logger.info("\n" + "=" * 80)
        logger.info("End-to-End Optimization Results")
        logger.info("=" * 80)
        logger.info(f"Completed trials: {len(finished)}/{len(study.trials)}")
        logger.info(f"Best test mAP: {study.best_value:.4f}")
        logger.info(f"Best parameters:")
        for name, value in study.best_params.items():
            logger.info(f"  {name}: {value}")

        return {
            'phase': 'end_to_end',
            'study': study,
            'best_value': study.best_value,
            'best_params': study.best_params,
        }

    # ===== Helper Methods =====

    def _analyze_proposals_quality(self, conf_threshold: float, iou_threshold: float = 0.7) -> Dict:
        """
        Analyze proposal quality for one (confidence, NMS IoU) grid point.

        No training required: proposals are generated on a (sampled) image set
        and matched against ground truth at IoU > 0.3 to estimate recall and
        the background ratio.

        Args:
            conf_threshold: Confidence threshold for proposal generation.
            iou_threshold: NMS IoU threshold for proposal suppression.

        Returns:
            Dict with recall, avg/max proposals per image, background ratio
            and raw ground-truth / match counts.
        """
        # Fixed: torch / box_iou were previously re-imported inside the
        # per-image loop on every iteration; all imports are hoisted here.
        import random

        import torch
        import yaml
        from PIL import Image
        from torchvision.ops import box_iou

        from src.config import DATASET_DIR, DATA_YAML, SERVER_CONFIG
        from src.models.proposer import YOLOProposer

        logger.debug(f"  Analyzing with conf={conf_threshold:.4f}, nms_iou={iou_threshold:.2f}")

        # Load dataset configuration.
        with open(DATA_YAML) as f:
            data = yaml.safe_load(f)

        # Choose training or validation split for the analysis.
        if self.use_train_set:
            split_name = 'train'
            logger.info(f"  Using training set for threshold optimization")
        else:
            split_name = 'val'

        val_dir = Path(DATASET_DIR) / data[split_name]
        val_label_dir = Path(DATASET_DIR) / data[split_name].replace('images', 'labels')

        val_images = list(val_dir.glob('*.jpg')) + list(val_dir.glob('*.png'))

        # Random subsample keeps the grid search fast while staying representative.
        if len(val_images) > 200:
            random.seed(42)  # reproducible sampling
            val_images = random.sample(val_images, 200)  # 200 random images for faster evaluation
            logger.info(f"  Using 200 randomly sampled images from {split_name} set")
        else:
            logger.info(f"  Using all {len(val_images)} images from {split_name} set")

        # Initialize the proposer.
        proposer = YOLOProposer(
            weights_path=SERVER_CONFIG['stage1_weights'],
            device='cuda' if STAGE1_CONFIG.get('device') == 'cuda' else 'cpu'
        )

        # Accumulators for grid-point statistics.
        total_proposals = 0
        total_ground_truths = 0
        total_matches = 0
        proposals_per_image = []

        for img_path in val_images:
            # Generate proposals with the specified NMS threshold.
            # IMPORTANT: use the same tile-based inference as the actual
            # proposal-generation pipeline.
            proposals = proposer.propose(
                str(img_path),
                tile_size=STAGE1_CONFIG.get('tile_size', 640),
                tile_overlap=STAGE1_CONFIG.get('tile_overlap', 100),
                conf_thresh=conf_threshold,
                iou_thresh=iou_threshold
            )

            num_proposals = len(proposals)
            proposals_per_image.append(num_proposals)
            total_proposals += num_proposals

            # Load ground truth (YOLO-format labels, normalized coordinates).
            label_path = val_label_dir / f"{img_path.stem}.txt"
            if label_path.exists():
                # Fixed: close the image file handle (it was left open before).
                with Image.open(img_path) as img:
                    w, h = img.size

                gt_boxes = []
                with open(label_path) as f:
                    for line in f:
                        parts = line.strip().split()
                        if len(parts) != 5:
                            continue

                        cls, cx, cy, bw, bh = map(float, parts)

                        # Convert YOLO format to absolute pixel coordinates.
                        x1 = (cx - bw / 2) * w
                        y1 = (cy - bh / 2) * h
                        x2 = (cx + bw / 2) * w
                        y2 = (cy + bh / 2) * h
                        gt_boxes.append([x1, y1, x2, y2])

                total_ground_truths += len(gt_boxes)

                # Count GTs covered by at least one proposal.  A lenient
                # IoU > 0.3 is used because Stage 2 refines the boxes later.
                if len(proposals) > 0 and len(gt_boxes) > 0:
                    ious = box_iou(torch.tensor(proposals), torch.tensor(gt_boxes))
                    max_ious = ious.max(dim=0)[0]  # best proposal IoU per GT
                    total_matches += (max_ious > 0.3).sum().item()

        # Aggregate statistics (guarded against empty image/proposal sets).
        recall = total_matches / max(total_ground_truths, 1)
        avg_proposals = total_proposals / max(len(val_images), 1)
        max_proposals = max(proposals_per_image) if proposals_per_image else 0

        # Rough background ratio: proposals not accounted for by a GT match.
        # NOTE(review): uses the per-GT match count as a proxy for the number
        # of matched proposals, so this is an estimate, not an exact fraction.
        bg_ratio = 1.0 - (total_matches / max(total_proposals, 1))

        # Diagnostic information.
        logger.debug(f"  Total images: {len(val_images)}")
        logger.debug(f"  Total ground truths: {total_ground_truths}")
        logger.debug(f"  Total proposals: {total_proposals}")
        logger.debug(f"  Total matches (IoU>0.3): {total_matches}")

        stats = {
            'recall': recall,
            'avg_proposals': avg_proposals,
            'max_proposals': max_proposals,
            'bg_ratio': bg_ratio,
            'total_ground_truths': total_ground_truths,
            'total_matches': total_matches,
        }

        return stats

    def _select_best_stage1_threshold(self, results: List[Dict]) -> Dict:
        """
        Pick the best Stage 1 threshold configuration from the grid results.

        Strategy:
        1. Filter for recall >= 0.95, relaxing to 0.90 then 0.80 if needed.
        2. Among qualifying configs, choose the one with the fewest proposals
           per image — high recall at minimal Stage 2 cost.
        3. If nothing clears 0.80, fall back to the best-recall config.
        """
        if not results:
            raise ValueError("No threshold results to select from")

        # Recall floors, relaxed step by step until something qualifies.
        recall_floors = (0.95, 0.90, 0.80)
        valid = [r for r in results if r['recall'] >= recall_floors[0]]

        for prev_floor, floor in zip(recall_floors, recall_floors[1:]):
            if valid:
                break
            valid = [r for r in results if r['recall'] >= floor]
            logger.info(f"No configs with recall >= {prev_floor:.2f}, trying >= {floor:.2f}")

        if not valid:
            # Nothing clears even the lowest floor: take the best recall.
            logger.warning("No results with recall >= 0.80, selecting best recall config")
            best = max(results, key=lambda r: r['recall'])
            logger.warning(f"Selected conf={best['conf_threshold']:.4f}, nms_iou={best['iou_threshold']:.2f} "
                          f"with recall={best['recall']:.3f}")
            return best

        # Among qualifying results, fewest proposals wins — this speeds up
        # Stage 2 training while maintaining recall.
        best = min(valid, key=lambda r: r['avg_proposals'])

        logger.info(f"Selected conf={best['conf_threshold']:.4f}, nms_iou={best['iou_threshold']:.2f}")
        logger.info(f"  Recall: {best['recall']:.3f}, Avg proposals: {best['avg_proposals']:.1f}")

        return best

    def _sample_stage2_hyperparams(self, trial: optuna.Trial) -> Dict:
        """Sample one Stage 2 hyperparameter configuration from the trial.

        Prunes the trial immediately when the sampled negative IoU threshold
        is not strictly below the positive one.
        """
        config = {}

        # IoU thresholds for positive/negative proposal assignment.
        config['positive_iou_thresh'] = trial.suggest_float('pos_iou', 0.3, 0.7)
        config['negative_iou_thresh'] = trial.suggest_float('neg_iou', 0.1, 0.5)

        # Optimizer / loss parameters.
        config['learning_rate'] = trial.suggest_float('lr', 1e-5, 5e-4, log=True)
        config['bbox_loss_weight'] = trial.suggest_float('bbox_weight', 0.5, 5.0)
        config['unfreeze_layers'] = trial.suggest_categorical('unfreeze_layers', [0, 1, 2])

        # Cross-ROI attention parameters.
        config['cross_roi_num_heads'] = trial.suggest_categorical('num_heads', [4, 8, 16])
        config['cross_roi_position_embed_dim'] = trial.suggest_categorical('pos_embed_dim', [64, 128, 256])
        config['diversity_loss_weight'] = trial.suggest_float('diversity_weight', 0.0, 0.05)

        # Feature-fusion parameters.
        config['fusion_reduction_ratio'] = trial.suggest_categorical('reduction_ratio', [8, 16, 32])

        # Constraint: negative IoU must stay strictly below positive IoU.
        if config['negative_iou_thresh'] >= config['positive_iou_thresh']:
            raise optuna.TrialPruned()

        return config

    def _train_and_evaluate_stage2(self, config: Dict, trial: optuna.Trial, max_epochs=30) -> float:
        """Train Stage 2 with the sampled config and return the best val mAP.

        Reports intermediate results to Optuna so the pruner can stop weak
        trials early, and always releases CUDA resources in ``finally``.
        """
        import gc
        import torch

        # Merge sampled hyperparameters into the base training/model configs.
        train_config = {**STAGE2_CONFIG, **config, 'epochs': max_epochs}
        model_config = dict(IMPROVED_MODEL_CONFIG)
        model_config['cross_roi_num_heads'] = config['cross_roi_num_heads']
        model_config['cross_roi_position_embed_dim'] = config['cross_roi_position_embed_dim']
        model_config['use_diversity_loss'] = config['diversity_loss_weight'] > 0
        model_config['diversity_loss_weight'] = config['diversity_loss_weight']

        trainer = None
        best_val_map = 0.0

        try:
            # Sanity-check CUDA before starting; a previous trial can leave
            # the context corrupted, which would otherwise fail confusingly.
            if torch.cuda.is_available():
                try:
                    probe = torch.zeros(1, device='cuda')
                    _ = probe + 1
                    del probe
                    torch.cuda.synchronize()
                except Exception as e:
                    logger.error(f"✗ CUDA unhealthy before trial {trial.number}: {e}")
                    logger.error("  This usually means a previous trial corrupted CUDA state")
                    raise RuntimeError(f"CUDA context corrupted before trial {trial.number}: {e}")

            logger.info(f"Starting trial {trial.number} with config: {config}")
            trainer = ImprovedStage2Trainer(train_config, model_config, use_validation=True)

            # Epoch loop with intermediate reporting so the pruner can act.
            for epoch in range(max_epochs):
                trainer.train_epoch(epoch)

                val_metrics = trainer.validate()
                # Trainer reports accuracy in percent; scale to [0, 1].
                val_map = val_metrics.get('accuracy', 0.0) / 100.0

                trial.report(val_map, step=epoch)

                if trial.should_prune():
                    logger.info(f"Trial {trial.number} pruned at epoch {epoch}")
                    raise optuna.TrialPruned()

                # Track the best validation score seen so far.
                best_val_map = max(best_val_map, val_map)

            return best_val_map

        finally:
            # CRITICAL: clean up CUDA resources after each trial — otherwise
            # ~30 trials accumulate allocations and corrupt CUDA state.
            if trainer is not None:
                # Drop model/optimizer/etc. references to free CUDA memory.
                for attr in ('model', 'optimizer', 'scheduler', 'scaler', 'ema', 'swa'):
                    if hasattr(trainer, attr):
                        delattr(trainer, attr)
                del trainer

            # Force garbage collection before touching the CUDA cache.
            gc.collect()

            if torch.cuda.is_available():
                torch.cuda.synchronize()
                torch.cuda.empty_cache()

            logger.debug(f"✓ Trial {trial.number} CUDA resources cleaned")

    def _evaluate_with_thresholds(self, stage1_thresh: float, stage2_thresh: float) -> Dict:
        """
        Evaluate the two-stage pipeline with the given inference thresholds.

        Loads the trained refiner, runs Stage 1 proposals + Stage 2 refinement
        on a 50-image validation subset and computes detection metrics.

        Args:
            stage1_thresh: Stage 1 (proposer) confidence threshold.
            stage2_thresh: Stage 2 (refiner) confidence threshold.

        Returns:
            Dict with 'map', 'precision' and 'recall'.
        """
        import yaml
        import torch
        from PIL import Image
        from torchvision import transforms

        from src.models.proposer import YOLOProposer
        from src.models.refiner import ROIRefinerModel
        from src.evaluation.evaluator import DetectionEvaluator
        from src.config import DATASET_DIR, DATA_YAML, SERVER_CONFIG, DEVICE

        logger.info(f"  Evaluating with S1={stage1_thresh}, S2={stage2_thresh}")

        # Load dataset configuration.
        with open(DATA_YAML) as f:
            data = yaml.safe_load(f)
            num_classes = data['nc']
            class_names = data['names']

        # Validation split; 50 images keep the threshold sweep fast.
        val_dir = Path(DATASET_DIR) / data['val']
        val_label_dir = Path(DATASET_DIR) / data['val'].replace('images', 'labels')

        val_images = list(val_dir.glob('*.jpg')) + list(val_dir.glob('*.png'))
        val_images = val_images[:50]

        # Initialize the two-stage models.
        proposer = YOLOProposer(
            weights_path=SERVER_CONFIG['stage1_weights'],
            device=DEVICE
        )

        refiner = ROIRefinerModel(device=DEVICE)

        # Locate the best refiner checkpoint (unified naming + legacy fallbacks).
        weights_dir = Path(STAGE2_CONFIG['weights_path']).parent
        possible_paths = [
            weights_dir / 'stage2_best_val_acc.pth',  # unified naming
            weights_dir / 'stage2_best_val_loss.pth',
            weights_dir / 'stage2_final.pth',
            # Backward compatibility with old naming.
            weights_dir / 'stage2_improved_best_val_acc.pth',
            weights_dir / 'stage2_refiner_best_val_acc.pth',
            Path(STAGE2_CONFIG['weights_path'])
        ]

        best_model_path = next((p for p in possible_paths if p.exists()), None)

        if best_model_path:
            checkpoint = torch.load(best_model_path, map_location=DEVICE)
            # Full checkpoints wrap the weights; bare state dicts load directly.
            if 'model_state_dict' in checkpoint:
                refiner.load_state_dict(checkpoint['model_state_dict'])
            else:
                refiner.load_state_dict(checkpoint)
            logger.info(f"  Loaded refiner from: {best_model_path}")
        else:
            logger.warning(f"  No refiner weights found, using random initialization")

        refiner.eval()

        evaluator = DetectionEvaluator(
            num_classes=num_classes,
            class_names=class_names,
            iou_threshold=0.5
        )

        # Preprocessing for refiner input crops.
        transform = transforms.Compose([
            transforms.Resize((STAGE2_CONFIG['roi_size'], STAGE2_CONFIG['roi_size'])),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])

        # Loop-invariant: use the optimized NMS IoU if configured, else 0.7.
        nms_iou_thresh = STAGE1_CONFIG.get('nms_iou_threshold', 0.7)

        for img_path in val_images:
            # Stage 1: generate proposals with the same tile-based inference
            # as the production proposal-generation pipeline.
            proposals = proposer.propose(
                str(img_path),
                tile_size=STAGE1_CONFIG.get('tile_size', 640),
                tile_overlap=STAGE1_CONFIG.get('tile_overlap', 100),
                conf_thresh=stage1_thresh,
                iou_thresh=nms_iou_thresh
            )

            label_path = val_label_dir / f"{img_path.stem}.txt"

            if len(proposals) == 0:
                # No detections: score an empty prediction set against GT.
                ground_truths = self._load_ground_truths(img_path, label_path)
                evaluator.add_batch([], ground_truths)
                continue

            # Stage 2: crop, preprocess and refine each valid proposal.
            with Image.open(img_path) as im:
                full_image = im.convert("RGB")

            roi_batch = []
            kept_boxes = []  # proposals that survive clipping, aligned 1:1 with roi_batch

            for box in proposals:
                x1, y1, x2, y2 = map(int, box)
                x1 = max(0, x1)
                y1 = max(0, y1)
                x2 = min(full_image.width, x2)
                y2 = min(full_image.height, y2)

                # Skip degenerate boxes (zero width/height after clipping).
                if x2 > x1 and y2 > y1:
                    roi_img = full_image.crop((x1, y1, x2, y2))
                    roi_batch.append(transform(roi_img))
                    kept_boxes.append(box)

            if len(roi_batch) == 0:
                ground_truths = self._load_ground_truths(img_path, label_path)
                evaluator.add_batch([], ground_truths)
                continue

            roi_tensors = torch.stack(roi_batch).to(DEVICE)

            with torch.no_grad():
                class_logits, bbox_deltas = refiner(roi_tensors)

            # Decode predictions.
            scores = torch.softmax(class_logits, dim=1)
            class_probs, class_preds = torch.max(scores, dim=1)

            # Fixed: pair each refiner output with the proposal that actually
            # produced its ROI.  The old `proposals[:len(roi_batch)]` slice
            # misaligned boxes and scores whenever a degenerate proposal was
            # skipped anywhere before the end of the list.
            predictions = []
            for i, roi in enumerate(kept_boxes):
                prob = class_probs[i].item()
                cls_id = class_preds[i].item()

                # Keep only confident, non-background predictions.
                if cls_id < num_classes and prob > stage2_thresh:
                    predictions.append({
                        'class': cls_id,
                        'confidence': prob,
                        'bbox': roi.tolist()
                    })

            ground_truths = self._load_ground_truths(img_path, label_path)
            evaluator.add_batch(predictions, ground_truths)

        # Compute final metrics over the whole subset.
        metrics_result = evaluator.compute_metrics()

        metrics = {
            'map': metrics_result['mAP'],
            'precision': metrics_result['overall']['precision'],
            'recall': metrics_result['overall']['recall'],
        }

        return metrics

    def _load_ground_truths(self, img_path: Path, label_path: Path) -> list:
        """Load ground truth labels from YOLO format label file."""
        from PIL import Image

        ground_truths = []

        if label_path.exists():
            img = Image.open(img_path)
            w, h = img.size

            with open(label_path) as f:
                for line in f:
                    parts = line.strip().split()
                    if len(parts) != 5:
                        continue

                    cls, cx, cy, bw, bh = map(float, parts)

                    # Convert YOLO format to absolute coordinates
                    x1 = (cx - bw / 2) * w
                    y1 = (cy - bh / 2) * h
                    x2 = (cx + bw / 2) * w
                    y2 = (cy + bh / 2) * h

                    ground_truths.append({
                        'class': int(cls),
                        'bbox': [x1, y1, x2, y2]
                    })

        return ground_truths

    def _run_full_pipeline(self, config: Dict, trial: optuna.Trial) -> float:
        """Run the full pipeline end-to-end for one Optuna trial.

        Three steps: (1) check proposal quality using the training-time
        Stage 1 threshold, (2) quick-train Stage 2 for 10 epochs with
        intermediate pruning, (3) evaluate a temporarily saved checkpoint
        using the deployment thresholds.

        Args:
            config: Sampled hyperparameters (``stage1_conf_train``,
                ``pos_iou``, ``neg_iou``, ``lr``, ``bbox_weight``,
                ``num_heads``, ``stage1_conf_deploy``, ``stage2_conf_deploy``).
            trial: Optuna trial, used for intermediate reports and pruning.

        Returns:
            Test-set mAP obtained with the deployment thresholds.

        Raises:
            optuna.TrialPruned: If proposal recall is too low, the pruner
                fires, or training/evaluation fails.
        """
        import torch
        import os

        logger.info(f"\nTrial {trial.number}: Running full pipeline")
        logger.info(f"  Stage 1 train conf: {config['stage1_conf_train']:.4f}")
        logger.info(f"  Stage 2 hyperparams: pos_iou={config['pos_iou']:.2f}, lr={config['lr']:.6f}")
        logger.info(f"  Deploy thresholds: S1={config['stage1_conf_deploy']:.4f}, S2={config['stage2_conf_deploy']:.2f}")

        # Step 1: analyze proposal quality with the training-time threshold.
        logger.info("  Step 1/3: Analyzing proposals quality...")
        proposals_stats = self._analyze_proposals_quality(config['stage1_conf_train'])

        # Prune early: if Stage 1 cannot recover enough ground truth, no
        # amount of Stage 2 training can compensate.
        if proposals_stats['recall'] < 0.85:
            logger.warning(f"  Low recall {proposals_stats['recall']:.3f}, pruning trial")
            raise optuna.TrialPruned()

        # Step 2: quick Stage 2 training (10 epochs).
        logger.info("  Step 2/3: Training Stage 2...")

        stage2_config = {
            'positive_iou_thresh': config['pos_iou'],
            'negative_iou_thresh': config['neg_iou'],
            'learning_rate': config['lr'],
            'bbox_loss_weight': config['bbox_weight'],
        }

        model_config = {
            **IMPROVED_MODEL_CONFIG,
            'cross_roi_num_heads': config['num_heads'],
        }

        temp_model_path = self.results_dir / f'temp_trial_{trial.number}.pth'

        try:
            trainer = ImprovedStage2Trainer(
                {**STAGE2_CONFIG, **stage2_config, 'epochs': 10},
                model_config,
                use_validation=True
            )

            best_val_acc = 0.0
            for epoch in range(10):
                trainer.train_epoch(epoch)
                val_metrics = trainer.validate()
                val_acc = val_metrics.get('accuracy', 0.0)

                # Intermediate report for the pruner (accuracy scaled to [0, 1]).
                trial.report(val_acc / 100.0, step=epoch)

                if trial.should_prune():
                    logger.info(f"  Trial pruned at epoch {epoch}")
                    raise optuna.TrialPruned()

                if val_acc > best_val_acc:
                    best_val_acc = val_acc
                    # Checkpoint the best model for the deployment evaluation.
                    torch.save({
                        'model_state_dict': trainer.model.state_dict(),
                        'epoch': epoch,
                        'val_acc': val_acc
                    }, temp_model_path)

        except optuna.TrialPruned:
            # Propagate pruning untouched. TrialPruned subclasses Exception,
            # so the broad handler below used to swallow it and mislabel it
            # as a training failure.
            raise
        except Exception as e:
            logger.error(f"  Training failed: {e}")
            raise optuna.TrialPruned()

        # Step 3: evaluation with the deployment thresholds.
        logger.info("  Step 3/3: Deployment evaluation...")

        if not temp_model_path.exists():
            logger.error("  Temp model not found")
            raise optuna.TrialPruned()

        # Read the original path BEFORE entering `try`: the original code
        # assigned it inside `try`, so an early failure made the restore code
        # reference an unbound name.
        original_weights_path = STAGE2_CONFIG['weights_path']
        try:
            # Temporarily point STAGE2_CONFIG at the trial checkpoint.
            STAGE2_CONFIG['weights_path'] = str(temp_model_path)

            metrics = self._evaluate_with_thresholds(
                config['stage1_conf_deploy'],
                config['stage2_conf_deploy']
            )
        except Exception as e:
            logger.error(f"  Evaluation failed: {e}")
            raise optuna.TrialPruned()
        finally:
            # Always restore the shared config and remove the temporary
            # checkpoint, even when evaluation fails (the original leaked the
            # checkpoint file on failure).
            STAGE2_CONFIG['weights_path'] = original_weights_path
            if temp_model_path.exists():
                os.remove(temp_model_path)

        test_map = metrics['map']
        logger.info(f"  Final test mAP: {test_map:.4f}")

        return test_map

    def _save_results(self, results: Dict):
        """Persist optimization results as JSON in the results directory.

        The Optuna ``study`` object (when present) is not JSON-serializable;
        it is replaced by its name plus, when at least one trial succeeded,
        the study's best value and best parameters.
        """
        output_file = self.results_dir / f'{self.study_name}_results.json'

        # Strip the non-serializable study object before dumping.
        if 'study' in results:
            study = results.pop('study')
            results['study_name'] = study.study_name

            # `best_value` / `best_params` raise ValueError when every trial
            # failed, so read them only in the normal (success) case.
            if not results.get('all_failed', False):
                results['best_value'] = study.best_value
                results['best_params'] = study.best_params
            else:
                logger.info("Saving results with default configuration (all trials failed)")

        with open(output_file, 'w') as f:
            json.dump(results, f, indent=2)

        logger.info(f"\nResults saved to: {output_file}")

    def visualize_results(self, study_name: Optional[str] = None):
        """Generate HTML visualizations for a stored Optuna study.

        Args:
            study_name: Name of the study to load; defaults to this
                optimizer's own study. The study is read from the SQLite
                storage file ``<results_dir>/<study_name>.db``.
        """
        # NOTE: annotation fixed from `str = None` — a None default requires
        # Optional[str] (implicit Optional is invalid per PEP 484).
        if study_name is None:
            study_name = self.study_name

        study = optuna.load_study(
            study_name=study_name,
            storage=f'sqlite:///{self.results_dir}/{study_name}.db'
        )

        # One HTML report each: history, parameter importance, parallel coords.
        self._plot_optimization_history(study)
        self._plot_param_importances(study)
        self._plot_parallel_coordinate(study)

    def _plot_optimization_history(self, study):
        """Write the study's optimization-history plot to an HTML file."""
        import optuna.visualization as vis

        out_path = self.results_dir / f'{study.study_name}_history.html'
        vis.plot_optimization_history(study).write_html(out_path)

    def _plot_param_importances(self, study):
        """Write the hyperparameter-importance plot to an HTML file."""
        import optuna.visualization as vis

        out_path = self.results_dir / f'{study.study_name}_importance.html'
        vis.plot_param_importances(study).write_html(out_path)

    def _plot_parallel_coordinate(self, study):
        """Write the parallel-coordinates plot to an HTML file."""
        import optuna.visualization as vis

        out_path = self.results_dir / f'{study.study_name}_parallel.html'
        vis.plot_parallel_coordinate(study).write_html(out_path)


def main():
    """Command-line entry point: parse arguments, run the selected
    optimization phase, and optionally emit visualizations."""
    import argparse

    arg_parser = argparse.ArgumentParser(description='PRISM Hyperparameter Optimization')
    arg_parser.add_argument('--phase', type=str, required=True,
                            choices=['stage1', 'stage2', 'deployment', 'end_to_end'],
                            help='Optimization phase')
    arg_parser.add_argument('--n-trials', type=int, default=20,
                            help='Number of optimization trials')
    arg_parser.add_argument('--timeout', type=int, default=None,
                            help='Optimization timeout in seconds')
    arg_parser.add_argument('--visualize', action='store_true',
                            help='Generate visualization after optimization')

    cli_args = arg_parser.parse_args()

    # Build the optimizer and run the requested phase.
    hpo = HyperparameterOptimizer(optimization_phase=cli_args.phase)
    hpo.optimize(n_trials=cli_args.n_trials, timeout=cli_args.timeout)

    # Visualization only applies to the Optuna-backed phases.
    if cli_args.visualize and cli_args.phase in ('stage2', 'end_to_end'):
        hpo.visualize_results()

    logger.info("\n" + "=" * 80)
    logger.info("Optimization Complete!")
    logger.info("=" * 80)


# Run the CLI entry point only when executed as a script (not on import).
if __name__ == '__main__':
    main()
