"""
GPU Memory Dynamic Management System

自动检测GPU显存并动态调整推理参数，确保最优性能而不OOM。
"""
import torch
import logging
from typing import Dict, Tuple

logger = logging.getLogger(__name__)


class GPUMemoryManager:
    """Dynamic GPU memory manager.

    Probes the GPU once at construction time and derives inference
    parameters (batch sizes, DataLoader settings, concurrency limit)
    that fit within the available memory without risking OOM. Falls
    back to conservative defaults when no CUDA device is present.
    """

    # Estimated resident memory per loaded model (MB).
    # NOTE(review): hand-tuned estimates — re-measure if architectures change.
    MODEL_MEMORY_ESTIMATE = {
        'stage1_yolo': 500,      # YOLOv10n
        'stage2_refiner': 2500,  # ROI Refiner (OverLoCK + DINOv2)
        'stage2_ema': 2500,      # EMA model (if loaded)
    }

    # Transient memory per image during inference (MB).
    PER_IMAGE_MEMORY = {
        'stage1': 50,   # YOLO inference
        'stage2': 100,  # ROI Refiner inference
    }

    # Safety factor: plan to use only this fraction of free memory.
    SAFETY_MARGIN = 0.85

    def __init__(self):
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.gpu_info = self._detect_gpu()
        self.optimal_params = self._calculate_optimal_params()

    def _detect_gpu(self) -> Dict:
        """Detect the GPU and log a summary of its memory.

        Returns:
            Dict with an 'available' flag plus name/memory fields.
            'free_memory_mb' is present in both the CPU and GPU results
            so callers can read it unconditionally.
        """
        if self.device == 'cpu':
            logger.warning("⚠️  No GPU detected, running on CPU")
            return {
                'available': False,
                'name': 'CPU',
                'total_memory_mb': 0,
                # Unified with the GPU branch's key name; the old
                # 'available_memory_mb' key is kept for compatibility.
                'free_memory_mb': 0,
                'available_memory_mb': 0
            }

        # Query device 0; multi-GPU selection is not supported here.
        gpu_props = torch.cuda.get_device_properties(0)
        total_memory_mb = gpu_props.total_memory / (1024 ** 2)

        # Drop cached allocations so the free-memory reading reflects
        # what is actually obtainable right now.
        torch.cuda.empty_cache()
        torch.cuda.synchronize()

        # mem_get_info() -> (free_bytes, total_bytes); take free.
        free_memory_mb = torch.cuda.mem_get_info()[0] / (1024 ** 2)

        gpu_info = {
            'available': True,
            'name': gpu_props.name,
            'total_memory_mb': total_memory_mb,
            'free_memory_mb': free_memory_mb,
            'compute_capability': f"{gpu_props.major}.{gpu_props.minor}"
        }

        logger.info("=" * 80)
        logger.info("GPU Information:")
        logger.info(f"  Name: {gpu_info['name']}")
        logger.info(f"  Total Memory: {total_memory_mb:.0f} MB ({total_memory_mb/1024:.1f} GB)")
        logger.info(f"  Free Memory: {free_memory_mb:.0f} MB ({free_memory_mb/1024:.1f} GB)")
        logger.info(f"  Compute Capability: {gpu_info['compute_capability']}")
        logger.info("=" * 80)

        return gpu_info

    def _calculate_optimal_params(self) -> Dict:
        """Derive batch sizes, worker count and concurrency from free memory.

        Heuristic: subtract the estimated resident model memory from the
        usable fraction of free memory, then size batches and the
        concurrent-user cap from the per-image estimates.
        """
        if not self.gpu_info['available']:
            # Conservative CPU defaults; no prefetch_factor on purpose
            # (see get_dataloader_config).
            return {
                'batch_size_stage1': 1,
                'batch_size_stage2': 1,
                'num_workers': 2,
                'pin_memory': False,
                'max_concurrent_users': 1,
                'recommendation': 'CPU模式，性能受限'
            }

        free_memory = self.gpu_info['free_memory_mb']
        usable_memory = free_memory * self.SAFETY_MARGIN

        # Reserve space for the always-resident models.
        model_memory = (
            self.MODEL_MEMORY_ESTIMATE['stage1_yolo'] +
            self.MODEL_MEMORY_ESTIMATE['stage2_refiner']
        )
        remaining_memory = usable_memory - model_memory

        if remaining_memory < 500:
            logger.warning("⚠️  显存不足，可能出现OOM")

        # Stage2 is the bottleneck, so it gets priority; max(1, ...) also
        # guards against a negative remaining_memory.
        max_batch_stage2 = max(1, int(remaining_memory / self.PER_IMAGE_MEMORY['stage2']))
        max_batch_stage1 = max(1, int(remaining_memory / self.PER_IMAGE_MEMORY['stage1']))

        # Cap batch sizes — very large batches stop improving throughput.
        batch_size_stage1 = min(max_batch_stage1, 32)
        batch_size_stage2 = min(max_batch_stage2, 16)

        # Max concurrent users (Gradio multi-user): each request needs
        # roughly one stage1 + one stage2 image of transient memory.
        memory_per_request = (
            self.PER_IMAGE_MEMORY['stage1'] +
            self.PER_IMAGE_MEMORY['stage2']
        )
        max_concurrent = max(1, int(remaining_memory / memory_per_request))
        max_concurrent = min(max_concurrent, 10)  # hard cap of 10

        # DataLoader workers: half the CPU cores, capped at 8. May be 0
        # on a single-core machine; get_dataloader_config() handles that.
        import multiprocessing
        num_workers = min(multiprocessing.cpu_count() // 2, 8)

        params = {
            'batch_size_stage1': batch_size_stage1,
            'batch_size_stage2': batch_size_stage2,
            'num_workers': num_workers,
            'pin_memory': True,
            'max_concurrent_users': max_concurrent,
            'prefetch_factor': 2,
        }

        # Human-readable recommendation, bucketed by total free memory.
        if free_memory < 4000:  # < 4GB
            params['recommendation'] = '显存较小，建议单用户使用'
            params['max_concurrent_users'] = 1
        elif free_memory < 8000:  # 4-8GB
            params['recommendation'] = '显存中等，支持2-3个并发'
            params['max_concurrent_users'] = min(params['max_concurrent_users'], 3)
        else:  # > 8GB
            params['recommendation'] = '显存充足，支持多用户并发'

        logger.info("Optimal Parameters Calculated:")
        logger.info(f"  Stage1 Batch Size: {params['batch_size_stage1']}")
        logger.info(f"  Stage2 Batch Size: {params['batch_size_stage2']}")
        logger.info(f"  DataLoader Workers: {params['num_workers']}")
        logger.info(f"  Max Concurrent Users: {params['max_concurrent_users']}")
        logger.info(f"  Recommendation: {params['recommendation']}")
        logger.info("=" * 80)

        return params

    def get_batch_size(self, stage: str = 'stage2') -> int:
        """Return the recommended batch size for 'stage1' or 'stage2'.

        Unknown stages fall back to a safe batch size of 1.
        """
        key = f'batch_size_{stage}'
        return self.optimal_params.get(key, 1)

    def get_dataloader_config(self) -> Dict:
        """Return keyword arguments for torch DataLoader construction.

        'prefetch_factor' is only included when num_workers > 0, because
        PyTorch raises ValueError if it is passed with main-process
        loading (num_workers == 0).
        """
        config = {
            'num_workers': self.optimal_params['num_workers'],
            'pin_memory': self.optimal_params['pin_memory'],
        }
        if config['num_workers'] > 0:
            config['prefetch_factor'] = self.optimal_params.get('prefetch_factor', 2)
        return config

    def get_max_concurrent_users(self) -> int:
        """Return the recommended maximum number of concurrent users."""
        return self.optimal_params['max_concurrent_users']

    def check_available_memory(self) -> Tuple[float, float]:
        """Return (free_mb, total_mb) of GPU memory right now.

        Returns (0.0, 0.0) when running on CPU.
        """
        if not self.gpu_info['available']:
            return 0.0, 0.0

        torch.cuda.empty_cache()
        free, total = torch.cuda.mem_get_info()
        free_mb = free / (1024 ** 2)
        total_mb = total / (1024 ** 2)

        return free_mb, total_mb

    def print_memory_status(self):
        """Log the current GPU memory usage (or a CPU notice)."""
        if not self.gpu_info['available']:
            logger.info("Running on CPU")
            return

        free_mb, total_mb = self.check_available_memory()
        used_mb = total_mb - free_mb
        usage_percent = (used_mb / total_mb) * 100

        logger.info(f"GPU Memory: {used_mb:.0f}/{total_mb:.0f} MB ({usage_percent:.1f}% used)")

    def estimate_batch_memory(self, batch_size: int, stage: str = 'stage2') -> float:
        """Estimate transient memory (MB) for a batch of the given stage.

        Raises KeyError for an unknown stage name.
        """
        per_image = self.PER_IMAGE_MEMORY[stage]
        estimated_mb = per_image * batch_size
        return estimated_mb

    def can_handle_batch(self, batch_size: int, stage: str = 'stage2') -> bool:
        """Return True if the estimated batch memory fits in free memory,
        leaving the configured safety margin."""
        estimated = self.estimate_batch_memory(batch_size, stage)
        free_mb, _ = self.check_available_memory()

        return estimated < (free_mb * self.SAFETY_MARGIN)


# Module-level singleton, created lazily on first access.
_memory_manager = None

def get_memory_manager() -> GPUMemoryManager:
    """Return the process-wide GPUMemoryManager, creating it on first call."""
    global _memory_manager
    if _memory_manager is not None:
        return _memory_manager
    _memory_manager = GPUMemoryManager()
    return _memory_manager


if __name__ == '__main__':
    # Smoke test: build the manager and print its recommendations.
    logging.basicConfig(level=logging.INFO)

    manager = get_memory_manager()

    print("\n" + "=" * 80)
    print("GPU Memory Manager Test")
    print("=" * 80)

    print(f"\nRecommended Stage1 Batch Size: {manager.get_batch_size('stage1')}")
    print(f"Recommended Stage2 Batch Size: {manager.get_batch_size('stage2')}")
    print(f"Max Concurrent Users: {manager.get_max_concurrent_users()}")

    print("\nDataLoader Config:")
    print(manager.get_dataloader_config())

    print("\nCurrent Memory Status:")
    manager.print_memory_status()

    print("\nBatch Memory Estimation:")
    for batch in (1, 4, 8, 16, 32):
        estimated = manager.estimate_batch_memory(batch, 'stage2')
        if manager.can_handle_batch(batch, 'stage2'):
            status = "✅"
        else:
            status = "❌"
        print(f"  Batch {batch:2d}: {estimated:6.0f} MB {status}")