# memory_manager.py  (updated version)
import os
import asyncio
import aegaeon_native
from typing import List, Tuple

class ModelCacheManager:
    """Manage a local model cache directory and GPU prefetching.

    Wraps the blocking ``aegaeon_native`` extension calls for chunked
    file-to-GPU prefetch and keeps simple bookkeeping: per-model cache
    metadata and observed scale-up latencies.
    """

    def __init__(self, cache_dir: str = "/tmp/aegaeon_model_cache"):
        self.cache_dir = cache_dir
        os.makedirs(self.cache_dir, exist_ok=True)
        # model_name -> local cache path.
        # NOTE(review): never populated by the code visible here — confirm
        # whether another part of the system writes into it.
        self.cache_meta: dict = {}
        # model_name -> list of observed scale latencies, in seconds
        self.scale_latencies: dict = {}

    async def ensure_model_cached(self, model_name: str) -> str:
        """Return a usable path/identifier for ``model_name``.

        The previous implementation checked ``os.path.exists(model_name)``
        but returned ``model_name`` on both branches, so the check was
        redundant; the single return below preserves that exact behavior.
        """
        await asyncio.sleep(0)  # yield once so callers get cooperative scheduling
        return model_name

    async def prefetch_file_to_gpu_chunks(self, filepath: str, gpu_id: int, chunk_bytes: int = 32 * 1024 * 1024):
        """Prefetch ``filepath`` into GPU ``gpu_id`` memory in chunks.

        Wrapper over ``aegaeon_native.prefetch_file_to_gpu_chunks``.

        Returns:
            list of dicts: ``{"device_ptr": int, "size": int,
            "event_handle": bytes}`` — one entry per chunk.

        The native call blocks for the duration of the copy; run it in the
        default thread-pool executor so the event loop is not stalled.
        """
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(
            None,
            aegaeon_native.prefetch_file_to_gpu_chunks,
            filepath,
            gpu_id,
            chunk_bytes,
        )

    def record_scale_latency(self, model_name: str, sec: float) -> None:
        """Append one observed scale-up latency (seconds) for ``model_name``."""
        self.scale_latencies.setdefault(model_name, []).append(sec)

    def release_prefetch_events(self) -> None:
        """Release all prefetch events held by the native extension."""
        aegaeon_native.release_all_prefetch_events()
