# instance.py (updated key excerpt)
import asyncio
import time
import os
import aegaeon_native
from memory_manager import ModelCacheManager

class GPUInstance:
    """Owns one GPU slot and hot-swaps vLLM engines onto it.

    A single asyncio lock serializes scale operations per instance, so
    concurrent scale_up() calls for the same GPU cannot interleave.
    """

    def __init__(self, gpu_id: int, role: str = "decode"):
        self.gpu_id = gpu_id
        self.role = role                      # e.g. "decode" or "prefill" — TODO confirm valid roles
        self.current_model = None             # name of the model currently resident, or None
        self.llm = None                       # live vLLM engine handle once created
        self.lock = asyncio.Lock()            # serializes scale_up on this GPU
        self.model_cache = ModelCacheManager()

    async def scale_up(self, model_name: str, prefetch: bool = True, local_checkpoint_path: str = None):
        """Load `model_name` onto this GPU, replacing whatever is resident.

        Args:
            model_name: logical model identifier understood by ModelCacheManager.
            prefetch: when True and the checkpoint is a local file, start
                chunked GPU prefetch to overlap data transfer with engine init.
            local_checkpoint_path: optional path to a local checkpoint; if it
                exists it is used directly, skipping the cache download.
                (BUG FIX: this parameter was previously accepted but ignored.)

        No-op if `model_name` is already the current model.
        """
        async with self.lock:
            if self.current_model == model_name:
                return
            # monotonic clock: latency metric must not be skewed by NTP/wall-clock jumps
            t0 = time.monotonic()
            # swapout / cleanup if needed (omitted for brevity)

            # Prefer an explicitly supplied local checkpoint; otherwise go
            # through the cache (may download).
            if local_checkpoint_path is not None and os.path.exists(local_checkpoint_path):
                model_path = local_checkpoint_path
            else:
                model_path = await self.model_cache.ensure_model_cached(model_name)

            # Kick off chunked host->GPU prefetch (best-effort optimization;
            # failure must never abort the scale-up). os.path.isfile already
            # implies existence, so the extra exists() check was redundant.
            prefetch_chunks = []
            if prefetch and os.path.isfile(model_path):
                try:
                    prefetch_chunks = await self.model_cache.prefetch_file_to_gpu_chunks(
                        model_path, self.gpu_id, chunk_bytes=32 * 1024 * 1024
                    )
                except Exception as e:
                    print("prefetch failed:", e)
                    prefetch_chunks = []

            # Build the vLLM engine off the event loop so prefetch copies
            # overlap with engine construction.
            loop = asyncio.get_running_loop()

            def _make_llm():
                from vllm import LLM
                # FIX: vLLM's constructor kwargs are `tensor_parallel_size`
                # (not `tensor_parallel_degree`) and dtype "float16"/"half"
                # (not "fp16") — the original call would raise at runtime.
                return LLM(model=model_path, dtype="float16", tensor_parallel_size=1)

            self.llm = await loop.run_in_executor(None, _make_llm)

            # Block until the first few chunks are resident so some weights
            # are on-device before we declare the instance ready.
            for chunk in prefetch_chunks[:2]:
                try:
                    aegaeon_native.wait_event_handle_bytes(chunk["event_handle"])
                except Exception as e:
                    # waiting is best-effort: log and continue
                    print("wait_event_handle_bytes failed:", e)

            # Release prefetch events (cleanup is best-effort, but no longer
            # silently swallowed — log the failure for diagnosability).
            try:
                self.model_cache.release_prefetch_events()
            except Exception as e:
                print("release_prefetch_events failed:", e)

            self.current_model = model_name
            self.model_cache.record_scale_latency(model_name, time.monotonic() - t0)
            return
