# kvcache.py
import asyncio
import torch
from typing import Dict, Optional
import numpy as np

class KVBlock:
    """Bookkeeping record for one request's KV data.

    Holds placement metadata plus up to two storage slots (a GPU tensor
    and/or a pinned host buffer) and the CUDA event recorded after the
    most recent async copy that touched this block.
    """

    def __init__(self, req_id, shape, device):
        # identity and placement metadata, captured verbatim
        self.req_id, self.shape, self.device = req_id, shape, device
        # storage slots — filled in later by the cache manager
        self.gpu_tensor: Optional[torch.Tensor] = None
        self.host_pinned: Optional[torch.Tensor] = None
        # event marking completion of the last in-flight transfer, if any
        self.inflight_event: Optional[torch.cuda.Event] = None

class KVCacheManager:
    """Manages per-request KV blocks, staging them between pinned host
    memory and one GPU over a dedicated CUDA copy stream.

    NOTE(review): the methods are coroutines, but the CUDA work itself is
    queued synchronously on the stream — only pinned-pool acquisition
    actually awaits.
    """

    def __init__(self, node_gpu_id=0, pinned_pool=None):
        self.node_gpu_id = node_gpu_id
        self.kv_blocks: Dict[str, KVBlock] = {}
        # a very small pinned buffer pool if provided (ModelCacheManager.get_pinned_pool)
        self.pinned_pool = pinned_pool
        # dedicated stream so H2D/D2H copies can overlap other streams' work
        self.stream = torch.cuda.Stream(device=node_gpu_id)

    async def store_kv_for_request(self, req_id: str, tokens, device="cuda:0"):
        """Build a placeholder KV blob for ``tokens``, stage it in pinned
        host memory, and kick off an async H2D copy.

        Returns the new KVBlock. The copy may still be in flight; callers
        should go through ensure_kv_on_gpu() before reading the tensor.
        """
        # For simplicity, we represent kv as uint8 blob of token IDs here.
        arr = np.array([0 if t is None else 1 for t in tokens], dtype=np.uint8)  # placeholder
        n = arr.nbytes
        # get pinned buffer if available
        if self.pinned_pool:
            pinned = await self.pinned_pool.acquire(timeout=2.0)
            pinned[:n].copy_(torch.from_numpy(arr))
            host_buf = pinned
            # NOTE(review): nothing here releases the pooled buffer back to
            # the pool — confirm the pool reclaims it elsewhere.
        else:
            host_buf = torch.from_numpy(arr).pin_memory()
        # FIX: original read `KVBlock(req_id:req_id, ...)` — a SyntaxError.
        block = KVBlock(req_id=req_id, shape=(n,), device=device)
        block.host_pinned = host_buf
        # immediately async copy to GPU; host_buf is sliced because a pooled
        # buffer may be larger than n and copy_ needs matching element counts
        with torch.cuda.stream(self.stream):
            gpu_tensor = torch.empty(n, dtype=torch.uint8, device=device)
            gpu_tensor.copy_(host_buf[:n], non_blocking=True)
            evt = torch.cuda.Event()
            evt.record(self.stream)
        block.gpu_tensor = gpu_tensor
        block.inflight_event = evt
        self.kv_blocks[req_id] = block
        return block

    async def ensure_kv_on_gpu(self, req_id: str, stream: Optional[torch.cuda.Stream] = None):
        """Guarantee the request's KV is resident (and settled) on the GPU.

        Raises RuntimeError if ``req_id`` was never stored. If a transfer is
        in flight, blocks until its recorded event completes; otherwise
        issues a fresh H2D copy from the pinned host buffer on ``stream``
        (defaults to the manager's internal copy stream).
        """
        if req_id not in self.kv_blocks:
            raise RuntimeError("KV not found")
        block = self.kv_blocks[req_id]
        if block.gpu_tensor is not None:
            # wait if it's inflight, then drop the spent event
            if block.inflight_event is not None:
                block.inflight_event.synchronize()
                block.inflight_event = None
            return
        # otherwise bring from host_pinned into GPU
        if stream is None:
            stream = self.stream
        with torch.cuda.stream(stream):
            gpu_tensor = torch.empty(block.shape, dtype=torch.uint8, device=f"cuda:{self.node_gpu_id}")
            # FIX: slice to the block's logical size — host_pinned may be a
            # larger pooled buffer, and copy_ requires matching numel
            gpu_tensor.copy_(block.host_pinned[: block.shape[0]], non_blocking=True)
            evt = torch.cuda.Event()
            evt.record(stream)
        block.gpu_tensor = gpu_tensor
        block.inflight_event = evt
        return

    async def swapout_all_to_host(self, stream: Optional[torch.cuda.Stream] = None):
        """Copy every resident GPU block back to a freshly pinned host
        buffer and drop the GPU tensor reference.

        The D2H copies are async; each block's ``inflight_event`` marks
        completion. NOTE(review): the GPU reference is dropped while the
        copy may still be in flight — safe only if deallocation is
        stream-ordered with respect to ``stream``; confirm when a foreign
        stream is passed in.
        """
        if stream is None:
            stream = self.stream
        for req_id, block in list(self.kv_blocks.items()):
            if block.gpu_tensor is None:
                continue
            with torch.cuda.stream(stream):
                host_buf = torch.empty_like(block.gpu_tensor, device="cpu").pin_memory()
                host_buf.copy_(block.gpu_tensor, non_blocking=True)
                evt = torch.cuda.Event()
                evt.record(stream)
            block.host_pinned = host_buf
            block.gpu_tensor = None
            block.inflight_event = evt

    def append_token_to_kv(self, req_id: str, token, device="cuda:0"):
        """Placeholder: a real implementation would extend the request's KV
        (grow the GPU tensor or add a new chunk); currently a no-op."""
        # in-depth implementation would append to kv, extend GPU tensor, or create new chunk
        pass
