"""CacheEngine class for managing the KV cache."""
from typing import Dict, List

import torch
import torch_musa

from vllm.config import CacheConfig, ModelConfig, ParallelConfig
from vllm.logger import init_logger
from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, is_pin_memory_available, get_head_size_pad

logger = init_logger(__name__)


class CacheEngine:
    """Manages the KV cache.

    This class is responsible for initializing and managing the GPU and CPU KV
    caches. It also provides methods for performing KV cache operations, such
    as swapping and copying.
    """

    def __init__(
        self,
        cache_config: CacheConfig,
        model_config: ModelConfig,
        parallel_config: ParallelConfig,
    ) -> None:
        self.cache_config = cache_config
        self.model_config = model_config
        self.parallel_config = parallel_config

        self.head_size = model_config.get_head_size()
        self.num_layers = model_config.get_num_layers(parallel_config)
        self.num_heads = model_config.get_num_kv_heads(parallel_config)

        self.block_size = cache_config.block_size
        self.num_gpu_blocks = cache_config.num_gpu_blocks
        self.num_cpu_blocks = cache_config.num_cpu_blocks

        if cache_config.cache_dtype == "auto":
            self.dtype = model_config.dtype
        else:
            self.dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_config.cache_dtype]

        # One cache tensor per tensor-parallel rank; gpu_cache[i] lives on
        # device "musa:i", cpu_cache[i] is its host-side swap partner.
        self.gpu_cache: List[torch.Tensor] = []
        self.cpu_cache: List[torch.Tensor] = []

        tp = self.parallel_config.tensor_parallel_size

        # Padded head size used as the innermost cache dimension; computed by
        # the project helper (presumably for kernel alignment — see
        # vllm.utils.get_head_size_pad).
        self.head_size_pad = get_head_size_pad(self.head_size, self.num_heads,
                                               self.dtype)

        # Validate explicitly rather than with `assert`, which is stripped
        # under `python -O` and would let an oversized tp fail later with an
        # opaque device error.
        num_devices = torch.musa.device_count()
        if tp > num_devices:
            raise ValueError(
                f"tensor_parallel_size ({tp}) exceeds the number of "
                f"available MUSA devices ({num_devices}).")
        for i in range(tp):
            self.gpu_cache.append(
                self._allocate_kv_cache(self.num_gpu_blocks, f"musa:{i}"))
            self.cpu_cache.append(
                self._allocate_kv_cache(self.num_cpu_blocks, "cpu"))

    def _allocate_kv_cache(
        self,
        num_blocks: int,
        device: str,
    ) -> torch.Tensor:
        """Allocates KV cache on the specified device.

        The key and value caches share a single tensor: dimension 1 has size 2
        (index 0/1 for key/value respectively — TODO confirm against the
        attention kernels that consume this layout).
        """
        kv_cache_shape = (num_blocks, 2, self.num_layers, self.num_heads,
                          self.block_size, self.head_size_pad)
        # Lazy %-formatting: the string is only built if INFO is enabled.
        logger.info("Allocates %s kv cache with shape: %s", device,
                    kv_cache_shape)

        # Pinned host memory lets the non_blocking copies in swap_in/swap_out
        # actually run asynchronously.
        pin_memory = is_pin_memory_available() if device == "cpu" else False
        return torch.zeros(kv_cache_shape,
                           dtype=self.dtype,
                           pin_memory=pin_memory,
                           device=device)

    def swap_in(self, src_to_dst: Dict[int, int]) -> None:
        """Copies blocks from the CPU cache to the GPU cache.

        Args:
            src_to_dst: Mapping of CPU block number -> GPU block number.
        """
        for i in range(self.parallel_config.tensor_parallel_size):
            for src, dst in src_to_dst.items():
                self.gpu_cache[i][dst].copy_(self.cpu_cache[i][src],
                                             non_blocking=True)

    def swap_out(self, src_to_dst: Dict[int, int]) -> None:
        """Copies blocks from the GPU cache to the CPU cache.

        Args:
            src_to_dst: Mapping of GPU block number -> CPU block number.
        """
        for i in range(self.parallel_config.tensor_parallel_size):
            for src, dst in src_to_dst.items():
                self.cpu_cache[i][dst].copy_(self.gpu_cache[i][src],
                                             non_blocking=True)

    def copy(self, src_to_dsts: Dict[int, List[int]]) -> None:
        """Copies blocks within the GPU cache (e.g. for copy-on-write forks).

        Args:
            src_to_dsts: Mapping of source GPU block number -> list of
                destination GPU block numbers.
        """
        for i in range(self.parallel_config.tensor_parallel_size):
            for src, dsts in src_to_dsts.items():
                for dst in dsts:
                    self.gpu_cache[i][dst].copy_(self.gpu_cache[i][src],
                                                 non_blocking=True)

    @staticmethod
    def get_cache_block_size(
        cache_config: CacheConfig,
        model_config: ModelConfig,
        parallel_config: ParallelConfig,
    ) -> int:
        """Returns the size in bytes of one KV-cache block (key + value,
        all layers), using the same padded head size as allocation."""
        head_size = model_config.get_head_size()
        num_heads = model_config.get_num_kv_heads(parallel_config)
        num_layers = model_config.get_num_layers(parallel_config)
        tp = parallel_config.tensor_parallel_size

        if cache_config.cache_dtype == "auto":
            dtype = model_config.dtype
        else:
            dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_config.cache_dtype]

        # Must match the padding used in _allocate_kv_cache, otherwise the
        # block count derived from free memory would be wrong.
        head_size_pad = get_head_size_pad(head_size, num_heads, dtype)

        key_cache_block = cache_config.block_size * num_heads * head_size_pad
        value_cache_block = key_cache_block
        total = num_layers * (key_cache_block + value_cache_block)
        dtype_size = _get_dtype_size(dtype)
        return dtype_size * total


def _get_dtype_size(dtype: torch.dtype) -> int:
    return torch.tensor([], dtype=dtype).element_size()
