"""CacheEngine class for managing the KV cache."""
from typing import List

import torch

from vllm.attention import get_attn_backend
from vllm.config import CacheConfig, DeviceConfig, ModelConfig, ParallelConfig
from vllm.logger import init_logger
from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, LayerBlockType,
                        get_dtype_size, is_pin_memory_available)

logger = init_logger(__name__)


#! Responsible for managing the KV cache.
class CacheEngine:
    """Manages the KV cache.

    This class is responsible for initializing and managing the GPU and CPU KV
    caches. It also provides methods for performing KV cache operations, such
    as swapping and copying.
    """

    def __init__(
        self,
        cache_config: CacheConfig,
        model_config: ModelConfig,
        parallel_config: ParallelConfig,
        device_config: DeviceConfig,
    ) -> None:
        self.cache_config    = cache_config
        self.model_config    = model_config
        self.parallel_config = parallel_config
        self.device_config   = device_config

        self.head_size            = model_config.get_head_size()
        # Models like Jamba, have mixed typed layers, E.g Mamba
        #! The attention layer count is queried per pipeline-parallel
        #! partition (only the layers of this PP stage get cache here).
        self.num_attention_layers = model_config.get_num_layers_by_block_type(
            parallel_config, LayerBlockType.attention)
        #! num_kv_heads depends on the tensor-parallel degree (KV heads are
        #! sharded across TP ranks).
        # TODO: check
        #! NOTE: this is where MHA/GQA/MQA differ — the number of KV heads
        #! may be smaller than the number of query heads.
        self.num_kv_heads         = model_config.get_num_kv_heads(parallel_config)

        self.block_size     = cache_config.block_size
        self.num_gpu_blocks = cache_config.num_gpu_blocks
        # TODO: why this division? Presumably cache_config reports the total
        # block budget across all pipeline stages, so each stage takes an
        # equal share — confirm against the caller that sets num_gpu_blocks.
        if self.num_gpu_blocks:
            self.num_gpu_blocks //= parallel_config.pipeline_parallel_size
        self.num_cpu_blocks = cache_config.num_cpu_blocks
        if self.num_cpu_blocks:
            self.num_cpu_blocks //= parallel_config.pipeline_parallel_size

        # "auto" means: store the KV cache in the model's own dtype;
        # otherwise map the configured string (e.g. "fp8") to a torch dtype.
        if cache_config.cache_dtype == "auto":
            self.dtype = model_config.dtype
        else:
            self.dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_config.cache_dtype]

        # Get attention backend.
        #! Selects the attention compute backend (e.g. FlashAttention v2,
        #! xFormers) based on head size, dtype, cache dtype and block size.
        self.attn_backend = get_attn_backend(self.head_size,
                                             model_config.dtype,
                                             cache_config.cache_dtype,
                                             self.block_size,
                                             model_config.is_attention_free)

        # Initialize the cache.
        self.gpu_cache = self._allocate_kv_cache(
            self.num_gpu_blocks, self.device_config.device_type)
        self.cpu_cache = self._allocate_kv_cache(self.num_cpu_blocks, "cpu")

    def _allocate_kv_cache(
        self,
        num_blocks: int,
        device: str,
    ) -> List[torch.Tensor]:
        """Allocates KV cache on the specified device.

        Returns one zero-initialized tensor per attention layer, shaped as
        dictated by the attention backend.
        """
        #! Shape is backend-defined; typically
        #! (2, num_blocks, block_size, num_kv_heads, head_size) — confirm
        #! against the selected backend's get_kv_cache_shape.
        kv_cache_shape = self.attn_backend.get_kv_cache_shape(
            num_blocks, self.block_size, self.num_kv_heads, self.head_size)
        # Pinned host memory speeds up CPU<->GPU swaps; is_pin_memory_available
        # reports False on platforms that do not support it (e.g. WSL).
        pin_memory = is_pin_memory_available() if device == "cpu" else False
        kv_cache: List[torch.Tensor] = []
        #! Memory is allocated through torch, one tensor per layer.
        # TODO: why is the cache partitioned per layer? (presumably so each
        # attention layer indexes its own cache tensor independently)
        for _ in range(self.num_attention_layers):
            # null block in CpuGpuBlockAllocator requires at least that
            # block to be zeroed-out.
            # We zero-out everything for simplicity.
            kv_cache.append(
                # By convention the leading dim of size 2 holds K at index 0
                # and V at index 1 — backend-dependent, verify per backend.
                torch.zeros(kv_cache_shape,
                            dtype=self.dtype,
                            pin_memory=pin_memory,
                            device=device))
        return kv_cache

    # TODO: consider doing the swap asynchronously and synchronizing only
    # where needed?
    # TODO: clarify the exact layout of the src_to_dst tensor (presumably a
    # mapping of source block numbers to destination block numbers)
    # TODO: this per-layer loop could potentially be fused into one CUDA
    # kernel — see vllm/csrc/cache_kernels.cu
    def swap_in(self, src_to_dst: torch.Tensor) -> None:
        """Copies the given blocks from the CPU cache into the GPU cache,
        layer by layer."""
        for i in range(self.num_attention_layers):
            self.attn_backend.swap_blocks(self.cpu_cache[i], self.gpu_cache[i],
                                          src_to_dst)

    def swap_out(self, src_to_dst: torch.Tensor) -> None:
        """Copies the given blocks from the GPU cache out to the CPU cache,
        layer by layer."""
        for i in range(self.num_attention_layers):
            self.attn_backend.swap_blocks(self.gpu_cache[i], self.cpu_cache[i],
                                          src_to_dst)

    # Copies KV-cache blocks between locations within the GPU cache
    # (all layers at once, delegated to the backend).
    def copy(self, src_to_dsts: torch.Tensor) -> None:
        self.attn_backend.copy_blocks(self.gpu_cache, src_to_dsts)


    #! Computes the size in bytes of a single KV-cache block.
    #! The block size (tokens per block) is configurable — commonly 16, 32
    #! or 64. The per-token KV-cache size is derived as follows:
    #   1. each K/V vector has as many elements as the hidden dimension d;
    #   2. in MHA, d is split into num_heads heads of width head_size, so one
    #      K/V vector has num_heads * head_size elements;
    #   3. K and V together contribute num_heads * head_size * 2 elements;
    #   4. with num_layers layers, one token's KV cache totals
    #      num_heads * head_size * 2 * num_layers elements;
    #   5. one block holds block_size tokens' KV cache, i.e.
    #      block_size * num_heads * head_size * 2 * num_layers elements;
    #   6. the byte size is that element count * sizeof(kv-cache dtype).
    @staticmethod
    def get_cache_block_size(
        cache_config: CacheConfig,
        model_config: ModelConfig,
        parallel_config: ParallelConfig,
    ) -> int:
        """Returns the size in bytes of one KV-cache block for this config."""
        head_size = model_config.get_head_size()                                  # width of a single head's vector
        num_heads = model_config.get_num_kv_heads(parallel_config)                # number of KV heads (per TP rank)
        num_attention_layers = model_config.get_num_layers_by_block_type(         # attention layers on this PP stage
            parallel_config, LayerBlockType.attention)

        key_cache_block = cache_config.block_size * num_heads * head_size
        value_cache_block = key_cache_block
        total = num_attention_layers * (key_cache_block + value_cache_block)
        if cache_config.cache_dtype == "auto":                                    #! "auto": use the model's own dtype for the KV cache
            dtype = model_config.dtype
        else:
            dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_config.cache_dtype]
        dtype_size = get_dtype_size(dtype)
        return dtype_size * total
