# built-in
import os
import sys
import ctypes
import torch
import array

import mmh3
import xxhash
import numpy as np

from   typing import TYPE_CHECKING, NewType
from   typing import Dict, List, Optional, Tuple, Union

# 3rd party
from vllm.config   import ModelConfig
from vllm.config   import ParallelConfig
from vllm.config   import VllmConfig
from vllm.sequence import ExecuteModelRequest
from vllm.sequence import SequenceGroup
from vllm.sequence import SequenceGroupMetadata


from vllm.logger                     import init_logger
from vllm.worker.cache_engine        import CacheEngine

from vllm.distributed.parallel_state import get_tensor_model_parallel_rank
from vllm.distributed.utils          import get_pp_indices

# 1st part
from task_queue    import TaskQueue

if TYPE_CHECKING:
    from vllm.core.block_manager    import BlockSpaceManager


logger = init_logger(__name__)


# TODO(yangxianpku: 2025.08.28): move into envs.py and change the default location
# Installed under /usr/local/ctkvstore by default.
home_path         = os.getenv('CTKVSTORE_HOME_PATH', '/usr/local/ctkvstore/')
sdk_path          = os.getenv('KVCACHE_SDK_PATH',  home_path + 'kvcache-sdk')
workertool_path   = os.getenv('KVCACHE_WORKERTOOL_PATH', home_path +'worker-tools')

grpc_port         = os.getenv('GRPC_PORT', '50051')
# Make the SDK / worker-tool extension modules importable.
sys.path.append(sdk_path)
sys.path.append(workertool_path)

import KVCacheSDK
import WorkerTool

# Keys of the per-sequence hash dictionaries produced by get_block_hash_ids().
HASH_ID        = "hash_id"
PARENT_HASH_ID = "parent_hash_id"
LOCAL_HASH_ID  = "local_hash_id"
VERIFY_HASH_ID = "verify_hash_id"

# Map torch device strings to the SDK's device enum.
device_map = {
    'cpu' :  KVCacheSDK.DeviceType.CPU,
    'cuda':  KVCacheSDK.DeviceType.GPU,
    'npu' :  KVCacheSDK.DeviceType.NPU,
}


# Readability aliases; the SDK client object doubles as the RPC stub.
RPCStub     =  NewType('RPCStub',     KVCacheSDK.KVCacheSDK)
HashList    =  NewType('HashList',    List[KVCacheSDK.BlockHashId])
SeqHashList =  NewType('SeqHashList', List[List[KVCacheSDK.BlockHashId]])


# Detect which accelerator this host provides.
if torch.cuda.is_available():
    device_type = "cuda"
# TODO(yangxianpku: 2025.08.28): this raises on CPU-only machines
# (torch has no `npu` attribute without the Ascend plugin installed).
elif torch.npu.is_available():
    device_type = "npu"
else:
    device_type = "cpu"


# Per-process singletons, initialised lazily (see check_if_new_process()).
# TODO(yangxianpku:2025.09.01): what is the difference between g_stub and g_engine_sdk?
g_workertool              = None
g_stub                    = None
g_engine_sdk              = None
g_task_queue              = None
g_prefetch_task_queue     = TaskQueue()
g_worker_local_rank       = -1
g_worker_rank             = -1

# TODO(yangxianpku: 2025.08.28): move into envs.py
#! If the number of tokens that would need recomputing is below this
#! threshold, recompute directly instead of fetching from the KV store.
# NOTE(review): the env var name keeps the original misspelling
# "THREASHOLD" for backward compatibility.
g_skip_prefetch_threshold = int(os.getenv("VLLM_SKIP_PREFETCH_THREASHOLD", 1024))


#! Registered (sdk, pp_rank) pairs used for distributed prefetch.
# TODO(yangxianpku: 2025.08.29): could one sdk simply be reused with different rank values?
g_stubs: List[Tuple[RPCStub, int]] = []



def update_prefetch_stubs(
    client_pp_rank_list: List[Tuple[RPCStub, int]]
) -> None:
    """Register additional RPC clients for distributed prefetch.

    Args:
        client_pp_rank_list: new (RPC client, pp_rank) pairs to append to
            the global stub list
    """
    for client_pp_rank in client_pp_rank_list:
        g_stubs.append(client_pp_rank)




def prefetch_prefix_cache_task(stub: RPCStub,
                seq_hash_id_list: SeqHashList,
                cache_block_size: int, 
                vllm_config: VllmConfig,
                pp_rank: int,
            ) -> None:
    """Prefetch the KV cache for the given block hashes from storage into
    memory (via the RPC stub).  Enqueued on the task queue and executed
    asynchronously by a dedicated worker thread.

    Args:
        stub:              gRPC client instance
        seq_hash_id_list:  per-sequence lists of block hashes to prefetch
        cache_block_size:  total KV cache bytes of one block (all TP + PP)
        vllm_config:       global vLLM configuration
        pp_rank:           pipeline-parallel rank this prefetch targets
    """
    model_cfg    = vllm_config.model_config
    parallel_cfg = vllm_config.parallel_config

    # Total layer count and the [start, end) layer range owned by pp_rank.
    # Example: Qwen2.5 7B has 28 layers; with PP=2 the ranks own
    # [0, 14) and [14, 28) respectively.
    layer_num, start_layer, end_layer = get_layer_info_with_pp_rank(
        model_cfg, parallel_cfg, pp_rank)

    # MLA caches are not sharded across tensor-parallel ranks.
    if model_cfg.use_mla:
        tp_num = 1
    else:
        tp_num = parallel_cfg.tensor_parallel_size

    stub.PrefetchPrefixCache(cache_block_size,
                             seq_hash_id_list,
                             start_layer,
                             end_layer,
                             layer_num,
                             cache_block_size // layer_num,
                             tp_num)


def prefetch_prefix_cache_distributed(seq_hash_list: SeqHashList,
                                    cache_block_size: int, 
                                    vllm_config: VllmConfig
                                ) -> None:
    """Enqueue one prefetch_prefix_cache_task per registered stub and wait
    for all of them to complete.
    #! Synchronous: blocks until the prefetch finishes.

    Args:
        seq_hash_list:            per-sequence lists of BlockHashId to prefetch
        cache_block_size (int):   KV cache bytes of one whole block across
                                  all TP/PP shards (must be 4K-aligned)
        vllm_config (VllmConfig): global vLLM configuration
    """
    #! Nothing to do when no hashes were requested at all.
    hash_count = sum(len(hash_ids) for hash_ids in seq_hash_list)
    if hash_count == 0:
        logger.info("no prefetch in this step")
        return

    # Lazily register a default local stub if none was configured.
    # TODO(yangxianpku: 2025.08.29): are the IP and device_type correct here?
    # Should they be derived from the rank id?  The RPC IP should come from
    # an environment variable.
    if len(g_stubs) == 0:
        g_stubs.append(
                (KVCacheSDK.KVCacheSDK(f"0.0.0.0:{grpc_port}", 
                device_type == "cuda"), 0)
            )

    # Run the prefetch on the background worker thread.
    g_prefetch_task_queue.start_worker()
    assert cache_block_size % 4096 == 0, "cache_block_size must be 4K-aligned"
    #! Already aligned by the caller.
    # aligned_cache_block_size = align_to_4k(cache_block_size)

    # TODO(yangxianpku: 2025.08.29): the rank stored here is the pp_rank.
    for sdk, rank in g_stubs:
        # Enqueue (function + arguments) on the task queue.
        g_prefetch_task_queue.enqueue(prefetch_prefix_cache_task,
                                      sdk,
                                      seq_hash_list,
                                      cache_block_size,
                                      vllm_config,
                                      rank)
    g_prefetch_task_queue.join()    # Block until all queued tasks finish.





def prefetch_when_add_seq(
    seq_group: SequenceGroup,
    vllm_config: VllmConfig,
) -> None:
    """Prefetch hook invoked when a sequence group is added.

    Intentionally a no-op: the feature is disabled because it is not yet
    fully functional.
    """
    return




def check_if_new_process(vllm_config: VllmConfig, cache_block_size: int) -> None:
    """Initialise the per-process globals g_stub / g_workertool / g_task_queue,
    create the matching shared-memory region and start the task queue.

    Args:
        vllm_config:      global vLLM configuration
        cache_block_size: KV cache bytes of one block (single TP, single PP)
    """
    logger.info("refresh global for worker, "
                f"deviceType:{device_type}, "
                f"deviceId:{get_current_device()}, "
                f"memSize:{cache_block_size}")
    
    global g_stub
    global g_workertool
    global g_task_queue

    # TODO(yangxianpku:2025.09.01): make the server IP address configurable
    g_stub = KVCacheSDK.KVCacheSDK(
                                f"0.0.0.0:{grpc_port}", 
                                device_type == "cuda"
                            )
    logger.debug(f"refresh kvcache sdk")

    g_workertool = WorkerTool.WorkerTool(device_map[device_type])
    logger.debug(f"refresh worker tool")

    g_task_queue = TaskQueue()
    logger.debug("refresh global for worker end, "
                f"deviceType:{device_type}, "
                f"deviceId:{get_current_device()}")
    
    num_layers, start_layer, end_layer, = get_layer_info(vllm_config.model_config, 
                                                        vllm_config.parallel_config
                                                    )
    
    # Scale the per-rank block size up to the whole-model block size
    # (covering all TP and PP shards).  MLA caches are not TP-sharded.
    if vllm_config.model_config.use_mla:
        cache_block_size = cache_block_size * num_layers // (end_layer - start_layer)
    else:
        cache_block_size = cache_block_size * vllm_config.parallel_config.\
            tensor_parallel_size * num_layers // (end_layer - start_layer)
        
    g_task_queue.start_worker()
    # NOTE(review): lock_memory_pool is not visible in this chunk —
    # presumably defined elsewhere in this module/package; it appears to
    # pin/lock a memory pool sized by cache_block_size.  Confirm.
    lock_memory_pool(cache_block_size)




def init_new_sdk_for_engine() -> None:
    """Eagerly initialise the engine-side SDK singleton.

    Delegates to get_new_sdk_for_engine(), which creates the stub on first
    use — previously the creation logic was duplicated here, so the two
    entry points could drift apart.
    """
    get_new_sdk_for_engine()


def get_new_sdk_for_engine() -> RPCStub:
    """Return the engine-side SDK singleton, creating it on first use."""
    global g_engine_sdk
    if g_engine_sdk is not None:
        return g_engine_sdk
    # TODO(yangxianpku:2025.09.01): make the server IP address configurable
    g_engine_sdk = RPCStub(f"0.0.0.0:{grpc_port}", device_type == "cuda")
    return g_engine_sdk


# def swap_segments(segment_dict:Dict[int, KVCacheSDK.Segment],
#                 kv_caches: List[torch.Tensor], direction: int,
#                 vllm_config: VllmConfig,
#             ) -> None:
#     """_summary_

#     Args:
#         segment_dict: [block_id, Segment]
#         kv_cache:     每一层的KV Cache Tensor张量(List表示层数)
#         direction:    读写方向: 
#                         - 0 : DEVICE_TO_HOST, 
#                         - 1 : HOST_TO_DEVICE, 
#                         - 2 : DEVICE_TO_DEVICE,
#                         - 3 : HOST_TO_HOST,
#         vllm_config:  vLLM全局配置
#     """
#     # segments: List[KVCacheSDK.Segment] = []
#     # kv_cache_idxs: List[int] = []
#     # for kv_cache_idx, segment in segment_dict.items():
#     #     segments.append(segment)
#     #     kv_cache_idxs.append(kv_cache_idx)
#     # # TODO(yangxianpku:2025.09.01): 重写上述实现
#     kv_cache_idxs: List[int]           = list(segment_dict.keys())
#     segments: List[KVCacheSDK.Segment] = list(segment_dict.values())


#     num_layer    = len(kv_caches)
#     device_cache = []
#     for idx in range(0, num_layer):
#         # 将tensor的地址整数转换为void*类型指针
#         kv_cache_ptr          = ctypes.c_void_p(kv_caches[idx].data_ptr())
#         #! 创建一个 Capsule 对象，用于包装 C 指针并在 Python 中安全传递
#         kv_cache_ptr_capsule  = ctypes.pythonapi.PyCapsule_New(
#                         kv_cache_ptr,  # C void*指针
#                         b'c_void_p',   # Capsule 的类型名称/标识符
#                         None           # 释放Capsule时的清理函数，
#                                        # None表示不进行自动内存管理，
#                                        # 由原始torch.Tensor负责
#                     )
#         device_cache.append(kv_cache_ptr_capsule)

#     # TODO(yangxianpku:2025.09.01): List对象没有shape
#     kv_shape = kv_caches[0].shape

#     #! vllm KV Cache Shape: 
#     #   - MLA:   (num_blocks, block_size, head_size) 
#     #   - Other: (2, num_blocks, block_size, num_kv_heads, head_size)
#     use_mla        = vllm_config.model_config.use_mla
#     num_blocks_dim = 0 if use_mla else 1
#     num_blocks     = kv_shape[num_blocks_dim]


#     block_size   = vllm_config.cache_config.block_size
#     num_kv_heads = vllm_config.model_config.get_total_num_kv_heads()
#     head_size    = vllm_config.model_config.get_head_size()
#     element_size = kv_caches[0].element_size()
#     has_value    = not use_mla

#     g_workertool.swap(segments, 
#                     device_cache, 
#                     kv_cache_idxs, 
#                     direction, 
#                     num_layer, 
#                     num_blocks, 
#                     block_size, 
#                     num_kv_heads,
#                     head_size, 
#                     element_size, 
#                     has_value
#                 )




def align_to_4k(size: int) -> int:
    """Round *size* up to the next multiple of 4 KiB."""
    remainder = size % 4096
    if remainder == 0:
        return size
    return size + (4096 - remainder)




def get_layer_info(
        model_config: ModelConfig,
        parallel_config: "ParallelConfig"
    ) -> Tuple[int, int, int]:
    """Return layer-partition info for this worker, deriving the pipeline
    rank from the parallel configuration.

    Args:
        model_config (ModelConfig):       model configuration
        parallel_config (ParallelConfig): parallelism configuration

    Returns:
        Tuple[int, int, int]: total hidden-layer count plus the [start, end)
        layer indices assigned to this worker's pipeline rank
    """
    hf_cfg = model_config.hf_text_config
    if hf_cfg.model_type == "deepseek_mtp":
        # MTP draft models report their depth via num_nextn_predict_layers.
        num_layers = getattr(hf_cfg, "num_nextn_predict_layers", 0)
    else:
        num_layers = getattr(hf_cfg, "num_hidden_layers", 0)

    pp_size = parallel_config.pipeline_parallel_size
    # Derive this worker's pipeline rank from its global rank.
    pp_rank = parallel_config.rank // parallel_config.tensor_parallel_size
    start, end = get_pp_indices(num_layers, pp_rank, pp_size)
    return num_layers, start, end




def get_layer_info_with_pp_rank(model_config: ModelConfig,
                                parallel_config: "ParallelConfig",
                                pp_rank: int,
                            ) -> Tuple[int, int, int]:
    """Return layer-partition info for an explicitly supplied pipeline rank.

    Args:
        model_config (ModelConfig):       model configuration
        parallel_config (ParallelConfig): parallelism configuration
        pp_rank (int):                    pipeline-parallel rank to query

    Returns:
        Tuple[int, int, int]: total hidden-layer count plus the [start, end)
        layer indices owned by pp_rank
    """
    hf_cfg = model_config.hf_text_config
    # MTP draft models report their depth via num_nextn_predict_layers.
    attr = ("num_nextn_predict_layers"
            if hf_cfg.model_type == "deepseek_mtp"
            else "num_hidden_layers")
    total_layers = getattr(hf_cfg, attr, 0)

    start, end = get_pp_indices(total_layers,
                                pp_rank,
                                parallel_config.pipeline_parallel_size)
    return total_layers, start, end




def compute_verify_hash(is_first_block: bool,
                        prev_block_hash: Optional[bytes],
                        cur_block_token_ids: List[int],
                        extra_hash: Optional[int] = None
                    ) -> bytes:
    """Compute the fallback verification hash for a block: part of the
    previous block's digest is concatenated with the current block's token
    ids and hashed with MurmurHash3 x64-128.

    Args:
        is_first_block (bool): whether this is the first block of the sequence
        prev_block_hash (Optional[bytes]): digest of the previous block —
            its first 7 bytes are mixed in.  (Fixed annotation: the original
            said Optional[int], but the value is sliced as bytes.)
        cur_block_token_ids (List[int]): token ids of the current block
        extra_hash (Optional[int], optional): extra hash value.
            NOTE(review): currently unused — kept for interface stability.

    Returns:
        bytes: the 16-byte MurmurHash3 digest
    """
    if is_first_block or prev_block_hash is None:
        # OD_kvcache: keep the hash stable by prepending a fixed 0 sentinel.
        dist_tuple = [0, *cur_block_token_ids]
    else:
        # Fold the low 7 bytes of the parent digest into the input so the
        # hashes chain across consecutive blocks.
        prev_part = int.from_bytes(prev_block_hash[:7], byteorder=sys.byteorder)
        dist_tuple = [prev_part, *cur_block_token_ids]
    # Serialise as native-endian int64 so digests are reproducible.
    dtype = "<i8" if sys.byteorder == "little" else ">i8"
    arr_bytes = np.array(dist_tuple, dtype=dtype).tobytes()
    return mmh3.mmh3_x64_128_digest(arr_bytes)  # 16-byte digest




def compute_local_hash(cur_block_token_ids: list[int]) -> int:
    # TODO[Done] (yangxianpku, 2025.08.28): replaced the original
    # implementation with array-based byte packing.
    """Hash the current block's token ids with xxhash (the "local" hash).

    Args:
        cur_block_token_ids list[int]: token ids of the current block

    Returns:
        int: the 64-bit xxh3 hash as an int
    """
    # Pack the token ids as unsigned 32-bit ints before hashing.
    packed = array.array('I', cur_block_token_ids)
    return xxhash.xxh3_64_intdigest(packed.tobytes(), seed=1337)




def get_block_hash_ids(vllm_config: VllmConfig,
                    seq_token_id_list: List[int],
                    seq_block_hash_list: List[int],
                    start_idx: int,
                    end_idx: int = -1,
                ) -> Tuple[List[KVCacheSDK.BlockHashId], 
                        Dict[str, List[int]]]:
    """Build the per-block hash information for a slice of a sequence:
        - hash_id     comes from vLLM's own computation (a prefix hash);
        - local_hash  is computed here via compute_local_hash();
        - verify_hash is computed here via compute_verify_hash()
                      (only when USE_VERIFY_HASH is enabled).

    Args:
        vllm_config (VllmConfig):        global vLLM configuration
        seq_token_id_list (List[int]):   all token ids of the sequence
        seq_block_hash_list (List[int]): hashes of the blocks the sequence
                                         occupies in vLLM's BlockSpaceManager
        start_idx (int):                 first block index to include
        end_idx (int, optional):         one-past-last block index; -1
                                         (default) means through the last
                                         occupied block

    Returns:
        Tuple[List[KVCacheSDK.BlockHashId], Dict[str, List[int]]]:
            1. the BlockHashId list
            2. {
                HASH_ID: [],
                PARENT_HASH_ID: [],
                LOCAL_HASH_ID: [],
                VERIFY_HASH_ID: [],
            }
    """
    # adapt V1 partial req: -1 means "through the last occupied block"
    if end_idx == -1:
        end_idx = len(seq_block_hash_list)

    # External (prefix) hashes computed by vLLM itself.
    block_hash_list = seq_block_hash_list[start_idx:end_idx]

    prefetch_block_num = len(block_hash_list)     # blocks to prefetch
    if prefetch_block_num == 0:
        return [], {
            HASH_ID: [],
            PARENT_HASH_ID: [],
            LOCAL_HASH_ID: [],
            VERIFY_HASH_ID: [],
        }

    # Parent hashes: each block's parent is the previous block's hash.
    parent_block_hash_list: List[int]
    if start_idx == 0:
        # BUGFIX: the original referenced `block_hash_id_list` here, a name
        # only bound much later in this function — a guaranteed NameError
        # whenever start_idx == 0.  The intended source is block_hash_list.
        parent_block_hash_list = [0] + block_hash_list[:-1]
    else:
        parent_block_hash_list = seq_block_hash_list[start_idx - 1: end_idx - 1]

    block_size = vllm_config.cache_config.block_size   # KV cache block size

    local_hash_list: List[int] = []
    # Fixed local annotation: entries are digests (bytes) or None, not ints.
    verify_hash_list: List[Optional[bytes]] = []
    # TODO(yangxianpku: 2025.08.28): is keeping chunk_size equal to
    # block_size reasonable?
    for idx in range(start_idx, start_idx + prefetch_block_num):
        local_hash_list.append(
                compute_local_hash(
                    seq_token_id_list[idx * block_size: (idx + 1) * block_size]
                )
        )

    # TODO(yangxianpku: 2025.08.28): move this flag into envs.py
    if os.getenv("USE_VERIFY_HASH", "False").lower() in ("1", "true"):
        for idx in range(start_idx, start_idx + prefetch_block_num):
            verify_hash_list.append(
                # TODO(yangxianpku: 2025.08.28): why is every block treated
                # as a first block???
                compute_verify_hash(True, # is_first_block
                                    None, # prev_block_hash
                                    # cur_block_token_ids
                                    seq_token_id_list[idx * block_size: (idx + 1) * block_size],
                                    None # extra_hash
                                )
            )
    else:
        verify_hash_list.extend([None] * prefetch_block_num)

    prefetch_hash_dict = {
        HASH_ID:         block_hash_list,
        PARENT_HASH_ID:  parent_block_hash_list,
        LOCAL_HASH_ID:   local_hash_list,
        VERIFY_HASH_ID:  verify_hash_list,
    }

    block_hash_id_list = [
        KVCacheSDK.BlockHashId(
            block_hash_list[idx],
            parent_block_hash_list[idx],
            local_hash_list[idx],
            verify_hash_list[idx]
        )
        for idx in range(prefetch_block_num)
    ]

    return block_hash_id_list, prefetch_hash_dict
        




def prepare_recompute_ids(vllm_config: VllmConfig, block_manager: "BlockSpaceManager",
                        seq_group_metadata_list: Optional[List[SequenceGroupMetadata]],
                    ) -> Tuple[
                                Dict[str, Dict[int, List[int]]], 
                                List[List[KVCacheSDK.BlockHashId]], 
                                bool
                            ]:
    """Collect the hash info for the tokens of every request (seq_group)
    that still need recomputation, skipping blocks already covered by
    vLLM's prefix caching.

    Args:
        vllm_config (VllmConfig): global vLLM configuration
        block_manager (BlockSpaceManager): vLLM BlockSpaceManager
        seq_group_metadata_list (Optional[List[SequenceGroupMetadata]]):
            metadata of the scheduled sequence groups

    Returns:
        Tuple[ 
            Dict[str, Dict[int, List[int]]]: per hash kind, {seq_id -> hashes}:
                {
                    HASH_ID:        {},
                    PARENT_HASH_ID: {},
                    LOCAL_HASH_ID:  {},
                    VERIFY_HASH_ID: {},
                }
            List[List[KVCacheSDK.BlockHashId]]:
                one list per sequence in the group
            bool: whether the token count to recompute is at or below the
                skip-prefetch threshold
        ]
    """
    seq_group_block_hash_dict = {
                                    HASH_ID:        {},
                                    PARENT_HASH_ID: {},
                                    LOCAL_HASH_ID:  {},
                                    VERIFY_HASH_ID: {},
                                }
    seq_group_block_hash_list = []   # per-sequence block hash lists
    logger.debug("prepare_recompute_ids start")

    token_num = 0
    #! Without prefix caching there is nothing to compute — return empty.
    # TODO(yangxianpku: 2025.08.28): also support running without
    # enable_prefix_caching.
    if not vllm_config.cache_config.enable_prefix_caching:
        logger.warning("Prefix caching is not enabled! "
                    "Please check configuration.")
        return seq_group_block_hash_dict, \
            seq_group_block_hash_list,    \
            token_num <= g_skip_prefetch_threshold
    

    for seq_group_metadata in seq_group_metadata_list:         # each sequence group (request)
        #! A single request is entirely in either prefill or decode.
        for seq_id in seq_group_metadata.seq_data.keys():      # every seq_id in the group
            #! Hashes of the blocks this sequence occupies in vLLM's
            #! BlockSpaceManager (only full blocks get a hash).
            # See vllm/core/block/prefix_caching_block.py:
            #   self._seq_id_to_blocks_hashes: Dict[int, List[int]] = {}
            seq_block_hash_list = block_manager.\
                                    _computed_blocks_tracker.\
                                    _seq_id_to_blocks_hashes.get(seq_id, [])
            
            # prompt + output token ids of this sequence
            seq_token_ids = seq_group_metadata.seq_data[seq_id].get_token_ids()
            if seq_group_metadata.is_prompt:                   #! request in prefill phase
                # number of blocks already computed (prefix-cache hits)
                start_block_idx = len(seq_group_metadata.computed_block_nums)
                logger.debug(f"seq_id:{seq_id}, "
                            f"total_block_nums:{seq_block_hash_list}, "
                            f"computed_block_nums:{start_block_idx}"
                        )
                #! everything already resides in the HBM prefix cache
                if start_block_idx == len(seq_block_hash_list):
                    continue

                # 1. seq_block_hash_list: BlockHashId list for this seq_id
                #    (note: rebinds the name that previously held vLLM's
                #    raw int hashes)
                # 2. seq_block_hash_dict: {
                #     HASH_ID:        [],
                #     PARENT_HASH_ID: [],
                #     LOCAL_HASH_ID:  [],
                #     VERIFY_HASH_ID: [],
                # }
                seq_block_hash_list, seq_block_hash_dict =\
                                        get_block_hash_ids(vllm_config, 
                                                        seq_token_ids,
                                                        seq_block_hash_list, 
                                                        start_block_idx
                                                    )
                
                seq_group_block_hash_list.append(seq_block_hash_list)
                seq_group_block_hash_dict[HASH_ID][seq_id]        = seq_block_hash_dict[HASH_ID]
                seq_group_block_hash_dict[PARENT_HASH_ID][seq_id] = seq_block_hash_dict[PARENT_HASH_ID]
                seq_group_block_hash_dict[LOCAL_HASH_ID][seq_id]  = seq_block_hash_dict[LOCAL_HASH_ID]
                seq_group_block_hash_dict[VERIFY_HASH_ID][seq_id] = seq_block_hash_dict[VERIFY_HASH_ID]
                token_num += len(seq_block_hash_list) * vllm_config.cache_config.block_size
            else:                                             #! request in decode phase
                logger.debug(f"decode, not prompt")
                #! only when a whole block has just been completed:
                # the hashed block count equals the occupied block count
                # block_tables: dict[int, list[int]] ==> { seq_id, phy_block_num }
                if len(seq_block_hash_list) == len(seq_group_metadata.block_tables[seq_id]):

                    # the last block is the newly produced one
                    decode_start_idx = len(seq_block_hash_list) - 1
                    _, seq_block_hash_dict = get_block_hash_ids(vllm_config, 
                                                                seq_token_ids,
                                                                seq_block_hash_list, 
                                                                decode_start_idx    # start position
                                                            )
                    # TODO(yangxaianpku: 2025.08.28): why is there no
                    # append(seq_block_hash_list) here?
                    seq_group_block_hash_dict[HASH_ID][seq_id]        = seq_block_hash_dict[HASH_ID]
                    seq_group_block_hash_dict[PARENT_HASH_ID][seq_id] = seq_block_hash_dict[PARENT_HASH_ID]
                    seq_group_block_hash_dict[LOCAL_HASH_ID][seq_id]  = seq_block_hash_dict[LOCAL_HASH_ID]
                    seq_group_block_hash_dict[VERIFY_HASH_ID][seq_id] = seq_block_hash_dict[VERIFY_HASH_ID]

    logger.debug("prepare_recompute_ids-->seq_group_block_hash_list: %s", 
                str(seq_group_block_hash_list))
    logger.debug("prepare_recompute_ids end")

    # NOTE(review): the log text below says "<" while the returned flag
    # uses "<=" — the message is slightly misleading.
    logger.debug(f"token_num:{token_num}, "
                f"g_skip_prefetch_threshold:{g_skip_prefetch_threshold},"
                f"equal:{token_num < g_skip_prefetch_threshold}")
    
    logger.debug("prepare_recompute_ids: "
                f"{seq_group_block_hash_dict}, "
                f"{seq_group_block_hash_list}")
    
    return seq_group_block_hash_dict,  seq_group_block_hash_list,  token_num <= g_skip_prefetch_threshold




def cal_recompute_ids_and_prefetch(seq_group_metadata_list: Optional[List[SequenceGroupMetadata]],
    engine, vllm_config: VllmConfig, virtual_engine: int) -> Dict[str, Dict[int, List[int]]]:
    """Compute per-sequence block-hash info for the tokens that still need
    recomputation and, during prefill, prefetch their KV cache from storage.

    Args:
        seq_group_metadata_list (Optional[List[SequenceGroupMetadata]]):
            metadata of the scheduled sequence groups for this step
        engine: the engine whose scheduler's block_manager is consulted
        vllm_config (VllmConfig): global vLLM configuration
        virtual_engine (int):     index of the virtual engine / scheduler

    Returns:
        Dict[str, Dict[int, List[int]]]: per hash kind, {seq_id -> hashes},
        as produced by prepare_recompute_ids()
    """
    #! Byte size of one block's KV cache across all layers (default
    #! block_size=16; MLA, TP and PP are accounted for below).
    # Each PP rank gets an (as even as possible) share of the layers;
    # TP mainly splits the head count.
    cache_block_size = CacheEngine.get_cache_block_size(vllm_config.cache_config, 
                                                        vllm_config.model_config,
                                                        vllm_config.parallel_config
                                                    )
    #! Total layer count and the [start, end) layer range stored by this
    #! instance.  With pp_size == 1 this instance stores all layers.
    layer_num, start_layer, end_layer = get_layer_info(vllm_config.model_config, 
                                                       vllm_config.parallel_config
                                                    )
    # Scale up to the whole-model single-block KV cache size
    # (covering all TP and PP shards; MLA caches are not TP-sharded).
    if vllm_config.model_config.use_mla:
        cache_block_size = cache_block_size * layer_num // (end_layer - start_layer)
    else:
        cache_block_size = cache_block_size * vllm_config.parallel_config.\
            tensor_parallel_size * layer_num  // (end_layer - start_layer)

    #! The earlier per-layer computation was incorrect and was removed;
    #! cache_block_size above is used directly as the per-block size.
    # single_layer_cache_size = cache_block_size // (end_layer - start_layer)

    # full_cache_size = single_layer_cache_size * layer_num

    seq_group_block_hash_id_dict, seq_group_block_hash_list, should_skip_prefetch = \
        prepare_recompute_ids(vllm_config, 
                            engine.scheduler[virtual_engine].block_manager, 
                            seq_group_metadata_list
                        )
    logger.debug(f"seq_group_block_hash_id_dict {seq_group_block_hash_id_dict}")

    full_cache_size_aligned = align_to_4k(cache_block_size)
    is_prompt               = False if len(seq_group_metadata_list) == 0 \
                                else seq_group_metadata_list[0].is_prompt

    logger.debug(f"is_prompt:{is_prompt}, should_skip_prefetch:{should_skip_prefetch}")


    #! Prefetch only during prefill and only when not skipped —
    # decode steps never need a prefetch.
    if is_prompt and not should_skip_prefetch:
        logger.debug(f"The step is prompt.")
        # Enqueue the prefetch tasks on the background queue.
        prefetch_prefix_cache_distributed(seq_group_block_hash_list, 
                                        full_cache_size_aligned, 
                                        vllm_config
                                    )
    return seq_group_block_hash_id_dict




def slayer_write(worker_tool: WorkerTool.WorkerTool,
                kv_cache: torch.Tensor, 
                num_blocks: int, 
                event = None
            ) -> None:
    """Write a single layer's KV cache into its Segment via the C++ worker tool.

    Args:
        worker_tool: C++-side worker-tool instance
        kv_cache:    raw KV cache tensor of one layer
        num_blocks:  number of PagedAttention KV cache blocks
        event:       optional CUDA/ACL event
    """
    # BUGFIX (matches the TODO noted elsewhere in this file): wrap the raw
    # data_ptr() int in ctypes.c_void_p before PyCapsule_New.  Without
    # argtypes, ctypes marshals a bare Python int as a 32-bit C int, which
    # truncates the pointer on 64-bit platforms.  Destructor is None: the
    # torch.Tensor keeps owning the memory.
    # NOTE(review): ctypes defaults PyCapsule_New.restype to c_int —
    # confirm restype is configured to py_object at process start.
    kv_cache_capsule = ctypes.pythonapi.PyCapsule_New(
                            ctypes.c_void_p(kv_cache.data_ptr()),
                            b'c_void_p',
                            None
                        )
    if event is not None:
        # NOTE(review): `event` is passed through unwrapped, exactly as the
        # original did — confirm whether it is an integer handle that also
        # needs a c_void_p wrapper.
        event_capsule = ctypes.pythonapi.PyCapsule_New(event, b'c_void_p', None)
    else:
        event_capsule = None

    worker_tool.singleLayerWrite(kv_cache_capsule, num_blocks, event_capsule)



def slayer_write_async(kv_cache: torch.Tensor, event = None) -> None:
    """Enqueue an asynchronous single-layer KV cache write on the task queue.

    The block count is read from the tensor shape: a leading dimension of 2
    indicates the standard (K, V) layout where dim 1 is num_blocks;
    otherwise (MLA) dim 0 is num_blocks.
    """
    shape = kv_cache.shape
    num_blocks = shape[1] if shape[0] == 2 else shape[0]

    g_task_queue.enqueue(slayer_write,
                         g_workertool,
                         kv_cache,
                         num_blocks,
                         event)




def get_current_device() -> int:
    """Return the current accelerator device id, or -1 on CPU-only hosts."""
    if device_type == "cuda":
        return torch.cuda.current_device()
    if device_type == "npu":
        return torch.npu.current_device()
    return -1




def call_layer_level_cache_fill(
        kv_caches: List[torch.Tensor],
        to_read_segment_dict: Dict[int, KVCacheSDK.Segment],
        to_save_segment_dict: Dict[int, KVCacheSDK.Segment],
        vllm_config: VllmConfig,
    ) -> None:
    """Load the data staged in the C++ _layerBufs into vLLM's KV caches.

    Args:
        kv_caches:            per-layer KV cache tensors
        to_read_segment_dict: {kv_cache index -> Segment} to read
        to_save_segment_dict: {kv_cache index -> Segment} to save
        vllm_config:          global vLLM configuration
    """
    _, start_layer, _ = get_layer_info(vllm_config.model_config, 
                                    vllm_config.parallel_config
                                )
    
    #! vLLM KV cache shapes:
    #   - MLA:   (num_blocks, block_size, head_size)
    #   - Other: (2, num_blocks, block_size, num_kv_heads, head_size)
    kv_shape     = kv_caches[0].shape # e.g. [2, 2296, 64, 8, 128]
    use_mla      = 1 if vllm_config.model_config.use_mla else 0

    num_blocks   = kv_shape[1 - use_mla]
    element_size = kv_caches[0].element_size()
    block_size   = vllm_config.cache_config.block_size
    head_size    = vllm_config.model_config.get_head_size()
    num_head     = vllm_config.model_config.get_total_num_kv_heads()
    if use_mla:
        # MLA stores a single latent head.
        num_head = 1
    
    has_value    = not use_mla
    tp_rank      = get_tensor_model_parallel_rank() if has_value else 0
    tp_num       = vllm_config.parallel_config.tensor_parallel_size if has_value else 1

    # call swap init
    # TODO(yangxianpku:2025.09.01): WorkerTool.cpp does not implement this method yet
    g_workertool.setDeviceId(get_current_device())
    # TODO(yangxianpku:2025.09.01): this probably does not need refreshing
    # on every call
    g_workertool.updateLayerBufInfo(to_read_segment_dict, 
                                    to_save_segment_dict, 
                                    start_layer, 
                                    block_size, 
                                    num_head,
                                    head_size, 
                                    element_size, 
                                    has_value, 
                                    tp_rank, 
                                    tp_num
                                )
    # BUGFIX (was a TODO here): wrap the raw data_ptr() ints in
    # ctypes.c_void_p before PyCapsule_New — without argtypes, ctypes
    # marshals a bare Python int as a 32-bit C int and truncates the
    # pointer on 64-bit platforms.  Destructor None: tensors own the memory.
    kv_cache_c_ptrs = [ctypes.pythonapi.PyCapsule_New(
                            ctypes.c_void_p(cache.data_ptr()),
                            b'c_void_p',
                            None
                        ) for cache in kv_caches]
    
    # Fill vLLM's KV caches from _layerBufs.
    g_workertool.startLayerFill(kv_cache_c_ptrs, num_blocks)
    g_workertool.waitLayerFillFinish() # wait for all layers to finish




def wait_layer_fill_finish() -> None:
    """Block until the worker tool has finished the current layer fill.

    Called from the model forward pass so the next layer's KV data is fully
    read into GPU memory before it is consumed.
    """
    g_workertool.waitLayerFillFinish()  # wait for the next layer to land in GPU memory; called from forward




def allocate_memory(cache_block_size: int,
                    hash_ids: HashList,
                    write_record_size: int,
                    start_layer: int,
                    end_layer: int,
                    layer_num: int,
                ) -> List[KVCacheSDK.Segment]:
    """Allocate one Segment per block-hash entry on the KV cache server.

    Args:
        cache_block_size:  Size in bytes of one cache block; aligned to 4 KiB
                           before the allocation call.
        hash_ids:          Block-hash entries to allocate segments for.
                           NOTE(review): originally annotated ``List[int]``,
                           but callers pass ``List[KVCacheSDK.BlockHashId]``
                           (see ``query_allocate_prefill``) — confirm.
        write_record_size: Tensor-parallel size (number of write records).
        start_layer:       First layer covered by each segment.
        end_layer:         Last layer covered by each segment.
        layer_num:         Total number of model layers.

    Returns:
        List[KVCacheSDK.Segment]: the allocated segments; empty list when
        ``hash_ids`` is empty.
    """
    if len(hash_ids) == 0:
        return []
    
    cache_block_size = align_to_4k(cache_block_size)
    # The final argument is the per-layer slice size of one block.
    segments = g_stub.AllocateMemory(cache_block_size, 
                                hash_ids,
                                write_record_size, 
                                start_layer,
                                end_layer, 
                                layer_num, 
                                cache_block_size // layer_num
                            )
    return segments



# TODO(yangxianpku: 2025.08.21): BlockHashId has no such field; consider removing
# def get_external_hash_dict(seq_group_block_hash_id_dict: Dict[int, HashList],
#                            # seq_id -> the sequence's block hash IDs
#                         ) -> Dict[int, List[int]]:
#     external_hash_dict = {}
#     for idx, block_hash_ids in seq_group_block_hash_id_dict.items():
#         external_hash_dict[idx] = [block_hash.externalHashId 
#                                 for block_hash in block_hash_ids]
#     return external_hash_dict




def batch_query_prefix_cache(hash_id_list: List[List[int]]
                            ) -> List[List[KVCacheSDK.Segment]]:
    """Query the prefix cache for every sequence's list of block hash ids.

    In a worker process the dedicated stub (``g_stub``) is used directly; in
    the engine process a shared stub is lazily created on first use and
    cached in ``g_stubs``.
    """
    # Worker process: a per-worker stub already exists.
    if g_stub is not None:
        return g_stub.BatchQueryPrefixCache(hash_id_list)

    # Engine process: create the shared stub on first call.
    if not g_stubs:
        stub = KVCacheSDK.KVCacheSDK(f"0.0.0.0:{grpc_port}",
                                     device_type == "cuda")
        g_stubs.append((stub, 0))
    # TODO(yangxianpku:2025.09.02): several stubs may exist — why only [0][0]?
    return g_stubs[0][0].BatchQueryPrefixCache(hash_id_list)




def get_prefetch_and_update_model_input(
        # Model execute request; the LLM creates one per batched request —
        # the set of all data to run inference on in one iteration step.
        execute_model_req: Optional[ExecuteModelRequest],
        vllm_config: VllmConfig) -> Tuple[Dict[int, int], Dict[int, int]]:
    """Analyze a model execute request for blocks to read and blocks to store.

    Args:
        execute_model_req : the model execute request
        vllm_config:        global vLLM configuration

    Returns:
        Tuple[
            Dict[int, int], to_save_block_dict — blocks to store in the P/D phase: {hash_id : vLLM paged block id}
            Dict[int, int]  to_read_block_dict — blocks to read in the P phase:    {hash_id : vLLM paged block id}
        ]
        NOTE(review): a previous docstring listed (to_read, to_save), but the
        function returns ``to_save_block_dict, to_read_block_dict``.
    """
    # Per-seq hash_ids of the blocks that need recomputation:
    # seq_group_block_hash_id_dict = {
    #     "hash_id": {},       
    #     "parent_hash_id": {},
    #     "local_hash_id":  {},
    #     "verify_hash_id": {},
    # }
    # dict[str, dict[int, list[int]]]: the inner dict's int key is the seq_id
    seq_block_hash_map = execute_model_req.seq_group_block_hash_id_dict
    seq_hash_map       = seq_block_hash_map[HASH_ID]  # dict[int, list[int]]


    # {hash_id : vLLM paged block id}
    to_read_block_dict: Dict[int, int] = {}
    to_save_block_dict: Dict[int, int] = {}

    seq_id_list   = list(seq_hash_map.keys())    # all sequence ids: List[int]
    seq_hash_list = list(seq_hash_map.values())  # every sequence's hash list: List[List[int]]

    # List[List[KVCacheSDK.Segment]]
    query_seq_segments_list = batch_query_prefix_cache(seq_hash_list)

    # {seq_id : List[KVCacheSDK.Segment], ... }
    query_seq_segments_dict = {seq_id:segments for seq_id, segments 
                        in zip(seq_id_list, query_seq_segments_list)}

    # The fill logic is skipped when the actual prefetch is small (threshold
    # check below). Per-seq list of prefix-cache-hit physical block ids.
    # NOTE(review): despite its name this holds vLLM physical block ids taken
    # from the block table, not hash ids — confirm against how
    # computed_block_nums is consumed elsewhere.
    to_extend_hash_id_list: List[List[int]] = []
    
    #! seq_group_metadata_list: List[SequenceGroupMetadata]
    #! 1. Iterate over every seq_group_metadata, i.e. every request
    for seq_group_metadata in execute_model_req.seq_group_metadata_list:
        if seq_group_metadata.is_prompt is True:  #! prefill

            #! 2. Iterate over every sequence of this request
            # seq_data: seq_id ==> SequenceData
            for seq_id in seq_group_metadata.seq_data.keys():
                # list[int]: hash_ids of this seq's ordered prefetch blocks
                prefetch_hash_ids = seq_hash_map.get(seq_id, [])

                # Segments may come back incomplete, so index them by id
                segment_list = query_seq_segments_dict.get(seq_id, [])
                segment_dict = {segment.getSegmentId() : segment for segment in segment_list}

                # vLLM physical block ids occupied by this seq_id: List[int]
                seq_block_table = seq_group_metadata.block_tables[seq_id]

                # First block idx to prefetch (= number of blocks already
                # computed via prefix caching)
                start_block_idx = len(seq_group_metadata.computed_block_nums)


                # Index just past the last prefetched block in prefill;
                # everything from here on is recomputed and stored.
                # Initially assume every block is hit.
                miss_block_idx = start_block_idx + len(prefetch_hash_ids)

                #! 3. Iterate over every hash block of this sequence
                for prefetch_idx in range(0, len(prefetch_hash_ids)):
                    # Try to resolve each hash_id to a segment
                    kv_cache_idx = seq_block_table[start_block_idx + prefetch_idx]
                    hash_id = prefetch_hash_ids[prefetch_idx]

                    if hash_id not in segment_dict.keys():
                        # Segment missing: prefetch ends here; the remaining
                        # blocks are generated and stored this turn.
                        miss_block_idx = start_block_idx + prefetch_idx

                        # Fill the save dict with {hash_id : kvcache_id}
                        for to_save_idx in range(prefetch_idx, len(prefetch_hash_ids)):
                            to_save_block_dict[prefetch_hash_ids[to_save_idx]] = \
                                seq_block_table[start_block_idx + to_save_idx]
                        break
                    else:
                        # The server has the segment; read it into the kv cache
                        to_read_block_dict[hash_id] = kv_cache_idx
                
                # Blocks hit by prefetch, later appended to computed_block_nums.
                # NOTE(review): appended once per sequence here but consumed
                # once per seq_group below — mismatched if a prompt group has
                # more than one sequence; confirm.
                to_extend_hash_id_list.append(seq_block_table[start_block_idx : miss_block_idx])
        else:   #! decode
            for seq_id in seq_group_metadata.seq_data.keys():
                if seq_id not in seq_hash_map.keys():
                    continue
                to_save_block_dict[seq_hash_map[seq_id][0]] = seq_group_metadata.block_tables[seq_id][-1]

    # Decide whether to skip the fill for this turn
    if len(to_read_block_dict) * vllm_config.cache_config.block_size <= g_skip_prefetch_threshold:
        logger.info("kvcache fill is skipped in this turn")
        to_read_block_dict = {}
    else:
        list_idx = 0
        for seq_group_metadata in execute_model_req.seq_group_metadata_list:
            if seq_group_metadata.is_prompt == True:
                seq_group_metadata.computed_block_nums.extend(to_extend_hash_id_list[list_idx])
                logger.debug("computed_block_nums: %s", str(seq_group_metadata.computed_block_nums))
                list_idx += 1

    logger.debug("to_save_block_dict: %s", str(to_save_block_dict))
    logger.debug("to_read_block_dict: %s", str(to_read_block_dict))
    logger.debug("get_prefetch_and_update_model_input end")
    return to_save_block_dict, to_read_block_dict




def get_read_write_block_dicts(
        kv_cache_block_size: int,
        vllm_config: VllmConfig,
        execute_model_req: Optional[ExecuteModelRequest],
        kv_cache: List[torch.Tensor],
    ) -> Tuple[Dict[int, int], Dict[int, int]]:
    """Analyze a model execute request for blocks to read and blocks to store.

    Thin wrapper around ``get_prefetch_and_update_model_input``;
    ``kv_cache_block_size`` and ``kv_cache`` are currently unused.

    Args:
        kv_cache_block_size: vLLM paged-attention block size (unused here)
        vllm_config:         global vLLM configuration
        execute_model_req :  the model execute request
        kv_cache:            KV cache tensors (unused here)

    Returns:
        Tuple[
            Dict[int, int], to_save_block_dict — blocks to store in the P/D phase: {hash_id : vLLM paged block id}
            Dict[int, int]  to_read_block_dict — blocks to read in the P phase:    {hash_id : vLLM paged block id}
        ]
    """
    return get_prefetch_and_update_model_input(execute_model_req, 
                                            vllm_config)





def get_to_save_block_hash_list(seq_group_block_hash_id_dict: Union[
                                        Dict[str, Dict[int, List[int]]], 
                                        Dict[str, Dict[str, List[int]]]
                                ],
                                to_save_hash_ids: List[int],
                            ) -> HashList:
    """Look up the full hash info for every hash id in ``to_save_hash_ids``.

    Walks the per-sequence hash lists from the ExecuteModelRequest and, for
    each block whose hash id is in ``to_save_hash_ids``, builds a
    ``KVCacheSDK.BlockHashId`` from the four parallel hash maps
    (hash / parent / local / verify).

    Args:
        seq_group_block_hash_id_dict : hash-id info from the
            ExecuteModelRequest; a dict with the four keys
            ``hash_id`` / ``parent_hash_id`` / ``local_hash_id`` /
            ``verify_hash_id``, each mapping seq_id -> list of ids
            (lists are index-aligned across the four maps).
        to_save_hash_ids: hash ids that need to be saved.

    Returns:
        HashList: one ``BlockHashId`` per matched block, in sequence order.
    """
    # Set for O(1) membership tests instead of scanning the list per block
    # (the original was O(len(hash_ids) * len(to_save_hash_ids))).
    to_save_id_set = set(to_save_hash_ids)

    # The four maps are index-aligned per seq_id.
    hash_map   = seq_group_block_hash_id_dict[HASH_ID]
    parent_map = seq_group_block_hash_id_dict[PARENT_HASH_ID]
    local_map  = seq_group_block_hash_id_dict[LOCAL_HASH_ID]
    verify_map = seq_group_block_hash_id_dict[VERIFY_HASH_ID]

    to_save_block_hash_list = []
    for seq_id, hash_ids in hash_map.items():
        for idx, hash_id in enumerate(hash_ids):
            if hash_id in to_save_id_set:
                to_save_block_hash_list.append(
                    KVCacheSDK.BlockHashId(
                        hash_id,
                        parent_map[seq_id][idx],
                        local_map[seq_id][idx],
                        verify_map[seq_id][idx],
                    )
                )
    return to_save_block_hash_list




def query_allocate_prefill(kv_cache_block_size: int,
                        vllm_config: VllmConfig,
                        to_read_block_dict: Dict[int, int],
                        to_save_block_dict: Dict[int, int],
                        seq_group_block_hash_id_dict: Dict[str, Dict[int, List[int]]],
                        kv_caches: List[torch.Tensor],
                    ) -> Dict[int, KVCacheSDK.Segment]:
    """Query the prefix cache, allocate storage for new blocks, start the fill.

    Args:
        kv_cache_block_size: vLLM paged-attention block size in bytes
        vllm_config:        global vLLM configuration
        to_read_block_dict: blocks to read in the P phase:    {hash_id : vLLM paged block id}
        to_save_block_dict: blocks to store in the P/D phase: {hash_id : vLLM paged block id}
        seq_group_block_hash_id_dict: hash-id info from the ExecuteModelRequest
        kv_caches:                    KV cache tensors

    Returns:
        Dict[int, KVCacheSDK.Segment]: vLLM physical block id -> allocated
        segment for the blocks to save.
        NOTE(review): the original annotation was ``-> None``, but the
        function returns ``to_save_segment_dict``.
    """
    logger.debug(f"query allocate prefill start, pid:{os.getpid()}")
    num_layers, start_layer, end_layer, = get_layer_info(vllm_config.model_config, 
                                                        vllm_config.parallel_config)
    # Full per-block cache size scaled to this pipeline stage's layer share;
    # the non-MLA path additionally multiplies by the tensor-parallel size.
    if vllm_config.model_config.use_mla:
        cache_block_size = kv_cache_block_size * num_layers // (end_layer - start_layer)
    else:
        cache_block_size = kv_cache_block_size * vllm_config.parallel_config.\
            tensor_parallel_size * num_layers // (end_layer - start_layer)

    # Alignment happens inside allocate_memory, so it is skipped here.
    # aligned_cache_block_size = align_to_4k(cache_block_size)
    to_save_hash_ids = list(to_save_block_dict.keys())          # List[int]

    # seq_group_block_hash_id_dict = {
    #     "hash_id": {},       
    #     "parent_hash_id": {},
    #     "local_hash_id":  {},
    #     "verify_hash_id": {},
    # }
    # the inner dict's int key is the seq_id
    seq_hash_dict    = seq_group_block_hash_id_dict[HASH_ID]

    # Hash entries to save: List[KVCacheSDK.BlockHashId]
    to_save_block_hash_list = get_to_save_block_hash_list(
                                        seq_group_block_hash_id_dict, 
                                        to_save_hash_ids
                                    )
    
    logger.debug(f"query and prefill, pid:{os.getpid()}")
    # query and prefill
    to_read_segment_dict = {} # vLLM paged physical block id ==> Segment
    seq_id_list       = list(seq_hash_dict.keys())    # seq_id: List[int]
    seq_hash_ids_list = list(seq_hash_dict.values())  # per-seq hashes: List[List[int]]

    # List[Segment]
    query_seq_segments_list = batch_query_prefix_cache(seq_hash_ids_list)
    query_seq_segments_dict = {seq_id:segments for seq_id, segments in 
                            zip(seq_id_list, query_seq_segments_list)}
    logger.debug(f"{query_seq_segments_dict=}")

    # 1. Build the segment dict for the blocks to read.
    for seq_id, _ in seq_group_block_hash_id_dict[HASH_ID].items():
        seq_segments = query_seq_segments_dict[seq_id]
        for seq_segment in seq_segments:
            if seq_segment.getSegmentId() in to_read_block_dict.keys():
                to_read_segment_dict[to_read_block_dict[seq_segment.getSegmentId()]] = seq_segment

    # 2. Allocate segments for the blocks to save.
    logger.debug(f"{to_save_block_hash_list=}")
    tp_size = vllm_config.parallel_config.tensor_parallel_size if \
                not vllm_config.model_config.use_mla else 1
    to_save_segments = allocate_memory(cache_block_size, 
                                    to_save_block_hash_list,
                                    tp_size,
                                    start_layer, 
                                    end_layer, 
                                    num_layers
                                )
    logger.debug(f"after allocate memory, pid:{os.getpid()}")

    to_save_segment_dict = {}
    for to_save_segment in to_save_segments:
        to_save_segment_dict[to_save_block_dict[to_save_segment.getSegmentId()]] = to_save_segment
    
    logger.debug(f"call layer level cache fill, pid:{os.getpid()}, {to_read_segment_dict=}")
    call_layer_level_cache_fill(kv_caches, 
                            to_read_segment_dict,
                            to_save_segment_dict, 
                            vllm_config
                        )
    logger.debug(f"query allocate prefill end, pid:{os.getpid()}")
    return to_save_segment_dict




def all_layer_write_task(
    worker_tool: WorkerTool.WorkerTool,
    kv_cache_c_ptrs: List[object],
    num_blocks: int,
) -> None:
    """Task body: write all layers' KV data from the vLLM caches to storage.

    Runs on the background task queue. ``kv_cache_c_ptrs`` are PyCapsule
    wrappers around each layer's KV cache device pointer.
    """
    worker_tool.allLayerWrite(kv_cache_c_ptrs, num_blocks)




def seal_memory(
    to_seal_block_list: List[int],
    tp_num: int,
) -> None:
    """Seal the given segments on the KV cache server via the global stub.

    NOTE(review): ``seal_memory_task`` passes a third ``do_save`` argument to
    ``sealMemory``; this two-argument form presumably relies on a default —
    confirm against the SDK binding.

    Args:
        to_seal_block_list: segment IDs to seal
        tp_num:             tensor-parallel size
    """
    g_stub.sealMemory(to_seal_block_list, tp_num)




def seal_memory_task(
    kv_cache_sdk: KVCacheSDK.KVCacheSDK,
    to_seal_block_list: List[int],
    tp_num: int,
    do_save: bool
) -> None:
    """Task body: seal segments through the given SDK stub.

    Runs on the background task queue.

    Args:
        kv_cache_sdk:       SDK stub that performs the seal
        to_seal_block_list: segment IDs to seal
        tp_num:             tensor-parallel size
        do_save:            whether the sealed blocks are also persisted
    """
    kv_cache_sdk.sealMemory(to_seal_block_list, 
                            tp_num, 
                            do_save
                        )




def submit_seal_memory_task(
    vllm_config: VllmConfig,
    kv_cache_sdk: KVCacheSDK.KVCacheSDK,
    to_seal_block_list: List[int],
    tp_num: int
) -> None:
    """Enqueue a seal-memory task on the background task queue.

    Under MLA only the rank-0 worker persists the sealed blocks
    (``do_save=True``); every other rank seals without saving.

    Args:
        vllm_config:        global vLLM configuration (read for ``use_mla``)
        kv_cache_sdk:       SDK stub that performs the seal
        to_seal_block_list: segment IDs to seal
        tp_num:             tensor-parallel size
    """
    # FIX: the original enqueued the global ``g_stub`` and ignored the
    # ``kv_cache_sdk`` parameter; use the parameter so callers control the
    # stub (the only visible caller passes ``g_stub``, so behavior matches).
    # The two original branches differed only in ``do_save``; fold them.
    do_save = not (vllm_config.model_config.use_mla and g_worker_rank != 0)
    g_task_queue.enqueue(seal_memory_task,
                         kv_cache_sdk,
                         to_seal_block_list,
                         tp_num,
                         do_save
                     )




def write_full_blocks(to_save_segment_dict: Dict[int, KVCacheSDK.Segment],
        kv_caches: List[torch.Tensor], vllm_config: VllmConfig,
        is_prefill: bool) -> None:
    """Seal — and, in the decode phase, write back — the KV blocks filled this turn.

    Args:
        to_save_segment_dict : vLLM physical block id -> segment to save
        kv_caches:             per-layer KV cache tensors
        vllm_config:           global vLLM configuration
        is_prefill:            True during prefill; when False (decode) the
                               unfilled blocks are additionally written back
                               through the worker tool
    """
    logger.info(f"vllm_config.model_config.use_mla : "
                f"{vllm_config.model_config.use_mla} "
                f"g_worker_local_rank : "
                f"{g_worker_local_rank}"
            )
    # TODO(yangxianpku: 2025.09.02): who writes this value???
    # Under MLA only the primary (local-rank-0) worker writes.
    if vllm_config.model_config.use_mla and g_worker_local_rank != 0:
        return

    if len(to_save_segment_dict) == 0:
        logger.debug("no write in this turn")
        return
    logger.debug("write_full_blocks start")


    to_seal_block_list:  List[int] = []
    to_write_block_dict: Dict[int, KVCacheSDK.Segment] = {}
    for phy_block_id, save_segment in to_save_segment_dict.items():
        # Already-filled segments need neither sealing nor writing.
        # NOTE(review): ``is True`` relies on getIsFilled() returning an
        # actual bool — confirm against the binding.
        if save_segment.getIsFilled() is True:
            continue

        to_seal_block_list.append(save_segment.getSegmentId())
        if not is_prefill:
            to_write_block_dict[phy_block_id] = save_segment

    if len(to_write_block_dict) > 0:
        logger.debug(f"{to_write_block_dict=}")
        kv_cache_shape = kv_caches[0].shape

        # MLA caches are (num_blocks, ...); others are (2, num_blocks, ...).
        factor = 1 if vllm_config.model_config.use_mla else 0
        num_blocks = kv_cache_shape[1 - factor]

        # Wrap each layer's device pointer in a PyCapsule for the worker tool.
        kv_cache_c_ptrs = [ctypes.pythonapi.PyCapsule_New(cache.data_ptr(), 
                                                        b'c_void_p', 
                                                        None
                                                    ) for cache in kv_caches]
        g_task_queue.enqueue(all_layer_write_task, 
                            g_workertool, 
                            kv_cache_c_ptrs, 
                            num_blocks
                        )

    tp_num = vllm_config.parallel_config.tensor_parallel_size if \
                        not vllm_config.model_config.use_mla else 1
    submit_seal_memory_task(vllm_config, 
                            g_stub, 
                            to_seal_block_list, 
                            tp_num
                        )
    logger.debug("write_full_blocks end")




def lock_memory_pool(cache_block_size: int) -> None:
    """Lock (create or reuse) the server-side shared-memory pool.

    Looks up the shared-memory file matching ``cache_block_size`` on the
    server and compares it with the client's. If the server already holds a
    file of that size this returns directly; if it is missing or the file
    name differs (the old mapping is unmapped first), a new shared-memory
    file of that size is created.

    Args:
        cache_block_size: shared-memory size of one block
                          (all layers, all TP + PP); aligned to 4 KiB here.
    """
    cache_block_size = align_to_4k(cache_block_size)
    logger.debug(f"cache_block_size: {cache_block_size}")

    logger.debug(f"device: {device_map[device_type]}, "
                f"type:{type(device_map[device_type])}")


    g_stub.LockMemoryPool(cache_block_size, 
                        device_map[device_type]
                    )



#! Release global resources: worker tool, SDK stub, and task queues.
def prefetch_utils_clean():
    """Tear down the module-global handles so the process can exit cleanly.

    Drops the worker tool and SDK stub, and stops both background task
    queues before releasing them. Safe to call when some or all of the
    globals are already ``None``.
    """
    global g_workertool, g_stub
    global g_task_queue, g_prefetch_task_queue

    # Release g_workertool
    if g_workertool is not None:
        del g_workertool
        g_workertool = None

    if g_stub is not None:
        # BUG FIX: was ``del g`` — a NameError at runtime; delete the stub.
        del g_stub
        g_stub = None

    if g_task_queue is not None:
        logger.debug("cleaning g_task_queue")
        g_task_queue.stop()
        del g_task_queue
        g_task_queue = None
    
    if g_prefetch_task_queue is not None:
        logger.debug("cleaning g_prefetch_task_queue")
        g_prefetch_task_queue.stop()
        del g_prefetch_task_queue
        g_prefetch_task_queue = None

