import ctypes
import torch

from   typing import Dict, List, Tuple, Union

from vllm.logger                     import init_logger
from vllm.config                     import VllmConfig
from vllm.distributed.parallel_state import get_tensor_model_parallel_rank
from vllm.v1.core.kv_cache_utils     import KVCacheBlock
from vllm.v1.core.kv_cache_utils     import BlockHashType
from vllm.v1.core.sched.output       import NewRequestData
from vllm.v1.core.sched.output       import CachedRequestData
from vllm.v1.core.sched.output       import SchedulerOutput
from vllm.v1.request                 import Request
from vllm.v1.kv_cache_interface      import KVCacheGroupSpec
from vllm.v1.worker.gpu_input_batch  import InputBatch

from .                               import prefetch_utils
from .prefetch_utils                 import RPCStub
from .prefetch_utils                 import SeqHashList
from .prefetch_utils                 import HashList

# prefetch_utils 已经包含环境变量和路径，可以直接引入
import KVCacheSDK   

logger = init_logger(__name__)

# Module-level decode-only flag for the current scheduling step: set by
# adjust_query_allocate_prefill (False when any prefill read/save work exists,
# True otherwise) and exposed to callers via get_only_decode().
g_onlyDecode = False


def get_cache_block_size(kv_cache_group_spec: KVCacheGroupSpec) -> int:
    """Return the KV-cache byte size of one block across all layers of a group.

    Args:
        kv_cache_group_spec: KV-cache group spec (layer names plus the
            per-layer cache spec).

    Returns:
        int: number of layers in the group multiplied by the per-layer
        page size in bytes, i.e. the bytes one block_size occupies for
        the whole group.
    """
    kv_cache_spec = kv_cache_group_spec.kv_cache_spec
    # Lazy %-style args: the message is only formatted when DEBUG is enabled
    # (the original f-string paid the formatting cost unconditionally).
    logger.debug("get_cache_block_size, layers: %s, page_size: %s",
                 len(kv_cache_group_spec.layer_names),
                 kv_cache_spec.page_size_bytes)
    return len(kv_cache_group_spec.layer_names) * kv_cache_spec.page_size_bytes




def prefetch_prefix_cache_distributed(
        seq_hash_id_list: SeqHashList,
        stub_pp_rank_list: List[Tuple[RPCStub, int]],
        vllm_config: VllmConfig,
        aligned_cache_size: int,
    ) -> None:
    """Enqueue one prefetch_prefix_cache_task per pipeline-parallel rank.

    prefetch_prefix_cache_task fetches the KV cache for the given hash ids
    from storage into memory (via the RPC stub underneath).  Tasks are pushed
    onto the shared task queue and processed asynchronously by a dedicated
    worker thread; this function blocks on the queue's join() before
    returning.

    Args:
        seq_hash_id_list: per-request lists of block hash ids to prefetch.
        stub_pp_rank_list: (RPC stub, pipeline-parallel rank) pairs; one task
            is enqueued for each pair.
        vllm_config: global vLLM configuration, forwarded to the task.
        aligned_cache_size: 4K-aligned byte size of one full cache block.
    """
    # Called every step — presumably a no-op when the worker already runs.
    prefetch_utils.g_prefetch_task_queue.start_worker()
    
    for stub, pp_rank in stub_pp_rank_list:
        logger.debug("sdk_pp_rank_list not null")
        prefetch_utils.g_prefetch_task_queue.enqueue(
                prefetch_utils.prefetch_prefix_cache_task,
                stub, seq_hash_id_list, aligned_cache_size, 
                vllm_config, pp_rank
            )
    # Wait for all tasks enqueued above to be processed.
    prefetch_utils.g_prefetch_task_queue.join()


def adjust_cal_recompute_ids_and_prefetch(
    vllm_config: VllmConfig,
    kv_cache_config,
    scheduler,
    model_executor,
    scheduler_output: SchedulerOutput,
) -> None:
    """Compute per-request recompute hash ids and kick off the KV-cache prefetch.

    Derives the hash-id bookkeeping for this scheduling step via
    adjust_prepare_recompute_ids, then — unless the step qualifies for the
    skip threshold — prefetches the corresponding KV cache either through the
    distributed RPC stubs (one per pipeline-parallel rank) or through a
    temporary engine-local SDK client.  Finally attaches the hash-id dict and
    block size onto scheduler_output for downstream consumers.
    """
    # Distributed setups are unified here: any KV-cache group yields the same
    # block size, so group 0 is picked arbitrarily.
    kv_cache_block_size = get_cache_block_size(kv_cache_config.kv_cache_groups[0])
    schedule_output_hash_id_dict, req_block_hash_id_list, should_skip_prefetch_v1 = \
        adjust_prepare_recompute_ids(vllm_config, scheduler_output, scheduler.kv_cache_manager.req_to_block_hashes,
            scheduler.kv_cache_manager.req_to_blocks, scheduler.requests)
    logger.debug(f"req_block_hash_id_list: {req_block_hash_id_list}")
    # Present only on distributed executors; None selects the local-SDK path below.
    sdk_pp_rank_list = getattr(model_executor, "client_pp_rank_list", None)
    layer_num, start_layer, end_layer = prefetch_utils.get_layer_info(vllm_config.model_config,
        vllm_config.parallel_config)
    if vllm_config.model_config.use_mla:
        single_layer_cache_size = kv_cache_block_size // (end_layer - start_layer)
    else:
        # NOTE(review): non-MLA scales by TP size — presumably because the KV
        # heads are TP-sharded while MLA's are not; confirm.
        single_layer_cache_size = kv_cache_block_size // (end_layer - start_layer) * \
            vllm_config.parallel_config.tensor_parallel_size
    full_cache_size = single_layer_cache_size * layer_num
    aligned_cache_size = prefetch_utils.align_to_4k(full_cache_size)
    tp_num = vllm_config.parallel_config.tensor_parallel_size if not vllm_config.model_config.use_mla else 1
    logger.debug(f"should skip prefetch v1:{should_skip_prefetch_v1}")
    if not should_skip_prefetch_v1:
        if sdk_pp_rank_list is not None:
            prefetch_prefix_cache_distributed(req_block_hash_id_list, sdk_pp_rank_list,
                vllm_config, aligned_cache_size)
        else:
            kv_cache_sdk = prefetch_utils.get_new_sdk_for_engine()
            logger.debug("llm engine temporary kvcache sdk")
            kv_cache_sdk.PrefetchPrefixCache(aligned_cache_size, req_block_hash_id_list, start_layer,
                    end_layer, layer_num, single_layer_cache_size, tp_num)
    
    # OD_kvcache v1: hand the results to the worker side via scheduler_output.
    scheduler_output.req_block_hash_id_dict = schedule_output_hash_id_dict
    scheduler_output.kv_cache_block_size = kv_cache_block_size


def adjust_prepare_recompute_ids(
    vllm_config: VllmConfig,
    scheduler_output: SchedulerOutput,
    block_manager_req_to_block_hashes: Dict[str, List[BlockHashType]],
    block_manager_req_to_blocks: Dict[str, List[KVCacheBlock]],
    scheduler_reqs: Dict[str, Request]
) -> Tuple[Dict[str, Dict[str, List[int]]], List[List[KVCacheSDK.BlockHashId]], bool]:
    """Collect, per scheduled request, the block hash ids to recompute/prefetch.

    Visits newly scheduled requests (prefill), resumed-from-preemption
    requests (resumed prefill) and running requests (decode).  For each, the
    BlockHashType list held by the KV-cache manager is converted to raw hash
    values and prefetch_utils.get_block_hash_ids produces the hash-id groups.
    No-op when prefix caching is disabled.

    Returns:
        Tuple of
        - {category: {req_id: [hash ids]}} for the HASH_ID / PARENT_HASH_ID /
          LOCAL_HASH_ID / VERIFY_HASH_ID categories,
        - one prefetch hash-id list per request, in visiting order,
        - True when the accumulated uncached prompt tokens are at or below
          g_skip_prefetch_threshold (i.e. prefetch should be skipped).
    """
    logger.info("adjust_prepare_recompute_ids start")
    # Outer key: hash category; inner key: request id.
    schedule_output_hash_id_dict: Dict[str, Dict[str, List[int]]] = {
        prefetch_utils.HASH_ID: {},
        prefetch_utils.PARENT_HASH_ID: {},
        prefetch_utils.LOCAL_HASH_ID: {},
        prefetch_utils.VERIFY_HASH_ID: {}
    }

    req_group_hash_id_list: List[List[KVCacheSDK.BlockHashId]] = []
    prompt_token_num = 0
    if vllm_config.cache_config.enable_prefix_caching:
        # get new reqs recompute block hashIds --> prefill/prompt
        for new_schedule_req in scheduler_output.scheduled_new_reqs:
            new_req_prefetch_start_idx = new_schedule_req.num_computed_tokens // vllm_config.cache_config.block_size
            new_req_block_hash_type_list = block_manager_req_to_block_hashes[new_schedule_req.req_id]
            # V1: convert BlockHashType entries to raw hash values.
            new_req_block_hash_id_list = [blockHashType.hash_value for blockHashType in new_req_block_hash_type_list]
            new_req_prefetch_end_idx = scheduler_output.num_scheduled_tokens[new_schedule_req.req_id] // \
                vllm_config.cache_config.block_size
            logger.debug(f"scheduled_new_req:{new_schedule_req.req_id}, total_block_nums:{new_req_block_hash_id_list}, \
                computed_block_num:{new_req_prefetch_start_idx}")
            # Tokens scheduled this step that are not covered by computed blocks.
            new_no_cached_num = min(scheduler_output.num_scheduled_tokens[new_schedule_req.req_id],
                len(new_schedule_req.prompt_token_ids)) - new_schedule_req.num_computed_tokens
            # NOTE(review): prefetch is skipped only when the uncached span is
            # exactly one block — confirm this is the intended full-hit case.
            if new_no_cached_num != vllm_config.cache_config.block_size:
                prefetch_block_list, prefetch_split_hash_dict = prefetch_utils.get_block_hash_ids(vllm_config,
                    new_schedule_req.prompt_token_ids, new_req_block_hash_id_list, new_req_prefetch_start_idx,
                    new_req_prefetch_start_idx + new_req_prefetch_end_idx)
                req_group_hash_id_list.append(prefetch_block_list)
                schedule_output_hash_id_dict[prefetch_utils.HASH_ID][new_schedule_req.req_id] = \
                    prefetch_split_hash_dict[prefetch_utils.HASH_ID]
                schedule_output_hash_id_dict[prefetch_utils.PARENT_HASH_ID][new_schedule_req.req_id] = \
                    prefetch_split_hash_dict[prefetch_utils.PARENT_HASH_ID]
                schedule_output_hash_id_dict[prefetch_utils.LOCAL_HASH_ID][new_schedule_req.req_id] = \
                    prefetch_split_hash_dict[prefetch_utils.LOCAL_HASH_ID]
                schedule_output_hash_id_dict[prefetch_utils.VERIFY_HASH_ID][new_schedule_req.req_id] = \
                    prefetch_split_hash_dict[prefetch_utils.VERIFY_HASH_ID]
                prompt_token_num += new_no_cached_num
            logger.debug(f"scheduled_new_req:{new_schedule_req.req_id}, prefetch blocks: \
                {new_req_prefetch_end_idx - new_req_prefetch_start_idx}, \
                total prompt_tokens:{len(new_schedule_req.prompt_token_ids)}")
        # get resumed and running recompute block hashIds --> resumed-prefill, running-decode
        for cached_scheduled_req in scheduler_output.scheduled_cached_reqs:
            cache_req_prefetch_start_idx = cached_scheduled_req.num_computed_tokens // \
                vllm_config.cache_config.block_size
            cached_req_block_hash_type_list = block_manager_req_to_block_hashes[cached_scheduled_req.req_id]
            logger.debug(f"cache req: {cached_scheduled_req.req_id}, \
                block_manager.blockHash:{cached_req_block_hash_type_list}")
            # V1: convert BlockHashType entries to raw hash values.
            cached_req_block_hash_id_list = \
                [blockHashType.hash_value for blockHashType in cached_req_block_hash_type_list]
            cache_req_prefetch_end_idx = scheduler_output.num_scheduled_tokens[cached_scheduled_req.req_id] // \
                vllm_config.cache_config.block_size
            if cached_scheduled_req.resumed_from_preemption:
                logger.debug("cached preemption reqs")
                logger.info(f"cached_scheduled_req:{cached_scheduled_req.req_id}, \
                    total_block_nums:{cached_req_block_hash_id_list}, \
                    computed_block_num:{cache_req_prefetch_start_idx}")
                prefetch_block_list, prefetch_split_hash_dict = prefetch_utils.get_block_hash_ids(vllm_config,
                    cached_scheduled_req.new_token_ids, cached_req_block_hash_id_list, cache_req_prefetch_start_idx,
                    cache_req_prefetch_start_idx + cache_req_prefetch_end_idx)
                req_group_hash_id_list.append(prefetch_block_list)
                schedule_output_hash_id_dict[prefetch_utils.HASH_ID][cached_scheduled_req.req_id] = \
                    prefetch_split_hash_dict[prefetch_utils.HASH_ID]
                schedule_output_hash_id_dict[prefetch_utils.PARENT_HASH_ID][cached_scheduled_req.req_id] = \
                    prefetch_split_hash_dict[prefetch_utils.PARENT_HASH_ID]
                schedule_output_hash_id_dict[prefetch_utils.LOCAL_HASH_ID][cached_scheduled_req.req_id] = \
                    prefetch_split_hash_dict[prefetch_utils.LOCAL_HASH_ID]
                schedule_output_hash_id_dict[prefetch_utils.VERIFY_HASH_ID][cached_scheduled_req.req_id] = \
                    prefetch_split_hash_dict[prefetch_utils.VERIFY_HASH_ID]
                prompt_token_num += len(cached_scheduled_req.new_token_ids) - cached_scheduled_req.num_computed_tokens
                logger.debug(f"cached_req:{cached_scheduled_req.req_id}, \
                    total prompt_tokens:{len(cached_scheduled_req.new_token_ids)}")
            else:
                logger.debug("cached running reqs")
                # running-decode, verify physical block and logic block
                # if equal: the last of hashlist is newest block and probably recompute to prefetch 
                req = scheduler_reqs[cached_scheduled_req.req_id]
                logger.debug(f"cache req: {cached_scheduled_req.req_id}, num_tokens_with_spec: \
                    {req.num_tokens_with_spec}, num_computed_tokens:{cached_scheduled_req.num_computed_tokens}")
                # Only act when the token count lands exactly on a block boundary,
                # i.e. the last block has just been filled.
                if req.num_tokens_with_spec % vllm_config.cache_config.block_size == 0:
                    block_index = cached_scheduled_req.num_computed_tokens // vllm_config.cache_config.block_size
                    bmanager = block_manager_req_to_blocks[cached_scheduled_req.req_id]
                    logger.debug(f"block id: {bmanager[block_index].block_id}, \
                        last block tokens num:{len(bmanager[block_index]._block_hash.token_ids)}")
                    # End index omitted — presumably defaults to the list tail;
                    # verify against prefetch_utils.get_block_hash_ids.
                    prefetch_block_list, prefetch_split_hash_dict = prefetch_utils.get_block_hash_ids(vllm_config,
                        req._all_token_ids, cached_req_block_hash_id_list, block_index)
                    req_group_hash_id_list.append(prefetch_block_list)
                    schedule_output_hash_id_dict[prefetch_utils.HASH_ID][cached_scheduled_req.req_id] = \
                        prefetch_split_hash_dict[prefetch_utils.HASH_ID]
                    schedule_output_hash_id_dict[prefetch_utils.PARENT_HASH_ID][cached_scheduled_req.req_id] = \
                        prefetch_split_hash_dict[prefetch_utils.PARENT_HASH_ID]
                    schedule_output_hash_id_dict[prefetch_utils.LOCAL_HASH_ID][cached_scheduled_req.req_id] = \
                        prefetch_split_hash_dict[prefetch_utils.LOCAL_HASH_ID]
                    schedule_output_hash_id_dict[prefetch_utils.VERIFY_HASH_ID][cached_scheduled_req.req_id] = \
                        prefetch_split_hash_dict[prefetch_utils.VERIFY_HASH_ID]
    logger.info("adjust_prepare_recompute_ids end")
    return schedule_output_hash_id_dict, req_group_hash_id_list, \
        prompt_token_num <= prefetch_utils.g_skip_prefetch_threshold


def adjust_update_model_input(
    vllm_config: VllmConfig,
    scheduler_output: SchedulerOutput
) -> Tuple[Dict[str, Dict[int, KVCacheSDK.Segment]], Dict[str, int]]:
    """Shrink the model input by the token span covered by prefetched cache.

    Batch-queries the SDK for the segments prefetched for each request, then
    increases num_computed_tokens and trims num_scheduled_tokens so the model
    only recomputes what the cache does not cover.  Decode-phase (running,
    non-preempted) requests are left untouched.

    Returns:
        Tuple of
        - {req_id: {segment_id: Segment}} from the batch query, kept so the
          later save/read-block lookup and memory allocation can reuse it,
        - {req_id: num_computed_tokens} snapshot taken BEFORE the adjustment.
    """
    logger.info("adjust_update_model_input start")
    # Snapshot each request's num_computed_tokens before modifying the input.
    before_req_to_computed_tokens:Dict[str, int] = {}   # {req_id: num_computed_tokens}
    # Adjust the model input to drop the portion already covered by prefetch.
    req_hash_map = scheduler_output.req_block_hash_id_dict[prefetch_utils.HASH_ID]
    # {req_id: prefetched_tokens_num (cached from cpu/disk)}
    avail_req_token_num_dict:Dict[str, int] = {}
    # Query results kept for save/read-block lookup and allocation reuse.
    # Shape: {req_id: {segment_id: segment}}
    query_segment_dict:Dict[str, Dict[int, KVCacheSDK.Segment]] = {}
    # Batch-fetch segments for all requests in one call.
    req_hash_map_keys:List[str] = list(req_hash_map.keys())
    req_hash_map_vals:List[List[int]] = list(req_hash_map.values())
    # List<List<segment>>, aligned with req_hash_map_keys by index.
    batch_query_list:List[List[KVCacheSDK.Segment]] = prefetch_utils.batch_query_prefix_cache(req_hash_map_vals)      

    for idx, map_id in enumerate(req_hash_map_keys):
        for scheduled_new_reqs in scheduler_output.scheduled_new_reqs:
            if map_id == scheduled_new_reqs.req_id:
                segments = batch_query_list[idx]
                segments_dict = {segment.getSegmentId() : segment for segment in segments}
                query_segment_dict[scheduled_new_reqs.req_id] = segments_dict
                avail_req_token_num_dict[scheduled_new_reqs.req_id] = \
                    len(segments) * vllm_config.cache_config.block_size  # req_id --> prefetched tokens num
                break
        for scheduled_cached_reqs in scheduler_output.scheduled_cached_reqs:
            if map_id == scheduled_cached_reqs.req_id:
                segments = batch_query_list[idx]
                segments_dict = {segment.getSegmentId() : segment for segment in segments}
                query_segment_dict[scheduled_cached_reqs.req_id] = segments_dict
                avail_req_token_num_dict[scheduled_cached_reqs.req_id] = \
                    len(segments) * vllm_config.cache_config.block_size
                break
    
    # Decode phase is filtered out: decode does not get its input adjusted.
    total_update_tokens_num = 0
    adjust_update_factor_num = 0
    for prefetch_id in req_hash_map.keys():
        # Adjust the affected new (prefill) requests.
        for new_req in scheduler_output.scheduled_new_reqs:
            if new_req.req_id == prefetch_id:
                before_req_to_computed_tokens[prefetch_id] = new_req.num_computed_tokens
                if scheduler_output.num_scheduled_tokens[prefetch_id] != avail_req_token_num_dict[prefetch_id]:
                    scheduler_output.num_scheduled_tokens[prefetch_id] -= avail_req_token_num_dict[prefetch_id]
                    new_req.num_computed_tokens += avail_req_token_num_dict[prefetch_id]
                else:
                    # Full hit: keep one block scheduled — presumably so the
                    # step still executes; the surplus is tracked in
                    # adjust_update_factor_num and compensated below.
                    scheduler_output.num_scheduled_tokens[prefetch_id] = vllm_config.cache_config.block_size
                    new_req.num_computed_tokens += avail_req_token_num_dict[prefetch_id] - \
                        vllm_config.cache_config.block_size
                    adjust_update_factor_num += vllm_config.cache_config.block_size
                total_update_tokens_num += avail_req_token_num_dict[prefetch_id]
                logger.debug(f"new req:{prefetch_id}, after num computed tokens:{new_req.num_computed_tokens}")
                break
        for cache_req in scheduler_output.scheduled_cached_reqs:
            # Executed for non-decode (resumed-from-preemption) requests only.
            if cache_req.req_id == prefetch_id:
                before_req_to_computed_tokens[prefetch_id] = cache_req.num_computed_tokens
            if cache_req.req_id == prefetch_id and cache_req.resumed_from_preemption:
                if scheduler_output.num_scheduled_tokens[prefetch_id] != avail_req_token_num_dict[prefetch_id]:
                    scheduler_output.num_scheduled_tokens[prefetch_id] -= avail_req_token_num_dict[prefetch_id]
                    cache_req.num_computed_tokens += avail_req_token_num_dict[prefetch_id]
                else:
                    scheduler_output.num_scheduled_tokens[prefetch_id] = vllm_config.cache_config.block_size
                    cache_req.num_computed_tokens += avail_req_token_num_dict[prefetch_id] - \
                        vllm_config.cache_config.block_size
                    adjust_update_factor_num += vllm_config.cache_config.block_size
                total_update_tokens_num += avail_req_token_num_dict[prefetch_id]
                logger.debug(f"cache req:{prefetch_id}, preempted:{cache_req.resumed_from_preemption}, \
                    after num computed tokens:{cache_req.num_computed_tokens}")
                break

    logger.debug(f"avail_req_token_num_dict key:{avail_req_token_num_dict.keys()}, \
        value:{avail_req_token_num_dict.values()}, total_update_tokens_num:{total_update_tokens_num}")
    scheduler_output.total_num_scheduled_tokens -= total_update_tokens_num - adjust_update_factor_num
    logger.debug(f"query kvcache sdk segment:{query_segment_dict}")
    logger.info("adjust_update_model_input end")
    return query_segment_dict, before_req_to_computed_tokens


def adjust_prefill_get_read_write_dicts(
    vllm_config: VllmConfig,
    prefetch_hash_ids: List[int],
    scheduled_req: Union[NewRequestData, CachedRequestData],
    before_num_computed_tokens: int,
    input_batch: InputBatch,
    to_read_block_dict: Dict[int, int],
    to_save_block_dict: Dict[int, int],
    segment_dict: Dict[int, KVCacheSDK.Segment]
) -> None:
    """Populate the read/save maps for one prefill request (in place).

    For each block hash id of the request, map it to the physical KV-cache
    block index from the request's block table.  Hash ids with a previously
    queried segment are scheduled for reading; from the first hash id WITHOUT
    a segment onward, every remaining block is scheduled for saving after
    this forward pass.

    Note: the original signature declared a 3-tuple return, but the function
    communicates purely through its out-params and returns None.

    Args:
        vllm_config: global config (cache_config.block_size is read).
        prefetch_hash_ids: hash ids of the blocks this request recomputes.
        scheduled_req: the scheduled request (new, or resumed from preemption).
        before_num_computed_tokens: num_computed_tokens BEFORE the model-input
            adjustment; locates the first block in the block table.
        input_batch: worker-side batch holding the per-request block table.
        to_read_block_dict: out-param {hash_id: kv_cache_block_idx} to read.
        to_save_block_dict: out-param {hash_id: kv_cache_block_idx} to save.
        segment_dict: {segment_id: Segment} previously queried for this req.
    """
    # Block table of this request: index = logical block id, value = physical id.
    req_index = input_batch.req_id_to_index[scheduled_req.req_id]
    req_block_table = input_batch.block_table.block_table_np[req_index]
    # Derived from the pre-adjustment scheduler state.
    prefetch_start_block_idx = before_num_computed_tokens // vllm_config.cache_config.block_size

    for prefetch_idx, hash_id in enumerate(prefetch_hash_ids):
        if hash_id in segment_dict:
            # A queried segment exists for this block: read it from the cache.
            to_read_block_dict[hash_id] = req_block_table[prefetch_start_block_idx + prefetch_idx]
        else:
            # No segment: this block and every later one must be saved once
            # this forward pass has produced them.
            for to_save_idx in range(prefetch_idx, len(prefetch_hash_ids)):
                to_save_block_dict[prefetch_hash_ids[to_save_idx]] = \
                    req_block_table[prefetch_start_block_idx + to_save_idx]
            break


def adjust_get_read_write_block_dicts(
    vllm_config: VllmConfig,
    schedule_output_new_reqs: List[NewRequestData],
    schedule_output_cached_reqs: List[CachedRequestData],
    before_req_to_computed_tokens: Dict[str, int],
    req_block_hash_map: Dict[str, Dict[str, List[int]]], # per-category {req_id: hash-id list} to recompute
    input_batch: InputBatch,
    query_sdk_segment_dict: Dict[str, Dict[int, KVCacheSDK.Segment]]
) -> Tuple[Dict[int, int], Dict[int, int], Dict[int, int]]:
    """Build the {hash_id: kv_cache_block_idx} maps to read into GPU and to save.

    New requests and resumed-from-preemption requests are treated as prefill
    and delegated to adjust_prefill_get_read_write_dicts; running requests are
    decode and contribute only their newest block to the decode save map.

    Returns:
        (prefill_to_read, prefill_to_save, decode_to_save), each mapping a
        block hash id to its physical KV-cache block index.
    """
    logger.info("adjust_get_read_write_block_dicts start")
    # Use the plain HASH_ID category.
    req_hash_map = req_block_hash_map[prefetch_utils.HASH_ID]

    # Read/save bookkeeping, each {hash_id: kv_cache_block_idx}.
    prefill_to_read_block_dict: Dict[int, int] = {}
    prefill_to_save_block_dict: Dict[int, int] = {}
    decode_to_save_block_dict: Dict[int, int] = {}

    # Walk new reqs --> prefill.
    for new_scheduler_req in schedule_output_new_reqs:
        logger.debug(f"new_scheduler_req:{new_scheduler_req.req_id}")
        if new_scheduler_req.req_id not in req_hash_map.keys():
            continue
        adjust_prefill_get_read_write_dicts(vllm_config, req_hash_map[new_scheduler_req.req_id],
            new_scheduler_req, before_req_to_computed_tokens[new_scheduler_req.req_id], input_batch,
            prefill_to_read_block_dict, prefill_to_save_block_dict, query_sdk_segment_dict[new_scheduler_req.req_id])

    # Walk cached reqs --> resumed prefill and decode.
    for cached_schedule_req in schedule_output_cached_reqs:
        logger.debug(f"cached_schedule_req:{cached_schedule_req.req_id}, \
            preempted:{cached_schedule_req.resumed_from_preemption}")
        if cached_schedule_req.resumed_from_preemption: # prefill -- resumed
            # NOTE(review): unlike the new-request path above, there is no
            # membership check on req_hash_map here; a preempted req missing
            # from the map would raise KeyError — confirm that cannot happen.
            adjust_prefill_get_read_write_dicts(vllm_config, req_hash_map[cached_schedule_req.req_id],
                cached_schedule_req, before_req_to_computed_tokens[cached_schedule_req.req_id], input_batch,
                prefill_to_read_block_dict, prefill_to_save_block_dict,
                query_sdk_segment_dict[cached_schedule_req.req_id])
        else:  # decode -- running
            if cached_schedule_req.req_id not in req_hash_map.keys():
                continue
            req_index = input_batch.req_id_to_index[cached_schedule_req.req_id]
            req_block_table = input_batch.block_table.block_table_np[req_index]
            start_block_idx = before_req_to_computed_tokens[cached_schedule_req.req_id] // \
                vllm_config.cache_config.block_size
            # Must not fill in -1 (an unallocated block-table slot).
            decode_to_save_block_dict[req_hash_map[cached_schedule_req.req_id][0]] = \
                req_block_table[start_block_idx]

    logger.debug("prefill_to_read_block_dict: %s", str(prefill_to_read_block_dict))
    logger.debug("prefill_to_save_block_dict: %s", str(prefill_to_save_block_dict))
    logger.debug("decode_to_save_block_dict: %s", str(decode_to_save_block_dict))
    logger.info("adjust_get_read_write_block_dicts end")
    # {hash_id: kv_cache_idx} maps: what prefill reads to GPU, what prefill
    # saves, and what decode saves.
    return prefill_to_read_block_dict, prefill_to_save_block_dict, decode_to_save_block_dict


# Prefill-side allocation and worker-tool arming.
def adjust_query_allocate_prefill(
    kv_cache_block_size: int,
    vllm_config: VllmConfig,
    prefill_to_read_block: Dict[int, int],
    prefill_to_save_block: Dict[int, int],
    decode_to_save_block: Dict[int, int],
    req_block_hash_map: Dict[str, Dict[str, List[int]]],
    kv_cache: List[torch.Tensor],
    query_sdk_segment_dict: Dict[str, Dict[int, KVCacheSDK.Segment]]
) -> Tuple[Dict[int, KVCacheSDK.Segment], Dict[int, KVCacheSDK.Segment]]:
    """Allocate save segments, pair queried segments with read targets, and
    arm the native worker tool for this step's layer-wise KV-cache I/O.

    Also sets the module-level g_onlyDecode flag: False when any prefill
    read/save work exists (and layer-0 fill is started and awaited), True
    otherwise.

    Returns:
        (prefill_to_save_segment_dict, decode_to_save_segment_dict), each
        mapping a physical KV-cache block index to its allocated Segment.
    """
    # Use the plain HASH_ID category.
    req_hash_map = req_block_hash_map[prefetch_utils.HASH_ID]

    # prefill
    num_layers, start_layer, end_layer, = prefetch_utils.get_layer_info(vllm_config.model_config,
        vllm_config.parallel_config)

    if vllm_config.model_config.use_mla:
        cache_block_size = kv_cache_block_size * num_layers // (end_layer - start_layer)
    else:
        # Non-MLA scales by TP size — presumably because KV heads are
        # TP-sharded; confirm.
        cache_block_size = kv_cache_block_size * vllm_config.parallel_config.tensor_parallel_size * num_layers // (end_layer - start_layer)

    prefill_to_save_hash_ids = list(prefill_to_save_block.keys())

    prefill_to_save_block_hash_list = \
        prefetch_utils.get_to_save_block_hash_list(req_block_hash_map, prefill_to_save_hash_ids)
    tp_num = vllm_config.parallel_config.tensor_parallel_size if not vllm_config.model_config.use_mla else 1
    prefill_to_save_segments = prefetch_utils.allocate_memory(cache_block_size, prefill_to_save_block_hash_list,
        tp_num, start_layer, end_layer, num_layers)

    # {kv_cache_block_idx: Segment} for blocks prefill must save.
    prefill_to_save_segment_dict: Dict[int, KVCacheSDK.Segment] = {}
    for prefill_to_save_segment in prefill_to_save_segments:
        prefill_to_save_segment_dict[prefill_to_save_block[prefill_to_save_segment.getSegmentId()]] = \
            prefill_to_save_segment

    # {kv_cache_block_idx: Segment} for blocks prefill must read in.
    prefill_to_read_segment_dict: Dict[int, KVCacheSDK.Segment] = {}
    for req_id in req_hash_map.keys():
        for seq_segment_id, seq_segment in query_sdk_segment_dict[req_id].items():
            # Blocks scheduled for saving are never also read.
            if seq_segment_id in (prefill_to_save_block.keys() | decode_to_save_block.keys()):
                continue
            if seq_segment_id not in prefill_to_read_block.keys():
                logger.info(f"segment {seq_segment_id} will not be read as it's full hit last block")
                continue
            prefill_to_read_segment_dict[prefill_to_read_block[seq_segment_id]] = seq_segment

    # decode
    decode_to_save_hash_ids = list(decode_to_save_block.keys())
    decode_to_save_block_hash_list = prefetch_utils.get_to_save_block_hash_list(req_block_hash_map,
        decode_to_save_hash_ids)
    decode_to_save_segments = prefetch_utils.allocate_memory(cache_block_size, decode_to_save_block_hash_list,
        tp_num, start_layer, end_layer, num_layers)
    decode_to_save_segment_dict: Dict[int, KVCacheSDK.Segment] = {}
    for decode_to_save_segment in decode_to_save_segments:
        decode_to_save_segment_dict[decode_to_save_block[decode_to_save_segment.getSegmentId()]] = \
            decode_to_save_segment
    # Worker-tool invocation: push the cache layout down to the native tool.
    # factor toggles which tensor dim holds num_blocks (dim 0 for MLA, dim 1 otherwise).
    factor = 1 if vllm_config.model_config.use_mla else 0
    kv_cache_shape = kv_cache[0].shape
    num_blocks = kv_cache_shape[1 - factor]
    block_size = vllm_config.cache_config.block_size
    num_head = vllm_config.model_config.get_total_num_kv_heads() if not vllm_config.model_config.use_mla else 1
    head_size = vllm_config.model_config.get_head_size()
    element_size = kv_cache[0].element_size()
    has_value = not vllm_config.model_config.use_mla
    tp_rank = get_tensor_model_parallel_rank() if has_value else 0
    tp_num = vllm_config.parallel_config.tensor_parallel_size if has_value else 1
    prefetch_utils.g_workertool.setDeviceId(prefetch_utils.get_current_device_id())

    prefetch_utils.g_workertool.adjustUpdateLayerBufInfo(prefill_to_read_segment_dict, prefill_to_save_segment_dict,
        decode_to_save_segment_dict, start_layer, block_size, num_head, head_size, element_size,
        has_value, tp_rank, tp_num)

    # Update the only-decode flag; determined by chunked-prefill mode and
    # prefill length.
    global g_onlyDecode
    if prefill_to_read_block or prefill_to_save_block:
        logger.debug("the single layer write and read is set")
        g_onlyDecode = False
        # Wrap raw data pointers as PyCapsules for the native worker tool.
        kv_cache_c_ptrs = [ctypes.pythonapi.PyCapsule_New(cache.data_ptr(), b'c_void_p', None) for cache in kv_cache]
        # prefill read
        prefetch_utils.g_workertool.startLayerFill(kv_cache_c_ptrs, num_blocks)
        prefetch_utils.g_workertool.waitLayerFillFinish()  # Wait until layer 0 is read into GPU memory.
    else:
        logger.debug("the single layer write and read not set")
        g_onlyDecode = True

    return prefill_to_save_segment_dict, decode_to_save_segment_dict


def get_only_decode() -> bool:
    """Return the module-level g_onlyDecode flag, letting forward() branch on it."""
    decode_only = g_onlyDecode
    return decode_only