# SPDX-License-Identifier: Apache-2.0
from dataclasses import dataclass, field
import os
from typing import TYPE_CHECKING

import torch
try:
    import torch_npu
except ImportError as e:
    pass

from vllm.config import VllmConfig
from vllm.distributed.kv_transfer.kv_connector.v1.base import (
    KVConnectorBase_V1, KVConnectorMetadata, KVConnectorRole)
from vllm.logger import init_logger
from vllm.prefetch import prefetch_utils
from vllm.prefetch.prefetch_utils import HASH_ID, LOCAL_HASH_ID, PARENT_HASH_ID, VERIFY_HASH_ID, batch_query_prefix_cache, call_single_layer_cache_write, compute_local_hash, compute_verify_hash, g_skip_prefetch_threshold, get_layer_info, get_layer_info_with_pp_rank, prefetch_prefix_cache_distributed, write_full_blocks
from vllm.prefetch.prefetch_utils_v1 import adjust_prefetch_prefix_cache_distributed, adjust_query_allocate_prefill
from vllm.v1.core import kv_cache_utils
from vllm.v1.core.sched.output import CachedRequestData, NewRequestData, SchedulerOutput

import time

import xxhash
import KVCacheSDK

if TYPE_CHECKING:
    from vllm.attention.backends.abstract import AttentionMetadata
    from vllm.forward_context import ForwardContext
    from vllm.v1.request import Request

logger = init_logger(__name__)


def align_to_block_size(num_tokens: int, block_size) -> int:
    """Round *num_tokens* down to a whole number of blocks.

    Note the deliberate ``- 1``: an exactly block-aligned count is rounded
    down one further block (e.g. 16 tokens with block_size 16 gives 0), so
    the final block is always excluded.
    """
    whole_blocks = (num_tokens - 1) // block_size
    return whole_blocks * block_size


@dataclass
class ReqMeta:
    """Per-request metadata: the block-aligned token ids, their slot
    mapping into the paged KV cache, and whether this is a store or load."""

    # Request tokens, truncated to whole blocks.
    token_ids: torch.Tensor
    # Slot index per token; same length as token_ids.
    slot_mapping: torch.Tensor
    # True when the request stores KV, False when it loads.
    is_store: bool

    @staticmethod
    def make_meta(token_ids: list[int], block_ids: list[int], block_size: int,
                  is_store: bool) -> "ReqMeta":
        """Build a ReqMeta, keeping only tokens that fill complete blocks.

        An exactly block-aligned token count still drops its final block
        (the count is aligned via ``(n - 1) // block_size * block_size``).
        """
        num_valid = (len(token_ids) - 1) // block_size * block_size
        blocks = torch.tensor(block_ids)
        # First slot of every block, broadcast against the in-block offsets.
        per_block_start = (blocks * block_size).reshape((-1, 1))
        offsets = torch.arange(0, block_size).reshape((1, -1))
        slots = (per_block_start + offsets).flatten()
        return ReqMeta(
            token_ids=torch.tensor(token_ids)[:num_valid],
            slot_mapping=slots[:num_valid],
            is_store=is_store,
        )


@dataclass
class RequestResource:
    """Scheduler-side bookkeeping for one request: its tokens, the blocks
    allocated to it, and the per-block hash chain used to address the
    external KV cache."""

    # request id
    req_id: str

    # all of token ids of request
    full_token_id: list[int]

    # all of number of computed tokens
    full_num_computed_tokens: int

    # chained block hashes: each entry folds in the previous block's hash
    block_hashes: list[int]
    # previous block's hash per block (0 for the first block)
    block_parent_hashes: list[int]
    # hash of each block's own tokens, without chaining
    block_local_hashes: list[int]
    # entries are None when USE_VERIFY_HASH is disabled
    block_verify_hashes: list[int]

    # the block allocated
    block_ids: list[int]

    @staticmethod
    def _use_verify_hash() -> bool:
        """Whether verify hashes are enabled via the USE_VERIFY_HASH env var."""
        return os.getenv("USE_VERIFY_HASH", "False").lower() == "true"

    def _append_block_hashes(self, block_tokens: list[int],
                             use_verify: bool) -> None:
        """Append the hash-chain entries for one full block of tokens.

        Shared by update_from_cached_request and create_from_new_request so
        the two paths cannot drift apart.
        """
        self.block_hashes.append(
            kv_cache_utils.hash_block_tokens(
                xxhash.xxh64_intdigest,
                # Chain off the previous block's hash (None for the first).
                self.block_hashes[-1] if len(self.block_hashes) > 0 else None,
                block_tokens,
                None
            ).hash_value
        )
        self.block_parent_hashes.append(
            self.block_hashes[-2] if len(self.block_hashes) > 1 else 0
        )
        self.block_local_hashes.append(compute_local_hash(block_tokens))
        if use_verify:
            self.block_verify_hashes.append(
                compute_verify_hash(True, None, block_tokens))
        else:
            self.block_verify_hashes.append(None)

    def update_from_cached_request(
        self,
        request: CachedRequestData,
        block_size: int,
    ) -> int:
        """Fold a CachedRequestData update into this resource.

        On preemption-resume the request's state is rebuilt from scratch;
        otherwise the new tokens/blocks are appended and hashing resumes at
        the first token not yet covered by a full block.

        Returns:
            The number of newly completed full blocks.
        """
        logger.debug(f"update_from_cached_request: {request=}")
        new_block_cnt = 0
        if request.resumed_from_preemption:
            start_block_token_idx = 0
            self.full_token_id = request.new_token_ids.copy()
            self.full_num_computed_tokens = request.num_computed_tokens
            self.block_ids = request.new_block_ids.copy()
            # Bug fix: these were previously reset to dicts ({}), which have
            # no append() and crashed the hashing loop below. The fields are
            # declared (and used) as lists.
            self.block_hashes = []
            self.block_parent_hashes = []
            self.block_local_hashes = []
            self.block_verify_hashes = []
        else:
            start_block_token_idx = (len(self.full_token_id) // block_size) * block_size
            self.full_token_id.extend(request.new_token_ids)
            self.block_ids.extend(request.new_block_ids)
            self.full_num_computed_tokens = request.num_computed_tokens

        use_verify = self._use_verify_hash()  # loop-invariant; read once
        while start_block_token_idx + block_size <= len(self.full_token_id):
            self._append_block_hashes(
                self.full_token_id[start_block_token_idx:
                                   start_block_token_idx + block_size],
                use_verify)
            start_block_token_idx += block_size
            new_block_cnt += 1
        return new_block_cnt

    @staticmethod
    def create_from_new_request(
        request: NewRequestData,
        num_input_tokens: int,
        block_size: int,
    ) -> "RequestResource":
        """Build a RequestResource for a newly scheduled request, hashing
        every full block among its first num_input_tokens prompt tokens."""
        logger.debug(f"create_from_new_request: {request=}, {num_input_tokens=}")
        # block_ids may be nested per kv-cache group.
        # NOTE(review): only group 0 is tracked here — confirm the
        # single-kv-cache-group assumption holds for this deployment.
        if isinstance(request.block_ids[0], list):
            block_ids = request.block_ids[0].copy()
        else:
            block_ids = request.block_ids.copy()

        resource = RequestResource(
            req_id=request.req_id,
            full_token_id=request.prompt_token_ids[:num_input_tokens].copy(),
            full_num_computed_tokens=request.num_computed_tokens,
            block_ids=block_ids,
            block_hashes=[],
            block_parent_hashes=[],
            block_local_hashes=[],
            block_verify_hashes=[]
        )
        use_verify = RequestResource._use_verify_hash()
        start_block_token_idx = 0
        # full_token_id[start:start+block_size] equals
        # prompt_token_ids[start:start+block_size] because the loop stops at
        # num_input_tokens, so hashing over the stored copy is equivalent.
        while start_block_token_idx + block_size <= num_input_tokens:
            resource._append_block_hashes(
                resource.full_token_id[start_block_token_idx:
                                       start_block_token_idx + block_size],
                use_verify)
            start_block_token_idx += block_size
        return resource


class RequestResourceTracker:
    """Holds the RequestResource of every in-flight request, keyed by
    request id."""

    request_resource: dict[str, RequestResource]

    def __init__(self) -> None:
        # Starts empty; entries are added/removed by the scheduler-side
        # connector as requests are scheduled and finished.
        self.request_resource = {}


@dataclass
class KvClientConnectorMetadata(KVConnectorMetadata):
    """Scheduler -> worker payload describing which KV cache blocks to read
    from / save to the external cache in this step."""

    # req_id -> [(block hash, kv-cache block id)] pairs to read during prefill
    prefill_to_read_block_dict: dict[str, list[tuple[int, int]]]
    # block hash -> kv-cache block id to save after prefill
    prefill_to_save_block_dict: dict[int, int]
    # block hash -> kv-cache block id to save during decode
    decode_to_save_block_dict: dict[int, int]

    # hash kind (HASH_ID / PARENT_HASH_ID / LOCAL_HASH_ID / VERIFY_HASH_ID)
    # -> req_id -> per-block hash values
    req_block_hash_map: dict[str, dict[str, list[int]]]

    # Per-block KV cache size passed through to the worker.
    # NOTE(review): units (bytes vs elements) are not visible here — confirm.
    kv_cache_block_size: int = 0


class KvClientConnectorV1(KVConnectorBase_V1):
    """KV connector that reads/writes paged KV cache blocks through an
    external KV cache service (KVCacheSDK / prefetch_utils).

    Runs in one of two roles:
      * SCHEDULER: tracks per-request block hashes via a
        RequestResourceTracker and builds a KvClientConnectorMetadata
        every scheduling step.
      * WORKER: consumes that metadata to load cached blocks before the
        forward pass and to save new blocks during/after it.
    """

    # Per-block KV cache size forwarded to workers via the metadata.
    # NOTE(review): never assigned within this class — presumably set from
    # outside; confirm it is populated before the first step.
    kv_cache_block_size: int = 0

    def __init__(self, vllm_config: "VllmConfig", role: KVConnectorRole):
        super().__init__(vllm_config=vllm_config, role=role)

        self._block_size = vllm_config.cache_config.block_size

        # Detect the accelerator backend; save_kv_layer special-cases "npu".
        # NOTE(review): torch.npu only exists when the top-of-file
        # `import torch_npu` succeeded — confirm this cannot AttributeError
        # on CUDA-less hosts without torch_npu installed.
        if torch.cuda.is_available():
            self.device_type = "cuda"
        elif torch.npu.is_available():
            self.device_type = "npu"
        else:
            self.device_type = "cpu"

        if role == KVConnectorRole.SCHEDULER:
            # Scheduler-side per-request hash/block bookkeeping.
            self._tracker = RequestResourceTracker()

        if role == KVConnectorRole.WORKER:
            # here only consider v1, we have layer name <-> layer cache first, then transfer to list
            self._kv_cache: dict[str, torch.Tensor] = {}
            self._kv_cache_list: list[torch.Tensor] = []


    def _init_kv_cache_by_forward_context(
        self,
        forward_context: "ForwardContext"
    ) -> None:
        """Capture each attention layer's KV cache tensor for the current
        virtual engine, keyed by layer name and also kept as an ordered
        list (layers without a kv_cache attribute are skipped)."""
        for layer_name, layer_attn in forward_context.no_compile_layers.items():
            layer_cache = getattr(layer_attn, "kv_cache", None)
            if layer_cache is not None:
                self._kv_cache[layer_name] = layer_cache[forward_context.virtual_engine]
                self._kv_cache_list.append(layer_cache[forward_context.virtual_engine])


    # ==============================
    # Worker-side methods
    # ==============================
    def bind_connector_metadata(
            self, connector_metadata: KVConnectorMetadata) -> None:
        """Set the connector metadata from the scheduler.

        This function should be called by the model runner every time 
        before the model execution. The metadata will be used for runtime
        KV cache loading and saving.

        Args:
            connector_metadata (dict): the connector metadata.
        """
        super().bind_connector_metadata(connector_metadata)
        logger.debug(f"{connector_metadata=}")


    def start_load_kv(self, forward_context: "ForwardContext",
                      **kwargs) -> None:
        """
        Start loading the KV cache from the connector to vLLM's paged
        KV buffer. This is called from the forward context before the
        forward pass to enable async loading during model execution.

        Args:
            forward_context (ForwardContext): the forward context.
            **kwargs: additional arguments for the load operation

        Note:
            The number of elements in kv_caches and layer_names should be 
            the same.
            
        """
        # Lazily capture the per-layer KV cache tensors on first call.
        if not self._kv_cache:
            self._init_kv_cache_by_forward_context(forward_context)
        # Flatten req_id -> [(hash, block idx)] into one hash -> block idx map.
        adjust_prefill_to_read_block_dict: dict[int, int] = {}
        
        for req_id, tuples in self._get_connector_metadata().prefill_to_read_block_dict.items():
            for hash_id, kv_cache_idx in tuples:
                adjust_prefill_to_read_block_dict[hash_id] = kv_cache_idx
        
        # Query the external cache for every request's block-hash chain.
        prefill_to_read_key_list: list[str] = []
        prefill_to_read_value_list: list[list[int]] = []
        for req_id, hash_id_list in self._get_connector_metadata().req_block_hash_map[HASH_ID].items():
            prefill_to_read_key_list.append(req_id)
            prefill_to_read_value_list.append(hash_id_list)

        query_segment_list = batch_query_prefix_cache(prefill_to_read_value_list)
        # req_id -> {segment id -> segment} for the cached segments found.
        query_segment_dict = {}
        for idx in range(0, len(prefill_to_read_key_list)):
            query_segment_dict[prefill_to_read_key_list[idx]] = \
                {segment.getSegmentId(): segment for segment in query_segment_list[idx]}
        # The save plans are stashed on self for wait_for_save.
        # NOTE(review): wait_for_save assumes start_load_kv already ran in
        # the same step (otherwise these attributes are unset) — confirm the
        # runner guarantees that ordering.
        self.prefill_to_save_segment_dict, self.decode_to_save_segment_dict = \
            adjust_query_allocate_prefill(self._get_connector_metadata().kv_cache_block_size, self._vllm_config,
            adjust_prefill_to_read_block_dict, self._get_connector_metadata().prefill_to_save_block_dict,
            self._get_connector_metadata().decode_to_save_block_dict, self._get_connector_metadata().req_block_hash_map,
            self._kv_cache_list, query_segment_dict)


    def wait_for_layer_load(self, layer_name: str) -> None:
        """
        Block until the KV for a specific layer is loaded into vLLM's
        paged buffer. This is called from within attention layer to ensure
        async copying from start_load_kv is complete.
        
        This interface will be useful for layer-by-layer pipelining.

        Args:
            layer_name: the name of that layer
        """
        # Delegates entirely to the SDK worker tool; layer_name is unused
        # here — the tool apparently tracks layer progress internally.
        prefetch_utils.g_workertool.waitLayerFillFinish()
        pass


    def save_kv_layer(self, layer_name: str, kv_layer: torch.Tensor,
                      attn_metadata: "AttentionMetadata", **kwargs) -> None:
        """
        Start saving the a layer of KV cache from vLLM's paged buffer 
        to the connector. This is called from within attention layer to
        enable async copying during execution.

        Args:
            layer_name (str): the name of the layer.
            kv_layer (torch.Tensor): the paged KV buffer of the current 
                layer in vLLM.
            attn_metadata (AttentionMetadata): the attention metadata.
            **kwargs: additional arguments for the save operation.
        """
        # On NPU, pass the layer's event so the write can synchronize with
        # the compute stream; otherwise write without an event.
        current_event = getattr(attn_metadata, "current_event", None)
        if self.device_type == "npu" and current_event is not None:
            call_single_layer_cache_write(kv_layer, current_event)
        else:
            call_single_layer_cache_write(kv_layer, None)
        pass


    def wait_for_save(self):
        """
        Block until all the save operations is done. This is called
        as the forward context exits to ensure that the async saving
        from save_kv_layer is complete before finishing the forward.

        This prevents overwrites of paged KV buffer before saving done.
        """
        # Flush prefill blocks (last arg True) then decode blocks (False)
        # using the segment plans computed in start_load_kv.
        write_full_blocks(self._get_connector_metadata().req_block_hash_map, self.prefill_to_save_segment_dict, self._kv_cache_list,
            self._vllm_config, self._get_connector_metadata().kv_cache_block_size, True)
        write_full_blocks(self._get_connector_metadata().req_block_hash_map, self.decode_to_save_segment_dict, self._kv_cache_list,
            self._vllm_config, self._get_connector_metadata().kv_cache_block_size, False)
        pass


    # ==============================
    # Scheduler-side methods
    # ==============================
    def get_num_new_matched_tokens(
        self,
        request: "Request",
        num_computed_tokens: int,
    ) -> int:
        """
        Get number of new tokens that can be loaded from the
        external KV cache beyond the num_computed_tokens.
        
        Args:
            request (Request): the request object.
            num_computed_tokens (int): the number of locally
                computed tokens for this request

        Returns:
            the number of tokens that can be loaded from the 
            external KV cache beyond what is already computed.
        """
        # here we do not use it, currently follow the old logic
        return 0


    def update_state_after_alloc(self, request: "Request",
                                 num_external_tokens: int):
        """
        Update KVConnector state after block allocation.
        """
        # here we do not use it, currently follow the old logic
        pass


    @staticmethod
    def _get_block_hash_id(
        req_block_hash_map: dict[str, dict[str, list[int]]]
    ) -> dict[str, list[KVCacheSDK.BlockHashId]]:
        """Convert the per-kind hash map into per-request lists of SDK
        BlockHashId objects (hash, parent, local, verify for each block).

        Assumes the four inner lists are index-aligned per request, which
        holds because they are appended in lockstep by RequestResource.
        """
        # Annotation corrected: values are lists of BlockHashId, not single
        # BlockHashId objects.
        block_map: dict[str, list[KVCacheSDK.BlockHashId]] = {}
        for req_id, hash_list in req_block_hash_map[HASH_ID].items():
            block_map[req_id] = []
            for idx in range(0, len(hash_list)):
                block_map[req_id].append(
                    KVCacheSDK.BlockHashId(
                    req_block_hash_map[HASH_ID][req_id][idx],
                    req_block_hash_map[PARENT_HASH_ID][req_id][idx],
                    req_block_hash_map[LOCAL_HASH_ID][req_id][idx],
                    req_block_hash_map[VERIFY_HASH_ID][req_id][idx]
                    )
                )
        return block_map


    def _update_cached_req_resource(
        self,
        scheduler_output: SchedulerOutput,
        req_block_hash_map: dict[str, dict[str, list[int]]],
        to_batch_query_dict: dict[str, list[int]],
        decode_to_save_block_dict: dict[int, int],
    ) -> int:
        """Refresh tracked resources for already-scheduled (cached) requests.

        Fills req_block_hash_map for every cached request. Requests resumed
        from preemption additionally register their uncomputed block hashes
        in to_batch_query_dict (to be re-queried against the external cache);
        non-resumed (decode) requests register their newly completed blocks
        in decode_to_save_block_dict for saving.

        Returns:
            The summed full-token count of resumed requests only.
        """
        prompt_token_num = 0
        for cached_req in scheduler_output.scheduled_cached_reqs:
            req_block_hash_map[HASH_ID][cached_req.req_id] = []
            req_block_hash_map[PARENT_HASH_ID][cached_req.req_id] = []
            req_block_hash_map[LOCAL_HASH_ID][cached_req.req_id] = []
            req_block_hash_map[VERIFY_HASH_ID][cached_req.req_id] = []

            req_resource = self._tracker.request_resource[cached_req.req_id]
            new_block_cnt = req_resource.update_from_cached_request(cached_req, self._block_size)
            logger.debug(f"cached request reqource: {new_block_cnt=}, {req_resource=}")
            if cached_req.resumed_from_preemption:
                # Resumed request: everything past the already-computed
                # prefix must be re-fetched, so expose those hashes.
                to_batch_query_dict[cached_req.req_id] = req_resource.block_hashes[req_resource.full_num_computed_tokens // self._block_size:]
                prompt_token_num += len(req_resource.full_token_id)
                start_idx = req_resource.full_num_computed_tokens // self._block_size
                for idx in range(start_idx, len(req_resource.block_hashes)):
                    req_block_hash_map[HASH_ID][cached_req.req_id].append(req_resource.block_hashes[idx])
                    req_block_hash_map[PARENT_HASH_ID][cached_req.req_id].append(req_resource.block_parent_hashes[idx])
                    req_block_hash_map[LOCAL_HASH_ID][cached_req.req_id].append(req_resource.block_local_hashes[idx])
                    req_block_hash_map[VERIFY_HASH_ID][cached_req.req_id].append(req_resource.block_verify_hashes[idx])
            else:   
                # Decode step: only the blocks completed this step need
                # saving; they sit at the tail of the hash lists.
                for idx in range(0, new_block_cnt):
                    actual_idx = len(req_resource.block_hashes) - new_block_cnt + idx
                    decode_to_save_block_dict[req_resource.block_hashes[actual_idx]] = req_resource.block_ids[actual_idx]
                    req_block_hash_map[HASH_ID][cached_req.req_id].append(req_resource.block_hashes[actual_idx])
                    req_block_hash_map[PARENT_HASH_ID][cached_req.req_id].append(req_resource.block_parent_hashes[actual_idx])
                    req_block_hash_map[LOCAL_HASH_ID][cached_req.req_id].append(req_resource.block_local_hashes[actual_idx])
                    req_block_hash_map[VERIFY_HASH_ID][cached_req.req_id].append(req_resource.block_verify_hashes[actual_idx])
        return prompt_token_num


    def _create_new_req_resource(
        self,
        scheduler_output: SchedulerOutput,
        req_block_hash_map: dict[str, dict[str, list[int]]],
        to_batch_query_dict: dict[str, list[int]],
    ) -> int:
        """Create and register a RequestResource for every newly scheduled
        request, exposing its uncomputed block hashes both in
        to_batch_query_dict (for the external-cache query) and in
        req_block_hash_map.

        Returns:
            The summed full-token count of the new requests.
        """
        prompt_token_num = 0
        for new_req in scheduler_output.scheduled_new_reqs:
            req_resource = RequestResource.create_from_new_request(
                new_req,
                scheduler_output.num_scheduled_tokens[new_req.req_id],
                self._vllm_config.cache_config.block_size
            )
            logger.debug(f"new request reqource: {req_resource=}")
            self._tracker.request_resource[new_req.req_id] = req_resource
            # Blocks past the locally computed prefix are candidates for
            # prefetch from the external cache.
            to_batch_query_dict[new_req.req_id] = req_resource.block_hashes[req_resource.full_num_computed_tokens // self._block_size:]
            prompt_token_num += len(req_resource.full_token_id)
            req_block_hash_map[HASH_ID][new_req.req_id] = []
            req_block_hash_map[PARENT_HASH_ID][new_req.req_id] = []
            req_block_hash_map[LOCAL_HASH_ID][new_req.req_id] = []
            req_block_hash_map[VERIFY_HASH_ID][new_req.req_id] = []
            start_idx = req_resource.full_num_computed_tokens // self._block_size
            for idx in range(start_idx, len(req_resource.block_hashes)):
                req_block_hash_map[HASH_ID][new_req.req_id].append(req_resource.block_hashes[idx])
                req_block_hash_map[PARENT_HASH_ID][new_req.req_id].append(req_resource.block_parent_hashes[idx])
                req_block_hash_map[LOCAL_HASH_ID][new_req.req_id].append(req_resource.block_local_hashes[idx])
                req_block_hash_map[VERIFY_HASH_ID][new_req.req_id].append(req_resource.block_verify_hashes[idx])
        return prompt_token_num


    def _update_connector_metadata_params(
        self,
        scheduler_output: SchedulerOutput,
        to_batch_query_list_key: list[str],
        query_segments_list, # list[list[KVCacheSDK.Segment]]
        prefill_to_read_block_dict: dict[str, list[tuple[int, int]]],
        prefill_to_save_block_dict: dict[int, int],
    ) -> tuple[int, int]:
        """Apply external-cache hit results to the scheduler output and
        build the read/save block plans.

        For each request with cache-hit segments, the scheduled/computed
        token counts in scheduler_output are adjusted in place so vLLM does
        not recompute cached blocks. When ALL scheduled tokens are covered
        by hits, one block is kept for computation (full_factor == 1) so
        the step still produces output.

        NOTE(review): the cached-request and new-request loops below are
        intentionally parallel copies of the same logic; keep them in sync.
        Annotation corrected: this returns (total_update_tokens_num,
        adjust_update_factor_num), not None.
        """
        total_update_tokens_num, adjust_update_factor_num = 0, 0
        for idx in range(0, len(to_batch_query_list_key)):
            req_id = to_batch_query_list_key[idx]
            segments = query_segments_list[idx]
            req_resource = self._tracker.request_resource[req_id]
            full_factor = 0
            for cached_req in scheduler_output.scheduled_cached_reqs:
                if cached_req.req_id != req_id:
                    continue
                if scheduler_output.num_scheduled_tokens[req_id] == len(segments) * self._block_size:
                    # Fully covered by cache hits: leave one block to compute.
                    scheduler_output.num_scheduled_tokens[req_id] = self._block_size
                    cached_req.num_computed_tokens += (len(segments) - 1) * self._block_size
                    adjust_update_factor_num += self._block_size
                    full_factor = 1
                else:
                    scheduler_output.num_scheduled_tokens[req_id] -= len(segments) * self._block_size
                    cached_req.num_computed_tokens += len(segments) * self._block_size
                total_update_tokens_num += len(segments) * self._block_size

                # Blocks satisfied by the cache are scheduled for reading...
                to_read_block_list = []
                for read_idx in range(req_resource.full_num_computed_tokens // self._block_size,
                    req_resource.full_num_computed_tokens // self._block_size + len(segments) - full_factor):
                    to_read_block_list.append((req_resource.block_hashes[read_idx], req_resource.block_ids[read_idx]))
                prefill_to_read_block_dict[req_id] = to_read_block_list

                # ...and the remaining (to-be-computed) blocks for saving.
                for write_idx in range(req_resource.full_num_computed_tokens // self._block_size + len(segments) - full_factor,
                    len(req_resource.block_hashes) - full_factor):
                    prefill_to_save_block_dict[req_resource.block_hashes[write_idx]] = req_resource.block_ids[write_idx]
                break
            
            # Same adjustment for newly scheduled requests.
            for new_req in scheduler_output.scheduled_new_reqs:
                if new_req.req_id != req_id:
                    continue
                if scheduler_output.num_scheduled_tokens[req_id] == len(segments) * self._block_size:
                    scheduler_output.num_scheduled_tokens[req_id] = self._block_size
                    new_req.num_computed_tokens += (len(segments) - 1) * self._block_size
                    adjust_update_factor_num += self._block_size
                    full_factor = 1
                else:
                    scheduler_output.num_scheduled_tokens[req_id] -= len(segments) * self._block_size
                    new_req.num_computed_tokens += len(segments) * self._block_size
                total_update_tokens_num += len(segments) * self._block_size

                to_read_block_list = []
                for read_idx in range(req_resource.full_num_computed_tokens // self._block_size,
                    req_resource.full_num_computed_tokens // self._block_size + len(segments) - full_factor):
                    to_read_block_list.append((req_resource.block_hashes[read_idx], req_resource.block_ids[read_idx]))
                prefill_to_read_block_dict[req_id] = to_read_block_list

                for write_idx in range(req_resource.full_num_computed_tokens // self._block_size + len(segments) - full_factor,
                    len(req_resource.block_hashes) - full_factor):
                    prefill_to_save_block_dict[req_resource.block_hashes[write_idx]] = req_resource.block_ids[write_idx]
                break
        return total_update_tokens_num, adjust_update_factor_num
    

    def build_connector_meta(
            self, scheduler_output: SchedulerOutput) -> KVConnectorMetadata:
        """
        Build the connector metadata for this step.

        This function should NOT modify fields in the scheduler_output.
        Also, calling this function will reset the state of the connector.

        Args:
            scheduler_output (SchedulerOutput): the scheduler output object.
        """
        # NOTE(review): despite the inherited docstring above, this
        # implementation DOES mutate scheduler_output (num_scheduled_tokens,
        # num_computed_tokens, total_num_scheduled_tokens) via
        # _update_connector_metadata_params — confirm this is intentional.

        # prefill_to_read_block_dict is shaped to fit what batch query needs
        prefill_to_read_block_dict: dict[str, list[tuple[int, int]]] = {}
        prefill_to_save_block_dict: dict[int, int] = {}
        decode_to_save_block_dict: dict[int, int] = {}
        req_block_hash_map: dict[str, dict[str, list[int]]] = {
            HASH_ID: {},
            PARENT_HASH_ID: {},
            LOCAL_HASH_ID: {},
            VERIFY_HASH_ID: {}
        }

        # 1. update resource tracker
        # 1.1. remove finished reqs
        for req_id in scheduler_output.finished_req_ids:
            self._tracker.request_resource.pop(req_id, None)

        to_batch_query_dict: dict[str, list[int]] = {}
        prompt_token_num: int = 0
        # 1.2. update cached reqs
        prompt_token_num += self._update_cached_req_resource(scheduler_output, req_block_hash_map,
            to_batch_query_dict, decode_to_save_block_dict)          
        
        # 1.3. create new reqs
        prompt_token_num += self._create_new_req_resource(scheduler_output, req_block_hash_map, to_batch_query_dict)
        
        # 2. get prefetch result and update
        to_batch_query_list_key = list(to_batch_query_dict.keys())
        to_batch_query_list_value = list(to_batch_query_dict.values())
        
        # Prefetch is skipped entirely for small batches (threshold from
        # prefetch_utils) — not worth the round trip.
        if prompt_token_num > g_skip_prefetch_threshold:
            block_map = KvClientConnectorV1._get_block_hash_id(req_block_hash_map)

            layer_num, start_layer, end_layer = get_layer_info(self._vllm_config.model_config,
                self._vllm_config.parallel_config,)
            # MLA shares KV across TP ranks, so TP does not multiply size.
            tp_num = self._vllm_config.parallel_config.tensor_parallel_size if \
                not self._vllm_config.model_config.use_mla else 1
            prefetch_prefix_cache_distributed(list(block_map.values()),
                self.kv_cache_block_size * tp_num * layer_num // (end_layer - start_layer), self._vllm_config)
            query_segments_list = batch_query_prefix_cache(to_batch_query_list_value)

            total_update_tokens_num , adjust_update_factor_num = self._update_connector_metadata_params(
                scheduler_output, to_batch_query_list_key, query_segments_list,
                prefill_to_read_block_dict, prefill_to_save_block_dict)            
            
            # Shrink this step's total by the cache-hit tokens (minus the
            # blocks deliberately left for computation).
            scheduler_output.total_num_scheduled_tokens -= total_update_tokens_num - adjust_update_factor_num

        kv_client_connector_metadata = KvClientConnectorMetadata(
            decode_to_save_block_dict=decode_to_save_block_dict,
            prefill_to_read_block_dict=prefill_to_read_block_dict,
            prefill_to_save_block_dict=prefill_to_save_block_dict,
            req_block_hash_map=req_block_hash_map,
            kv_cache_block_size=self.kv_cache_block_size
        )
        logger.debug(f"generated kv_client metadata: {kv_client_connector_metadata}")
        return kv_client_connector_metadata
