
/*
 * SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Block layout: KV-cache blocks are organized along three axes —
// [PP ranks, TP ranks, block IDs within one rank], where the number of blocks
// per rank is tokens / tokens_per_block.
//
// Input:  blocks indexed as [PP ranks, TP ranks, blocks-per-rank].
// Output: a flat [blocks] layout. Every block holds the same tokens_per_block,
// so that dimension can be ignored when splitting/concatenating.

#pragma once

#include "tensorrt_llm/executor/dataTransceiverState.h"
#include "tensorrt_llm/runtime/bufferManager.h"
#include "tensorrt_llm/runtime/iTensor.h"

#include <NvInferRuntimeBase.h>

namespace tensorrt_llm::executor::kv_cache
{

struct TargetRanksInfo
{
    // Sizes of the pipeline-/tensor-/context-parallel domains this rank maps onto.
    int mDomainPPSize;
    int mDomainTPSize;
    int mDomainCPSize;
    // Peer rank ids within the domain.
    std::vector<int> mIRanks;
    int mDupHeadFactor;
    int mPeerDupHeadFactor;

    // One entry per PP rank in the domain (size == mDomainPPSize); each value is the
    // number of attention layers that should be fetched from that peer PP rank.
    std::vector<int> mPeerAttentionLayerNumInDomainPP;

    /// @brief Number of attention layers to fetch from the peer PP-domain rank that
    ///        targetRankIdx maps onto. The index wraps modulo mDomainPPSize; .at() throws
    ///        std::out_of_range if the vector was not sized to mDomainPPSize.
    int getPeerPPDomainLayerNum(int targetRankIdx) const
    {
        return mPeerAttentionLayerNumInDomainPP.at(targetRankIdx % mDomainPPSize);
    }
};

/// @brief Compute the peer ranks (and parallelism-domain geometry) that `selfRank` must
///        exchange KV-cache data with, given the peer and self cache layouts.
/// NOTE(review): semantics inferred from the name and the TargetRanksInfo result type —
///        confirm against the definition.
TargetRanksInfo targetIRanks(
    kv_cache::CacheState const& peerCacheState, kv_cache::CacheState const& selfCacheState, int selfRank);

/// @brief Variant of targetIRanks that accounts for Data Parallelism (DP) when mapping
///        selfRank onto peer ranks. Presumably used when attention DP is enabled —
///        verify against the definition.
TargetRanksInfo TargetRanksInfoForDP(
    kv_cache::CacheState const& peerCacheState, kv_cache::CacheState const& selfCacheState, int selfRank);

/**
 * @brief Calculate the number of blocks allocated to a specific Context Parallelism (CP) rank.
 *
 * This function determines how many blocks should be allocated to a given CP rank when
 * distributing a total number of blocks across multiple CP ranks.
 *
 * @param cpRank The rank (index) of the current CP process. Must be in range [0, cpSize).
 * @param cpSize The total number of CP ranks/processes in the parallel group.
 * @param numTotalBlocks The total number of blocks to be distributed across all CP ranks.
 *
 * @return The number of blocks allocated to the specified CP rank.
 *
 * @note When numTotalBlocks is not evenly divisible by cpSize, the remainder-handling
 *       strategy lives in the implementation — see getGlobalBlockIdAccountingForCP for the
 *       matching index mapping.
 */
int getBlockNumAccountingForCP(int cpRank, int cpSize, int numTotalBlocks);

/**
 * @brief Convert a local block index to a global block ID when Context Parallelism (CP) is enabled.
 *
 * This function maps a local block index (within a specific CP rank) to its corresponding
 * global block ID across all CP ranks. It supports two distribution strategies controlled
 * by the environment variable TRTLLM_USE_ROUND_ROBIN_BLOCK_DIST_FOR_CP.
 *
 * @param localBlockIdx The local block index within the current CP rank (0-based).
 * @param cpSize The total number of CP ranks in the parallel group.
 * @param cpRank The rank of the current CP process. Must be in range [0, cpSize).
 * @param numTotalBlocks The total number of blocks distributed across all CP ranks.
 *
 * @return The global block ID corresponding to the local block index.
 *
 * @note Inverse companion of getBlockNumAccountingForCP: localBlockIdx is expected to be
 *       below the block count that function reports for (cpRank, cpSize, numTotalBlocks).
 */
int getGlobalBlockIdAccountingForCP(int localBlockIdx, int cpSize, int cpRank, int numTotalBlocks);

/// @brief Gather KV-cache blocks received from peer ranks into this rank's output blocks,
///        converting between the peer and self cache layouts; copies go through bufferManager.
/// @param inputBlocks  Raw array of inputBlockNum input block tensors (caller-owned).
/// @param inputRanks   Peer ranks the input blocks came from.
/// @param outputBlocks Raw array of outputBlockNum destination block tensors (caller-owned).
/// NOTE(review): exact layout conversion and the pairing of inputRanks to inputBlocks are
///        defined in the implementation — confirm there.
void concatKVCacheDispatch(runtime::ITensor::SharedPtr* inputBlocks, int inputBlockNum,
    std::vector<int> const& inputRanks, kv_cache::CacheState const& peerCacheState,
    runtime::ITensor::SharedPtr* outputBlocks, int outputBlockNum, int selfRank,
    kv_cache::CacheState const& selfCacheState, runtime::BufferManager const& bufferManager);

/// @brief Build an nvinfer1::Dims describing the block tensor shape implied by cacheState.
nvinfer1::Dims makeShapeFromCacheState(kv_cache::CacheState const& cacheState);

/// @brief Split this rank's KV-cache blocks (grouped by attention-window size) into the
///        per-destination slices expected by the peer cache layout.
/// @param kVCacheBlocksPerWindow Map from window size to this rank's cache blocks.
/// @param outputSplitBlocks Destination tensors that receive the split slices.
/// @param peerCacheState / selfCacheState Cache layouts used to compute the split.
/// @param selfIdx This rank's index within its parallel group.
/// @param isIndexerKCache When true, blocks are treated as indexer K-cache data — exact
///        semantics live in the implementation; confirm there.
/// NOTE(review): fixed parameter-name typo "ouputSplitBlocks" -> "outputSplitBlocks".
///        Declaration-only rename: parameter names in declarations need not match the
///        definition, so this has no ABI or caller impact.
void splitKVCacheDispatch(std::map<SizeType32, std::vector<runtime::ITensor::SharedPtr>> const& kVCacheBlocksPerWindow,
    std::vector<runtime::ITensor::SharedPtr>& outputSplitBlocks, kv_cache::CacheState const& peerCacheState,
    kv_cache::CacheState const& selfCacheState, int selfIdx, runtime::BufferManager const& bufferManager,
    bool isIndexerKCache = false);

/// @brief Reassemble split KV-cache block slices received from peers into this rank's
///        cache blocks, grouped by attention-window size (counterpart of splitKVCacheDispatch).
/// NOTE(review): the first parameter is a flat vector despite the "PerWindow" suffix —
///        confirm the expected slice ordering against the definition.
void concatKvCacheV2Dispatch(std::vector<runtime::ITensor::SharedPtr> const& inputSplitBlocksPerWindow,
    std::map<SizeType32, std::vector<runtime::ITensor::SharedPtr>>& outputKvCacheBlocksPerWindow,
    kv_cache::CacheState const& peerCacheState, kv_cache::CacheState const& selfCacheState, int selfIdx,
    runtime::BufferManager const& bufferManager, bool isIndexerKCache = false);

} // namespace tensorrt_llm::executor::kv_cache
