/*
 * Copyright (c) China Telecom Cloud Technology Co., Ltd. 2024-2025. All rights reserved.
 */
#include <fcntl.h>
#include <omp.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <unistd.h>

#include <algorithm>
#include <cerrno>
#include <cstdint>
#include <cstring>
#include <vector>

#include "Common.h"
#include "Logger.h"
#include "WorkerTool.h"

#if defined(USE_GPU)
#include <cuda_runtime.h>
#endif


/**
 *! @brief Map a device enum value to its lowercase string representation.
 *
 * @param type  device enum value
 *
 * @return std::string "cpu", "gpu", "npu", or "unknown" for any other value
 */
std::string getDeviceString(DeviceType type) {
    switch (type) {
    case DeviceType::DEVICE_TYPE_CPU:
        return "cpu";
    case DeviceType::DEVICE_TYPE_GPU:
        return "gpu";
    case DeviceType::DEVICE_TYPE_NPU:
        return "npu";
    default:
        return "unknown";
    }
}



/**
 *! @brief Translate the backend-agnostic DIRECTION enum into the concrete
 *         memcpy-kind constant of the device backend this binary was
 *         compiled for (CUDA or ACL). Unknown directions map to the
 *         backend's "default/auto" kind.
 */
#if defined(USE_GPU)
cudaMemcpyKind getCudaDirection(DIRECTION direction) {
    switch (direction) {
    case DEVICE_TO_HOST:
        return cudaMemcpyDeviceToHost;
    case HOST_TO_DEVICE:
        return cudaMemcpyHostToDevice;
    case DEVICE_TO_DEVICE:
        return cudaMemcpyDeviceToDevice;
    case HOST_TO_HOST:
        return cudaMemcpyHostToHost;
    default:
        return cudaMemcpyDefault;
    }
}
#elif defined(USE_NPU)
aclrtMemcpyKind getNpuDirection(DIRECTION direction) {
    switch (direction) {
    case DEVICE_TO_HOST:
        return ACL_MEMCPY_DEVICE_TO_HOST;
    case HOST_TO_DEVICE:
        return ACL_MEMCPY_HOST_TO_DEVICE;
    case DEVICE_TO_DEVICE:
        return ACL_MEMCPY_DEVICE_TO_DEVICE;
    case HOST_TO_HOST:
        return ACL_MEMCPY_HOST_TO_HOST;
    default:
        return ACL_MEMCPY_DEFAULT;
    }
}
#endif




/**
 *! @brief  Synchronize (block until completion of) the given device stream.
 *
 * @param type    device type; GPU and NPU are handled, anything else is a
 *                no-op
 * @param stream  wrapper holding the backend-specific stream handle
 *
 * @throws std::runtime_error when the binary was built without support for
 *         the requested device type.
 */
void workerDeviceSynchronize(DeviceType type, DeviceStream *stream) {
    if (type == DEVICE_TYPE_GPU) {
#if defined(USE_GPU)
        // NOTE(review): return code is ignored -- confirm callers do not
        // need failure detection here.
        cudaStreamSynchronize(stream->cudaStream);
#else
        throw std::runtime_error(
            "If use gpu, please add -DDEVICE=GPU to cmake args");
#endif
    } else if (type == DEVICE_TYPE_NPU) {
#if defined(USE_NPU)
        aclrtSynchronizeStream(stream->npuStream);
#else
        throw std::runtime_error(
            "If use npu, please add -DDEVICE=NPU to cmake args");
#endif
    }
}


/**
 *! @brief Bind the calling process/thread to a specific device.
 *
 * @param type      device type; GPU and NPU are handled, anything else is a
 *                  no-op
 * @param deviceId  id of the device to select
 *
 * @throws std::runtime_error when the binary was built without support for
 *         the requested device type.
 */
void workerSetDevice(DeviceType type, int deviceId) {
    if (type == DEVICE_TYPE_GPU) {
#if defined(USE_GPU)
        // NOTE(review): return code is ignored -- confirm callers do not
        // need failure detection here.
        cudaSetDevice(deviceId);
#else
        throw std::runtime_error(
            "If use gpu, please add -DDEVICE=GPU to cmake args");
#endif
    } else if (type == DEVICE_TYPE_NPU) {
#if defined(USE_NPU)
        aclrtSetDevice(deviceId);
#else
        // Fixed: the log previously suggested "-DACL=1", contradicting the
        // exception text below; keep both messages consistent.
        LOG_ERROR_FMT("If use npu, please add -DDEVICE=NPU to cmake args");
        throw std::runtime_error(
            "If use npu, please add -DDEVICE=NPU to cmake args");
#endif
    }
}


/// @brief Access the layer buffer at slot idx.
/// NOTE(review): no bounds checking -- callers must keep idx within
/// [0, LAYER_BUF_NUM); confirm all call sites guarantee this.
layerBuf &WorkerTool::getLayerBuf(int32_t idx) { 
    return _layerBufs[idx]; 
}



/**
 *! @brief Allocate the shared layer-buffer pool with a single anonymous
 *         mmap and initialize every layerBuf slot: key/value sub-buffer
 *         addresses and lengths, default metadata, and a per-slot device
 *         stream.
 *
 * @param pinned_memory  recorded on each slot; when true, destroyLayerBuf()
 *                       later calls cudaHostUnregister on these buffers.
 *                       NOTE(review): no cudaHostRegister happens in this
 *                       function -- registration presumably happens
 *                       elsewhere; verify before changing lifetimes.
 *
 * @return KVC_OK on success, KVC_ERR_FILEOP_FAIL if the mmap fails.
 * @throws std::runtime_error when built without the requested device backend.
 */
int32_t WorkerTool::initLayerBuf(bool pinned_memory) {
    // MAP_SHARED | MAP_ANONYMOUS: anonymous physical memory shared between
    // parent and child processes.
    char *memStartAddr =
        (char *)mmap(NULL, TOTAL_LAYER_BUF_SIZE, PROT_READ | PROT_WRITE,
                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (memStartAddr == MAP_FAILED) {
        LOG_ERROR_FMT("mmap failed! size: {}", TOTAL_LAYER_BUF_SIZE);
        return KVC_ERR_FILEOP_FAIL;
    }

    // TODO(yangxianpku: 2025.08.19): does the MLA case follow this same
    // layout? If so, a lot of space would be wasted.
    for (int32_t i = 0; i < LAYER_BUF_NUM; i++) {
        // Each slot is a fixed-size window into the pool: key sub-buffer
        // first, value sub-buffer right after it.
        _layerBufs[i].keyBuf    = (uint8_t *)memStartAddr + i * LAYER_BUF_SIZE;
        _layerBufs[i].keyBufLen = SUB_BUF_SIZE;
        _layerBufs[i].valueBuf  =
            (uint8_t *)memStartAddr + i * LAYER_BUF_SIZE + SUB_BUF_SIZE;
        
        _layerBufs[i].valueBufLen   = SUB_BUF_SIZE;
        _layerBufs[i].pinned_memory = pinned_memory;
        _layerBufs[i].blkInfo.clear();

        // -1 marks "geometry not yet known"; filled in by
        // updateLayerBufInfo() before any transfer.
        _layerBufs[i].blockSize   = -1;
        _layerBufs[i].numHead     = -1;
        _layerBufs[i].headSize    = -1;
        _layerBufs[i].elementSize = -1;
        _layerBufs[i].layerIdx    = -1;
        _layerBufs[i].hasValue    = false;
        _layerBufs[i].type        = _deviceType;

        if (_deviceType == DeviceType::DEVICE_TYPE_GPU) {
#if defined(USE_GPU)
            cudaStreamCreate(&_layerBufs[i].stream.cudaStream);
#else
            throw std::runtime_error(
                "If use gpu, please add -DDEVICE=GPU to cmake args");
#endif
        } else if (_deviceType == DeviceType::DEVICE_TYPE_NPU) {
#if defined(USE_NPU)
            aclrtCreateStream(&_layerBufs[i].stream.npuStream);
#else
            // NOTE(review): log mentions "-DACL=1" but the exception says
            // "-DDEVICE=NPU" -- the messages are inconsistent.
            LOG_ERROR_FMT("If use npu, please add -DACL=1 to cmake args");
            throw std::runtime_error(
                "If use npu, please add -DDEVICE=NPU to cmake args");
#endif
        }
    }

    return KVC_OK;
}




/**
 *! @brief Tear down every layer buffer: unregister pinned host memory (GPU
 *         builds only), destroy each slot's device stream, then unmap the
 *         shared pool allocated by initLayerBuf().
 *
 * @return KVC_OK
 * @throws std::runtime_error on cudaHostUnregister failure or when built
 *         without the requested device backend.
 */
int32_t WorkerTool::destroyLayerBuf() {
    for (int32_t i = 0; i < LAYER_BUF_NUM; i++) {
        if (_layerBufs[i].pinned_memory) {
#if defined(USE_GPU)
            cudaError_t err = cudaHostUnregister(_layerBufs[i].keyBuf);
            if (err != cudaSuccess) {
                LOG_ERROR_FMT("CUDA error: {}, i={}", cudaGetErrorString(err),
                              i);
                throw std::runtime_error("cudaHostUnregister keybuf failed");
            }
            err = cudaHostUnregister(_layerBufs[i].valueBuf);
            if (err != cudaSuccess) {
                LOG_ERROR_FMT("CUDA error: {}, i={}", cudaGetErrorString(err),
                              i);
                // Fixed: this failure concerns the value buffer, but the
                // message previously said "keybuf".
                throw std::runtime_error("cudaHostUnregister valuebuf failed");
            }
#endif
        }

        if (_deviceType == DeviceType::DEVICE_TYPE_GPU) {
#if defined(USE_GPU)
            cudaStreamDestroy(_layerBufs[i].stream.cudaStream);
#else
            throw std::runtime_error(
                "If use gpu, please add -DDEVICE=GPU to cmake args");
#endif
        } else if (_deviceType == DeviceType::DEVICE_TYPE_NPU) {
#if defined(USE_NPU)
            aclrtDestroyStream(_layerBufs[i].stream.npuStream);
#else
            // Fixed: the log previously suggested "-DACL=1", contradicting
            // the exception text below.
            LOG_ERROR_FMT("If use npu, please add -DDEVICE=NPU to cmake args");
            throw std::runtime_error(
                "If use npu, please add -DDEVICE=NPU to cmake args");
#endif
        }
    }
    // keyBuf of slot 0 is the start address of the whole mmap'ed pool.
    munmap(_layerBufs[0].keyBuf, TOTAL_LAYER_BUF_SIZE);
    return KVC_OK;
}


/**
 *! @brief 比较两块的物理块ID大小
 * 
 * @param lhs  left hand side  第一个比较块
 * @param rhs  right hand side 第二个比较块
 * @return bool
 */
inline bool phyBlkIdAscendCmp(BlkInfo &lhs, BlkInfo &rhs) {
    return lhs.phyBlkId < rhs.phyBlkId;
}



/**
 *! @brief Pre-compute per-layer transfer geometry and contiguous physical
 *         block runs for a layer buffer before reads/writes start.
 *
 * Recomputed when layer 0 is read/written; subsequent layers reuse the
 * result.
 * TODO(yangxianpku: 2025.08.19): if partially-filled blocks are ever saved,
 * this must be computed per block instead.
 *
 * @param obj  layer buffer to prepare; its fields are updated in place
 */
void prepareLayerBufObj(layerBuf &obj) {
    // Bytes of Key (or Value) data for a single token.
    obj.tokenBytes = obj.numHead * obj.headSize * obj.elementSize;
    // Bytes of Key (or Value) data for all tokens in one block.
    obj.blockBytes = obj.blockSize * obj.tokenBytes;

    // Offset of this layer's K and V cache inside a whole Segment.
    // With values the segment layout is [layer][K|V][blockBytes].
    if (obj.hasValue) {
        const int kvNum = 2;
        obj.keyPos   = kvNum * obj.layerIdx * obj.blockBytes;
        obj.valuePos = obj.keyPos + obj.blockBytes;
    } else {
        // Key-only (MLA-style) cache.
        obj.keyPos   = obj.layerIdx * obj.blockBytes;
        obj.valuePos = 0;
    }

    //! For MLA make sure tpNum == 1.
    if (obj.tpNum == 1) {
        obj.transferUnitSize = obj.blockBytes;
        obj.headOffset = 0;
    } else {
        // TODO(yangxianpku: 2025.08.19): confirm tokenBytes (not blockBytes)
        // is really intended here.
        obj.transferUnitSize = obj.tokenBytes / obj.tpNum;
        obj.headOffset = obj.tokenBytes * obj.tpRank / obj.tpNum;
    }

    // Sort blocks by physical id, then record the start index of every run
    // of consecutive physical ids; indexs ends with a sentinel equal to the
    // total block count, so [indexs[k], indexs[k+1]) is one contiguous run.
    std::vector<BlkInfo> &src = obj.blkInfo;
    std::sort(src.begin(), src.end(), phyBlkIdAscendCmp);

    obj.indexs.assign(1, 0);
    for (size_t i = 1; i < src.size(); i++) {
        // Adjacent blocks are not physically contiguous.
        if (src[i].phyBlkId != src[i - 1].phyBlkId + 1) {
            obj.indexs.push_back(static_cast<int32_t>(i));
        }
    }
    obj.indexs.push_back(static_cast<int32_t>(src.size()));
}



int32_t swapOneLayerWithoutTP(std::vector<Segment> segments,
                            const void *layer_cache_ptr,
                            std::vector<int32_t> const &blockIds,
                            DIRECTION direction, 
                            DeviceType type,
                            LayCacheInfo &info) {
    // TODO(yangxianpku: 2025.08.19): layerBuf不是有这些信息么，为什么这里重新计算？
    // info.transferUnitSize是模型总共的: block_size * num_kv_heads * head_size * element_size;
    // tpTransferSize 均摊到每个TP进程
    int32_t tpTransferSize = info.transferUnitSize / info.tpNum;

    // Segment KV 组织方式: [layer, 2, info.transferUnitSize]
    int32_t keyPos =
        (info.hasValue ? 2 : 1) * info.layerIdx * info.transferUnitSize +
        tpTransferSize * info.tpRank;

    int32_t valuePos = info.hasValue
                           ? (2 * info.layerIdx + 1) * info.transferUnitSize +
                                 tpTransferSize * info.tpRank
                           : 0;

    //! vllm KV Cache Shape: 
    //  - MLA:   (num_blocks, block_size, head_size) 
    //  - Other: (2, num_blocks, block_size, num_kv_heads, head_size)
    if (direction == DIRECTION::HOST_TO_DEVICE) {
        //! 按block传输
        for (int32_t i = 0; i < blockIds.size(); i++) {
            char *keyAddr = (char *)layer_cache_ptr + blockIds[i] * tpTransferSize;
            // segment keyPos中的数据拷贝到CPU/GPU/NPU的keyAddr中
            segments[i].copyTo(type, 
                            (void *)keyAddr, 
                            keyPos, 
                            tpTransferSize,
                            (void *)info.stream
                        );

            if (info.hasValue) {
                char *valueAddr = (char *)layer_cache_ptr +
                                    (info.numBlocks + blockIds[i]) * tpTransferSize;
                segments[i].copyTo(type, 
                                (void *)valueAddr, 
                                valuePos,
                                tpTransferSize, 
                                (void *)info.stream
                            );
            }
        }
    } else {
        for (int32_t i = 0; i < blockIds.size(); i++) {
            char *keyAddr =
                (char *)layer_cache_ptr + blockIds[i] * tpTransferSize;
            segments[i].writeIn(type, (void *)keyAddr, keyPos, tpTransferSize,
                                (void *)info.stream);

            if (info.hasValue) {
                char *valueAddr =
                    (char *)layer_cache_ptr +
                    (info.numBlocks + blockIds[i]) * tpTransferSize;
                segments[i].writeIn(type, (void *)valueAddr, valuePos,
                                    tpTransferSize, (void *)info.stream);
            }
        }
    }

    return KVC_OK;
}



// int32_t WorkerTool::swap(std::vector<Segment> &segments,
//                         std::vector<void *>  const &device_cache,
//                         std::vector<int32_t> const &blockIds,
//                         int32_t direction, 
//                         int32_t num_layer,
//                         int32_t num_blocks, 
//                         int32_t block_size,
//                         int32_t num_kv_heads, 
//                         int32_t head_size,
//                         int32_t element_size, 
//                         int32_t has_value) {
//     //! 单block的KV Cache大小，这里聚合了所有的头(无TP)
//     int32_t transferUnitSize =
//         block_size * num_kv_heads * head_size * element_size;

//     // TODO(yangxianpku: 2025.08.19): 简单点
//     DIRECTION dir;
//     switch (direction) {
//     case DEVICE_TYPE_GPU:
//     case DEVICE_TYPE_NPU:
//         dir = DIRECTION::HOST_TO_DEVICE;
//         break;
//     case -DEVICE_TYPE_GPU:
//     case -DEVICE_TYPE_NPU:
//         dir = DIRECTION::DEVICE_TO_HOST;
//         break;
//     default:
//         LOG_ERROR_FMT("invalid direction: {}", direction);
//         return KVC_ERR_INVALID_ARG;
//     }

//     LayCacheInfo info = {
//                 .numBlocks        = num_blocks,
//                 .transferUnitSize = transferUnitSize,
//                 .hasValue         = has_value,
//                 .stream           = nullptr
//             };

//     int32_t ret;
//     // 遍历每一层
//     for (int32_t i = 0; i < device_cache.size(); i++) {
//         const void *layer_cache_ptr = device_cache[i];
//         info.layerIdx   = i;
//         auto swap_start = getCurrentTimestampUs();
//         ret = swapOneLayerWithoutTP(segments, 
//                                 layer_cache_ptr, 
//                                 blockIds, 
//                                 dir,
//                                 _deviceType, 
//                                 info
//                             );
//         if (ret != KVC_OK) {
//             LOG_ERROR_FMT("swapOneLayerWithoutTP fail! layerIds: {}", i);
//             return ret;
//         }
//         LOG_DEBUG_FMT("swap one layer cost time : {} us: {}",
//                       getCurrentTimestampUs() - swap_start);
//     }

//     return KVC_OK;
// }




/**
 *! @brief Refresh every layer buffer with the geometry of the upcoming
 *         transfer and (re)populate the per-buffer block lists.
 *
 * @param readIdxSegmentMap   physical-block-id -> Segment map for reads
 * @param writeIdxSegmentMap  physical-block-id -> Segment map for writes
 * @param startLayer          first layer index to be transferred
 * @param blockSize           tokens per block
 * @param numHead             number of KV heads
 * @param headSize            elements per head
 * @param elementSize         bytes per element
 * @param hasValue            false for key-only (MLA-style) caches
 * @param tpRank              tensor-parallel rank of this worker
 * @param tpNum               tensor-parallel world size
 *
 * @return KVC_OK
 */
int32_t WorkerTool::updateLayerBufInfo(std::map<int32_t, Segment> &readIdxSegmentMap,
                                    std::map<int32_t, Segment> &writeIdxSegmentMap, 
                                    int32_t startLayer,
                                    int32_t blockSize, 
                                    int32_t numHead, 
                                    int32_t headSize, 
                                    int32_t elementSize,
                                    bool hasValue, 
                                    int32_t tpRank, 
                                    int32_t tpNum) {
    //! Fields shared by read and write layer buffers.
    for (int32_t i = 0; i < LAYER_BUF_NUM; i++) {
        _layerBufs[i].blockSize   = blockSize;
        _layerBufs[i].numHead     = numHead;
        _layerBufs[i].headSize    = headSize;
        _layerBufs[i].elementSize = elementSize;
        _layerBufs[i].hasValue    = hasValue;
        _layerBufs[i].tpRank      = tpRank;
        _layerBufs[i].tpNum       = tpNum;
        _layerBufs[i].type        = _deviceType;
    }

    // Fields specific to the read layer buffers.
    for (int32_t i = 0; i < READ_LAYER_BUF_NUM; i++) {
        _layerBufs[i].layerIdx = startLayer;
        _layerBufs[i].blkInfo.clear();

        // Iterate by reference: copying the map pair would deep-copy the
        // Segment on every iteration.
        for (auto &pair : readIdxSegmentMap) {
            _layerBufs[i].blkInfo.emplace_back(pair.second.getSegmentId(),
                                               pair.first, 
                                               pair.second
                                            );
        }
    }

    // Fields specific to the write layer buffers.
    for (int32_t i = READ_LAYER_BUF_NUM; i < LAYER_BUF_NUM; i++) {
        _layerBufs[i].layerIdx = startLayer;
        _layerBufs[i].blkInfo.clear();

        for (auto &pair : writeIdxSegmentMap) {
            // Skip segments already written down -- no duplicate writes.
            // Without LRU eviction, re-running inference on the same prompt
            // makes decode-produced blocks hit this branch.
            if (pair.second.getIsFilled()) {
                continue;
            }
            _layerBufs[i].blkInfo.emplace_back(pair.second.getSegmentId(),
                                               pair.first, pair.second
                                            );
        }
    }
    return KVC_OK;
}




int32_t WorkerTool::adjustUpdateLayerBufInfo(
    std::map<int32_t, Segment> &prefillReadIdxSegmentMap,
    std::map<int32_t, Segment> &prefillWriteIdxSegmentMap,
    std::map<int32_t, Segment> &decodeWriteIdxSegmentMap, int32_t startLayer,
    int32_t blockSize, int32_t numHead, int32_t headSize, int32_t elementSize,
    bool hasValue, int32_t tpRank, int32_t tpNum) {
    // 更新读写layerBuf共同部分
    for (int32_t i = 0; i < LAYER_BUF_NUM; i++) {
        _layerBufs[i].blockSize = blockSize;
        _layerBufs[i].numHead = numHead;
        _layerBufs[i].headSize = headSize;
        _layerBufs[i].elementSize = elementSize;
        _layerBufs[i].hasValue = hasValue;
        _layerBufs[i].tpRank = tpRank;
        _layerBufs[i].tpNum = tpNum;
        _layerBufs[i].type = _deviceType;
    }

    // 更新读layerBuf独有
    for (int32_t i = 0; i < READ_LAYER_BUF_NUM; i++) {
        _layerBufs[i].layerIdx = startLayer;
        _layerBufs[i].blkInfo.clear();
        for (auto pair : prefillReadIdxSegmentMap) {
            _layerBufs[i].blkInfo.emplace_back(pair.second.getSegmentId(),
                                               pair.first, pair.second);
        }
    }

    // 更新写layerBuf独有
    // prefill
    _layerBufs[READ_LAYER_BUF_NUM].layerIdx = startLayer;
    _layerBufs[READ_LAYER_BUF_NUM].blkInfo.clear();
    for (auto pair : prefillWriteIdxSegmentMap) {
        _layerBufs[READ_LAYER_BUF_NUM].blkInfo.emplace_back(
            pair.second.getSegmentId(), pair.first, pair.second);
    }
    // decode
    _layerBufs[READ_LAYER_BUF_NUM + WRITE_PREFILL_LAYER_BUF_NUM].layerIdx =
        startLayer;
    _layerBufs[READ_LAYER_BUF_NUM + WRITE_PREFILL_LAYER_BUF_NUM]
        .blkInfo.clear();
    for (auto pair : decodeWriteIdxSegmentMap) {
        _layerBufs[READ_LAYER_BUF_NUM + WRITE_PREFILL_LAYER_BUF_NUM]
            .blkInfo.emplace_back(pair.second.getSegmentId(), pair.first,
                                  pair.second);
    }
    return KVC_OK;
}




/**
 *! @brief Block until the next layer's fill has been signalled by the
 *         startLayerFill() worker; becomes a no-op once _execLayerCnt has
 *         reached _totalLayerNum.
 */
void WorkerTool::waitLayerFillFinish() {
    if (_execLayerCnt >= _totalLayerNum) {
        return;
    }

    // sem_wait performs the P (wait/acquire) operation on the semaphore:
    //  - value > 0: decrement and return immediately
    //  - value == 0: block the calling thread until the value is positive
    // NOTE(review): EINTR is not handled -- confirm signals cannot interrupt
    // this wait in practice.
    sem_wait(&_readSem);
    _execLayerCnt++;
}




/**
 *! @brief Log an error when the device id was never configured, then bind
 *         the process to the device via workerSetDevice. The bind is
 *         attempted regardless of the check.
 *
 * @param type      device type
 * @param deviceId  device id; -1 means "not configured"
 */
void checkAndSetDevice(DeviceType type, int deviceId) {
    const bool idUnset = (deviceId == -1);
    if (idUnset) {
        LOG_ERROR_FMT("Invalid deviceId(-1), please set workertool deviceId");
    }
    workerSetDevice(type, deviceId);
}



int32_t WorkerTool::startLayerFill(std::vector<void *> &inKvCachePtrs,
                                   int32_t inNumBlocks) {
    _threadPool.submit([this, kvCachePtrs = inKvCachePtrs,
                        numBlocks = inNumBlocks]() {
        // set device
        // TODO(yangxianpku：2025.09.01)：无需每次都执行一次
        checkAndSetDevice(this->_deviceType, this->_deviceId);

        uint32_t layerNum  = kvCachePtrs.size();
        int32_t startLayer = _layerBufs[0].layerIdx;
        uint32_t currentReadPos  = 0; // the other start memcpy to cuda
        int32_t transferUnitSize = _layerBufs[currentReadPos].blockSize *
                                   _layerBufs[currentReadPos].numHead *
                                   _layerBufs[currentReadPos].headSize *
                                   _layerBufs[currentReadPos].elementSize;

        _execLayerCnt  = 0;
        _totalLayerNum = layerNum;

        if (_layerBufs[currentReadPos].blkInfo.empty()) {
            for (int i = 0; i < layerNum; ++i) {
                // 信号量值加1，可能唤醒等待线程
                sem_post(&_readSem);
            }
            return KVC_OK;
        }
        std::vector<std::vector<int32_t>> kvCacheIdxs(READ_LAYER_BUF_NUM,
                                                      std::vector<int32_t>());
        std::vector<std::vector<Segment>> segments(READ_LAYER_BUF_NUM,
                                                   std::vector<Segment>());

         // TODO(yangxianpku：2025.09.01)：为什么要使用READ_LAYER_BUF_NUM=2 ?
        for (int32_t idx = 0; idx < READ_LAYER_BUF_NUM; idx++) {
            for (BlkInfo blkInfo : _layerBufs[idx].blkInfo) {
                kvCacheIdxs[idx].push_back(blkInfo.phyBlkId);
                segments[idx].push_back(blkInfo.segment);
            }
        }

        for (int32_t idx = 0; idx < layerNum; idx++) {
            // 走简单流程，DRAM直接换入GPU
            LayCacheInfo info = {
                                .layerIdx         = startLayer + idx,
                                .numBlocks        = numBlocks,
                                .transferUnitSize = transferUnitSize,
                                .hasValue         = _layerBufs[currentReadPos].hasValue,
                                .tpNum            = _layerBufs[currentReadPos].tpNum,
                                .tpRank           = _layerBufs[currentReadPos].tpRank,
                                .stream           = &_layerBufs[currentReadPos].stream
                            };
            if (kvCacheIdxs[currentReadPos].size() > 0) {
                uint64_t start = getCurrentTimestampUs();
                swapOneLayerWithoutTP(segments[currentReadPos], 
                                    kvCachePtrs[idx],
                                    kvCacheIdxs[currentReadPos], 
                                    DIRECTION::HOST_TO_DEVICE,
                                    _deviceType, 
                                    info
                                );
                workerDeviceSynchronize(_deviceType, info.stream);
                uint64_t timecost = getCurrentTimestampUs() - start;
                LOG_DEBUG_FMT("swap one layer cost time : {} us layer idx : {}",
                              timecost, info.layerIdx);

                int64_t totalBytes =
                    kvCacheIdxs[currentReadPos].size() * transferUnitSize * 2;
                double bandwidth = ((double)totalBytes / 1024 / 1024) /
                                   ((double)timecost / 1000000); // MiB/s
            }
            // 信号量值加1，可能唤醒等待线程
            sem_post(&_readSem);
        }

        return KVC_OK;
    });
    return KVC_OK;
}




int32_t WorkerTool::singleLayerWrite(void *kvCachePtr,  
                                    int32_t numBlocks,
                                    void *event) {
    LOG_DEBUG_FMT("singleLayerWrite called with numBlocks: {}", numBlocks);
    int32_t currentWritePos = LAYER_BUF_NUM - 2;  // 2


    // TODO(yangxianpku: 2025.09.01): 为什么这里使用currentWritePos=2 ？？？
    int32_t transferUnitSize = _layerBufs[currentWritePos].blockSize *
                               _layerBufs[currentWritePos].numHead *
                               _layerBufs[currentWritePos].headSize *
                               _layerBufs[currentWritePos].elementSize;
    uint64_t start = getCurrentTimestampUs();

    std::vector<int32_t> kvCacheIdxs;
    std::vector<Segment> segments;

    if (_layerBufs[currentWritePos].blkInfo.empty()) {
        return KVC_OK;
    }

    // TODO(yangxianpku: 2025.09.01): 单独拎出去到构造函数中, 无需每次都设置
    workerSetDevice(_deviceType, _deviceId);

    for (BlkInfo blkInfo : _layerBufs[currentWritePos].blkInfo) {
        kvCacheIdxs.push_back(blkInfo.phyBlkId);
        segments.push_back(blkInfo.segment);
    }

    // 执行buffer读出
    // 走简单流程
    LayCacheInfo info = {
                        .layerIdx         = _layerBufs[currentWritePos].layerIdx,
                        .numBlocks        = numBlocks,
                        .transferUnitSize = transferUnitSize,
                        .hasValue         = _layerBufs[currentWritePos].hasValue,
                        .tpNum            = _layerBufs[currentWritePos].tpNum,
                        .tpRank           = _layerBufs[currentWritePos].tpRank,
                        .stream           = &_layerBufs[currentWritePos].stream
                    };

    if (event != nullptr) {
#if defined(USE_NPU)
        aclrtEvent waitEvent = *(aclrtEvent *)event;
        aclrtStreamWaitEvent(info.stream->npuStream, event);
#endif
    }

    LOG_DEBUG_FMT("single layer write start layerIdx : {}", info.layerIdx);

    auto swap_start = getCurrentTimestampUs();
    swapOneLayerWithoutTP(segments, 
                        kvCachePtr, 
                        kvCacheIdxs,
                        DIRECTION::DEVICE_TO_HOST, 
                        _deviceType, 
                        info
                    );
    LOG_DEBUG_FMT("single layer write layerIdx({}) cost time : {}",
                  info.layerIdx, getCurrentTimestampUs() - swap_start);
    
    if (info.layerIdx == _totalLayerNum - 1) {
        workerDeviceSynchronize(_deviceType,
                                &_layerBufs[currentWritePos].stream);
        LOG_DEBUG_FMT("last layer write layerIdx({}) end", info.layerIdx);
    }
    _layerBufs[currentWritePos].layerIdx++;

    return KVC_OK;
}




int32_t WorkerTool::allLayerWrite(std::vector<void *> &kvCachePtrs,
                                  int32_t numBlocks) {
    LOG_DEBUG_FMT("allLayerWrite start numBlocks: {}", numBlocks);
    auto start = getCurrentTimestampUs();

    //! decode阶段的KV Cache下刷
    int32_t currentWritePos  = LAYER_BUF_NUM - 1;  
    int32_t transferUnitSize = _layerBufs[currentWritePos].blockSize *
                               _layerBufs[currentWritePos].numHead *
                               _layerBufs[currentWritePos].headSize *
                               _layerBufs[currentWritePos].elementSize;

    if (_layerBufs[currentWritePos].blkInfo.empty()) {
        return KVC_OK;
    }

    workerSetDevice(_deviceType, _deviceId);

    std::vector<int32_t> kvCacheIdxs;
    std::vector<Segment> segments;

    for (BlkInfo blkInfo : _layerBufs[currentWritePos].blkInfo) {
        kvCacheIdxs.push_back(blkInfo.phyBlkId);
        segments.push_back(blkInfo.segment);
    }

    for (int32_t idx = 0; idx < kvCachePtrs.size(); idx++) {
        // 执行buffer读出
        // 走简单流程
        LayCacheInfo info = {.layerIdx         = _layerBufs[currentWritePos].layerIdx,
                             .numBlocks        = numBlocks,
                             .transferUnitSize = transferUnitSize,
                             .hasValue         = _layerBufs[currentWritePos].hasValue,
                             .tpNum            = _layerBufs[currentWritePos].tpNum,
                             .tpRank           = _layerBufs[currentWritePos].tpRank,
                             .stream           = &_layerBufs[currentWritePos].stream};

        auto layerWriteStart = getCurrentTimestampUs();
        swapOneLayerWithoutTP(segments, 
                            kvCachePtrs[idx], 
                            kvCacheIdxs,
                            DIRECTION::DEVICE_TO_HOST, 
                            _deviceType, 
                            info
                        );
        workerDeviceSynchronize(_deviceType,
                                &_layerBufs[currentWritePos].stream);
        auto layerWriteEnd = getCurrentTimestampUs();

        LOG_DEBUG_FMT("allLayerWrite layerIdx: {} cost time: {} us",
                      info.layerIdx, layerWriteEnd - layerWriteStart);
        _layerBufs[currentWritePos].layerIdx++;
    }
    auto end = getCurrentTimestampUs();

    LOG_DEBUG_FMT("allLayerWrite end cost time : {} us", end - start);

    return KVC_OK;
}
