/*
 * Copyright (c) China Telecom Cloud Technology Co., Ltd. 2024-2025. All rights reserved.
 */
#include "../include/KVCacheSDK.h"
#include "Common.h"
#include <cerrno>
#include <fcntl.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <unistd.h>

#if defined(USE_GPU)
#include <cuda_runtime.h>
#endif

// Destructor: unmap every shared-memory region this SDK instance still holds.
// The map itself is destroyed afterwards, so no explicit erase is needed.
KVCacheSDK::~KVCacheSDK() {
    // Iterate by const reference: the old `for (auto it : ...)` form copied
    // each std::pair (including the std::string path) on every iteration.
    for (const auto &entry : shmMapInfo) {
        const auto &info = entry.second;
        munmap(info.addr, info.len);
    }
}



// Ensure a shared-memory pool of `memSize` is mapped at `shmPath`.
//
// If an existing mapping for this size already points at the same path it is
// reused as-is; if the path changed, the stale region is unmapped and the new
// file is mapped in its place. On NPU the whole pool is locked at mmap time;
// on GPU the mapped region is additionally registered as pinned host memory.
//
// Returns KVC_OK on success; throws std::runtime_error on open/stat/mmap or
// CUDA registration failure.
int32_t KVCacheSDK::CheckAndCreateShm(uint32_t memSize, 
                                std::string shmPath) {
    auto iter         = shmMapInfo.find(memSize);
    uint8_t *mmapAddr = nullptr;

    if (iter != shmMapInfo.end()) {
        // Path unchanged: the existing mapping is still valid.
        if (iter->second.path == shmPath) {
            return KVC_OK;
        }

        // Path changed: unmap the old region before remapping the new file.
        munmap(iter->second.addr, iter->second.len);

        printf("shmMapInfo erase memSize(%u) path({%s} -> {%s})\n", memSize,
               iter->second.path.c_str(), shmPath.c_str());
        shmMapInfo.erase(memSize);   // remove the stale key/value pair
    }

    // Not found (or just erased): map the new path.
    int32_t fd = open(shmPath.c_str(), O_RDWR);
    if (fd < 0) {
        // BUGFIX: the open() result was previously passed unchecked to
        // fstat()/close(), hiding the real failure behind "stat failed".
        throw std::runtime_error("open shared memory file failed: " + shmPath);
    }
    struct stat fileStat;
    if (fstat(fd, &fileStat) == -1) {
        close(fd);
        throw std::runtime_error("stat failed");
    }

    DeviceType type =
        isCuda ? DeviceType::DEVICE_TYPE_GPU : DeviceType::DEVICE_TYPE_NPU;
    // NPU locks the whole pool directly at mmap time.
    int mapFlags = MAP_SHARED | MAP_HUGETLB;

    if (type == DeviceType::DEVICE_TYPE_NPU) {
        mapFlags |= MAP_LOCKED;
    }
    mmapAddr = (uint8_t *)mmap(nullptr, fileStat.st_size,
                               PROT_READ | PROT_WRITE, 
                               mapFlags, 
                               fd, 
                               0
                            );
    close(fd);
    if (mmapAddr == MAP_FAILED) {
        throw std::runtime_error("Map shared memory failed");
    }

    const uint64_t shmSize = fileStat.st_size;
    shmMapInfo[memSize] = SDKMapInfo(shmPath, mmapAddr, shmSize);

    // GPU needs a separate pinned-memory registration.
    // TODO(yangxianpku: 2025.08.19): can the GPU pin the mmap'd region directly?
    if (type == DeviceType::DEVICE_TYPE_GPU) {
#if defined(USE_GPU)
        cudaError_t err =
            cudaHostRegister(mmapAddr, shmSize, cudaHostRegisterDefault);
        std::cout << "err = " << err << "\n";
        // Already-registered memory is not an error: reuse is expected.
        if (err != cudaSuccess && err != cudaErrorHostMemoryAlreadyRegistered) {
            throw std::runtime_error("cudaHostRegister failed");
        }
#else
        throw std::runtime_error(
            "If use gpu, please add -DDEVICE=GPU to cmake args");
#endif
    }
    return KVC_OK;
}



// Ask the service to seal (finalize) the memory identified by the given block
// hash ids. `tpNum` is the tensor-parallel count; `doSave` requests a flush.
//
// Always returns KVC_OK: on RPC failure response.result() is untrustworthy,
// so the failure is only logged and KVC_OK is reported to avoid ever blocking
// the inference pipeline on this call.
int32_t KVCacheSDK::sealMemory(const VecT<int64_t> &blockHashIds,
                               int32_t tpNum, 
                               bool doSave) {
    // Assemble the RPC request from the caller-supplied hash ids.
    sealMemoryRequest request;
    for (const int64_t hashId : blockHashIds) {
        request.add_hashids(hashId);
    }
    request.set_tpnum(tpNum);
    request.set_dosave(doSave);

    sealMemoryResponse response;
    grpc::ClientContext context;
    const auto status = _stub->sealMemory(&context, request, &response);

    if (!status.ok()) {
        std::cerr << "[sealMemory] RPC failed!" << std::endl;
    }
    return KVC_OK;
}



// Request the service to prefetch the prefix cache for the given sequences of
// block hash ids, restricted to layers [startLayer, endLayer].
//
// Always returns KVC_OK: on RPC failure response.result() is untrustworthy,
// so only a log line is emitted to avoid blocking the inference pipeline.
int32_t KVCacheSDK::PrefetchPrefixCache(const int32_t memSize,
                                const std::vector<std::vector<BlockHashId>> &seqs_blockHashIds,
                                const int32_t startLayer, 
                                const int32_t endLayer, 
                                const int32_t layerNum,
                                const int32_t layerCacheSize, 
                                int32_t tpNum) {
    PrefetchPrefixCacheRequest request;
    // Iterate by const reference: the old `for (auto seq : ...)` copied each
    // inner vector (and each BlockHashId) on every iteration.
    for (const auto &seq : seqs_blockHashIds) {
        auto entry = request.add_entries();
        for (const auto &id : seq) {
            auto blockHashId = entry->add_blockhashids();
            blockHashId->set_hashid(id.hashId);
            blockHashId->set_parenthashid(id.parentHashId);
            blockHashId->set_localhashid(id.localHashId);
            blockHashId->set_verifyhashid(id.verifyHashId, VERIFY_HASH_LENGTH);
        }
    }
    request.set_memsize(memSize);
    request.set_tpnum(tpNum);

    request.mutable_cacheinfo()->set_startlayer(startLayer);
    request.mutable_cacheinfo()->set_endlayer(endLayer);
    request.mutable_cacheinfo()->set_layernum(layerNum);
    request.mutable_cacheinfo()->set_layercachesize(layerCacheSize);

    PrefetchPrefixCacheResponse response;
    grpc::ClientContext context;

    auto status = _stub->PrefetchPrefixCache(&context, request, &response);
    if (!status.ok()) {
        std::cerr << "[PrefetchPrefixCache] RPC failed!" << std::endl;
        // On RPC failure response.result() is untrustworthy; report KVC_OK
        // anyway so inference is never blocked by a prefetch hiccup.
        return KVC_OK;
    }
    return KVC_OK;
}



int32_t KVCacheSDK::PrefetchPrefixCache(const int32_t memSize,
                            const std::vector<std::vector<int64_t>> &seqs_hashIds,
                            const int32_t startLayer, 
                            const int32_t endLayer,
                            const int32_t layerNum, 
                            const int32_t layerCacheSize,
                            int32_t tpNum
                        ) {
    std::vector<std::vector<BlockHashId>> seqs_blockHashIds;
    for (auto seq_hashIds : seqs_hashIds) {
        std::vector<BlockHashId> seq_blockHashIds;
        for (int64_t hashId : seq_hashIds) {
            seq_blockHashIds.emplace_back(hashId, 0, 0);
        }
        seqs_blockHashIds.push_back(seq_blockHashIds);
    }

    return PrefetchPrefixCache(memSize, 
                            seqs_blockHashIds, 
                            startLayer, 
                            endLayer,
                            layerNum, 
                            layerCacheSize, 
                            tpNum
                        );
}



std::vector<std::vector<Segment>> KVCacheSDK::BatchQueryPrefixCache(
    std::vector<std::vector<int64_t>> hashIdsList) {
    // 创建一个RPC请求的request参数
    kvcache::BatchQueryPrefixCacheRequest request;


    //! 1. 添加请求参数
    int32_t totalQueryCount = 0;
    for (auto it = hashIdsList.begin(); it != hashIdsList.end(); it++) {
        kvcache::QueryHashIds *queryHashIds = request.add_queries();
        for (auto hashId : *it) {
            queryHashIds->add_hashids(hashId);
        }
        totalQueryCount += it->size();
    }

    std::vector<std::vector<Segment>> returnList;
    // query全为空时不走grpc，直接包装空返回
    if (totalQueryCount == 0) {
        for (auto hashIds : hashIdsList) {
            returnList.emplace_back();
        }
        return returnList;
    }

    kvcache::BatchQueryPrefixCacheResponse response;
    grpc::ClientContext context;  //  创建默认的上下文

    auto status = _stub->BatchQueryPrefixCache(&context, 
                                            request, 
                                            &response
                                        );

    if (status.ok()) {
        size_t memSize      = response.memsize();
        std::string shmPath = response.shmpath();

        // 检查特定大小的共享内存文件是否存在且与shmPath保持一致, 如果存在且文件
        // 一致则直接返回, 否则unmap原来的，并重新创建
        CheckAndCreateShm(memSize, shmPath);

        // 装配segment向量
        //  [DEBUG]
        auto totalRegisterTime = 0;
        for (auto segmentAttrs : response.attrlist()) {
            std::vector<Segment> returnVec;
            for (auto attr : segmentAttrs.attr()) {
                uint8_t *addr = shmMapInfo[memSize].addr + attr.offset();
                returnVec.emplace_back(attr.segmentid(), 
                                    attr.offset(), 
                                    addr, 
                                    memSize, 
                                    true,
                                    shmPath, 
                                    attr.writeinrecordsize(), 
                                    attr.startlayer(),
                                    attr.endlayer(), 
                                    attr.layernum(), 
                                    attr.layercachesize()
                                );
            }
            returnList.push_back(returnVec);
        }
    } else {
        // RPC调用失败时，仅打印失败日志，返回空segment数组，视为kvclient未命中
        std::cerr << "[BatchQueryPrefixCache] RPC failed!" << std::endl;
        for (auto hashIds : hashIdsList) {
            returnList.emplace_back();
        }
    }
    return returnList;
}



std::vector<Segment> KVCacheSDK::AllocateMemory(
    const uint32_t memSize, const std::vector<BlockHashId> &blockHashIds,
    const int32_t writeInRecordSize, const int32_t startLayer,
    const int32_t endLayer, const int32_t layerNum,
    const int32_t layerCacheSize) {
    auto iter = this->shmMapInfo.find(memSize);
    if (iter == this->shmMapInfo.end()) {
        // allocate未发现对应shm即认为kvclient-service异常
        std::cerr << "[AllocateMemory] Service not correctly init!"
                  << std::endl;
        return std::vector<Segment>();
    }

    kvcache::AllocateMemoryRequest request;
    for (auto blockHashId : blockHashIds) {
        auto blockHashIdAttr = request.add_blockhashidattrs();
        blockHashIdAttr->set_hashid(blockHashId.hashId);
        blockHashIdAttr->set_parenthashid(blockHashId.parentHashId);
        blockHashIdAttr->set_localhashid(blockHashId.localHashId);
        blockHashIdAttr->set_verifyhashid(blockHashId.verifyHashId,
                                          VERIFY_HASH_LENGTH);
    }
    request.set_memsize(memSize);
    request.set_writeinrecordsize(writeInRecordSize);

    request.mutable_cacheinfo()->set_startlayer(startLayer);
    request.mutable_cacheinfo()->set_endlayer(endLayer);
    request.mutable_cacheinfo()->set_layernum(layerNum);
    request.mutable_cacheinfo()->set_layercachesize(layerCacheSize);

    kvcache::AllocateMemoryResponse response;
    grpc::ClientContext context;

    auto status = _stub->AllocateMemory(&context, request, &response);
    if (!status.ok()) {
        // RPC调用失败时，仅打印失败日志，返回空segment数组，视为申请失败，无需刷盘
        std::cerr << "[AllocateMemory] RPC failed!" << std::endl;
        return std::vector<Segment>();
    }

    std::string shmPath = response.shmpath();

    std::vector<Segment> returnSegments;
    uint64_t totalResgiserTime = 0;
    for (int32_t i = 0; i < response.segments_size(); i++) {
        auto segmentInfo = response.segments(i);
        uint8_t *addr = this->shmMapInfo[memSize].addr + segmentInfo.offset();
        returnSegments.push_back(
            Segment(segmentInfo.segmentid(), segmentInfo.offset(), addr,
                    memSize, segmentInfo.isfilled(), shmPath, writeInRecordSize,
                    startLayer, endLayer, layerNum, layerCacheSize));
    }

    return returnSegments;
}



std::vector<Segment> KVCacheSDK::AllocateMemory(
    const uint32_t memSize, const std::vector<int64_t> &hashIds,
    const int32_t writeInRecordSize, const int32_t startLayer,
    const int32_t endLayer, const int32_t layerNum,
    const int32_t layerCacheSize) {
    std::vector<BlockHashId> blockHashIds;
    for (int64_t hashId : hashIds) {
        blockHashIds.emplace_back(hashId, 0, 0);
    }
    return AllocateMemory(memSize, blockHashIds, writeInRecordSize, startLayer,
                          endLayer, layerNum, layerCacheSize);
}



// Fetch the shm file path for the pool of size `memSize` from the service and
// map/lock it locally via CheckAndCreateShm. On RPC failure only a log line
// is emitted and the pool stays unmapped.
// NOTE(review): the `type` parameter is currently unused here — device type
// is decided inside CheckAndCreateShm via `isCuda`; confirm before removing.
void KVCacheSDK::LockMemoryPool(const uint32_t memSize, DeviceType type) {
    kvcache::GetShmPathRequest request;
    request.set_memsize(memSize);

    kvcache::GetShmPathResponse response;
    // TODO(yangxianpku: 2025.09.01): could the context be shared globally?
    grpc::ClientContext context;

    // Ask the server for the shm file path of this pool size.
    const auto status = _stub->GetShmPath(&context, request, &response);
    if (!status.ok()) {
        std::cerr << "[LockMemoryPool] RPC failed!" << std::endl;
        return;
    }

    CheckAndCreateShm(memSize, response.shmpath());
}



// Probe the kvclient service for readiness. Returns the service-reported
// ready flag, or false when the RPC itself fails.
bool KVCacheSDK::CheckIfKVClientReady() {
    kvcache::CheckIfServiceReadyRequest request;
    kvcache::CheckIfServiceReadyResponse response;
    grpc::ClientContext context;

    const auto status =
        _stub->CheckIfServiceReady(&context, request, &response);
    if (status.ok()) {
        return response.isready();
    }
    std::cerr << "[CheckIfKVClientReady] RPC failed!" << std::endl;
    return false;
}



// Ask the service to clean (reset) the memory pool of size `memSize`.
// Best-effort: an RPC failure is only logged.
void KVCacheSDK::CleanMemoryPool(const int32_t memSize) {
    kvcache::CleanMemoryPoolRequest request;
    request.set_memsize(memSize);

    kvcache::CleanMemoryPoolResponse response;
    grpc::ClientContext context;

    const auto status = _stub->CleanMemoryPool(&context, request, &response);
    if (!status.ok()) {
        std::cerr << "[CleanMemoryPool] RPC failed!" << std::endl;
    }
}