/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.
 * Description: hdds op remote update execute.
 * Author: qingjicheng
 * Create: 2022-11-28
 */

#include "op_hdcs_remote_update_executor.h"
#include "log.h"
#include "adapter_hdds.h"
#include "adapter_hal.h"
#include "dtype_common.h"
#include "transport_shm_event_pub.h"
#include "hccl/hccl_ex.h"
#include "../../../../hccl_heterog/rpc/hccl_comm_rpc.h"
#include "../hccl_heterog/rpc/hcom_rpc.h"

using namespace std;

namespace hccl {
// Sentinel in rankId2PsId_ marking a rank that is not a PS.
constexpr u32 PS_INVALID_VALUE = 65536;
// Performance tuning knob: minimum keys per PS before work is fanned out to multiple threads.
constexpr u32 EACH_PS_KEY_NUM = 10;

// Wire every task type to its handler once at construction time; ExecuteTask
// dispatches through this table.
HdcsOpRemoteUpdateExecutor::HdcsOpRemoteUpdateExecutor() : taskExeFuncTable_({
    {OpTaskType::UPDATE, [this](OpTaskPtr &task) { return TaskUpdate(task); }},
    {OpTaskType::REDUCE_SUM, [this](OpTaskPtr &task) { return TaskUpdateReduceSum(task); }},
    {OpTaskType::KEY_REDUCE, [this](OpTaskPtr &task) { return TaskUpdateKeyReduce(task); }},
    {OpTaskType::SEND_REQUEST, [this](OpTaskPtr &task) { return TaskSendRequest(task); }},
    {OpTaskType::RESET_UNIQUE_HANDLE, [this](OpTaskPtr &task) { return TaskResetUniqueHandle(task); }},
    {OpTaskType::RECV_RESPONSE, [this](OpTaskPtr &task) { return TaskRecvResponse(task); }}
})
{}

HdcsOpRemoteUpdateExecutor::~HdcsOpRemoteUpdateExecutor()
{
    // Release executor-owned state (psRankId_ and the psIdsMem_ scratch buffer).
    DeInit();
}

/*
 * Initializes executor state from the list of PS rank ids.
 * Builds the rankId -> psId lookup table and sizes all per-PS / per-thread
 * bookkeeping containers.
 */
HcclResult HdcsOpRemoteUpdateExecutor::Init(std::vector<u32> &psRankId)
{
    // Guard: with an empty list, *max_element would dereference the end iterator (UB).
    CHK_PRT_RET(psRankId.empty(), HCCL_ERROR("[Init]psRankId is empty"), HCCL_E_PARA);

    firstMemset_ = true;
    psRankId_ = psRankId;
    auto maxValue = *max_element(psRankId_.begin(), psRankId_.end());
    // assign (not insert) so that a repeated Init() rebuilds the table instead of
    // prepending a second copy in front of the stale one.
    rankId2PsId_.assign(maxValue + 1, PS_INVALID_VALUE);
    psSize_ = psRankId_.size();
    for (u32 i = 0; i < psSize_; i++) {
        rankId2PsId_[psRankId_[i]] = i;
    }
    psKeyNum_.resize(psSize_, 0);
    psOffset_.resize(psSize_, 0);
    psOffsetForShard_.resize(psSize_);
    psKeyNumForShard_.resize(psSize_);

#ifdef CCL_KERNEL
    // Per-thread containers are only needed when the parallel kernel path is built.
    parallelExecCpuNum_ = GetCPUNum();
    vectorReduceParams_.resize(parallelExecCpuNum_);
    realKeyMapForShard_.resize(parallelExecCpuNum_);
    threadkeyNumForShard_.resize(parallelExecCpuNum_);
    for (u32 i = 0; i < psSize_; ++i) {
        psKeyNumForShard_[i].resize(parallelExecCpuNum_);
        psOffsetForShard_[i].resize(parallelExecCpuNum_);
    }
#endif
    return HCCL_SUCCESS;
}

// Releases executor-owned resources; safe to call repeatedly.
HcclResult HdcsOpRemoteUpdateExecutor::DeInit()
{
    psRankId_.clear();
    // delete[] on nullptr is a no-op, so no guard is needed.
    delete[] psIdsMem_;
    psIdsMem_ = nullptr;
    return HCCL_SUCCESS;
}

/*
 * Entry point for one remote-update operator: grows the psId scratch buffer
 * if this op carries more keys than any previous one, then drains its task queue.
 */
HcclResult HdcsOpRemoteUpdateExecutor::ExecuteOp(HcclOpPtr opPtr)
{
    CHK_PTR_NULL(opPtr);
    HcclOperator<EmbeddingServiceParam> *ptr = static_cast<HcclOperator<EmbeddingServiceParam> *>(opPtr);

    if (ptr->param_.keyMaxNum > keyMaxNum_) {
        // Allocate the bigger buffer first so an allocation failure leaves the object
        // consistent. Previously keyMaxNum_ was bumped before allocating; on failure a
        // later call with the same keyMaxNum would skip reallocation and leave
        // psIdsMem_ == nullptr for the rest of the pipeline.
        u32 *newMem = new (std::nothrow) u32[ptr->param_.keyMaxNum];
        CHK_PTR_NULL(newMem);
        delete[] psIdsMem_;
        psIdsMem_ = newMem;
        keyMaxNum_ = ptr->param_.keyMaxNum;
        HCCL_INFO("update tag[%d] alloc new mem, size[%u]", ptr->param_.tag, keyMaxNum_);
    }

    CHK_RET(ExecuteTask(ptr));

    return HCCL_SUCCESS;
}

/*
 * Drains the operator's task queue, dispatching each task through
 * taskExeFuncTable_. Uses find() instead of operator[]: the latter would
 * default-insert an empty std::function for an unknown task type and then
 * invoke it (undefined behavior / bad_function_call).
 */
HcclResult HdcsOpRemoteUpdateExecutor::ExecuteTask(HcclOperator<EmbeddingServiceParam> *opPtr)
{
    auto &taskQue = GetTaskQue<EmbeddingServiceParam>(opPtr);
    while (!taskQue.empty()) {
        auto &task = taskQue.front();
        auto iter = taskExeFuncTable_.find(task->taskType);
        CHK_PRT_RET(iter == taskExeFuncTable_.end(),
            HCCL_ERROR("[ExecuteTask]unsupported task type[%d]", static_cast<s32>(task->taskType)),
            HCCL_E_NOT_SUPPORT);
        CHK_RET(iter->second(task));
        taskQue.pop();
    }
    return HCCL_SUCCESS;
}

/*
 * Full update pipeline in one task: reduce-sum locally, send key/value data
 * to every PS, then wait for all responses.
 */
HcclResult HdcsOpRemoteUpdateExecutor::TaskUpdate(OpTaskPtr &task)
{
    ReduceSumTask *taskPtr = dynamic_cast<ReduceSumTask *>(task.get());
    // dynamic_cast yields nullptr on a type mismatch; check before dereferencing.
    CHK_PTR_NULL(taskPtr);
    CHK_PTR_NULL(taskPtr->keyTransferMem);
    CHK_PTR_NULL(taskPtr->valueTransferMem);
    CHK_PTR_NULL(taskPtr->keys);
    CHK_PTR_NULL(taskPtr->value);

    CHK_RET(ReduceSum(taskPtr));

    CHK_RET(UpdateDataSend(taskPtr));

    CHK_RET(WaitUpdateFinish(taskPtr));
    return HCCL_SUCCESS;
}

/*
 * Stand-alone reduce-sum task: deduplicates keys and accumulates their values
 * into the transfer buffers without sending anything.
 */
HcclResult HdcsOpRemoteUpdateExecutor::TaskUpdateReduceSum(OpTaskPtr &task)
{
    ReduceSumTask *taskPtr = dynamic_cast<ReduceSumTask *>(task.get());
    // dynamic_cast yields nullptr on a type mismatch; check before dereferencing.
    CHK_PTR_NULL(taskPtr);
    CHK_PTR_NULL(taskPtr->keyTransferMem);
    CHK_PTR_NULL(taskPtr->valueTransferMem);
    CHK_PTR_NULL(taskPtr->keys);
    CHK_PTR_NULL(taskPtr->value);

    CHK_RET(ReduceSum(taskPtr));
    return HCCL_SUCCESS;
}

/*
 * Scatters keys into keyTransferMem grouped by destination PS, using the
 * per-PS offsets previously stored in psOffsetAndNum_.
 */
HcclResult HdcsOpRemoteUpdateExecutor::KeyReduceWithoutMappingMatrix(KeyReduceTask &params)
{
    CHK_RET(hrtGetBatchPsIds(reinterpret_cast<u64 *>(params.keys), &psIdsMem_, params.keyMaxNum));

    // std::vector instead of a runtime-sized stack array: VLAs are a
    // non-standard compiler extension in C++.
    std::vector<u32> counts(psSize_, 0);
    for (u32 i = 0; i < params.keyMaxNum; i++) {
        u32 psId = psIdsMem_[i];
        // Guard against an out-of-range psId before indexing per-PS tables.
        CHK_PRT_RET(psId >= psSize_,
            HCCL_ERROR("psId[%u] >= psSize[%u], key index[%u]", psId, psSize_, i), HCCL_E_INTERNAL);

        s64 index = psOffsetAndNum_[psId].offset + counts[psId];
        CHK_PRT_RET((index > INT32_MAX),
            HCCL_ERROR("index[%lld] > INT32_MAX, current does not support", index), HCCL_E_NOT_SUPPORT);

        s64 *dstMem = static_cast<s64 *>(params.keyTransferMem) + index;
        *dstMem = params.keys[i];
        counts[psId]++;
    }

    return HCCL_SUCCESS;
}

#ifdef ES_DATA_DFX
/*
 * DFX helper: logs a word-sum checksum of the buffer, zeroes it with memset_s,
 * then logs the checksum again so the clear is verifiable in the logs.
 * The parameter is non-const because memset_s writes through it — the previous
 * `const void *` signature could not legally be passed to memset_s (nor be
 * reinterpret_cast to a non-const u32 *).
 */
void PrintClearResult(void *buffer, u64 valueSize)
{
    u32 valueSum = 0;
    for (u64 i = 0 ; i < valueSize / sizeof(int); i++) {
        valueSum += *(reinterpret_cast<u32 *>(buffer) + i);
    }
    HCCL_ERROR("memset clear before buffer[%p] valueSize[%llu] valueSum[%u]", buffer, valueSize, valueSum);
    s32 ret = memset_s(buffer, valueSize, 0, valueSize);
    CHK_PRT_CONT(ret != EOK, HCCL_ERROR("[clear]buffer memset falied"));
    valueSum = 0;
    for (u64 i = 0 ; i < valueSize / sizeof(int); i++) {
        valueSum += *(reinterpret_cast<u32 *>(buffer) + i);
    }
    HCCL_ERROR("memset clear after buffer[%p] valueSize[%llu] valueSum[%u]", buffer, valueSize, valueSum);
}
#endif

/*
 * Key-reduce task; supports both paired and non-paired modes.
 * Paired mode: dedup metadata (indices / psSeg) comes from the caller.
 * Non-paired mode: psId info and the mapping matrix are computed here.
 */
HcclResult HdcsOpRemoteUpdateExecutor::TaskUpdateKeyReduce(OpTaskPtr &task)
{
    KeyReduceTask *taskPtr = dynamic_cast<KeyReduceTask *>(task.get());
    // dynamic_cast yields nullptr on a type mismatch; check before dereferencing.
    CHK_PTR_NULL(taskPtr);
    CHK_PTR_NULL(taskPtr->keyTransferMem);
    CHK_PTR_NULL(taskPtr->valueTransferMem);
    CHK_PTR_NULL(taskPtr->keys);
    CHK_PTR_NULL(taskPtr->value);

#ifdef ES_DATA_DFX
    u64 valueSize = static_cast<u64>(taskPtr->keyMaxNum) * taskPtr->valueItemSize;
    PrintClearResult(taskPtr->valueTransferMem, valueSize);
#endif

    if (taskPtr->pairedMode) {
        // These inputs are only provided (and only required) in paired mode.
        CHK_PTR_NULL(taskPtr->indices);
        CHK_PTR_NULL(taskPtr->numUniqued);
        CHK_PTR_NULL(taskPtr->psSeg);
        CHK_PTR_NULL(taskPtr->psSegNum);
    }

    CHK_RET(GetOrPrepareMappingInfo(*taskPtr));

    if (taskPtr->pairedMode) {
        TIME_PRINT(CHK_RET(KeyReduce(*taskPtr)));
    } else {
        CHK_RET(GetPsIdInfo(*taskPtr));
        TIME_PRINT(CHK_RET(ReduceKeyAndGetMappingMatrix(*taskPtr)));
    }

    PRINT_ARRAY(reinterpret_cast<s64 *>(psOffsetAndNum_.Get()), psSize_ * DOUBLE, "UpdatedPsOffset");
    PRINT_ARRAY(taskPtr->keys, taskPtr->keyMaxNum, "UpdateKeys");
    PRINT_ARRAY(mappingMatrix_.Get(), taskPtr->keyMaxNum, "UpdateMappingMatrix");
    PRINT_ARRAY(static_cast<s64 *>(taskPtr->keyTransferMem), taskPtr->keyMaxNum * DOUBLE, "UpdatekeyTransferMem");
    PRINT_ARRAY(static_cast<float *>(taskPtr->valueTransferMem), taskPtr->keyMaxNum * taskPtr->valueItemSize /
        sizeof(float), "updateValueTransferMemCleared");

    PRINT_ARRAY(static_cast<float *>(taskPtr->value), taskPtr->keyMaxNum * taskPtr->valueItemSize /
        sizeof(float), "updateValue");

#ifndef USE_AICORE_REDUCESUM
    // Without the aicore operator, the value reduce-sum runs on the aicpu.
    CHK_RET(ParralValueReudceSum(*taskPtr));

    PRINT_ARRAY(static_cast<float *>(taskPtr->valueTransferMem), taskPtr->keyMaxNum * taskPtr->valueItemSize /
        sizeof(float), "updateValueTransferMemAfterCPUReduceSum");
#endif

    return HCCL_SUCCESS;
}

/*
 * Runs ValueReudceSum across parallelExecCpuNum_ workers. Each worker only
 * processes keys whose (key % tidNum) equals its own thread index, so no two
 * threads ever write the same destination row.
 */
HcclResult HdcsOpRemoteUpdateExecutor::ParralValueReudceSum(KeyReduceTask &params)
{
    u32 tidNum = parallelExecCpuNum_;
    // Suppress the unused-variable warning when CCL_KERNEL is not defined.
    static_cast<void>(tidNum);

#ifdef CCL_KERNEL
    ParallelFor(tidNum, 1, [&params, tidNum, this](int64_t start, int64_t end) -> HcclResult {
        for (int64_t i = start; i < end; i++) {
            // Here start denotes the thread index, so the narrowing cast cannot truncate.
            u32 tidx = static_cast<u32>(i);
            // Skip every key that does not hash to this worker's thread index.
            auto filtered = [&params, tidx, tidNum, this] (const u32& index) -> bool {
                u32 val = static_cast<u32>(params.keys[index] % tidNum);

                return (tidx != val);
            };

            CHK_RET(ValueReudceSum(params, filtered));
        }

        return HCCL_SUCCESS;
    });
#endif
    return HCCL_SUCCESS;
}

/*
 * Accumulates each (non-filtered) source value row into the transfer-buffer
 * row selected by the mapping matrix.
 * Precondition: the destination value buffer was cleared before this call.
 * NOTE(review): assumes values are float rows of valueItemSize bytes;
 * params.valueType is not consulted here — confirm with callers.
 */
HcclResult HdcsOpRemoteUpdateExecutor::ValueReudceSum(KeyReduceTask &params, function<bool(const u32&)> filtered)
{
    float *srcValue = static_cast<float *>(params.value);
    float *valueToSend = static_cast<float *>(params.valueTransferMem);

    u32 valueDim = static_cast<u32>(params.valueItemSize / sizeof(float));

    for (u32 i = 0; i < params.keyMaxNum; i++) {
        if (filtered(i)) {
            continue;
        }

        // Validate in the signed domain: the previous code copied the s32 entry
        // into a u64 first, which made its "< 0" branch unreachable (a negative
        // index wrapped to a huge unsigned value) and mismatched the %lld format.
        s64 mappedIdx = static_cast<s64>(mappingMatrix_[i]);
        if (mappedIdx < 0 || mappedIdx >= static_cast<s64>(params.keyMaxNum)) {
            HCCL_ERROR("valueToSendIdx[%lld] of indices idx[%u], < 0 or >= keyMaxNum[%u]", mappedIdx, i,
                params.keyMaxNum);
            return HCCL_E_PARA;
        }

        s64 dstIdx = mappedIdx * valueDim;

        FloatHighValueSum(srcValue + i * valueDim, valueToSend + dstIdx, valueDim);
    }

    return HCCL_SUCCESS;
}

/*
 * Points mappingMatrix_ / psOffsetAndNum_ at the right storage.
 * Paired mode: both come straight from the caller-provided task fields.
 * Non-paired mode: the mapping matrix lives right behind the keys in the
 * transfer buffer (used by the unsorted_seg_sum operator).
 */
HcclResult HdcsOpRemoteUpdateExecutor::GetOrPrepareMappingInfo(KeyReduceTask &params)
{
    if (params.pairedMode) {
        s64 &segNum = *(static_cast<s64 *>(params.psSegNum));
        if (segNum != static_cast<s64>(psSize_)) {
            HCCL_ERROR("psSegNum[%lld] != psSize[%u]", segNum, psSize_);
            return HCCL_E_PARA;
        }
        mappingMatrix_.Set(static_cast<s32 *>(params.indices));
        psOffsetAndNum_.Set(static_cast<PsBufferInfo *>(params.psSeg));
        return HCCL_SUCCESS;
    }

    void *matrixAddr = static_cast<s64 *>(params.keyTransferMem) + params.keyMaxNum;
    mappingMatrix_.Set(static_cast<s32 *>(matrixAddr));
    psOffsetAndNum_.Resize(psSize_);
    return HCCL_SUCCESS;
}

/*
 * Fills psOffsetAndNum_[ps].offset with the exclusive prefix sum of per-PS key
 * counts. Replaces the previous O(keyMaxNum * psSize) repeated std::count over
 * a full copy of the psId array with a single-pass histogram.
 */
HcclResult HdcsOpRemoteUpdateExecutor::GetPsIdInfo(const KeyReduceTask &params)
{
    CHK_RET(hrtGetBatchPsIds(reinterpret_cast<u64 *>(params.keys), &psIdsMem_, params.keyMaxNum));

    psOffset_.assign(psSize_, 0);

    // One pass: count how many keys each PS must deduplicate.
    std::vector<u64> psCounts(psSize_, 0);
    for (u32 i = 0; i < params.keyMaxNum; i++) {
        u32 psId = psIdsMem_[i];
        CHK_PRT_RET(psId >= psSize_,
            HCCL_ERROR("psId[%u] >= psSize[%u], key index[%u]", psId, psSize_, i), HCCL_E_INTERNAL);
        psCounts[psId]++;
    }

    // Exclusive prefix sum: each PS's offset is the total of all earlier PSes.
    u64 offset = 0;
    for (u32 ps = 0; ps < psSize_; ps++) {
        psOffsetAndNum_.Get()[ps].offset = static_cast<s64>(offset);
        offset += psCounts[ps];
    }

    return HCCL_SUCCESS;
}

/*
 * Zeroes the float value transfer buffer (keyMaxNum rows of valueItemSize bytes).
 */
HcclResult HdcsOpRemoteUpdateExecutor::ClearValueMem(const KeyReduceTask &params)
{
    HCCL_DEBUG("Entry ClearValueMem");
    // Widen before multiplying to avoid u32 overflow; valueSize is a float count.
    u64 valueSize = static_cast<u64>(params.keyMaxNum) * params.valueItemSize / sizeof(float);
    s32 sRet = FloatMemClear(static_cast<float *>(params.valueTransferMem), valueSize);
    // %llu, not %u: valueSize is 64-bit and a mismatched specifier is UB in varargs.
    CHK_PRT_RET(sRet != EOK, HCCL_ERROR("[FloatMemClear]memset failed "
        "mem[%p] size[%llu]", params.valueTransferMem, valueSize), HCCL_E_SYSCALL);

    return HCCL_SUCCESS;
}

/*
 * Decides how many worker threads each PS shard gets for the given batch.
 * Small batches stay single-threaded; otherwise the CPUs are spread over the
 * PS shards, rounding up.
 */
u32 HdcsOpRemoteUpdateExecutor::CalThreadNumEachPs(u32 keyMaxNum)
{
    // Too few keys per PS: parallelism would not pay off, use one thread.
    if (keyMaxNum / psSize_ < EACH_PS_KEY_NUM) {
        return 1;
    }

    // ceil(parallelExecCpuNum_ / psSize_)
    u32 perPs = parallelExecCpuNum_ / psSize_;
    return (parallelExecCpuNum_ % psSize_ != 0) ? (perPs + 1) : perPs;
}

/*
 * Copies each PS shard's deduplicated key count out of the concurrent unique
 * mappers into psOffsetAndNum_[i].count (the offsets were filled earlier).
 */
HcclResult HdcsOpRemoteUpdateExecutor::FetchUniquedCounts()
{
    for (u32 i = 0; i < psSize_; i++) {
        auto &mappers = conUniqueMappersMgr_.GetMappers();
        // mappers.at(i) throws when shard i is missing; EXECEPTION_CATCH maps
        // that to the HCCL_E_NOT_FOUND return.
        EXECEPTION_CATCH(psOffsetAndNum_.Get()[i].count = static_cast<s64>(mappers.at(i).GetUniuqedCount()),
            return HCCL_E_NOT_FOUND);
    }

    PRINT_ARRAY(reinterpret_cast<s64 *>(psOffsetAndNum_.Get()), psSize_ * DOUBLE, "updatedPsOffsetAfterFetch");

    return HCCL_SUCCESS;
}

/*
 * Paired-mode key dedup: reduces the keys in parallel while filling the
 * caller-provided mapping matrix in a single pass.
 */
HcclResult HdcsOpRemoteUpdateExecutor::KeyReduce(KeyReduceTask &params)
{
    auto ret = ConUniqueMappersMgr<s64, s32>::ParallelReduceByMappingMatrixEqually(
        static_cast<s64 *>(params.keyTransferMem), params.keys, mappingMatrix_.Get(),
        params.keyMaxNum, parallelExecCpuNum_);
    if (UNLIKELY(ret != HCCL_SUCCESS)) {
        HCCL_ERROR("ParallelReduceByMappingMatrixEqually failed, ret[%d]", ret);
        return ret;
    }

    return HCCL_SUCCESS;
}

/*
 * Non-paired mode dedup: configures the concurrent unique-mapper manager
 * (one mapper per PS), deduplicates the keys into keyTransferMem, produces
 * the key->unique-slot mapping matrix, and fetches the per-PS unique counts.
 */
HcclResult HdcsOpRemoteUpdateExecutor::ReduceKeyAndGetMappingMatrix(const KeyReduceTask &task)
{
    ConUniqueMappersInfo<s64, s32> info{};
    info.conMapperNum = psSize_;                       // one concurrent mapper per PS shard
    info.parallelNum = CalThreadNumEachPs(task.keyMaxNum);
    info.maxParallelNum = parallelExecCpuNum_;
    info.srcData.eles = task.keys;                     // raw (duplicated) keys in
    info.srcData.count = task.keyMaxNum;
    info.srcData.elesCapacity = task.keyMaxNum;
    info.uniqueData.eles = static_cast<s64 *>(task.keyTransferMem);  // deduped keys out
    info.uniqueData.count = 0;
    info.uniqueData.elesCapacity = task.keyMaxNum;
    info.preSlicingOffsetAndNum = psOffsetAndNum_.Get();  // per-PS offsets from GetPsIdInfo
    info.preSlicingInfo = psIdsMem_;                      // per-key PS id from GetPsIdInfo
    // Heuristic initial map capacity per worker — assumes keys spread evenly
    // over PSes and threads.
    info.mapReserveNum = task.keyMaxNum / psSize_ / info.parallelNum;
    info.splitData = true;

    CHK_RET(conUniqueMappersMgr_.Cfg(info));

    // Wrap the raw mapping-matrix storage for the manager's output API.
    Buffer<s32> mappingMatrixS32;
    mappingMatrixS32.eles = mappingMatrix_.Get();
    mappingMatrixS32.count = 0;
    mappingMatrixS32.elesCapacity = task.keyMaxNum;

    CHK_RET(conUniqueMappersMgr_.UniqueDataAndGetMappingMatrix(mappingMatrixS32));
    CHK_RET(FetchUniquedCounts());

    PRINT_ARRAY(reinterpret_cast<s64 *>(psOffsetAndNum_.Get()), psSize_ * DOUBLE, "updatedPsOffset");

    PRINT_ARRAY(static_cast<float *>(task.value), task.keyMaxNum * task.valueItemSize / sizeof(float), "updatedvalues");

    return HCCL_SUCCESS;
}

/*
 * Deduplicates keys and reduce-sums their values. Work is sharded twice:
 * the output is grouped by destination PS, and inside each PS group by the
 * worker thread that owns the key (key % parallelExecCpuNum_). After the
 * parallel phase, the per-thread sub-blocks are compacted into one contiguous
 * region per PS inside the transfer buffers.
 * NOTE(review): parallelExecCpuNum_ is only assigned under CCL_KERNEL in
 * Init(); the modulo operations below assume it is non-zero — confirm the
 * member is initialized for non-CCL_KERNEL builds.
 */
HcclResult HdcsOpRemoteUpdateExecutor::ReduceSum(ReduceSumTask *params)
{
    // Reset all per-thread / per-PS bookkeeping from the previous round.
    for (u32 i = 0; i < parallelExecCpuNum_; ++i) {
        threadkeyNumForShard_[i].clear();
        realKeyMapForShard_[i].reserve(params->keyMaxNum);
        for (u32 j = 0; j < psSize_; ++j) {
            psKeyNumForShard_[j][i] = 0;
            psOffsetForShard_[j][i] = 0;
        }
    }
    psOffset_.assign(psSize_, 0);
    psKeyNum_.assign(psSize_, 0);

    // Map every key to its destination PS, then histogram keys per PS.
    CHK_RET(hrtGetBatchPsIds((reinterpret_cast<u64 *> (params->keys)), &psIdsMem_, params->keyMaxNum));
    for (u32 i = 0; i < params->keyMaxNum; i++) {
        psOffset_[psIdsMem_[i]]++ ;
    }

    // Turn the per-PS histogram into exclusive prefix offsets in-place.
    u64 offset = 0;
    for (u32 i = 0; i < psOffset_.size(); i++) {
        u64 tmp = psOffset_[i];
        psOffset_[i] = offset;
        offset += tmp;
        HCCL_DEBUG("psOffset_[%u]=[%llu]", i, psOffset_[i]);
    }

    // Clear the value transfer buffer once, before its first use.
    if (firstMemset_) {
        firstMemset_ = false;
        u64 valueSize = params->keyMaxNum * params->valueItemSize / sizeof(float);
        s32 sRet = FloatMemClear(static_cast<float *>(params->valueTransferMem), valueSize);
        CHK_PRT_RET(sRet != EOK, HCCL_ERROR("[FloatMemClear]memset failed "
            "mem[%p] size[%u]", params->valueTransferMem, valueSize), HCCL_E_SYSCALL);
    }

    // Record, per worker thread, the indices of the keys it will process.
    for (u32 keyIndex = 0; keyIndex < params->keyMaxNum; keyIndex++) {
        threadkeyNumForShard_[(*(params->keys + keyIndex)) % parallelExecCpuNum_].push_back(keyIndex);
    }

    // The output is partitioned first by PS, then by threadId within each PS
    // block; psOffsetForShard_ holds each sub-block's start offset.
    for (u32 keyIndex = 0; keyIndex < params->keyMaxNum; keyIndex++) {
        psOffsetForShard_[*(psIdsMem_ + keyIndex)][(*(params->keys + keyIndex)) % parallelExecCpuNum_]++;
    }

    // Convert the (ps, thread) counts into a flat exclusive prefix sum.
    offset = 0;
    for (u32 psId = 0; psId < psSize_; psId++) {
        for (u32 threadId = 0; threadId < parallelExecCpuNum_; ++threadId) {
            u32 curThreadKeyNum = psOffsetForShard_[psId][threadId];
            psOffsetForShard_[psId][threadId] = offset;
            offset += curThreadKeyNum;
        }
    }

    // Stage the per-thread parameters.
    for (u32 i = 0; i < parallelExecCpuNum_; i++) {
        vectorReduceParams_[i] = params;
    }

#ifdef CCL_KERNEL
    // Each worker runs VectorReduce on its own key shard; shards never overlap,
    // so the workers write disjoint regions of the transfer buffers.
    constexpr u32 PER_UNIT_SIZE = 1;
    ParallelFor(parallelExecCpuNum_, PER_UNIT_SIZE,
        [this](int64_t start, int64_t end) -> HcclResult {
            for (int64_t i = start; i < end; i++) {
                CHK_RET(VectorReduce(static_cast<u32>(i)));
            }

            return HCCL_SUCCESS;
        });
#endif

    // sendbuf copy start: compact each PS's per-thread sub-blocks (which may be
    // sparsely filled after dedup) into one contiguous region per PS.
    u32 valueCount = params->valueItemSize / sizeof(float);
    for (u32 psId = 0; psId < psSize_ ; ++psId) {
        u64 transferMemOffset = 0; // running offset inside this PS's send-buffer region (main thread)
        for (u32 threadId = 0; threadId < parallelExecCpuNum_; ++threadId) {
            s64 *keySrc = static_cast<s64 *>(params->keyTransferMem) + psOffsetForShard_[psId][threadId];
            s64 *keyDst = static_cast<s64 *>(params->keyTransferMem) + psOffset_[psId] + transferMemOffset;
            u32 count = psKeyNumForShard_[psId][threadId];
            s32 sRet = S64HighSpeedMove(keyDst, keySrc, count);
            CHK_PRT_RET(sRet != EOK, HCCL_ERROR("[S64HighSpeedMove]memcpy failed "
                "src[%p] dst[%p] count[%u]", keySrc, keyDst, count), HCCL_E_SYSCALL);

            float *valueSrc = static_cast<float *>(params->valueTransferMem) +
                (psOffsetForShard_[psId][threadId]) * valueCount;
            float *valueDst = static_cast<float *>(params->valueTransferMem) +
                (psOffset_[psId] + transferMemOffset) * valueCount;
            sRet = FloatHighSpeedMove(valueDst, valueSrc, count * valueCount);
            CHK_PRT_RET(sRet != EOK, HCCL_ERROR("[FloatHighSpeedMove]memcpy failed"
                "src[%p] dst[%p] count[%u]", valueSrc, valueDst, count * valueCount), HCCL_E_SYSCALL);
            transferMemOffset += count;
            psKeyNum_[psId] += psKeyNumForShard_[psId][threadId]; // accumulate each PS's deduped key count on the main thread
        }
    }

    return HCCL_SUCCESS;
}

/*
 * Worker body for the parallel reduce-sum: processes the keys assigned to
 * thread taskIndex. The first time a key is seen it is written to the
 * (ps, thread) sub-block of keyTransferMem and its value row is copied;
 * subsequent occurrences accumulate their values into the same row.
 * Threads write disjoint sub-blocks (offsets precomputed in ReduceSum),
 * so no synchronization is needed here.
 */
HcclResult HdcsOpRemoteUpdateExecutor::VectorReduce(u32 taskIndex)
{
    ReduceSumTask& curPara = *vectorReduceParams_[taskIndex];
    u32 curKeySize = threadkeyNumForShard_[taskIndex].size();

    u32 psId = 0;
    u32 realKeyPos = 0;
    u32 valueCount = curPara.valueItemSize / sizeof(float);
    std::pair<std::unordered_map<s64, u32>::iterator, bool> ret;
    for (u32 i = 0; i < curKeySize; ++i) {
        u32 keyIndex = threadkeyNumForShard_[taskIndex][i];
        psId = *(psIdsMem_ + keyIndex);
        realKeyPos = psKeyNumForShard_[psId][taskIndex];
        // insert() only takes effect for a first occurrence; for duplicates,
        // ret.first points at the slot recorded when the key was first seen.
        ret = realKeyMapForShard_[taskIndex].insert({*(curPara.keys + keyIndex), realKeyPos});
        realKeyPos = ret.first->second;
        float *valueSrcMem = reinterpret_cast<float *>(static_cast<u8 *>(curPara.value) +
            keyIndex * curPara.valueItemSize);
        float *valueDstMem = reinterpret_cast<float *>(static_cast<u8 *>(curPara.valueTransferMem) +
            (psOffsetForShard_[psId][taskIndex] + realKeyPos) * curPara.valueItemSize);
        if (ret.second) {
            // First occurrence: emit the key and copy its value row.
            s64 *keyDstMem = static_cast<s64 *>(curPara.keyTransferMem) +
                psOffsetForShard_[psId][taskIndex] + realKeyPos;
            *keyDstMem = *(curPara.keys + keyIndex);
            FloatHighSpeedMove(valueDstMem, valueSrcMem, valueCount);
            psKeyNumForShard_[psId][taskIndex]++;
        } else {
            // Duplicate: accumulate into the existing row.
            FloatHighValueSum(valueSrcMem, valueDstMem, valueCount);
        }
    }
    return HCCL_SUCCESS;
}

/*
 * Fills one RDMA send envelope: the payload description goes into transData,
 * and the ServiceCancel (error status) address rides in the reserved field.
 */
HcclResult HdcsOpRemoteUpdateExecutor::PrepareEnvelopesForRoce(void *buffer, u64 count,
    HcclDataType dataType, HcclEnvelope &envelope, u64 errorStatusAddrVal, HcclUserRequire &userRequire)
{
    envelope.transData = TransData(reinterpret_cast<u64>(buffer), reinterpret_cast<u64>(nullptr), count,
        dataType, false, userRequire.tableId, userRequire.globalStep);

    // The reserved field is reinterpreted as an RdmaBuffer carrying the
    // ServiceCancel status address.
    reinterpret_cast<RdmaBuffer *>(envelope.rsv)->addr = errorStatusAddrVal;

    HCCL_DEBUG("rdma envelope, buffer[%llu], count[%llu], errorBuffAddr[%llu]",
        hash<void *>{}(buffer), count, hash<u64>{}(errorStatusAddrVal));

    return HCCL_SUCCESS;
}

/*
 * Sends each PS its deduplicated key and value slices, choosing RDMA
 * envelopes or PCIe Isend per PS. Loops until every PS has been fully sent;
 * per-thread dedup maps are drained incrementally between send rounds.
 */
HcclResult HdcsOpRemoteUpdateExecutor::UpdateDataSend(ReduceSumTask *params)
{
    u32 sendCompletePsNum = 0;
    std::vector<SendStatusRecord> alreadySendCountVec(psSize_);
    // update currently has no pipelining, so one send round per PS is enough
    u32 maxSendTimes = 1;
    PsBufferInfo *psOffNum = psOffsetAndNum_.Get();

    HcclEsRdmaInfoForUpdate *rdmaEnveInfos = static_cast<HcclEsRdmaInfoForUpdate *>(params->rdmaEnveInfosTransferMem);
    CHK_PTR_NULL(rdmaEnveInfos);

    PRINT_ARRAY((u64 *)(rdmaEnveInfos), sizeof(HcclEsRdmaInfoForUpdate) * psSize_ / sizeof(u64),
        "UpdateDevRdmaEnveInfos");

    CHK_PTR_NULL(params->tableIdAddr);
    params->tableId = *(static_cast<u32 *>(params->tableIdAddr));
    // globalStepAddr is optional; fall back to the default step when absent.
    s64 globalStep = (params->globalStepAddr == nullptr) ? DEFAULT_GLOBAL_STEP_VALUE :\
        *(static_cast<s64 *>(params->globalStepAddr));
    HcclUserRequire userRequire(params->tableId, globalStep);
    HCCL_INFO("UpdateDataSend tag[%d] tableId[%u] globalStep[%lld]", params->tag, params->tableId, globalStep);

    // Number of per-thread dedup maps to drain per send round (ceiling division).
    u32 mapEraseCount = parallelExecCpuNum_ / maxSendTimes + (parallelExecCpuNum_ % maxSendTimes != 0);
    u32 curMapIndex = 0;
    while (sendCompletePsNum < psSize_) {
        HCCL_DEBUG("sendCompletePsNum[%u] psSize_[%u]", sendCompletePsNum, psSize_);
        std::vector<HcclRequest> requestArray;
        // Iterate ranks, skipping non-PS ranks and PSes already fully sent.
        for (u32 i = 0; i < rankId2PsId_.size(); i++) {
            u32 psId = rankId2PsId_[i];
            HCCL_DEBUG("psId[%u]", psId);
            if (rankId2PsId_[i] == PS_INVALID_VALUE || alreadySendCountVec[psId].sendCompFlag) {
                continue;
            }
            u32 rankId = i;
            u64 count = static_cast<u64>(psOffNum[psId].count) - alreadySendCountVec[psId].alreadySendCount;

            u64 keysSize = count * sizeof(s64);
            // the offset for this send round is expressed in element count
            u64 offset = static_cast<u64>(psOffNum[psId].offset);

            void *keysBuffer = static_cast<u8 *>(params->keyTransferMem) +
                (offset + alreadySendCountVec[psId].alreadySendCount) * sizeof(s64);

            u64 valueSize = count * params->valueItemSize;
            void *valueBuffer = static_cast<u8 *>(params->valueTransferMem) + (offset +
                alreadySendCountVec[psId].alreadySendCount) * params->valueItemSize;

            HCCL_DEBUG("send key and value psId[%u] rankId[%u] keysSize[%llu] valueSize[%llu] index[%u] commType[%d]"
                " psOffNum[%lld], haveRdmaConn[%d] tag[%d] tableId[%u] globalStep[%lld]",
                psId, rankId, keysSize, valueSize, i, rdmaEnveInfos[psId].commType,psOffNum[psId].count,
                params->haveRdmaConn, params->tag, params->tableId, globalStep);
#ifdef ES_DATA_DFX
            u32 valueDim = params->valueItemSize / sizeof(float);
            string printInfo = "update isend psId " + std::to_string(psId);
            EsUtils::PrintEsKvDataSummary(printInfo, static_cast<float *>(valueBuffer), count, valueDim,
                static_cast<s64 *>(keysBuffer));
#endif
            if (params->haveRdmaConn && rdmaEnveInfos[psId].commType == HcclHeterogCommType::RDMA) {
                s32 *errorStatus = &rdmaEnveInfos[psId].errorStatus;
                *errorStatus = 0;
                u64 errorStatusAddrVal = reinterpret_cast<u64>(errorStatus);

                // prepare separate envelopes for keys and values; an envelope is
                // sent even when count is 0
                CHK_RET(PrepareEnvelopesForRoce(keysBuffer, keysSize, HCCL_DATA_TYPE_INT8,
                    rdmaEnveInfos[psId].envelope, errorStatusAddrVal, userRequire));
                CHK_RET(PrepareEnvelopesForRoce(valueBuffer, valueSize, HCCL_DATA_TYPE_INT8,
                    rdmaEnveInfos[psId].envelopeValue, errorStatusAddrVal, userRequire));

                HCCL_DEBUG("psId[%u] is rdma, errorStatusAddr[%llu]", psId, hash<u64>{}(errorStatusAddrVal));
            } else if (!params->haveRdmaConn ||
                (params->haveRdmaConn && rdmaEnveInfos[psId].commType == HcclHeterogCommType::PCIE)) {
                HCCL_DEBUG("psId[%u] is pcie", psId);

                CHK_RET(Send(params, rankId, keysBuffer, keysSize, requestArray, userRequire));
                CHK_RET(Send(params, rankId, valueBuffer, valueSize, requestArray, userRequire));
            } else {
                HCCL_ERROR("psId[%u] commType[%d] is invalid", psId, rdmaEnveInfos[psId].commType);
                return HCCL_E_INTERNAL;
            }

            alreadySendCountVec[psId].alreadySendCount += count;
            if (alreadySendCountVec[psId].alreadySendCount == static_cast<u64>(psOffNum[psId].count)) {
                alreadySendCountVec[psId].sendCompFlag = true;
                sendCompletePsNum++;
            }
        }

        // Drain a slice of the per-thread dedup maps while the sends are in flight.
        for (u32 i = 0; i < mapEraseCount; ++i) {
            if (curMapIndex + i >= parallelExecCpuNum_) {
                break;
            }
            realKeyMapForShard_[curMapIndex + i].erase(realKeyMapForShard_[curMapIndex + i].begin(),
                realKeyMapForShard_[curMapIndex + i].end());
        }
        curMapIndex += mapEraseCount;

        CHK_RET(WaitSomeAll(requestArray));
    }

    return HCCL_SUCCESS;
}

/*
 * Posts one non-blocking Isend over the RPC comm and records its request
 * handle so WaitSomeAll can complete it later.
 */
HcclResult HdcsOpRemoteUpdateExecutor::Send(ReduceSumTask *params, u32 rankId,
    void *buffer, u64 count, std::vector<HcclRequest> &requestArray, HcclUserRequire &userRequire)
{
    HcclCommRpc *hcclComm = static_cast<hccl::HcclCommRpc *>(params->comm);
    CHK_PTR_NULL(hcclComm);

    HcclRequest request = nullptr;
    if (hcclComm->Isend(buffer, count, HCCL_DATA_TYPE_INT8, rankId, params->tag, request,
        userRequire) != HCCL_SUCCESS) {
        HCCL_ERROR("[HdcsOpRemoteUpdateExecutor][Send]HcclIsend failed!!!");
        return HCCL_E_AGAIN;
    }

    requestArray.push_back(request);
    return HCCL_SUCCESS;
}

/*
 * Blocks until every request in requestArray has completed, polling with
 * HcclWaitSomeWithLog and sleeping between rounds.
 */
HcclResult HdcsOpRemoteUpdateExecutor::WaitSomeAll(std::vector<HcclRequest> &requestArray)
{
    s32 allComplete = 0;
    s32 count = static_cast<s32>(requestArray.size());
    while (allComplete != count) {
        int compCount = 0;
        // std::vector instead of runtime-sized stack arrays: VLAs are a
        // non-standard compiler extension in C++ (zero-initialized, as before).
        std::vector<s32> compIndices(requestArray.size(), 0);
        std::vector<HcclStatus> compStatus(requestArray.size());
        CHK_RET(HcclWaitSomeWithLog(count, requestArray.data(), &compCount, compIndices.data(),
            compStatus.data(), false));

        allComplete += compCount;
        if (allComplete != count) {
            SaluSleep(LOOP_SLEEP_TIME_US);
        }
    }

    return HCCL_SUCCESS;
}

/*
 * Prepares executor state for the next reduce-sum round.
 */
HcclResult HdcsOpRemoteUpdateExecutor::TaskResetUniqueHandle(OpTaskPtr &task)
{
    ReduceSumTask *params = dynamic_cast<ReduceSumTask *>(task.get());
    // dynamic_cast yields nullptr on a type mismatch; check before dereferencing.
    CHK_PTR_NULL(params);

    PRINT_ARRAY(static_cast<float *>(params->valueTransferMem), params->keyMaxNum * params->valueItemSize /
        sizeof(float), "updateValueTransferMemBeforeClearInRestHdl");

    if (!params->pairedMode) {
        // Once the data has been sent, the dedup mapping info can be dropped.
        CHK_RET(conUniqueMappersMgr_.ClearData());
    }

    return HCCL_SUCCESS;
}

/*
 * Waits for every PS to acknowledge the update. RDMA PSes report their status
 * through the errorStatus word written into rdmaEnveInfos; PCIe PSes send a
 * response that is probed/received via RecvRespones. Finally each PS's signal
 * is checked for a ServiceCancel.
 */
HcclResult HdcsOpRemoteUpdateExecutor::WaitUpdateFinish(ReduceSumTask *params)
{
    HcclEsRdmaInfoForUpdate *rdmaEnveInfos = static_cast<HcclEsRdmaInfoForUpdate *>(params->rdmaEnveInfosTransferMem);

    if (rdmaEnveInfos == nullptr) {
        HCCL_ERROR("rdmaEnveInfos is nullptr");
        return HCCL_E_PARA;
    }

    // recvCompleteNum starts at the number of PS ranks and counts down to 0.
    u32 recvCompleteNum = 0;
    for (u32 i = 0; i < rankId2PsId_.size(); i++) {
        if (rankId2PsId_[i] != PS_INVALID_VALUE) {
            recvCompleteNum++;
        }
    }

    // Per-rank: bytes received so far (0 == still pending) and the status signal.
    std::vector<s32> recvCompCounts(rankId2PsId_.size(), 0);
    std::vector<u32> signals(rankId2PsId_.size(), 0);
    std::vector<HcclRequest> requestArray;

    HCCL_DEBUG("haveRdmaConn[%d]", params->haveRdmaConn);
    while (recvCompleteNum > 0) {
        for (u32 i = 0; i < rankId2PsId_.size(); i++) {
            // Skip non-PS ranks and ranks that already completed.
            if (rankId2PsId_[i] == PS_INVALID_VALUE || recvCompCounts[i] != 0) {
                continue;
            }

            if (params->haveRdmaConn && rdmaEnveInfos[rankId2PsId_[i]].commType == HcclHeterogCommType::RDMA) {
                // RDMA path: status was written directly into errorStatus; no response message.
                HCCL_DEBUG("psId[%u] comm is Rdma, no need to RecvRespones", rankId2PsId_[i]);
                signals[i] = rdmaEnveInfos[rankId2PsId_[i]].errorStatus;
                recvCompCounts[i] = sizeof(signals[i]);
                recvCompleteNum--;
                continue;
            }

            // PCIe path: probe for (and start receiving) the response message.
            CHK_RET(RecvRespones(params, i, signals[i], requestArray, recvCompCounts));
            if (recvCompCounts[i] != 0) {
                recvCompleteNum--;
            }
        }

        if (recvCompleteNum > 0) {
            SaluSleep(LOOP_IMPROBE_SLEEP_TIME_US);
        }
    }

    // Complete all in-flight Imrecv requests before inspecting the signals.
    CHK_RET(WaitSomeAll(requestArray));

    for (u32 i = 0; i < rankId2PsId_.size(); i++) {
        u32 psId = rankId2PsId_[i];
        if (psId != PS_INVALID_VALUE) {
            if (IsCancelSingal(&signals[i], recvCompCounts[i])) {
                HCCL_ERROR("hdds update service cancel singal was received, remote rank id[%u] ps id[%u]", i, psId);
                return HCCL_E_INTERNAL;
            }
        }
    }
    return HCCL_SUCCESS;
}

/*
 * Non-blocking probe for one PS response over RPC. If a message is pending,
 * starts an Imrecv into `signal`, queues the request, and records the received
 * byte count in recvCompCounts[rank]; otherwise marks the rank still pending.
 */
HcclResult HdcsOpRemoteUpdateExecutor::RecvRespones(ReduceSumTask *params, u32 rank, u32 &signal,
    std::vector<HcclRequest> &requestArray, std::vector<s32> &recvCompCounts)
{
    int ret = 0;
    int flag = 0;
    HcclMessage msg = nullptr;
    HcclStatus status;

    ret = HcclRPCImprobe(rank, params->tag, params->comm, &flag, &msg,  &status);
    CHK_PRT_RET(ret != 0, HCCL_ERROR("HcclRPCImprobe failed"), HCCL_E_INTERNAL);
    if (flag == HCCL_IMPROBE_COMPLETED) {
        HcclRequest request = nullptr;

        // After HcclRPCImprobe reports success, msg is guaranteed non-null.
        HcclMessageInfo* hcclMsg = static_cast<HcclMessageInfo *>(msg);

        u64 count = hcclMsg->envelope.pcieEnvelope.count;
        // Cast sizeof to u32 for the %u slot: passing a raw size_t through a
        // %u varargs slot is undefined behavior on LP64 platforms.
        CHK_PRT_RET(count > sizeof(signal),
            HCCL_ERROR("remote pcieEnvelope size[%llu] > signal size[%u]", count,
                static_cast<u32>(sizeof(signal))), HCCL_E_INTERNAL);

        ret = HcclRPCImrecv(&signal, static_cast<int>(count), HCCL_DATA_TYPE_INT8, &msg, &request);
        CHK_PRT_RET(ret != 0, HCCL_ERROR("HcclRPCImrecv failed"), HCCL_E_INTERNAL);
        requestArray.push_back(request);
        ret = HcclRPCGetCount(&status, HCCL_DATA_TYPE_INT8, &(recvCompCounts[rank]));
        CHK_PRT_RET(ret != 0, HCCL_ERROR("HcclRPCGetCount failed"), HCCL_E_INTERNAL);
    } else {
        // No message yet; caller will poll again.
        recvCompCounts[rank] = 0;
    }
    return HCCL_SUCCESS;
}

/*
 * Send-only task: ships the already reduce-summed key/value buffers to the PSes.
 */
HcclResult HdcsOpRemoteUpdateExecutor::TaskSendRequest(OpTaskPtr &task)
{
    ReduceSumTask *taskPtr = dynamic_cast<ReduceSumTask *>(task.get());
    // dynamic_cast yields nullptr on a type mismatch; check before dereferencing.
    CHK_PTR_NULL(taskPtr);
    CHK_PTR_NULL(taskPtr->keyTransferMem);
    CHK_PTR_NULL(taskPtr->valueTransferMem);

    PRINT_ARRAY(static_cast<float *>(taskPtr->valueTransferMem), taskPtr->keyMaxNum * taskPtr->valueItemSize /
        sizeof(float), "reduceSumedvalueTransferMemBeforeSend");

    CHK_RET(UpdateDataSend(taskPtr));

    PRINT_ARRAY(static_cast<float *>(taskPtr->valueTransferMem), taskPtr->keyMaxNum * taskPtr->valueItemSize /
        sizeof(float), "reduceSumedvalueTransferMem");

    return HCCL_SUCCESS;
}

/*
 * Receive-only task: waits for every PS to acknowledge a previously sent update.
 */
HcclResult HdcsOpRemoteUpdateExecutor::TaskRecvResponse(OpTaskPtr &task)
{
    ReduceSumTask *taskPtr = dynamic_cast<ReduceSumTask *>(task.get());
    // dynamic_cast yields nullptr on a type mismatch; check before dereferencing.
    CHK_PTR_NULL(taskPtr);
    CHK_PTR_NULL(taskPtr->keyTransferMem);
    CHK_PTR_NULL(taskPtr->valueTransferMem);

    PRINT_ARRAY(static_cast<float *>(taskPtr->valueTransferMem), taskPtr->keyMaxNum * taskPtr->valueItemSize /
        sizeof(float), "valueTransferMemBeforeTaskRecvRsp");

    CHK_RET(WaitUpdateFinish(taskPtr));

    PRINT_ARRAY(static_cast<float *>(taskPtr->valueTransferMem), taskPtr->keyMaxNum * taskPtr->valueItemSize /
        sizeof(float), "valueTransferMemAfterTaskRecvRsp");

    return HCCL_SUCCESS;
}

}
