/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.
 * Description: op remote update execute.
 * Author: qingjicheng
 * Create: 2022-11-28
 */

#include <algorithm>

#include "transport_heterog_pub.h"
#include "log.h"
#include "adapter_hccp.h"
#include "adapter_rts.h"
#include "mr_manager.h"
#include "profiling_manager.h"
#include "adapter_prof.h"
#include "op_coll_remote_update_executor.h"
#include "transport_heterog_roce_pub.h"

namespace hccl {

using namespace std;

// Size of the precomputed key -> reduce-thread lookup table (keys are sharded by key % KEY_TO_THREAD_ID).
constexpr u32 KEY_TO_THREAD_ID = 1024;
// Worker count that selects the single-worker fast path in ReduceSum (plain copy, no dedup/sum).
constexpr u32 WORKER_NUM_ONE = 1;

// Registers the executor function for each supported task type.
// Lambdas are used instead of std::bind: they are clearer, avoid the bind
// wrapper overhead, and keep the std::function signature explicit.
OpCollRemoteUpdateExecutor::OpCollRemoteUpdateExecutor() : taskExeFuncTable_({
    {OpTaskType::SEND, [this](OpTaskPtr &task) { return TaskSend(task); }},
    {OpTaskType::RECV, [this](OpTaskPtr &task) { return TaskRecv(task); }},
    {OpTaskType::ISEND, [this](OpTaskPtr &task) { return TaskIsend(task); }},
    {OpTaskType::IRECV, [this](OpTaskPtr &task) { return TaskImrecv(task); }},
    {OpTaskType::PROBE, [this](OpTaskPtr &task) { return TaskProbe(task); }},
    {OpTaskType::WAIT, [this](OpTaskPtr &task) { return TaskWait(task); }},
    {OpTaskType::REDUCE_SUM, [this](OpTaskPtr &task) { return TaskUpdate(task); }},
    {OpTaskType::MEMCPY, [this](OpTaskPtr &task) { return TaskMemcpy(task); }}
})
{
}

// Destructor: stops the reduce worker threads and releases owned resources via DeInit().
OpCollRemoteUpdateExecutor::~OpCollRemoteUpdateExecutor()
{
    DeInit();
}

// Shuts the executor down: signals every reduce worker thread to exit, joins
// them, then frees the running-flag array. The ordering matters — the flag
// must be set under cvMutex_ before notify_all so that a worker blocked in
// reduceCv_.wait() cannot miss the wake-up, and the join must finish before
// reduceRunningflags_ is deleted because workers read that array.
HcclResult OpCollRemoteUpdateExecutor::DeInit()
{
    // Drop the non-owning transport pointers (ownership stays with the caller of Init).
    rankTransportMap_.clear();
    {
        std::lock_guard<std::mutex> lock(cvMutex_);
        reduceShutDown_ = true;
    }

    reduceCv_.notify_all();
    for (auto &ptr : reduceSumThreads_) {
        if (ptr != nullptr && ptr->joinable()) {
            ptr->join();
            ptr = nullptr;
        }
    }

    // All workers have exited; safe to release the flag array they were reading.
    std::unique_lock<std::mutex> lock(cvMutex_);
    if (reduceRunningflags_ != nullptr) {
        delete []reduceRunningflags_;
        reduceRunningflags_ = nullptr;
    }
    return HCCL_SUCCESS;
}

// Initializes the executor: caches transport pointers, allocates the per-thread
// reduce bookkeeping, optionally starts the reduce worker threads, and builds
// the random key -> thread shard mapping.
// @param transportHandleMap rank -> transport; ownership stays with the caller.
// @param psRankIdMap        PS rank id mapping, copied as-is.
// @param workerList         worker rank ids; must be non-empty.
// @param threadNum          number of reduce threads; must be > 0.
// @param needThread         when false, no threads are started (single-worker mode).
// @return HCCL_SUCCESS on success, HCCL_E_PARA/HCCL_E_MEMORY otherwise.
HcclResult OpCollRemoteUpdateExecutor::Init(std::map<u32, std::unique_ptr<TransportHeterog>> &transportHandleMap,
    std::map<u32, u32> &psRankIdMap, std::vector<u32> &workerList, u32 threadNum, bool needThread)
{
    // Validate inputs up-front: max_element on an empty workerList is UB, and
    // threadNum == 0 would produce new bool[0] plus a modulo-by-zero when
    // building keyToThreadId_ below.
    CHK_PRT_RET(workerList.empty(),
        HCCL_ERROR("[OpCollRemoteUpdateExecutor][Init]workerList is empty"), HCCL_E_PARA);
    CHK_PRT_RET(threadNum == 0,
        HCCL_ERROR("[OpCollRemoteUpdateExecutor][Init]threadNum is 0"), HCCL_E_PARA);

    // Cache raw transport pointers; the unique_ptrs keep ownership.
    for (auto &iter : transportHandleMap) {
        if (iter.second != nullptr) {
            rankTransportMap_[iter.first] = iter.second.get();
        }
    }

    auto maxValue = *max_element(workerList.begin(), workerList.end());
    rankSize_ = maxValue + 1;
    psRankIdMap_ = psRankIdMap;
    reduceThreadNum_ = threadNum;
    reduceSumThreads_.resize(threadNum);
    reduceRunningflags_ = new (std::nothrow) bool[threadNum];
    CHK_PTR_NULL(reduceRunningflags_);
    for (u32 i = 0; i < threadNum; i++) {
        reduceRunningflags_[i] = false;
    }

    reduceThreadIds_.resize(threadNum, INVALID_UINT);

    if (!needThread) {
        // NOTE(review): in this mode keyToThreadId_/realKeyMapForShard_ stay
        // uninitialized, so the multi-worker ReduceSum path must not be used.
        HCCL_DEBUG("No need to start a thread.");
        return HCCL_SUCCESS;
    }
    for (u32 i = 0; i < threadNum; i++) {
        reduceSumThreads_[i].reset(new (std::nothrow) std::thread(&OpCollRemoteUpdateExecutor::ReduceThread,
            this, i));
        CHK_SMART_PTR_NULL(reduceSumThreads_[i]);
    }

    // Wait until every worker publishes its tid, i.e. has actually started running.
    for (u32 i = 0; i < threadNum; i++) {
        while (reduceThreadIds_[i] == INVALID_UINT) {
            SaluSleep(ONE_MILLISECOND_OF_USLEEP);
        }
    }

    threadkeyNumForShard_.resize(reduceThreadNum_);
    realKeyMapForShard_.resize(reduceThreadNum_);
    for (u32 i = 0; i < threadNum; i++) {
        if (realKeyMapForShard_[i] == nullptr) {
            NEW_NOTHROW(realKeyMapForShard_[i], USTable(), return HCCL_E_MEMORY);
        }
    }

    // Precomputed random shard table: key % KEY_TO_THREAD_ID -> reduce thread id.
    keyToThreadId_.resize(KEY_TO_THREAD_ID);
    for (u32 i = 0; i < KEY_TO_THREAD_ID; ++i) {
        keyToThreadId_[i] = rand() % reduceThreadNum_;
    }
    return HCCL_SUCCESS;
}

// Entry point for one embedding-service operator. ABORT_SELF only clears the
// blocking flag (causing in-flight probe loops to bail out); every other op
// type executes its queued tasks.
HcclResult OpCollRemoteUpdateExecutor::ExecuteOp(HcclOpPtr opPtr)
{
    CHK_PTR_NULL(opPtr);
    auto *esOp = static_cast<HcclOperator<EmbeddingServiceParam> *>(opPtr);
    if (esOp->param_.opType == EmbeddingServiceParam::OperatorType::ABORT_SELF) {
        blockFlag_ = false;
        return HCCL_SUCCESS;
    }
    CHK_RET(ExecuteTask(esOp));
    return HCCL_SUCCESS;
}

// Runs every queued task of the operator in order, then pushes the tasks back
// so the operator still owns them after execution.
// Fix: the original indexed taskExeFuncTable_ with operator[], which
// default-inserts an empty std::function for an unknown task type and then
// throws std::bad_function_call on invocation. Use find() and fail explicitly.
HcclResult OpCollRemoteUpdateExecutor::ExecuteTask(HcclOperator<EmbeddingServiceParam> *opPtr)
{
    std::queue<OpTaskPtr> tempTaskQue;
    auto &taskQue = GetTaskQue<EmbeddingServiceParam>(opPtr);
    while (!taskQue.empty()) {
        auto funcIter = taskExeFuncTable_.find(taskQue.front()->taskType);
        CHK_PRT_RET(funcIter == taskExeFuncTable_.end(),
            HCCL_ERROR("[Execute][Task]unsupported task type[%d]",
                static_cast<s32>(taskQue.front()->taskType)), HCCL_E_PARA);
        CHK_RET(funcIter->second(taskQue.front()));
        tempTaskQue.push(std::move(taskQue.front()));
        taskQue.pop();
    }
    // Hand the (already executed) tasks back to the operator.
    while (!tempTaskQue.empty()) {
        opPtr->PushTask(std::move(tempTaskQue.front()));
        tempTaskQue.pop();
    }
    return HCCL_SUCCESS;
}

// Executes a blocking SEND task towards task->dstRank.
// Fix: the original dereferenced the dynamic_cast result and the transport
// without null checks, and rankTransportMap_[dstRank] default-inserts a null
// entry for an unknown rank. Look the rank up explicitly and validate.
HcclResult OpCollRemoteUpdateExecutor::TaskSend(OpTaskPtr &task)
{
    TransTask *taskPtr = dynamic_cast<TransTask *>(task.get());
    CHK_PTR_NULL(taskPtr);
    auto iter = rankTransportMap_.find(taskPtr->dstRank);
    CHK_PRT_RET(iter == rankTransportMap_.end(),
        HCCL_ERROR("[Task][Send]transport for dstRank[%u] is not found", taskPtr->dstRank), HCCL_E_PARA);
    TransportHeterog *transport = dynamic_cast<TransportHeterog *>(iter->second);
    CHK_PTR_NULL(transport);
    CHK_RET(Send(taskPtr, transport));
    return HCCL_SUCCESS;
}

// Plain RECV is not supported by this executor (receives go through the
// PROBE + IRECV pair instead); always reports a parameter error.
HcclResult OpCollRemoteUpdateExecutor::TaskRecv(OpTaskPtr &task)
{
    return HCCL_E_PARA;
}

// Probes every rank listed in task->improbeRanks until all expected message
// envelopes have arrived (or blockFlag_ is cleared by an ABORT_SELF op).
// Each matched message is queued per-transport for the following IRECV task,
// and the aggregated element count is written through task->handle.
// Returns HCCL_E_AGAIN when aborted before all expected ranks were probed.
HcclResult OpCollRemoteUpdateExecutor::TaskProbe(OpTaskPtr &task)
{
    // Reset the per-thread / per-rank key-index shard buckets for this round.
    for (u32 threadId = 0; threadId < reduceThreadNum_; ++threadId) {
        threadkeyNumForShard_[threadId].resize(rankSize_);
        for (u32 rankId = 0; rankId < rankSize_; ++rankId) {
            threadkeyNumForShard_[threadId][rankId].clear();
        }
    }

    UpdateTransTask *taskPtr = dynamic_cast<UpdateTransTask *>(task.get());
    // Local working copy so a matched transport can be nulled without touching the member map.
    std::map<u32, TransportHeterog *> rankTransportMap;
    for (auto &rank : taskPtr->improbeRanks) {
        rankTransportMap[rank] = rankTransportMap_[rank];
    }

    u64 count = 0;
    u32 probeNum = 0;
    // Only ranks flagged with 1 in rankTransCompPtr still owe us a message.
    u32 needProbeNum = std::count((*taskPtr->rankTransCompPtr).begin(), (*taskPtr->rankTransCompPtr).end(), 1);
    auto maxValue = *max_element(taskPtr->improbeRanks.begin(), taskPtr->improbeRanks.end());
    std::vector<u32> alreadyProbeRank(maxValue + 1, 0);
    uint64_t beginTime = hrtMsprofSysCycleTime();
    // Poll all pending transports until every expected envelope arrived or we are aborted.
    while (probeNum < needProbeNum && blockFlag_) {
        for (auto &pair : rankTransportMap) {
            // Skip ranks that are already matched, not expected, or have no transport.
            if (pair.second == nullptr || ((*taskPtr->rankTransCompPtr)[pair.first] <= 0) ||
                (alreadyProbeRank[pair.first] != 0)) {
                continue;
            }

            TransportHeterog *transport = dynamic_cast<TransportHeterog *>(pair.second);

            TransportEndPointInfo srcEp(0, 0, 0);
            TransportEndPointInfo dstEp(0, 0, 0);
            TransportEndPointParam epParam(srcEp, dstEp);
            HcclMessageInfo *msg = nullptr;

            s32 flag = HCCL_IMPROBE_INCOMPLETED;
            HcclStatus status;

            CHK_RET(transport->Improbe(epParam, flag, msg, status, taskPtr->recvWaitFlagMap[pair.first]));
            if (flag == HCCL_IMPROBE_COMPLETED) {
                HCCL_INFO("Improbe success: transport[%p] msg[%p] queSize[%u]", transport, msg,
                    transportMsgTempMap_[transport].size());
                transport->AddRecvEnvelopNum();
                // Park the matched message; TaskImrecv pops it from this queue later.
                transportMsgTempMap_[transport].push(msg);
                pair.second = nullptr;

                // Update is not pipelined yet, so we do not have to wait for
                // updateEndFlag to be valid before resetting this slot to 0.
                (*taskPtr->rankTransCompPtr)[pair.first] = 0;

                RdmaBuffer *rdmaBuffer = reinterpret_cast<RdmaBuffer *>(msg->envelope.envelope.rsv);

                (*taskPtr->rdmaResponseAddrsPtr)[pair.first] = *rdmaBuffer;

                // Only meaningful in the RDMA case.
                u64 &valueAddr = rdmaBuffer->addr;
                u64 &valueAddrKey = rdmaBuffer->buffKey;
                HCCL_DEBUG("recved valueAddr[%#llx], valueAddrKey[%#llx]", hash<u64>{}(valueAddr), valueAddrKey);

                count += status.count;
                (*taskPtr->addrInfo)[pair.first].size = status.count;
                probeNum++;
                alreadyProbeRank[pair.first]++;
                HCCL_INFO("Improbe success: rank[%u] recvSize[%u]", pair.first, (*taskPtr->addrInfo)[pair.first].size);

                u32 recvDataType = HCCL_DATA_TYPE_RESERVED;
                HcclUserRequire userRequire;
                GetProbeMsgInfo(transport, msg, recvDataType, userRequire);
                CHK_PRT_RET(recvDataType >= HCCL_DATA_TYPE_RESERVED, HCCL_ERROR("failed, recvDataType[%u] >= "
                    "HCCL_DATA_TYPE_RESERVED[%d]", recvDataType, HCCL_DATA_TYPE_RESERVED), HCCL_E_PARA);
                SetTaskOutputRegStatusInfo(taskPtr, status, recvDataType, userRequire);

                // Report the wait interval between two successful probes to profiling.
                auto &profilingManager = hccl::ProfilingManager::Instance();
                CHK_RET(profilingManager.CallEsMsprofReportTaskApi(false, beginTime, ProfTaskType::TASK_EVENT_WAIT));
                beginTime = hrtMsprofSysCycleTime();
            }
        }
        SaluSleep(LOOP_IMPROBE_SLEEP_TIME_US);
    }

    // Aborted (blockFlag_ cleared) before every expected rank was matched.
    if (probeNum < needProbeNum) {
        HCCL_INFO("TaskImprobeRecv execute abort self");
        return HCCL_E_AGAIN;
    }

    // Publish the total received element count to the caller-provided handle.
    *static_cast<u32 *>(taskPtr->handle) = count;
    HCCL_INFO("TaskProbe success: handle[%p] value[%u]", taskPtr->handle,
        *static_cast<u32 *>(taskPtr->handle));
    return HCCL_SUCCESS;
}

// Receives the message previously matched by TaskProbe for task->dstRank.
// Pops the parked HcclMessageInfo for the transport, issues Imrecv, and keeps
// the resulting request in tempRequestQue_ for the later WAIT task.
// Fixes: the original dereferenced the dynamic_cast result and the transport
// without null checks, and silently discarded RegMr's return value.
HcclResult OpCollRemoteUpdateExecutor::TaskImrecv(OpTaskPtr &task)
{
    uint64_t beginTime = hrtMsprofSysCycleTime();
    auto &profilingManager = hccl::ProfilingManager::Instance();
    HCCL_DEBUG("OpCollRemoteUpdateExecutor::TaskImrecv start");

    UpdateTransTask *taskPtr = dynamic_cast<UpdateTransTask *>(task.get());
    CHK_PTR_NULL(taskPtr);
    TransportHeterog *transport = dynamic_cast<TransportHeterog *>(rankTransportMap_[taskPtr->dstRank]);
    CHK_PTR_NULL(transport);
    if (transport->GetRecvEnvelopNum() == 0) {
        // No envelope was probed for this rank; nothing to receive, not an error.
        HCCL_WARNING("cur transport didn't receive the envelope.dstRank[%u]", taskPtr->dstRank);
        CHK_RET(profilingManager.CallEsMsprofReportTaskApi(false, beginTime, ProfTaskType::TASK_UPDATE_IMRECV));
        return HCCL_SUCCESS;
    }
    auto iter = transportMsgTempMap_.find(transport);
    if (iter == transportMsgTempMap_.end()) {
        HCCL_ERROR("[Task][Imrecv] msg is not found, dst rank[%u]", taskPtr->dstRank);
        return HCCL_E_PARA;
    }
    HcclMessageInfo *msgPtr = iter->second.front();
    HCCL_DEBUG("TaskImrecv: task imrecv transport[%p] msg[%p] queSize[%u]", transport, msgPtr, iter->second.size());
    CHK_PTR_NULL(msgPtr);
    HCCL_INFO("TaskImrecv: recv mem %p count %llu datatype %s", taskPtr->recvMem, taskPtr->count,
        GetDataTypeEnumStr(taskPtr->dataType).c_str());
    TransData recvData(reinterpret_cast<u64>(nullptr), reinterpret_cast<u64>(taskPtr->recvMem),
        taskPtr->count, taskPtr->dataType);
    HcclRequestInfo* request = nullptr;

    // RDMA path: the receive buffer must be registered before posting the recv.
    TransportHeterogRoce *transportRoce = dynamic_cast<TransportHeterogRoce *>(transport);
    if (transportRoce != nullptr) {
        u32 lkey = 0;
        CHK_RET(transportRoce->RegMr(taskPtr->transferMemBegin, taskPtr->transferMemLen, lkey, false));
    }

    CHK_RET(transport->Imrecv(recvData, *msgPtr, request, false, taskPtr->needRecordFlag));
    tempRequestQue_.push_back(request);
    iter->second.pop();
    transport->SubRecvEnvelopNum();
    CHK_RET(profilingManager.CallEsMsprofReportTaskApi(false, beginTime, ProfTaskType::TASK_UPDATE_IMRECV));

    HCCL_DEBUG("OpCollRemoteUpdateExecutor::TaskImrecv success");
    return HCCL_SUCCESS;
}

// Issues a non-blocking send. On the RoCE path this is a notify record; on the
// other path a regular Isend. The resulting request is tagged and, when the
// caller provided an out-request slot, prepended to that request list.
// Fixes: the original dereferenced the dynamic_cast result and `request`
// without null checks — CHK_PRT only logs on Isend failure, so `request`
// could legitimately still be null at the `request->tag` write.
HcclResult OpCollRemoteUpdateExecutor::TaskIsend(OpTaskPtr &task)
{
    TransTask *taskPtr = dynamic_cast<TransTask *>(task.get());
    CHK_PTR_NULL(taskPtr);
    TransportHeterog *transport = dynamic_cast<TransportHeterog *>(rankTransportMap_[taskPtr->dstRank]);
    CHK_PTR_NULL(transport);

    HcclRequestInfo* request = nullptr;

    TransportHeterogRoce *transportRoce = dynamic_cast<TransportHeterogRoce *>(transport);
    if (transportRoce != nullptr) {
        Stream tmpStream(nullptr);
        HCCL_DEBUG("update ps RecordNotifyWithReq SEND_NOTIFY dstRank[%u]", taskPtr->dstRank);
        CHK_RET(transportRoce->RecordNotifyWithReq(tmpStream, RdmaNotifyOp::SEND_NOTIFY, request));
    } else {
        CHK_PRT(Isend(taskPtr, transport, request));
    }

    // Both branches are expected to populate request; guard before dereferencing.
    CHK_PTR_NULL(request);
    request->tag = taskPtr->tag;

    if (taskPtr->outRequest != nullptr) {
        // Prepend to the caller's request list (intrusive singly-linked list).
        request->next = static_cast<HcclRequestInfo *>(*taskPtr->outRequest);
        *taskPtr->outRequest = request;
    }
    return HCCL_SUCCESS;
}

// Dispatches a WAIT task: when no completion-status array was supplied (the
// PS-update case) waits for every queued receive request; otherwise waits
// for some of the requests listed in the task.
HcclResult OpCollRemoteUpdateExecutor::TaskWait(OpTaskPtr &task)
{
    HCCL_DEBUG("OpCollRemoteUpdateExecutor::TaskWait start");
    WaitSomeTask *waitTask = dynamic_cast<WaitSomeTask *>(task.get());
    const bool waitEverything = (waitTask->compStatus == nullptr);
    if (waitEverything) {
        CHK_RET(WaitAll());
        HCCL_DEBUG("OpCollRemoteUpdateExecutor::TaskWait WaitAll success");
        return HCCL_SUCCESS;
    }
    CHK_RET(WaitSome(waitTask));
    HCCL_DEBUG("OpCollRemoteUpdateExecutor::TaskWait WaitSome success");
    return HCCL_SUCCESS;
}

// Blocks until at least one request (list) in taskPtr->requestArray has
// completed; records its index and a zeroed status into the task's output
// arrays and returns. Polls with a short sleep between passes.
HcclResult OpCollRemoteUpdateExecutor::WaitSome(WaitSomeTask *taskPtr)
{
    *taskPtr->compCount = 0;
    for (;;) {
        bool anyCompleted = false;
        for (s32 idx = 0; idx < taskPtr->requestCount; idx++) {
            HcclRequestInfo *reqList = static_cast<HcclRequestInfo *>(taskPtr->requestArray[idx]);
            s32 testFlag = HCCL_TEST_INCOMPLETED;
            CHK_PRT(WaitRequestList(reqList, testFlag));
            if (testFlag == HCCL_TEST_COMPLETED) {
                // Record the first completed entry and stop scanning.
                taskPtr->compIndices[*taskPtr->compCount] = idx;
                HcclStatus doneStatus = {0};
                taskPtr->compStatus[*taskPtr->compCount] = doneStatus;
                (*taskPtr->compCount)++;
                anyCompleted = true;
                break;
            }
        }
        if (anyCompleted) {
            break;
        }
        SaluSleep(LOOP_SLEEP_TIME_US);
    }
    return HCCL_SUCCESS;
}

// Blocks until every request in the intrusive `next`-linked list has completed,
// polling each pending request with Test(). On return, flag is
// HCCL_TEST_COMPLETED.
// Fixes: the original slept unconditionally at the end of each pass, adding
// one pointless LOOP_SLEEP_TIME_US delay after all requests had already
// completed; it also dereferenced request->transportHandle without a check.
HcclResult OpCollRemoteUpdateExecutor::WaitRequestList(HcclRequestInfo *requestList, s32 &flag)
{
    // Flatten the linked list so completed entries can simply be nulled.
    std::vector<HcclRequestInfo *> requestVec;
    HcclRequestInfo *listPtr = requestList;
    while (listPtr != nullptr) {
        requestVec.push_back(listPtr);
        listPtr = listPtr->next;
    }

    flag = HCCL_TEST_INCOMPLETED;
    while (flag != HCCL_TEST_COMPLETED) {
        flag = HCCL_TEST_COMPLETED;
        for (auto &request : requestVec) {
            if (request == nullptr) {
                continue;
            }
            TransportHeterog *transportPtr = static_cast<TransportHeterog *>(request->transportHandle);
            CHK_PTR_NULL(transportPtr);
            HcclStatus compState;
            s32 tempFlag = HCCL_TEST_INCOMPLETED;
            CHK_RET(transportPtr->Test(*request, tempFlag, compState));
            if (tempFlag == HCCL_TEST_COMPLETED) {
                request = nullptr;
            } else {
                flag = HCCL_TEST_INCOMPLETED;
            }
        }

        // Only back off when something is still pending — no sleep after completion.
        if (flag != HCCL_TEST_COMPLETED) {
            SaluSleep(LOOP_SLEEP_TIME_US);
        }
    }
    return HCCL_SUCCESS;
}

// Blocks until every request in tempRequestQue_ has completed, then clears
// the queue. Null entries and zero-count requests count as already complete.
// Fix: the original never reset compCount between polling passes, so entries
// that were already null (or zero-count) were counted again on every pass —
// compCount could reach the queue size while real requests were still
// pending, returning too early. Recount from zero each pass instead; it also
// slept once more after everything had already completed.
HcclResult OpCollRemoteUpdateExecutor::WaitAll()
{
    const u32 totalCount = static_cast<u32>(tempRequestQue_.size());
    u32 compCount = 0;
    while (compCount < totalCount) {
        HCCL_DEBUG("compCount[%u] RequestQue size[%u]", compCount, tempRequestQue_.size());
        compCount = 0;  // recount every pass; completed entries are nulled below
        for (auto &request : tempRequestQue_) {
            if (request == nullptr || (request->transportRequest.transData.count == 0)) {
                compCount++;
                continue;
            }
            TransportHeterog *transport = static_cast<TransportHeterog *>(request->transportHandle);
            s32 flag = HCCL_TEST_INCOMPLETED;
            HcclStatus compState;
            CHK_RET(transport->Test(*request, flag, compState));
            if (flag == HCCL_TEST_COMPLETED) {
                compCount++;
                request = nullptr;
            }
        }
        if (compCount < totalCount) {
            SaluSleep(LOOP_SLEEP_TIME_US);
        }
    }
    tempRequestQue_.clear();
    return HCCL_SUCCESS;
}

// Executes the REDUCE_SUM task: sums the received per-rank updates into the
// task's key/value buffers and reports the duration to profiling.
// Fix: the original dereferenced the dynamic_cast result (taskPtr->keys)
// without first checking the cast succeeded.
HcclResult OpCollRemoteUpdateExecutor::TaskUpdate(OpTaskPtr &task)
{
    uint64_t beginTime = hrtMsprofSysCycleTime();
    PsReduceSumTask *taskPtr = dynamic_cast<PsReduceSumTask *>(task.get());
    CHK_PTR_NULL(taskPtr);

    CHK_PTR_NULL(taskPtr->keys);
    CHK_PTR_NULL(taskPtr->value);

    TIME_PRINT(CHK_RET(ReduceSum(taskPtr)));

    auto &profilingManager = hccl::ProfilingManager::Instance();
    CHK_RET(profilingManager.CallEsMsprofReportTaskApi(false, beginTime, ProfTaskType::TASK_UPDATE_GLOBAL_REDUCE));

    return HCCL_SUCCESS;
}

// Resets every shard's dedup table, pre-reserving room for keySize keys,
// and rewinds the shared output write position.
void OpCollRemoteUpdateExecutor::PsUpdatePrepare(u32 keySize)
{
    for (u32 shardId = 0; shardId < reduceThreadNum_; ++shardId) {
        auto *shardTable = realKeyMapForShard_[shardId];
        shardTable->Clear();
        shardTable->Reserve(keySize);
    }
    realKeyPos_ = 0;
}

// Discards every pending task of the operator without executing any of them.
void OpCollRemoteUpdateExecutor::ClearAllTask(HcclOperator<EmbeddingServiceParam> *opPtr)
{
    auto &pendingTasks = GetTaskQue<EmbeddingServiceParam>(opPtr);
    // Swap with an empty queue; the local's destructor releases all tasks.
    std::queue<OpTaskPtr> emptyQue;
    pendingTasks.swap(emptyQue);
}

// Reduces the per-rank key/value updates into params->keys / params->value.
// Single-worker path: a plain copy of the only non-null rank's data.
// Multi-worker path: keys are sharded across the reduce threads (via
// keyToThreadId_), the threads deduplicate and sum concurrently, and this
// function blocks until all of them report completion.
// Fix (single-worker path): the chunked value copy never advanced the SOURCE
// pointer — every chunk re-copied the first SECUREC_MEM_MAX_LEN bytes — and
// the destination offset was computed in float-element units while the sizes
// are in bytes (a 4x over-stride). Both offsets are now byte-based and
// advance together.
HcclResult OpCollRemoteUpdateExecutor::ReduceSum(PsReduceSumTask *params)
{
    // Nothing to reduce when either keys or values are empty.
    if (*params->updateKeysSize == 0 || *params->updateValuesSize == 0) {
        HCCL_INFO("[OpCollRemoteUpdateExecutor][ReduceSum]updateKeysSize 0");
        return HCCL_SUCCESS;
    }

    std::vector<MemoryStartAndSize> &keyAddrInfo = *params->keyAddrInfo;
    std::vector<MemoryStartAndSize> &valueAddrInfo = *params->valueAddrInfo;
    if (params->workNum == WORKER_NUM_ONE) {
        // Find the single rank that actually delivered data.
        u32 rankId = keyAddrInfo.size();
        for (u32 idx = 0; idx < keyAddrInfo.size(); ++idx) {
            if (keyAddrInfo[idx].startAddr != nullptr) {
                rankId = idx;
                break;
            }
        }

        CHK_PRT_RET(UNLIKELY(rankId == keyAddrInfo.size()),
            HCCL_ERROR("all keyAddrInfo startAddrs are null"), HCCL_E_PARA);

        // The real capacity of the destination buffer is not available here;
        // the update size is used as both the limit and the copy count.
        CHK_SAFETY_FUNC_RET(memcpy_s(params->keys, *params->updateKeysSize,
            keyAddrInfo[rankId].startAddr, *params->updateKeysSize));

        // Copy values in SECUREC_MEM_MAX_LEN-sized chunks (memcpy_s rejects
        // larger single copies). Offsets are in bytes, so use byte pointers,
        // and advance source and destination together.
        u64 quotient = *params->updateValuesSize / SECUREC_MEM_MAX_LEN;
        u64 remainder = *params->updateValuesSize % SECUREC_MEM_MAX_LEN;
        u8 *valueDst = static_cast<u8 *>(params->value);
        u8 *valueSrc = static_cast<u8 *>(valueAddrInfo[rankId].startAddr);

        for (u64 i = 0; i < quotient; i++) {
            TIME_PRINT(CHK_SAFETY_FUNC_RET(memcpy_s(valueDst + (i * SECUREC_MEM_MAX_LEN),
                SECUREC_MEM_MAX_LEN, valueSrc + (i * SECUREC_MEM_MAX_LEN), SECUREC_MEM_MAX_LEN)));
        }

        if (remainder != 0) {
            TIME_PRINT(CHK_SAFETY_FUNC_RET(memcpy_s(valueDst + (quotient * SECUREC_MEM_MAX_LEN),
                remainder, valueSrc + (quotient * SECUREC_MEM_MAX_LEN), remainder)));
        }

        *params->actualKeyCount = *params->updateKeysSize / SIZE_TABLE[params->keyType];
        *params->actualValueCount = *params->updateValuesSize / SIZE_TABLE[params->valueType];

        PRINT_ARRAY(params->keys, *params->actualKeyCount, "UpdateKey1Worker");
        PRINT_ARRAY(static_cast<float *>(params->value), *params->actualValueCount, "UpdateKey1Worker");

        return HCCL_SUCCESS;
    }

    // Convert byte sizes to element counts for the multi-worker path.
    *params->updateKeysSize = *params->updateKeysSize / SIZE_TABLE[params->keyType];
    *params->updateValuesSize = *params->updateValuesSize / SIZE_TABLE[params->valueType];
    u32 valueItemSize = ((*params->updateValuesSize) / (*params->updateKeysSize)) * SIZE_TABLE[params->valueType];
    PsUpdatePrepare(*params->updateKeysSize);

    // Shard key indices per reduce thread so each key is owned by exactly one thread.
    for (u32 rankId = 0; rankId < keyAddrInfo.size(); ++rankId) {
        if (keyAddrInfo[rankId].startAddr == nullptr) {
            continue;
        }

        s64 *keyStartAddr = static_cast<s64 *>(keyAddrInfo[rankId].startAddr);
        u64 count = keyAddrInfo[rankId].size / SIZE_TABLE[params->keyType];
        for (u64 keyIndex = 0; keyIndex < count; ++keyIndex) {
            s64 key = *(keyStartAddr + keyIndex);
            threadkeyNumForShard_[keyToThreadId_[static_cast<u64>(key) % KEY_TO_THREAD_ID]][rankId].push_back(keyIndex);
        }
    }
    // Multi-threaded dedup: wrap the output buffers and publish the task to the workers.
    tempKeys_ = HostMem::create(params->keys, params->keyMaxNum * SIZE_TABLE[params->keyType]);
    tempValues_ = HostMem::create(params->value, params->keyMaxNum * valueItemSize *
        SIZE_TABLE[params->valueType]);

    compThreadNum_ = 0;
    tempReduceTaskPtr_ = params;

    {
        // Set every worker's running flag under the cv mutex so wake-ups cannot be missed.
        std::unique_lock<std::mutex> lock(cvMutex_);
        for (u32 i = 0; i < reduceThreadNum_; i++) {
            reduceRunningflags_[i] = true;
        }
    }

    reduceCv_.notify_all();

    {
        // Block until every reduce worker has finished this round.
        std::unique_lock<std::mutex> lock(reduceMutex_);
        reduceEndCv_.wait(lock, [this] {return compThreadNum_ == reduceThreadNum_;});
    }
    *params->actualKeyCount = realKeyPos_;
    *params->actualValueCount = *params->actualKeyCount * valueItemSize / SIZE_TABLE[params->valueType];

    PRINT_ARRAY(params->keys, *params->actualKeyCount, "UpdateKeyMultiWorkers");
    PRINT_ARRAY(static_cast<float *>(params->value), *params->actualValueCount, "UpdateKeyMultiWorkers");
    return HCCL_SUCCESS;
}

// Per-thread reduce worker body: processes only the keys assigned to shard
// `serialNum` (by ReduceSum's sharding pass), deduplicating keys via the
// shard's USTable and summing values for repeated keys.
// NOTE(review): realKeyPos_++ is executed concurrently by all reduce threads;
// this assumes realKeyPos_ is atomic (or otherwise synchronized) — confirm
// against the member declaration.
void OpCollRemoteUpdateExecutor::VectorReduce(volatile PsReduceSumTask *params, u32 serialNum)
{
    std::vector<MemoryStartAndSize> &keyAddrInfo = *params->keyAddrInfo;
    std::vector<MemoryStartAndSize> &valueAddrInfo = *params->valueAddrInfo;

    // Bytes per value entry and the float count it contains.
    u32 valueItemSize = ((*params->updateValuesSize) / (*params->updateKeysSize)) * SIZE_TABLE[params->valueType];
    u32 valueCount = valueItemSize / SIZE_TABLE[params->valueType];
    for (u32 rankId = 0; rankId < keyAddrInfo.size(); rankId++) {
        u32 curKeySize = threadkeyNumForShard_[serialNum][rankId].size();
        for (u32 keyIndex = 0; keyIndex < curKeySize; ++keyIndex) {
            s64 key = *(static_cast<s64 *>(keyAddrInfo[rankId].startAddr) +
                threadkeyNumForShard_[serialNum][rankId][keyIndex]);
            // First occurrence of a key claims the next output slot.
            auto result = realKeyMapForShard_[serialNum]->InsertOrFind(key);
            if (result.second == InsertResult::NEW_INSERTED) {
                *(result.first) = realKeyPos_++;
            }

            // Write the key into its output slot (skip if already identical).
            s64 *keyDstMem = reinterpret_cast<s64 *>(tempKeys_.ptr()) + *(result.first);
            if (key != *keyDstMem) {
                *(keyDstMem) = key;
            }
            float *valueSrc = reinterpret_cast<float *>(static_cast<u8 *>(valueAddrInfo[rankId].startAddr) +
                threadkeyNumForShard_[serialNum][rankId][keyIndex] * valueItemSize);
            float *valueDest = reinterpret_cast<float *>(static_cast<u8 *>(tempValues_.ptr()) +
                *(result.first) * valueItemSize);

            // New key: initialize the slot; repeated key: accumulate into it.
            if (result.second == InsertResult::NEW_INSERTED) {
                s32 ret = memcpy_sp(valueDest, valueItemSize, valueSrc, valueItemSize);
                CHK_PRT_RET(ret != 0, HCCL_ERROR("memcpy_sp failed, valueItemSize[%u], ret[%d]", valueItemSize, ret),);
            } else {
                FloatHighValueSum(valueSrc, valueDest, valueCount);
            }
        }
    }
}

// Long-lived reduce worker thread. Publishes its tid (Init spins on it as a
// startup barrier), then loops: wait on reduceCv_ until this shard's running
// flag is set (or shutdown), run VectorReduce on the current task, clear the
// flag, bump the completion counter under reduceMutex_, and notify ReduceSum.
// On shutdown the thread frees and nulls its own shard dedup table.
void OpCollRemoteUpdateExecutor::ReduceThread(u32 serialNum)
{
    reduceThreadIds_[serialNum] = SalGetTid();
    while (!reduceShutDown_) {
        {
            // Wait for work; the predicate also checks shutdown so DeInit's
            // notify_all cannot be missed.
            std::unique_lock<std::mutex> lock(cvMutex_);
            reduceCv_.wait(lock, [this, &serialNum] {return (reduceRunningflags_[serialNum] || reduceShutDown_);});
        }

        if (reduceShutDown_) {
            break;
        }

        VectorReduce(tempReduceTaskPtr_, serialNum);

        {
            std::unique_lock<std::mutex> lock(cvMutex_);
            reduceRunningflags_[serialNum] = false;
        }

        {
            // Completion counter is read by ReduceSum's wait predicate under the same mutex.
            std::unique_lock<std::mutex> lock(reduceMutex_);
            compThreadNum_++;
        }

        reduceEndCv_.notify_all();
    }
    delete realKeyMapForShard_[serialNum];
    realKeyMapForShard_[serialNum] = nullptr;
}

// Executes a MEMCPY task: synchronous driver copy when no stream is given,
// otherwise an async device-to-device copy on the task's stream.
// Fix: the original dereferenced the dynamic_cast result without a null check.
HcclResult OpCollRemoteUpdateExecutor::TaskMemcpy(OpTaskPtr &task)
{
    MemTask *taskPtr = dynamic_cast<MemTask *>(task.get());
    CHK_PTR_NULL(taskPtr);
    void *dstMem = taskPtr->dstMem;
    u64 maxSize = taskPtr->maxSize;

    if (taskPtr->stream == nullptr) {
        TIME_PRINT(CHK_RET(hrtDrvMemCpy(dstMem, maxSize, taskPtr->srcMem, taskPtr->size)));
    } else {
        TIME_PRINT(CHK_RET(hrtMemAsyncCopy(dstMem, maxSize, taskPtr->srcMem, taskPtr->size,
            HcclRtMemcpyKind::HCCL_RT_MEMCPY_KIND_DEVICE_TO_DEVICE, taskPtr->stream)));
    }

    return HCCL_SUCCESS;
}

} // namespace hccl
