/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2022. All rights reserved.
 * Description: 集合通信算子信息库
 * Author: lilianlin
 * Create: 2019-11-28
 */

#include <nlohmann/json.hpp>
#include "hcom_ops_kernel_builder.h"
#include "hcom_graph_optimizer.h"
#include <securec.h>
#include <functional>
#include <vector>
#include <algorithm>
#include <memory>
#include <mutex>
#include "graph/utils/tensor_utils.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/utils/node_utils.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/op_desc_utils.h"
#include "graph/ge_local_context.h"
#include "framework/memory/memory_api.h"
#include "framework/common/ge_types.h" // ge对外options

#include "hccl/hcom.h"
#include "hcom_pub.h"
#include "ops_kernel_builder_registry.h"
#include "hcom_op_utils.h"
#include "offline_build_config_parse.h"
#include "param_check_pub.h"
#include "config.h"
#include "workflow_pub.h"
#include "coll_alg_utils.h"
#include "transport_heterog_def.h"
#include "externalinput_pub.h"
#include "adapter/adapter_rts.h"

using namespace std;

namespace hccl {
// Default task count written to "_hccl_task_num" for nodes that opt out of the
// full running-param calculation (see HcomCalcOpRunningParam).
const u32 DEFAULT_TASK_NUM = 254;
// Node-name marker: Broadcast/Gather nodes containing this substring skip the
// running-param calculation.
const std::string NO_CALCULATION = "_NO_CALCULATION";
// Serializes the SetTaskNumCalMode(true)/...(false) window in GetAndSetTaskNum.
static std::mutex g_taskNumCalModeMutex;
REGISTER_OPS_KERNEL_BUILDER(HCCL_OPS_LIB_NAME, hccl::HcomOpsKernelBuilder);
// Defaulted: the builder needs no explicit construction work.
HcomOpsKernelBuilder::HcomOpsKernelBuilder() = default;

// Defaulted: the builder owns no resources requiring explicit destruction.
HcomOpsKernelBuilder::~HcomOpsKernelBuilder() = default;

// Returns the run parameters for a node: workspace size, stream count and the
// atomic flag (all recorded on the OpDesc by HcomCalcOpRunningParam).
// The workflow mode is temporarily overridden for the duration of the call;
// it is restored on EVERY exit path (the original leaked the override on the
// error paths, since CHK_PRT_RET returned before SetWorkflowMode(lastWorkflowMode)).
ge::Status HcomOpsKernelBuilder::CalcOpRunningParam(ge::Node& node)
{
    HcclWorkflowMode lastWorkflowMode = GetWorkflowMode();
    SetWorkflowMode(HcclWorkflowMode::HCCL_WORKFLOW_MODE_OPS_KERNEL_INFO_LIB);

    if (!node.GetOpDesc()) {
        HCCL_ERROR("[Calc][OpRunningParam]errNo[0x%016llx] GetOpDesc failed. null ptr.",
            HCOM_ERROR_CODE(HCCL_E_PTR));
        SetWorkflowMode(lastWorkflowMode);
        return ge::INTERNAL_ERROR;
    }

    HcclResult ret = MarkRemoteAccessMemoryType(node);
    if (ret != HCCL_SUCCESS) {
        HCCL_ERROR("[Calc][OpRunningParam]errNo[0x%016llx] Mark Remote Access Memory Type failed.",
            HCOM_ERROR_CODE(ret));
        SetWorkflowMode(lastWorkflowMode);
        return ge::INTERNAL_ERROR;
    }
    // Note: the original re-checked the same `ret` a second time here with a
    // different message; that check was unreachable and has been removed.

    bool unknownShapeNode = false;
    if (ge::NodeUtils::GetNodeUnknownShapeStatus(node, unknownShapeNode) != ge::GRAPH_SUCCESS) {
        HCCL_ERROR("[Calc][OpRunningParam]node[%s] get node unknown status failed", node.GetName().c_str());
        SetWorkflowMode(lastWorkflowMode);
        return ge::INTERNAL_ERROR;
    }
    if (unknownShapeNode) {
        // Unknown-shape nodes are parameterized at execution time instead.
        HCCL_INFO("node:%s is unknown shape, does not need to Calc Op Running Param", node.GetName().c_str());
        SetWorkflowMode(lastWorkflowMode);
        return ge::SUCCESS;
    }

    ret = HcomCalcOpRunningParam(node);
    if (ret != HCCL_SUCCESS) {
        HCCL_ERROR("[Calc][OpRunningParam]errNo[0x%016llx] Calc Op Running Params failed.",
            HCOM_ERROR_CODE(ret));
        SetWorkflowMode(lastWorkflowMode);
        return ge::INTERNAL_ERROR;
    }
    SetWorkflowMode(lastWorkflowMode);
    return ge::SUCCESS;
}

// Reads the optional ATTR_NAME_NEED_MAP_RANK_ID attribute from the op desc.
// needMapRank is left unchanged when the attribute is absent.
// Returns HCCL_E_PARA if the attribute exists but cannot be read as a bool.
HcclResult HcomOpsKernelBuilder::GetNeedMapRankFromDesc(const ge::OpDescPtr &op, bool &needMapRank)
{
    // ATTR_NAME_NEED_MAP_RANK_ID
    if (ge::AttrUtils::HasAttr(op, ge::ATTR_NAME_NEED_MAP_RANK_ID)) {
        if (ge::AttrUtils::GetBool(op, ge::ATTR_NAME_NEED_MAP_RANK_ID, needMapRank) == false) {
            // Fix: the adjacent string literals previously concatenated to
            // "fromopDesc"; the missing space is restored.
            HCCL_ERROR("[Get][needMapRank]errNo[0x%016llx]: get need map rank failed. get \"need map rank\" from "
                "opDesc failed", HCOM_ERROR_CODE(HCCL_E_PARA));
            return HCCL_E_PARA;
        }
    }
    HCCL_INFO("[Get][needMapRank] needMapRank[%u] success.", needMapRank);
    return HCCL_SUCCESS;
}

// Builds the HCCL task definition for a known-shape collective node and appends
// it to taskDefList. Unknown-shape nodes are skipped (handled at execution time).
// Fix: this function returns ge::Status, but the original returned the
// HcclResult values HCCL_E_PARA / HCCL_SUCCESS on the first two paths; they are
// corrected to ge::INTERNAL_ERROR / ge::SUCCESS to match every other return.
ge::Status HcomOpsKernelBuilder::GenerateTask(const ge::Node &node, ge::RunContext &runContext,
    std::vector<domi::TaskDef> &taskDefList)
{
    bool unknownShapeNode = false;
    CHK_PRT_RET((ge::NodeUtils::GetNodeUnknownShapeStatus(node, unknownShapeNode) != ge::GRAPH_SUCCESS),
        HCCL_ERROR("[Generate][Task]node[%s] get node unknown status failed", node.GetName().c_str()),
        ge::INTERNAL_ERROR);
    if (unknownShapeNode) {
        HCCL_INFO("op:%s is unknown shape, does not need to generate Task.", node.GetName().c_str());
        return ge::SUCCESS;
    }

    CHK_PRT_RET(!node.GetOpDesc(), HCCL_ERROR("[Generate][Task]errNo[0x%016llx] opDesc is null.", \
        HCOM_ERROR_CODE(HCCL_E_PTR)), ge::INTERNAL_ERROR);
    std::string sCollectiveType = node.GetOpDesc()->GetType();
    HcclResult ret = CheckSupportedOP(sCollectiveType);
    CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[Generate][Task]errNo[0x%016llx] op type[%s] is not supported.",
        HCOM_ERROR_CODE(ret), sCollectiveType.c_str()), ge::INTERNAL_ERROR);

    // Gather the parameters hcom needs.
    HCCL_KERNEL_INFO_PRIVATE_DEF privateDefBuf;
    std::string nodeName = node.GetOpDesc()->GetName();
    CHK_PRT_RET(nodeName.empty(), HCCL_ERROR("[Generate][Task]errNo[0x%016llx] op[%s] get tag name failed. node name"
        "is empty.", HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str()), ge::INTERNAL_ERROR);
    std::hash<std::string> hashString;
    // Node-name length is unbounded, so ship a hash of the name instead.
    privateDefBuf.nodeNameHash = hashString(nodeName);

    ret = GetRootGraphID(node, privateDefBuf.graphId);
    CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[Generate][Task]errNo[0x%016llx] node[%s] find root graph failed",
        HCOM_ERROR_CODE(ret), nodeName.c_str()), ge::INTERNAL_ERROR);

    std::string sGroup;
    int64_t hcomComm = 0;
    ret = GetCommFromOpDesc(node.GetOpDesc(), hcomComm, sGroup);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Generate][Task]errNo[0x%016llx] get comm and group failed.", HCOM_ERROR_CODE(ret)),
        ge::INTERNAL_ERROR);

    // A nonzero comm handle takes precedence; otherwise the group name string
    // (including its NUL terminator) is copied into the private def.
    if (hcomComm != 0) {
        privateDefBuf.comm = hcomComm;
    } else {
        s32 sret = memcpy_s(&privateDefBuf.group[0], sizeof(privateDefBuf.group), sGroup.c_str(),
                            (sGroup.length() + 1));
        CHK_PRT_RET(sret != EOK, HCCL_ERROR("[Generate][Task]errNo[0x%016llx] memcpy failed. ret[%d],"
            "params:destMaxSize[%zu],count[%zu]", HCOM_ERROR_CODE(HCCL_E_MEMORY), sret, sizeof(privateDefBuf.group),
            (sGroup.length() + 1)), ge::INTERNAL_ERROR);
    }

    // Set the group attribute uniformly for aicpu/mc2 ops; GE guarantees the
    // kernel-expansion ordering of aicpu and mc2 kernels within a communicator.
    std::vector<std::string> groupList(1, sGroup);
    CHK_PRT_RET(!ge::AttrUtils::SetListStr(node.GetOpDesc(), "_hccl_group_id_list", groupList),
        HCCL_ERROR("[Generate][Task]Set group id list attr for current node failed, group:%s", sGroup.c_str()),
        ge::INTERNAL_ERROR);

    ret = GetSrcRankFromDesc(node.GetOpDesc(), privateDefBuf.srcRank, sCollectiveType);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Generate][Task]errNo[0x%016llx] get src_rank failed.", HCOM_ERROR_CODE(ret)),
        ge::INTERNAL_ERROR);

    ret = GetDestRankFromDesc(node.GetOpDesc(), privateDefBuf.destRank, sCollectiveType);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Generate][Task]errNo[0x%016llx] get dest_rank failed.", HCOM_ERROR_CODE(ret)),
        ge::INTERNAL_ERROR);

    ret = GetSrTagFromDesc(node.GetOpDesc(), privateDefBuf.srTag, sCollectiveType);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Generate][Task]errNo[0x%016llx] get sr_tag failed.", HCOM_ERROR_CODE(ret)),
        ge::INTERNAL_ERROR);

    ret = GetOriginalGraphShapeTypeFromDesc(node.GetOpDesc(), privateDefBuf.originalGraphShapeType);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Generate][Task]errNo[0x%016llx] get shapeType failed.", HCOM_ERROR_CODE(ret)),
        ge::INTERNAL_ERROR);

    ret = GetRemoteLookupAttr(node, privateDefBuf.esInfo, sCollectiveType);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Generate][Task]errNo[0x%016llx] get lookup params failed.", HCOM_ERROR_CODE(ret)),
        ge::INTERNAL_ERROR);

    ret = HcomOpUtils::ConversionOpDataType(node.GetOpDesc(), sCollectiveType, privateDefBuf.dataType);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Generate][Task]errNo[0x%016llx] conversion op data type failed.", HCOM_ERROR_CODE(ret)),
        ge::INTERNAL_ERROR);

    privateDefBuf.privateDefSize = sizeof(HCCL_KERNEL_INFO_PRIVATE_DEF);

    privateDefBuf.tensorNum = 0;
    constexpr const char *kCleanSeparately = "1";
    std::string atomic_clean_policy;
    bool needCleanSeparately = (ge::GetThreadLocalContext().GetOption(ge::ATOMIC_CLEAN_POLICY, atomic_clean_policy)\
        == ge::GRAPH_SUCCESS) && (atomic_clean_policy == kCleanSeparately);
    if (needCleanSeparately && ((sCollectiveType == HCCL_KERNEL_OP_TYPE_REDUCESCATTER) ||\
        (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLREDUCE) || (sCollectiveType == HCCL_KERNEL_OP_TYPE_REDUCE))) {
        // Number of input tensors; a nonzero value triggers the clearing-task
        // private-def layout in GenerateTaskPrivateDef.
        privateDefBuf.tensorNum = node.GetOpDesc()->GetInputsSize();
    }
    ret = GetNeedMapRankFromDesc(node.GetOpDesc(), privateDefBuf.needMapRank);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Generate][Task]errNo[0x%016llx] get need map rank failed.", HCOM_ERROR_CODE(ret)),
        ge::INTERNAL_ERROR);

    if (IsOfflineCompilation()) {
        privateDefBuf.isOfflineComp = true;
        CHK_RET(GetOffDeviceTypeWithoutDev(privateDefBuf.devType));
        HCCL_DEBUG("GenerateTask: isOfflineComp[%u] devType[%u]", privateDefBuf.isOfflineComp, privateDefBuf.devType);
    }

    HCCL_RUN_INFO("GenerateTask: graph[%u], node[%s]-hash[%zu], opType[%s], opID[%d], comm[%lld], group[%s], "
        "srcRank[%u], dstRank[%u], srTag[%u], dataType[%s].", privateDefBuf.graphId, nodeName.c_str(),
        privateDefBuf.nodeNameHash, sCollectiveType.c_str(), node.GetOpDesc()->GetId(), hcomComm, sGroup.c_str(),
        privateDefBuf.srcRank, privateDefBuf.destRank, privateDefBuf.srTag,
        GetDataTypeEnumStr(privateDefBuf.dataType).c_str());

    domi::TaskDef taskDef;
    ret = GenerateTaskPrivateDef(node, privateDefBuf, taskDef, sCollectiveType);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Generate][Task]errNo[0x%016llx] generate taskprivatedef failed.", HCOM_ERROR_CODE(ret)),
        ge::INTERNAL_ERROR);

    ret = GenerateTaskDef(node, privateDefBuf, taskDef);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Generate][Task]errNo[0x%016llx] generate taskdef failed.", HCOM_ERROR_CODE(ret)),
        ge::INTERNAL_ERROR);
    taskDefList.push_back(taskDef);
    return ge::SUCCESS;
}

/**
 * @brief Serializes the per-op private definition into taskDef, selecting the
 *        layout variant that matches the collective type.
 *
 * AllToAll-family, ReduceScatterV and AllGatherV ops carry extended private
 * defs with their variable count/displacement parameters. Every other op ships
 * the base struct, appended with tensor info when a clearing task must be
 * delivered (tensorNum != 0).
 */
HcclResult HcomOpsKernelBuilder::GenerateTaskPrivateDef(const ge::Node &node,
    HCCL_KERNEL_INFO_PRIVATE_DEF &privateDefBuf, domi::TaskDef &taskDef,
    const std::string sCollectiveType)
{
    const bool isAlltoallFamily = (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALLV) ||
        (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALLVC) ||
        (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALL);

    if (isAlltoallFamily) {
        HCCL_ALLTOALLV_KERNEL_INFO_PRIVATE_DEF a2aDef(privateDefBuf);
        CHK_RET(SetAlltoAllVParams(node, a2aDef, sCollectiveType));
        taskDef.set_private_def(&a2aDef, sizeof(HCCL_ALLTOALLV_KERNEL_INFO_PRIVATE_DEF));
        return HCCL_SUCCESS;
    }

    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_REDUCESCATTERV) {
        HCCL_REDUCESCATTERV_KERNEL_INFO_PRIVATE_DEF rsvDef(privateDefBuf);
        CHK_RET(SetReduceScatterVParams(node, rsvDef));
        taskDef.set_private_def(&rsvDef, sizeof(HCCL_REDUCESCATTERV_KERNEL_INFO_PRIVATE_DEF));
        return HCCL_SUCCESS;
    }

    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLGATHERV) {
        HCCL_ALLGATHERV_KERNEL_INFO_PRIVATE_DEF agvDef(privateDefBuf);
        CHK_RET(SetAllGatherVParams(node, agvDef));
        taskDef.set_private_def(&agvDef, sizeof(HCCL_ALLGATHERV_KERNEL_INFO_PRIVATE_DEF));
        return HCCL_SUCCESS;
    }

    if (privateDefBuf.tensorNum == 0) {
        HCCL_DEBUG("[Generate][TaskPrivateDef]Not Delivering a Clearing Task.");
        taskDef.set_private_def(&privateDefBuf, sizeof(HCCL_KERNEL_INFO_PRIVATE_DEF));
        return HCCL_SUCCESS;
    }

    HCCL_DEBUG("[Generate][TaskPrivateDef]Delivering Clearing Task.");
    CHK_RET(SetPrivateDefWithTensorInfo(node, privateDefBuf, taskDef));
    return HCCL_SUCCESS;
}

// Computes the op's workspace requirement for the known-shape path and records
// it on the OpDesc; opMemSize returns the computed byte count to the caller.
HcclResult HcomOpsKernelBuilder::CalAndSetOpWorkerSpaceForKnowShape(ge::Node& node, const std::string &sCollectiveType,
    u64 &opMemSize)
{
    // Read the original-graph shape type; the value itself is unused here but
    // the call's status is still checked.
    u32 shapeType = ORIGINAL_GRAPH_KNOWNSHAPE_TYPE;
    CHK_RET(GetOriginalGraphShapeTypeFromDesc(node.GetOpDesc(), shapeType));

    // Query the workspace size, then store it on the op desc.
    HcclResult calcRet = GetOpWorkspaceMemSize(node, sCollectiveType, opMemSize);
    CHK_PRT_RET(calcRet != HCCL_SUCCESS,
        HCCL_ERROR("[HcomCalc][OpRunningParam]errNo[0x%016llx] get op[%s] workspace size failed.",
        HCOM_ERROR_CODE(calcRet),  sCollectiveType.c_str()), HCCL_E_INTERNAL);

    std::vector<int64_t> workspaceBytes{static_cast<int64_t>(opMemSize)};
    node.GetOpDesc()->SetWorkspaceBytes(workspaceBytes);
    return HCCL_SUCCESS;
}

// Computes and records on the node's OpDesc everything GE needs to run this
// collective: slave-stream count ("used_stream_num"), workspace bytes, memory
// attributes, output sizes, atomic input indexes, the task count
// ("_hccl_task_num") and the attached-stream info list.
HcclResult HcomOpsKernelBuilder::HcomCalcOpRunningParam(ge::Node& node)
{
    // Values reported back to GE.
    u64 streamNum = 0;
    u64 opMemSize = 0;
    HcclResult ret;
    HCCL_INFO("calculate hccl runing parameters start.");

    std::string sCollectiveType = node.GetOpDesc()->GetType();
    std::string nodeName = node.GetName();
    // Broadcast/Gather nodes whose name carries the "_NO_CALCULATION" marker
    // skip the full calculation: 0 slave streams, the known-shape workspace
    // size, and the fixed DEFAULT_TASK_NUM task count.
    if ((sCollectiveType == HCCL_KERNEL_OP_TYPE_BROADCAST || sCollectiveType == HCCL_KERNEL_OP_TYPE_GATHER) &&
        nodeName.find(NO_CALCULATION) != std::string::npos) {
        if (ge::AttrUtils::SetInt(node.GetOpDesc(), "used_stream_num", streamNum) == false) {
            HCCL_ERROR("[Calc][OpRunningParam]errNo[0x%016llx] op[%s]: set stream number[%llu] to OpDesc failed.",
                    HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str(), streamNum);
            return HCCL_E_INTERNAL;
        }
        CHK_RET(CalAndSetOpWorkerSpaceForKnowShape(node, sCollectiveType, opMemSize));
        if (ge::AttrUtils::SetInt(node.GetOpDesc(), "_hccl_task_num", DEFAULT_TASK_NUM) == false) {
            HCCL_ERROR("[HcomCalc][OpRunningParam]errNo[0x%016llx] op[%s]: set _hccl_task_num to OpDesc failed.",
                HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str());
            return HCCL_E_PARA;
        }
        HCCL_INFO("node[%s] no need calcute hccl runing parameters. stream num:[%llu], workspace size:[%llu]bytes",
            nodeName.c_str(), streamNum, opMemSize);
        return HCCL_SUCCESS;
    }

    ret = CheckSupportedOP(sCollectiveType);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[HcomCalc][OpRunningParam]errNo[0x%016llx] op type[%s] is not supported.",
            HCOM_ERROR_CODE(ret), sCollectiveType.c_str()), HCCL_E_NOT_SUPPORT);

    std::string groupListString;
    std::string curGroup;
    int64_t hcomComm = 0;
    s32 backloggedGroupSize = 0;
    s32 deviceNumPerServer = 0;
    s32 serverNum = 0;
    bool multiModuleDiffDeviceNumMode = false;
    std::vector<u32> groupRanks;
    CHK_RET(HcomOpUtils::GetGroupFromOpDesc(node.GetOpDesc(), curGroup));
    auto iter = HCCL_OPTYPE_NAME_MAP.find(sCollectiveType);
    HcclCMDType opType = (iter != HCCL_OPTYPE_NAME_MAP.end()) ? iter->second : HcclCMDType::HCCL_CMD_INVALID;
    u64 opDataSize = 0;
    CHK_RET(GetOpDataSize(node, opDataSize));

    // Determine the slave-stream count. The topology source differs by
    // scenario, tried in priority order:
    //   1) offline compilation takes its own path;
    //   2) the HCOM group-list option from the GE thread-local context;
    //   3) backlogged group info queried by group name;
    //   4) an explicit "comm" attribute on the op;
    //   5) fallback: query by group name.
    if (IsOfflineCompilation()) {
        CHK_RET(GetStreamNumOfflineComp(sCollectiveType, curGroup, streamNum));
    } else if (ge::GetThreadLocalContext().GetOption(ge::OPTION_EXEC_HCOM_GROUPLIST, groupListString) ==
        ge::GRAPH_SUCCESS) {
        CHK_RET(GetDeviceAndServerNumFromGroupList(groupListString, curGroup, serverNum, deviceNumPerServer,
            multiModuleDiffDeviceNumMode));
        CHK_RET(GetSubStreamNum(deviceNumPerServer, streamNum, serverNum));
    } else if ((HcomGetbackloggedByGroup(curGroup.c_str(), groupRanks, backloggedGroupSize) == HCCL_SUCCESS) &&
        (backloggedGroupSize != 0)) {
        CHK_RET(GetServerAndDevNumFromGroup(groupRanks, serverNum, deviceNumPerServer, multiModuleDiffDeviceNumMode));
        CHK_RET(GetSubStreamNum(deviceNumPerServer, streamNum, serverNum));
    } else if (ge::AttrUtils::HasAttr(node.GetOpDesc(), "comm")) {
        bool bRet = ge::AttrUtils::GetInt(node.GetOpDesc(), "comm", hcomComm);
        CHK_PRT_RET(!bRet, HCCL_ERROR("errNo[0x%016llx] get attr \"comm\" failed. ", HCOM_ERROR_CODE(HCCL_E_PARA)),\
            HCCL_E_PARA);
        if (hcomComm != static_cast<int64_t>(CommNumHcom::COMM_VALUE_DEFAULT)) {
            CHK_RET(HcclCommGraphGetWorkspaceSubStreamNum(hcomComm, streamNum, opDataSize, opType));
            HCCL_INFO("[HcomCalcOpRunningParam][GetComm][%d].", hcomComm);
        } else {
            CHK_RET(HcomGetWorkspaceSubStreamNum(curGroup.c_str(), streamNum, opDataSize, opType));
        }
    } else {
        CHK_RET(HcomGetWorkspaceSubStreamNum(curGroup.c_str(), streamNum, opDataSize, opType));
    }

    // Embedding-service scenario: lookup ops executing in pipeline mode need 3
    // slave streams (HCCL_SUB_STREAM_ES_LOOKUP) when the key count exceeds the
    // pipelining threshold.
    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP ||
        sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_PAIRED ||
        sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_UNIQUED_PAIRED) {
        s32 maxNum = 0;
        CHK_RET(GetOpIntAttr(node.GetOpDesc(), "max_num", maxNum));
        if (maxNum > ES_PIPELINE_THRESHOLD) {
            streamNum = HCCL_SUB_STREAM_ES_LOOKUP;
            HCCL_INFO("[HcomCalcOpRunningParam] node[%s]key num > PIPELINE_THRESHOLD, run pipeline mode.",
                node.GetName().c_str());
        } else {
            streamNum = 0;
        }
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_SEND ||
               sCollectiveType == HCCL_KERNEL_OP_TYPE_RECEIVE) {
        // send/recv ops need no slave streams
        streamNum = 0;
    }

    if (ge::AttrUtils::SetInt(node.GetOpDesc(), "used_stream_num", streamNum) == false) {
        HCCL_ERROR("[HcomCalc][OpRunningParam]errNo[0x%016llx] op[%s]: set stream number[%llu] to OpDesc failed.",
                   HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str(), streamNum);
        return HCCL_E_INTERNAL;
    }

    CHK_RET(CalAndSetOpWorkerSpaceForKnowShape(node, sCollectiveType, opMemSize));
    // Set the memory attributes.
    ret = SetOpMemAttr(node, node.GetOpDesc()->GetType(), opMemSize);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
                HCCL_ERROR("[Calc][OpRunningParam]errNo[0x%016llx] set node[%s] mem attr failed.",
                    HCOM_ERROR_CODE(ret), node.GetName().c_str()),
                HCCL_E_INTERNAL);

    // Set the output sizes.
    ret = SetOpOutputMemSize(node, sCollectiveType);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[HcomCalc][OpRunningParam]errNo[0x%016llx] set op[%s] output size failed.",
            HCOM_ERROR_CODE(ret),  sCollectiveType.c_str()), HCCL_E_INTERNAL);

    // Set the atomic input index attribute.
    ret = SetOpAtomicInputIndex(node, sCollectiveType);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[HcomCalc][OpRunningParam]errNo[0x%016llx] set op[%s] atomic input index failed.",
            HCOM_ERROR_CODE(ret),  sCollectiveType.c_str()), HCCL_E_INTERNAL);

    CHK_RET(GetAndSetTaskNum(node, sCollectiveType, streamNum));
    CHK_RET(SetAttachedStreamInfoList(node, curGroup));
    return HCCL_SUCCESS;
}

// Estimates the total task count this collective will submit and records it on
// the OpDesc as "_hccl_task_num". Fixed-cost ops use constants; everything else
// is derived from topology (devices/servers), the chosen algorithm, the data
// size and the inter-server pipeline slice count.
HcclResult HcomOpsKernelBuilder::GetAndSetTaskNum(const ge::Node &node, const std::string &sCollectiveType,
    const u64 &streamNum)
{
    u32 masterTaskNum = 0;   // tasks on the master stream
    u32 slaveTaskNum = 0;    // tasks on each slave stream
    u32 piplineTaskNum = 0;  // extra tasks added by inter-server pipelining
    u32 taskNum = 0;
    HcclUs startut = TIME_NOW();
    HCCL_DEBUG("[HcomOpsKernelBuilder] Entry-GetAndSetTaskNum sCollectiveType[%s] streamNum[%llu]",
        sCollectiveType.c_str(), streamNum);

    if (!HcomOpUtils::IsNeedCalTaskNum(sCollectiveType)) {
        // Fixed-cost ops: send/recv get their own constant, others the default.
        if (sCollectiveType == HCCL_KERNEL_OP_TYPE_SEND || sCollectiveType == HCCL_KERNEL_OP_TYPE_RECEIVE) {
            taskNum = SEND_RECEIVE_TASK_NUM;
        } else {
            taskNum = OP_DEFAULT_TASK_NUM;
        }
    } else {
        s32 deviceNumPerServer = 0;
        s32 serverNum = 0;
        bool multiModuleDiffDeviceNumMode = false;
        AlgType algType;

        // The task-num-calculation mode flag is global state, so the
        // set/query/reset window is serialized across threads.
        std::unique_lock<std::mutex> lock(g_taskNumCalModeMutex);
        // Switch into task-num calculation mode.
        SetTaskNumCalMode(true);
        // Query deviceNumPerServer & serverNum.
        CHK_RET(HcomOpUtils::GetDeviceAndServerNum(node, deviceNumPerServer, serverNum, multiModuleDiffDeviceNumMode));
        SetTaskNumCalMode(false);
        lock.unlock();

        // Determine the communication algorithm.
        auto iter = HCCL_OPTYPE_NAME_MAP.find(sCollectiveType);
        HcclCMDType opType = (iter != HCCL_OPTYPE_NAME_MAP.end()) ? iter->second : HcclCMDType::HCCL_CMD_INVALID;
        auto cmdStrIter = HCOM_CMD_TYPE_STR_MAP.find(opType);
        std::string opTypeStr = (cmdStrIter != HCOM_CMD_TYPE_STR_MAP.end()) ? cmdStrIter->second : "invalid";
        CHK_RET(HcomOpUtils::GetAlgType(node, deviceNumPerServer, serverNum, opTypeStr, algType));

        // Online compilation without a ranktable file: fall back to the default.
        if ((deviceNumPerServer == 0) && (serverNum == 0)) {
            taskNum = OP_DEFAULT_TASK_NUM;
        } else {
            // Compute the inter-server pipeline slice count.
            u64 opDataSize = 0;
            CHK_RET(GetOpDataSize(node, opDataSize));

            DevType devType;
            CHK_RET(GetOffDeviceTypeWithoutDev(devType));
            u64 piplineSliceNum = CalculatePiplineSliceNum(opType, opDataSize, algType,
                devType, deviceNumPerServer, serverNum);

            // Zeroing (atomic-clean) task count.
            CHK_RET(HcomOpUtils::GetTensorCleanTaskNum(node, sCollectiveType, masterTaskNum));
            // DFX verification task count.
            CHK_RET(HcomOpUtils::GetDfxTaskNum(sCollectiveType, masterTaskNum));
            // Tasks synchronizing the master stream with slave streams.
            CHK_RET(HcomOpUtils::GetToSlaveStreamTaskNum(sCollectiveType, streamNum, piplineSliceNum, masterTaskNum));
            // Tasks synchronizing slave streams back to the master stream.
            CHK_RET(HcomOpUtils::GetToMasterStreamTaskNum(sCollectiveType, slaveTaskNum));
            // Sync tasks between pipeline slave streams and the master stream
            // for inter-server pipelining.
            piplineTaskNum += (piplineSliceNum >= MIN_PIPLINE_SLICE_NUM) ?
                piplineSliceNum * PIPLINE_STREAM_EVENT_NUM * COM_STEP_NUM : 0;

            u32 intraTaskNum = 0;
            u32 interTaskNum = 0;
            // Intra-server communication task count needs count/type/size info.
            u64 count = 0;
            u64 totalSize = 0;
            u32 dataTypeSize;
            HcclDataType dataType = HCCL_DATA_TYPE_RESERVED;
            HcclResult ret = HcomOpUtils::ConversionOpDataType(node.GetOpDesc(), sCollectiveType, dataType);
            CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[Get][OpWorkspaceMemSize]op[%s]: get data type failed. ret[%d]",
                sCollectiveType.c_str(), ret), ret);

            ret = GetCountFromOpDesc(node.GetOpDesc(), sCollectiveType, dataType, count);
            CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[Get][OpWorkspaceMemSize]op[%s]: get count failed. ret[%d]",
                sCollectiveType.c_str(), ret), ret);

            ret = SalGetDataTypeSize(dataType, dataTypeSize);
            CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[Get][OpWorkspaceMemSize]op[%s]: get data size failed. ret[%d]",
                sCollectiveType.c_str(), ret), ret);

            if (multiModuleDiffDeviceNumMode) {
                // Flattened-topology communication task count.
                CHK_RET(HcomOpUtils::GetCombineComTaskNum(sCollectiveType, serverNum, deviceNumPerServer, intraTaskNum,
                    interTaskNum));
            } else {
                totalSize = count * dataTypeSize;
                CHK_RET(HcomOpUtils::GetIntraComTaskNum(sCollectiveType, deviceNumPerServer, streamNum,
                    algType, intraTaskNum, totalSize));
                // Inter-server communication task count; slave streams carry no
                // inter-server tasks.
                CHK_RET(HcomOpUtils::GetInterComTaskNum(sCollectiveType, serverNum, deviceNumPerServer, devType,
                    interTaskNum));
            }

            // Fold the communication tasks into the per-stream totals.
            if (piplineSliceNum >= MIN_PIPLINE_SLICE_NUM) {
                masterTaskNum += intraTaskNum * piplineSliceNum;
                slaveTaskNum += intraTaskNum * piplineSliceNum;
                piplineTaskNum += interTaskNum * piplineSliceNum;
            } else {
                masterTaskNum += intraTaskNum + interTaskNum;
                slaveTaskNum += intraTaskNum;
            }
        }
    }
    // The op's task budget is the busiest stream's count.
    if (taskNum == 0) {
        taskNum = std::max(masterTaskNum, std::max(slaveTaskNum, piplineTaskNum));
    }

    if (ge::AttrUtils::SetInt(node.GetOpDesc(), "_hccl_task_num", taskNum) == false) {
        HCCL_ERROR("[HcomCalc][OpRunningParam]errNo[0x%016llx] op[%s]: set _hccl_task_num to OpDesc failed.",
            HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str());
        return HCCL_E_PARA;
    }
    HCCL_INFO("GetAndSetTaskNum success, cost time[%lld]us taskNum[%u]", DURATION_US(TIME_NOW() - startut), taskNum);
    return HCCL_SUCCESS;
}

// Computes, for each of the tensorNum tensors, the offset and size of the
// padding gap ("crack") that follows it. Offsets are relative to the first
// tensor's address (matching the LoadTask input address base), not the memory
// base address.
HcclResult HcomOpsKernelBuilder::GetCrackParamsInfo(const ge::Node& node,
    u32 tensorNum, int64_t* tensorOffset, int64_t* tensorSize,
    int64_t* crackOffset, int64_t* crackSize)
{
    for (u32 idx = 0; idx < tensorNum; idx++) {
        // The crack starts right after the tensor's payload.
        crackOffset[idx] = tensorOffset[idx] + tensorSize[idx] - tensorOffset[0];
        // Padded footprint: round up to 32B, add one extra 32B block, then
        // round the result up to 512B. The crack is the excess over the raw size.
        int64_t paddedSize = (tensorSize[idx] + TENSOR_ALIGNMENT_32 - 1) / TENSOR_ALIGNMENT_32 * TENSOR_ALIGNMENT_32
            + TENSOR_ALIGNMENT_32;
        paddedSize = (paddedSize + TENSOR_ALIGNMENT_512 - 1) / TENSOR_ALIGNMENT_512 * TENSOR_ALIGNMENT_512;
        crackSize[idx] = paddedSize - tensorSize[idx];
    }

    return HCCL_SUCCESS;
}

// Fills the caller-provided tensorOffset/tensorSize arrays (each holding
// tensorNum elements) with the op's input offsets and sizes.
// Fix: memcpy_s previously received the SOURCE byte count as destMax, which
// defeats its destination-overflow guard; we now pass the real destination
// capacity (tensorNum elements) and never copy more than that.
HcclResult HcomOpsKernelBuilder::GetTensorParamsInfo(const ge::Node& node,
    u32 tensorNum, int64_t* tensorOffset, int64_t* tensorSize)
{
    std::vector<int64_t> tensorOffsetTemp;
    std::vector<int64_t> tensorSizeTemp;

    auto op = node.GetOpDesc();
    // Offsets of each input tensor.
    tensorOffsetTemp = op->GetInputOffset();
    for (size_t i = 0; i < tensorOffsetTemp.size(); i++) {
        // %zu for the size_t arguments (the original used %u).
        HCCL_DEBUG("[HcomOpsKernelBuilder] node[%s] has %zu inputs, input[%zu] addr %lld.", op->GetName().c_str(),
            op->GetInputsSize(), i, tensorOffsetTemp[i]);
    }

    // Sizes of each input tensor.
    CHK_RET(HcomOpUtils::GetAllTensorSize(op, tensorNum, tensorSizeTemp));

    if (tensorOffsetTemp.size() == 0 || tensorSizeTemp.size() == 0) {
        HCCL_WARNING("[HcomOpsKernelBuilder] The value of tensorOffset or tensorSize is 0.");
        return HCCL_SUCCESS;
    }

    // Destination capacity is tensorNum elements; clamp the copy to it.
    const size_t destBytes = static_cast<size_t>(tensorNum) * sizeof(int64_t);
    const size_t offsetBytes = std::min(destBytes, tensorOffsetTemp.size() * sizeof(int64_t));
    const size_t sizeBytes = std::min(destBytes, tensorSizeTemp.size() * sizeof(int64_t));
    CHK_SAFETY_FUNC_RET(memcpy_s(tensorOffset, destBytes, tensorOffsetTemp.data(), offsetBytes));
    CHK_SAFETY_FUNC_RET(memcpy_s(tensorSize, destBytes, tensorSizeTemp.data(), sizeBytes));

    std::string name = op->GetName();
    HCCL_DEBUG("GetTensorParamsInfo name [%s].", name.c_str());
    return HCCL_SUCCESS;
}


// Serializes the base private def followed by the per-tensor crack offset and
// crack size arrays into taskDef (the layout consumed by the clearing-task path).
// Fixes: the variable-length arrays (int64_t arr[tensorNum]) were a non-standard
// C++ extension and are replaced with std::vector; the raw malloc() buffer
// leaked whenever a CHK_SAFETY_FUNC_RET returned early before free(), so the
// output buffer is now vector-backed as well.
HcclResult HcomOpsKernelBuilder::SetPrivateDefWithTensorInfo(const ge::Node &node,
    HCCL_KERNEL_INFO_PRIVATE_DEF &privateDefBuf, domi::TaskDef &taskDef)
{
    // Collect tensor offsets/sizes before set_private_def.
    std::vector<int64_t> tensorOffset(privateDefBuf.tensorNum, 0);
    std::vector<int64_t> tensorSize(privateDefBuf.tensorNum, 0);

    CHK_RET(GetTensorParamsInfo(node, privateDefBuf.tensorNum, tensorOffset.data(), tensorSize.data()));
    for (u32 i = 0; i < privateDefBuf.tensorNum; i++) {
        HCCL_DEBUG("[Builder][GetTensorParamsInfo] tensorOffset[%u] %lld tensorSize[%u] %lld.",\
            i, tensorOffset[i], i, tensorSize[i]);
    }

    // Offsets/sizes of the gaps ("cracks") between tensors.
    std::vector<int64_t> crackOffset(privateDefBuf.tensorNum, 0);
    std::vector<int64_t> crackSize(privateDefBuf.tensorNum, 0);
    CHK_RET(GetCrackParamsInfo(node, privateDefBuf.tensorNum, tensorOffset.data(), tensorSize.data(),
        crackOffset.data(), crackSize.data()));

    for (u32 i = 0; i < privateDefBuf.tensorNum; i++) {
        HCCL_DEBUG("[Builder][SetPrivateDefWithTensorInfo] crackOffset[%u] %lld crackSize[%u] %lld.",\
            i, crackOffset[i], i, crackSize[i]);
    }

    // Append the crack info right after the fixed private-def struct.
    const size_t crackBytes = crackOffset.size() * sizeof(int64_t);
    const size_t privateDefBufSize = sizeof(HCCL_KERNEL_INFO_PRIVATE_DEF) + crackBytes + crackBytes;
    std::vector<s8> privateDefData(privateDefBufSize, 0);
    s8 *base = privateDefData.data();

    CHK_SAFETY_FUNC_RET(memcpy_s(base, sizeof(privateDefBuf), &privateDefBuf, sizeof(privateDefBuf)));
    CHK_SAFETY_FUNC_RET(memcpy_s(base + sizeof(privateDefBuf), crackBytes, crackOffset.data(), crackBytes));
    CHK_SAFETY_FUNC_RET(memcpy_s(base + sizeof(privateDefBuf) + crackBytes, crackBytes, crackSize.data(), crackBytes));

    // set_private_def copies the buffer, so the vector may be destroyed on return.
    taskDef.set_private_def(base, privateDefBufSize);
    return HCCL_SUCCESS;
}

// Populates the HCCL-kernel section of taskDef for the node: task type, stream
// id, hccl op type and op index; remote-lookup ops additionally flag every
// input for zero copy.
HcclResult HcomOpsKernelBuilder::GenerateTaskDef(const ge::Node &node,
    HCCL_KERNEL_INFO_PRIVATE_DEF &privateDefBuf, domi::TaskDef &taskDef)
{
    auto opDesc = node.GetOpDesc();

    taskDef.clear_kernel_hccl();
    domi::KernelHcclDef *hcclKernel = taskDef.mutable_kernel_hccl();
    CHK_PRT_RET((hcclKernel == nullptr), HCCL_ERROR("[Generate][Task]node[%s]: kernelDefHccl is null.",
        opDesc->GetName().c_str()), HCCL_E_PTR);

    taskDef.set_type(RT_MODEL_TASK_HCCL);
    taskDef.set_stream_id(opDesc->GetStreamId());

    hcclKernel->set_hccl_type(opDesc->GetType());
    hcclKernel->set_op_index(opDesc->GetId());

    if (opDesc->GetType() == HCCL_KERNEL_OP_TYPE_REMOTE_LOOKUP) {
        const uint32_t inputCount = static_cast<uint32_t>(opDesc->GetInputsSize());
        for (uint32_t idx = 0; idx < inputCount; ++idx) {
            hcclKernel->add_input_zero_copy_flag(ZERO_COPY_USED);
        }
    }
    return HCCL_SUCCESS;
}

// Returns (by out-param) the list of collective op types this builder supports.
HcclResult HcomOpsKernelBuilder::GetSupportedOP(std::vector<std::string> &hcclSupportOp) const
{
    hcclSupportOp = std::vector<std::string>(HCOM_SUPPORTED_OP_TYPE.begin(), HCOM_SUPPORTED_OP_TYPE.end());
    return HCCL_SUCCESS;
}

// True when the alltoallv algorithm is configured as single-layer:
// level0 == NA and level1 == pairwise.
// The config is fetched once instead of twice (the original called
// GetExternalInputHcclAlgoConfig for each level).
bool HcomOpsKernelBuilder::UseOneLayerAlltoAllv()
{
    const auto &algoConfig = GetExternalInputHcclAlgoConfig(HcclCMDType::HCCL_CMD_ALLTOALLV);
    return (algoConfig[0] == HcclAlgoType::HCCL_ALGO_TYPE_NA &&
        algoConfig[1] == HcclAlgoType::HCCL_ALGO_TYPE_PAIRWISE);
}

/**
 * Build a tag string for workspace caching from the node's session id, graph id and op tag.
 * @param node       node whose owner compute graph supplies the session/graph ids
 * @param tag        op-level "tag" attribute value
 * @param uniqueTag  [out] concatenated id string; left unchanged if the owner graph is null
 */
void HcomOpsKernelBuilder::GetUniqueTagFromNode(const ge::Node& node, s32 tag, string &uniqueTag)
{
    auto ownerGraph = node.GetOwnerComputeGraphBarePtr();
    // The original dereferenced this bare pointer unconditionally; guard against null.
    if (ownerGraph == nullptr) {
        HCCL_ERROR("[Get][UniqueTagFromNode]errNo[0x%016llx] owner compute graph is null.",
            HCOM_ERROR_CODE(HCCL_E_PTR));
        return;
    }
    u64 sessionId = ownerGraph->GetSessionID();
    u32 graphId = ownerGraph->GetGraphID();
    // NOTE(review): ids are concatenated without separators, so e.g. (1,12,3) and (11,2,3)
    // yield the same string — kept as-is for tag compatibility; confirm uniqueness upstream.
    uniqueTag = std::to_string(sessionId) + std::to_string(graphId) + std::to_string(tag);
    HCCL_INFO("get sessionId[%llu] graphId[%u] tag[%d] uniqueTag[%s]",
        sessionId, graphId, tag, uniqueTag.c_str());
}

/**
 * Compute the workspace size required by embedding-service lookup/update ops.
 * The result is cached per unique (session, graph, tag) key so the same tag's
 * workspace is only accounted once; repeat calls report 0.
 * @param node      op node carrying "tag", "max_num" and "_embedding_dim" attrs
 * @param opMemSize [out] workspace bytes (0 when the tag was already accounted)
 * @param flags     ES flags; ES_FLAGS_ENABLE_COUNTER adds key-counter memory
 */
HcclResult HcomOpsKernelBuilder::GetLookupUpdateWorkspace(ge::Node& node, u64 &opMemSize, s32 flags)
{
    s32 tag = 0;
    string uniqueTag{};
    ge::OpDescPtr op = node.GetOpDesc();
    CHK_RET(GetOpIntAttr(op, "tag", tag));
    GetUniqueTagFromNode(node, tag, uniqueTag);

    // Single lookup: the original did find() and then a second operator[] lookup just for logging.
    auto cachedIt = nodeTagopMemSizeMap_.find(uniqueTag);
    if (cachedIt != nodeTagopMemSizeMap_.end()) {
        opMemSize = 0;
        HCCL_INFO("uniqueTag[%s] has been get Workspace size[%llu]",
            uniqueTag.c_str(), cachedIt->second);
        return HCCL_SUCCESS;
    }

    s32 maxNum = 0;
    CHK_RET(GetOpIntAttr(op, "max_num", maxNum));

    s32 maxEmbeddingDim = 0;
    CHK_RET(GetOpIntAttr(op, "_embedding_dim", maxEmbeddingDim));
    s32 maxValueItemSize = maxEmbeddingDim * sizeof(float);
    HCCL_INFO("GetLookupUpdateWorkspace get maxEmbeddingDim[%d]", maxEmbeddingDim);

    // "+1" reserves memory for the table id.
    s64 keyMemSize = (((maxNum + 1) * SIZE_TABLE[HCCL_DATA_TYPE_INT64] / IPC_MEM_ALIGNMENT_BYTE) + 1) *
        IPC_MEM_ALIGNMENT_BYTE;
    // Double the key staging memory: one part for shard multi-thread dedup, one for the send key buf.
    keyMemSize += keyMemSize;
    s64 valueMemSize = ((maxNum * maxValueItemSize / IPC_MEM_ALIGNMENT_BYTE) + 1) * IPC_MEM_ALIGNMENT_BYTE;

    opMemSize = keyMemSize + valueMemSize;

    if (flags == ES_FLAGS_ENABLE_COUNTER) {
        u64 counterSize = ((static_cast<u64>(maxNum) * ES_KEY_COUNTER_MEM_BYTES_SIZE + IPC_MEM_ALIGNMENT_BYTE - 1) /
            IPC_MEM_ALIGNMENT_BYTE) * IPC_MEM_ALIGNMENT_BYTE;
        opMemSize += counterSize;
    }

    u64 rdmaEnvelopeMemSize = Align<u64>(sizeof(HcclEsRdmaInfo) * ES_MAX_PS_NUM, IPC_MEM_ALIGNMENT_BYTE);
    opMemSize += rdmaEnvelopeMemSize;

    nodeTagopMemSizeMap_[uniqueTag] = opMemSize;
    HCCL_INFO("keyMemSize[%lld], valueMemSize[%lld], rdmaEnvelopeMemSize[%llu], opMemSize[%llu]",
        keyMemSize, valueMemSize, rdmaEnvelopeMemSize, opMemSize);

    return HCCL_SUCCESS;
}

/**
 * Calculate the workspace memory size a collective op requires.
 * Embedding-service ops delegate to GetLookupUpdateWorkspace; other collectives
 * derive the size from data type, element count and rank info, then add a fixed
 * 32KB footprint and align to HCCL_ALIGN_SIZE.
 */
HcclResult HcomOpsKernelBuilder::GetOpWorkspaceMemSize(ge::Node& node, const std::string &sCollectiveType,
    u64 &opMemSize)
{
    HcclResult ret;
    u64 count = 0;
    std::string sGroup;
    int64_t hcomComm = 0;
    u32 dataTypeSize = 0; // initialized: the original left it indeterminate until SalGetDataTypeSize
    HcclDataType dataType = HCCL_DATA_TYPE_RESERVED;
    const u32 alignSize = HCCL_ALIGN_SIZE;
    s32 flags = 0;
    // Embedding-service ops all share one workspace calculation; only the lookup
    // variants carry a "flags" attribute (the others keep flags == 0, matching the
    // original's three duplicated branches).
    bool esWithFlags = (sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP ||
        sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_PAIRED ||
        sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_UNIQUED_PAIRED);
    bool esWithoutFlags = (sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_UPDATE ||
        sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_UPDATE_PAIRED ||
        sCollectiveType == HCCL_KERNEL_OP_TYPE_REMOTE_LOOKUP);
    if (esWithFlags || esWithoutFlags) {
        if (esWithFlags) {
            CHK_RET(GetOpIntAttr(node.GetOpDesc(), "flags", flags));
        }
        CHK_RET(GetLookupUpdateWorkspace(node, opMemSize, flags));
        HCCL_INFO("hccl embedding service workspace mem size %llu.", opMemSize);
        return HCCL_SUCCESS;
    }

    ret = HcomOpUtils::ConversionOpDataType(node.GetOpDesc(), sCollectiveType, dataType);
    CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[GetOp][WorkspaceMemSize]op[%s]: get data type failed. ret[%d]",
        sCollectiveType.c_str(), ret), ret);

    ret = GetCountFromOpDesc(node.GetOpDesc(), sCollectiveType, dataType, count);
    CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[GetOp][WorkspaceMemSize]op[%s]: get count failed. ret[%d]",
        sCollectiveType.c_str(), ret), ret);

    ret = GetCommFromOpDesc(node.GetOpDesc(), hcomComm, sGroup);
    CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[GetOp][WorkspaceMemSize]op[%s]: GetCommFromOpDesc failed. ret[%d]",
        sCollectiveType.c_str(), ret), ret);

    ret = SalGetDataTypeSize(dataType, dataTypeSize);
    CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[GetOp][WorkspaceMemSize]op[%s]: get data size failed. ret[%d]",
        sCollectiveType.c_str(), ret), ret);

    s32 rankSize = 0;
    s32 serverNum = 0;
    bool multiModuleDiffDeviceNumMode = false;
    if (!IsOfflineCompilation()) {
        if (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALLV ||
            sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALLVC) {
            CHK_RET(GetRanksize(hcomComm, sGroup, rankSize));
        }

        if (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALLVC) {
            CHK_RET(HcomOpUtils::CheckAlltoAllvcRank(node, hcomComm, sGroup));
        }
    } else {
        // Offline compilation: rank/server layout comes from the group list option instead of a live comm.
        std::string groupListString;
        if (ge::GetThreadLocalContext().GetOption(ge::OPTION_EXEC_HCOM_GROUPLIST, groupListString) !=
            ge::GRAPH_SUCCESS) {
            HCCL_ERROR("offline compile need have OPTION_EXEC_HCOM_GROUPLIST.please make true");
            return HCCL_E_PARA;
        }
        CHK_RET(GetDeviceAndServerNumFromGroupList(groupListString, sGroup, serverNum, rankSize,
            multiModuleDiffDeviceNumMode));
    }

    if ((sCollectiveType == HCCL_KERNEL_OP_TYPE_REDUCESCATTER) || (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLGATHER)) {
        CHK_PRT_RET((!ge::AttrUtils::GetInt(node.GetOpDesc(), HCOM_ATTR_RANK_SIZE, rankSize)),
            HCCL_ERROR("[Get][OpWorkspaceMemSize]op[%s] get "\
            "attr[%s] failed.", sCollectiveType.c_str(), HCOM_ATTR_RANK_SIZE.c_str()), HCCL_E_PARA);

        CHK_PRT_RET((rankSize <= 0), HCCL_ERROR("[Get][OpWorkspaceMemSize]op[%s]: rank_size[%d] should be "\
            "greater than 0.", sCollectiveType.c_str(), rankSize), HCCL_E_PARA);
    }

    u64 getMemSize = 0;
    CHK_RET(GetOpScratchMemSize(node, sCollectiveType, getMemSize, count, dataTypeSize, dataType, rankSize,
        hcomComm, sGroup, serverNum));

    // Op workspace size: scratch size plus a fixed 32KB, rounded up to the alignment.
    opMemSize = HCCL_WORKSPACE_MEM_32_KB;
    opMemSize += getMemSize;
    opMemSize = (opMemSize + alignSize - 1) / alignSize * alignSize;

    HCCL_INFO("workspace memory size: node Name[%s], op[%s], data type[%s], count[%llu], comm[%lld], group[%s],"\
        "rank size[%u], size[%llu], mem size[%llu].", node.GetName().c_str(), sCollectiveType.c_str(),
        GetDataTypeEnumStr(dataType).c_str(), count, hcomComm, sGroup.c_str(), rankSize, getMemSize, opMemSize);

    return HCCL_SUCCESS;
}

/**
 * Work out the extra scratch memory a collective needs on top of the base workspace.
 * ReduceScatter/AllToAll/Broadcast/AllReduce use closed-form sizes; staged
 * AllToAllV(C) queries the HCCL library while temporarily bound to the comm's device.
 */
HcclResult HcomOpsKernelBuilder::GetOpScratchMemSize(ge::Node& node, const std::string &sCollectiveType,
    u64 &opMemSize, u64 count, u32 dataTypeSize, HcclDataType dataType, s32 rankSize, int64_t hcomComm,
    std::string sGroup, s32 serverNum)
{
    // Decide per op type whether extra scratch memory must be requested.
    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_REDUCESCATTER) {
        // ReduceScatter workspace: count * per-element size * rank_size
        opMemSize += count * dataTypeSize * rankSize;
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALL) {
        // alltoall workspace: input mem size
        opMemSize += count * dataTypeSize;
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_BROADCAST) {
        if (count * dataTypeSize <= HCCL_MID_COUNT_32_MB) {
            opMemSize += count * dataTypeSize * HCCL_MEMSIZE_HD_FACTOR;
        }
    } else if ((sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALLV ||
        sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALLVC) &&
        !UseOneLayerAlltoAllv() && (u32)rankSize > HCCL_ALLTOALLV_P2P_SIZE) {
        // Offline compilation does not support HcomAllToAllV here.
        if (IsOfflineCompilation() && sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALLV) {
            HCCL_ERROR("[GetOpScratchMemSize] offline compilation is not support HcomAllToAllV");
            return HCCL_E_PARA;
        }
        s32 deviceLogicId = 0;
        rtContext_t runContext = nullptr;
        if (!IsOfflineCompilation()) {
            // Bind to the device the communicator lives on before querying sizes.
            CHK_RET(HcomOpUtils::GetDevId(hcomComm, sGroup, deviceLogicId));
            CHK_RET(hrtCtxGetCurrent(&runContext));
            CHK_RET(hrtSetDevice(deviceLogicId));
        }
        HcclResult calcRet = HCCL_SUCCESS;
        if (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALLV) {
            calcRet = GetAlltoAllvStagedScratchMemSize(node, hcomComm, sGroup, rankSize, opMemSize);
        } else {
            calcRet = HcomOpUtils::GetAlltoAllvcStagedScratchMemSize(node, hcomComm, sGroup, rankSize, opMemSize);
        }
        if (!IsOfflineCompilation()) {
            // Restore the device/context even when the size calculation failed, so an
            // error path does not leave the thread bound to the wrong device (the
            // original returned early via CHK_RET and skipped this reset on failure).
            CHK_RET(hrtResetDevice(deviceLogicId));
            CHK_RET(hrtCtxSetCurrent(runContext));
        }
        CHK_RET(calcRet);
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLREDUCE) {
        bool isDeterministic = false;
        CHK_RET(GetDeterministic(isDeterministic));
        if (isDeterministic) {
            CHK_RET(HcomOpUtils::GetAllReduceScratchMemSize(sGroup, node, serverNum, rankSize, count, dataType,
                opMemSize));
        } else {
            if (count * dataTypeSize <= HCCL_MID_COUNT_16_MB) {
                opMemSize += count * dataTypeSize * HCCL_MEMSIZE_HD_FACTOR;
            }
        }
    }

    HCCL_INFO("workspace memory size: op[%s], scratch mem size[%llu]", sCollectiveType.c_str(), opMemSize);
    return HCCL_SUCCESS;
}

/**
 * Query the staged AllToAllV scratch memory size from HCCL, using counts/displs
 * taken either from op attrs ("send_counts" present) or from input tensors.
 * Adds the queried size to getMemSize.
 */
HcclResult HcomOpsKernelBuilder::GetAlltoAllvStagedScratchMemSize(ge::Node& node, const int64_t &hcomComm,
    const string &sGroup, u32 rankSize, u64 &getMemSize)
{
    if (rankSize > ALLTOALLV_RANK_MAX_NUM) {
        HCCL_ERROR("[GetAlltoAllvStagedScratchMemSize] Invalid rankSize[%u]", rankSize);
        return HCCL_E_PARA;
    }
    u64 memSize = 0;
    HcclDataType sendType;
    HcclDataType recvType;
    CHK_RET(HcomOpUtils::GetAlltoAllDataType(node.GetOpDesc(), sendType, recvType));

    std::vector<int64_t> sendCounts;
    std::vector<int64_t> sendDispls;
    std::vector<int64_t> recvCounts;
    std::vector<int64_t> recvDispls;
    auto op = node.GetOpDesc();
    // Counts/displs may be carried as op attrs or as input tensors.
    if (ge::AttrUtils::HasAttr(op, "send_counts")) {
        CHK_RET(GetAlltoAllCountsDispl(op, sendCounts, sendDispls, recvCounts, recvDispls));
    } else {
        CHK_RET(GetAlltoAllCountsDispl(node, sendCounts, sendDispls, recvCounts, recvDispls));
    }

    rankSize = sendCounts.size();
    HCCL_INFO("[Get][AlltoAllvStagedScratchMemSize] Get rankSize from sendCounts size[%u]",
        rankSize);
    // rankSize was re-derived from the attr/tensor lists, so the upper bound must be
    // re-checked, and all four lists must agree in length before they are indexed
    // below (the original indexed them unconditionally).
    CHK_PRT_RET(rankSize > ALLTOALLV_RANK_MAX_NUM,
        HCCL_ERROR("[GetAlltoAllvStagedScratchMemSize] Invalid rankSize[%u] from sendCounts", rankSize), HCCL_E_PARA);
    CHK_PRT_RET(sendDispls.size() != rankSize || recvCounts.size() != rankSize || recvDispls.size() != rankSize,
        HCCL_ERROR("[GetAlltoAllvStagedScratchMemSize] counts/displs size mismatch"), HCCL_E_PARA);

    std::vector<u64> sendCountsUnsigned(rankSize, 0);
    std::vector<u64> sendDisplsUnsigned(rankSize, 0);
    std::vector<u64> recvCountsUnsigned(rankSize, 0);
    std::vector<u64> recvDisplsUnsigned(rankSize, 0);

    for (u32 i = 0; i < rankSize; i++) {
        sendCountsUnsigned[i] = sendCounts[i];
        sendDisplsUnsigned[i] = sendDispls[i];
        recvCountsUnsigned[i] = recvCounts[i];
        recvDisplsUnsigned[i] = recvDispls[i];
    }

    if (hcomComm == static_cast<int64_t>(CommNumHcom::COMM_VALUE_DEFAULT)) {
        CHK_RET(HcomGetAlltoAllStagedWorkSpaceMemSize(sGroup.c_str(),
            sendCountsUnsigned.data(), sendDisplsUnsigned.data(), sendType,
            recvCountsUnsigned.data(), recvDisplsUnsigned.data(), recvType,
            memSize));
    } else {
        CHK_RET(HcclCommGraphGetAlltoAllStagedWorkSpaceMemSize(hcomComm,
            sendCountsUnsigned.data(), sendDisplsUnsigned.data(), sendType,
            recvCountsUnsigned.data(), recvDisplsUnsigned.data(), recvType,
            memSize));
    }
    getMemSize += memSize;

    return HCCL_SUCCESS;
}

/**
 * Compute the offline-compilation staged scratch memory size for AllToAllVC
 * from the send counts and the cluster's per-server device layout.
 */
HcclResult GetAlltoALLvcOffStagedScratchMemSize(ge::Node& node, std::vector<int64_t> &sendCounts,
    HcclDataType &sendType, u64 &opMemSize)
{
    s32 deviceNumPerServer = 0;
    u32 meshAggregationRankSize = 0;
    DevType devType = DevType::DEV_TYPE_COUNT;
    RankTable_t clusterInfo;
    std::vector<OffSendRecvInfo> allMeshAggregationSendRecvInfo;
    // (The original also declared an unused "sendLength" vector here; removed.)
    for (u32 i = 0; i < sendCounts.size(); i++) {
        OffSendRecvInfo sendRecvInfo;
        sendRecvInfo.sendLength.resize(sendCounts.size());
        for (u32 j = 0; j < sendCounts.size(); j++) {
            sendRecvInfo.sendLength[j] = sendCounts[j] * SIZE_TABLE[sendType];
        }
        allMeshAggregationSendRecvInfo.push_back(std::move(sendRecvInfo));
    }
    // Number of devices per server.
    CHK_RET(GetClusterInfoAndDeviceNum(clusterInfo, deviceNumPerServer));

    // Current rank id of this alltoallvc op.
    u32 alltoallvcRank = 0;
    bool bRet = ge::AttrUtils::GetInt(node.GetOpDesc(), "rank", alltoallvcRank);
    CHK_PRT_RET(!bRet, HCCL_ERROR("errNo[0x%016llx] get alltoallvc rank failed. no \"rank\" in opDesc", \
        HCOM_ERROR_CODE(HCCL_E_PARA)), HCCL_E_PARA);
    CHK_RET(GetOffDeviceTypeWithoutDev(devType));
    if (devType == DevType::DEV_TYPE_910) {
        if (deviceNumPerServer < HCCL_DEVICE_NUM_TWO) {
            meshAggregationRankSize = 1;
        } else {
            meshAggregationRankSize = deviceNumPerServer / DEFAULT_SERVER_NUM;
        }
    } else {
        meshAggregationRankSize = deviceNumPerServer;
    }
    // Guard the division/modulo below: deviceNumPerServer can be 0 in the non-910
    // branch, which the original would divide by.
    CHK_PRT_RET(meshAggregationRankSize == 0,
        HCCL_ERROR("[GetAlltoALLvcOffStagedScratchMemSize] meshAggregationRankSize is zero"), HCCL_E_PARA);

    opMemSize = 0;
    u32 meshAggregationIndex = alltoallvcRank / meshAggregationRankSize;
    u32 meshAggregationRankBegin = meshAggregationIndex * meshAggregationRankSize;
    // Bounds guard: all indices below must stay inside the info built from sendCounts
    // (the original indexed without checking against clusterInfo.rankNum).
    CHK_PRT_RET(clusterInfo.rankNum > allMeshAggregationSendRecvInfo.size() ||
        meshAggregationRankBegin + meshAggregationRankSize > allMeshAggregationSendRecvInfo.size(),
        HCCL_ERROR("[GetAlltoALLvcOffStagedScratchMemSize] rank info out of range"), HCCL_E_PARA);
    for (u32 infoIndex = alltoallvcRank % meshAggregationRankSize; infoIndex < clusterInfo.rankNum;
        infoIndex += meshAggregationRankSize) {
        for (u32 k = meshAggregationRankBegin; k < meshAggregationRankBegin + meshAggregationRankSize; k++) {
            opMemSize += allMeshAggregationSendRecvInfo[k].sendLength[infoIndex];
        }
    }
    HCCL_INFO("[GetAlltoALLvcOffStagedScratchMemSize] get offline Staged Scratch MemSize[%llu]", opMemSize);
    return HCCL_SUCCESS;
}

// Resolve the id of the root graph that ultimately owns this node.
HcclResult HcomOpsKernelBuilder::GetRootGraphID(const ge::Node& node, uint32_t &graphId)
{
    const std::string nodeName = node.GetOpDesc()->GetName();

    auto computeGraph = node.GetOwnerComputeGraph();
    CHK_PRT_RET((!computeGraph), HCCL_ERROR("[Get][RootGraphID]node[%s] get owner graph failed",
        nodeName.c_str()), HCCL_E_PARA);

    // Walk up from the owner graph to the outermost (root) graph.
    auto topGraph = ge::GraphUtils::FindRootGraph(computeGraph);
    CHK_PRT_RET((!topGraph), HCCL_ERROR("[Get][RootGraphID]node[%s] get root graph failed",
        nodeName.c_str()), HCCL_E_PARA);

    graphId = topGraph->GetGraphID();
    return HCCL_SUCCESS;
}

// 返回HCCL的入参：comm and group
// Returns the HCCL input args: comm handle and/or group name.
// If the op carries no "comm" attr, or comm is the default value, the group name is used instead.
HcclResult HcomOpsKernelBuilder::GetCommFromOpDesc(const ge::OpDescPtr &op, int64_t &hcomComm, std::string &sGroup)
{
    if (!ge::AttrUtils::HasAttr(op, "comm")) {
        // No comm handle on the op: fall back to the group name.
        CHK_RET(HcomOpUtils::GetGroupFromOpDesc(op, sGroup));
        HCCL_INFO("%s get group[%s] success", __func__, sGroup.c_str());
        return HCCL_SUCCESS;
    }

    if (!ge::AttrUtils::GetInt(op, "comm", hcomComm)) {
        HCCL_ERROR("[GetComm][OpDesc]errNo[0x%016llx]: get comm failed. get \"comm\" from opDesc failed", \
            HCOM_ERROR_CODE(HCCL_E_PARA));
        return HCCL_E_PARA;
    }
    if (hcomComm == static_cast<int64_t>(CommNumHcom::COMM_VALUE_DEFAULT)) {
        // comm == default means "unset": the group name identifies the communicator.
        HCCL_INFO("[HcomOpsKernelBuilder]get comm equal to 0, should get group.");
        CHK_RET(HcomOpUtils::GetGroupFromOpDesc(op, sGroup));
    } else {
        HCCL_INFO("[HcclCommGraph][Type]get comm name[%lld] success.", hcomComm);
    }
    return HCCL_SUCCESS;
}

/**
 * Configure GE memory-related attributes on the node: reference/input-reuse for
 * broadcast, workspace-reuse for embedding-service ops, fixed-address priority,
 * and (for 310P P2P scenarios) input/output/workspace memory types.
 * @param node            op node whose OpDesc attributes are set
 * @param sCollectiveType op type string, drives the per-op branches
 * @param opMemSize       workspace size; only used to decide whether to set a workspace mem type
 */
HcclResult HcomOpsKernelBuilder::SetOpMemAttr(ge::Node& node, const std::string &sCollectiveType, const u64 &opMemSize)
{
    bool bRet =  false;

    // ATTENTION: ops whose IR defines input/output under the same name (see HcomRemoteRefRead)
    //   implicitly get the reference attribute set to TRUE. The code below only configures
    //   memory reuse for ops whose IR uses different input/output names but still needs reuse.
    //   For similar future ops, prefer giving input/output the same name in the IR definition.
    // Broadcast uses the same memory for input and output, so its Ref attribute is true.
    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_BROADCAST) {
        bRet = ge::AttrUtils::SetBool(node.GetOpDesc(), ge::ATTR_NAME_REFERENCE, true);
        CHK_PRT_RET(!bRet, HCCL_ERROR("[Set][OpMemAttr]errNo[0x%016llx] op[%s]: set  reference attr[%d] to"
            "OpDesc failed.", HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str(), true), HCCL_E_PARA);
        bRet = node.GetOpDesc()->UpdateOutputName(node.GetOpDesc()->GetAllInputName());
        CHK_PRT_RET(!bRet, HCCL_ERROR("[Set][OpMemAttr]errNo[0x%016llx] op[%s]: update output name failed.", \
            HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str()), HCCL_E_PARA);
        HCCL_INFO("node[%s] set attr [reference]: %u", node.GetName().c_str(), true);

        // With the reference attribute set, make each output reuse its input's memory
        // so GE allocates less.
        for (uint32_t i = 0; i < static_cast<uint32_t>(node.GetOpDesc()->GetOutputsSize()); i++) {
            auto outDescPtr = node.GetOpDesc()->MutableOutputDesc(i);
            CHK_SMART_PTR_NULL(outDescPtr);
            ge::TensorUtils::SetReuseInput(*outDescPtr, true);
            ge::TensorUtils::SetReuseInputIndex(*outDescPtr, i);
        }
    } else {
        HCCL_INFO("node[%s] set attr [reference]: skip", node.GetName().c_str());
    }

    // Embedding-service ops: forbid workspace reuse between ops.
    if (ES_OPS.find(sCollectiveType) != ES_OPS.end()) {
        std::vector<bool> workspaceReuseFlag(1, false);
        bRet = ge::AttrUtils::SetListBool(node.GetOpDesc(), "workspace_reuse_flag", workspaceReuseFlag);
        CHK_PRT_RET(!bRet, HCCL_ERROR("[Set][OpMemAttr]errNo[0x%016llx] op[%s]: set workspace_reuse_flag[%d] to"
            "OpDesc failed.", HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str(), false), HCCL_E_PARA);
        HCCL_INFO("node[%s] set attr [workspace_reuse_flag]: false", node.GetName().c_str());
    }

    bRet = ge::AttrUtils::SetBool(node.GetOpDesc(), ge::ATTR_NAME_IS_FIXED_ADDR_PRIOR, true);
    CHK_PRT_RET(!bRet, HCCL_ERROR("[Set][OpMemAttr]errNo[0x%016llx] op[%s]: set is_fixed_addr_prior[%d] to"
        "OpDesc failed.", HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str(), true), HCCL_E_PARA);
    HCCL_INFO("node[%s] set attr [is_fixed_addr_prior]: %d", node.GetName().c_str(), true);

    u32 heterogeneousFlag;
    CHK_RET(hrtGetIsHeterogenous(heterogeneousFlag));
    if (heterogeneousFlag == 0) {
        // If a rankMapping is available, use the offline-compilation path to get the deviceType.
        DevType devType;
        string rankMappingString;
        s32 groupSize = 0;
        std::vector<u32> groupRanks;
        const u32 NUM_SIZE_TWO = 2;
        std::string sGroup;
        bool isHcomInit = false;
        CHK_RET(HcomOpUtils::GetGroupFromOpDesc(node.GetOpDesc(), sGroup));
        bool withRemoteOp = (sCollectiveType == HCCL_KERNEL_OP_TYPE_REMOTE_LOOKUP ||
            sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_UPDATE ||
            sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP ||
            sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_UPDATE_PAIRED ||
            sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_PAIRED ||
            sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_UNIQUED_PAIRED);
        // True when device type can/should be resolved without a live implementation:
        // offline build, remote (ES) ops, a group list option, or a backlogged group.
        bool withoutImplCompile = IsOfflineCompilation() || withRemoteOp ||
            (ge::GetThreadLocalContext().GetOption(ge::OPTION_EXEC_HCOM_GROUPLIST, rankMappingString) ==
            ge::GRAPH_SUCCESS) ||
            ((HcomGetbackloggedByGroup(sGroup.c_str(), groupRanks, groupSize) == HCCL_SUCCESS) && (groupSize != 0));
        if (IsOfflineCompilation()) {
            CHK_RET(GetOffDeviceTypeWithoutDev(devType));
            if (devType == DevType::DEV_TYPE_310P3 || devType == DevType::DEV_TYPE_310P1) {
                // Offline compilation in a heterogeneous scenario:
                // ATTR_NAME_INPUT_MEM_TYPE_LIST does not need to be set.
                HCCL_INFO("cur is offline heterogeneous");
                return HCCL_SUCCESS;
            }
        } else if (ge::GetThreadLocalContext().GetOption(ge::OPTION_EXEC_HCOM_RANK_MAPPING, rankMappingString) ==
            ge::GRAPH_SUCCESS) {
            CHK_RET(GetOffDeviceTypeWithoutDev(devType));
        } else {
            CHK_RET(GetHcclCommInitStatus(isHcomInit));
            if (isHcomInit || withoutImplCompile) {
                devType = HcomGetDeviceType();
            } else {
                CHK_RET(GetDeviceType(sGroup.c_str(), devType));
            }
        }
        if (devType == DevType::DEV_TYPE_310P3 || devType == DevType::DEV_TYPE_310P1) {
            u32 numHccsLink = 0;
            u32 rankSize = 0;
            if (!withoutImplCompile) {
                CHK_RET(GetRankSize(sGroup.c_str(), &rankSize));
                CHK_RET(GetHccsLinkNum(sGroup.c_str(), numHccsLink));
            }
            // Only when HELPER_RES_FILE_PATH exists is this the 1951 SoC form factor;
            // the env var's presence distinguishes SoC from the plug-in board form.
            bool isRemoteBoard = (SalGetEnv("HELPER_RES_FILE_PATH") == "EmptyString") && withRemoteOp;
            HCCL_INFO("[Set][OpMemAttr]: rankSize is [%u], numHccsLink is [%u] isRemoteBoard[%u]",
                rankSize, numHccsLink, isRemoteBoard);
            // For the 310P duo-card 2P scenario ordinary memory is used and nothing extra is
            // needed; all other scenarios must request P2P memory.
            // Board-form inference does not need P2P memory either.
            if ((withoutImplCompile || !(rankSize == NUM_SIZE_TWO  && numHccsLink == NUM_SIZE_TWO)) && !isRemoteBoard) {
                vector<int64_t> memTypeInput(node.GetOpDesc()->GetInputsSize(), RT_MEMORY_P2P_DDR);
                vector<int64_t> memTypeOutput(node.GetOpDesc()->GetOutputsSize(), RT_MEMORY_P2P_DDR);
                vector<int64_t> memTypeWorkSpace(1, RT_MEMORY_P2P_DDR);
                bool ret = ge::AttrUtils::SetListInt(node.GetOpDesc(), ge::ATTR_NAME_INPUT_MEM_TYPE_LIST, memTypeInput);
                CHK_PRT_RET(!ret,
                    HCCL_ERROR("[Set][OpMemAttr]errNo[0x%016llx]: Set input mem addr failed. op[%s]",
                    HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str()), HCCL_E_PARA);

                ret = ge::AttrUtils::SetListInt(node.GetOpDesc(), ge::ATTR_NAME_OUTPUT_MEM_TYPE_LIST, memTypeOutput);
                CHK_PRT_RET(!ret,
                    HCCL_ERROR("[Set][OpMemAttr]errNo[0x%016llx]: Set output mem addr failed. op[%s]",
                    HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str()), HCCL_E_PARA);

                if (opMemSize != 0) {
                    ret = ge::AttrUtils::SetListInt(node.GetOpDesc(), ge::ATTR_NAME_WORKSPACE_TYPE_LIST,
                        memTypeWorkSpace);
                    CHK_PRT_RET(!ret,
                        HCCL_ERROR("[Set][OpMemAttr]errNo[0x%016llx]: Set workspace mem addr failed. op[%s]",
                        HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str()), HCCL_E_PARA);
                }
                HCCL_INFO("[Set][OpMemAttr] Set memType RT_MEMORY_P2P_DDR");
            }
        }
    } else {
        // Helper scenario:
        // GE does not distinguish DDR/HBM; in practice 310 uses DDR and 910 uses HBM.
    }

    return HCCL_SUCCESS;
}

// Set the atomic input index attribute; only allreduce and reduce ops need it.
HcclResult HcomOpsKernelBuilder::SetOpAtomicInputIndex(ge::Node& node, const std::string &sCollectiveType)
{
    bool needAtomic = (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLREDUCE) ||
        (sCollectiveType == HCCL_KERNEL_OP_TYPE_REDUCE);
    if (!needAtomic) {
        return HCCL_SUCCESS;
    }
    const vector<int64_t> atomicInputIndex{ -1 }; // -1 acts as the marker value
    bool setOk = ge::AttrUtils::SetListInt(node.GetOpDesc(), "atomic_input_index", atomicInputIndex);
    if (!setOk) {
        HCCL_ERROR("[Set][OpAtomicInputIndex]errNo[0x%016llx]: set op[%s] atomic index failed.",
            HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str());
        return HCCL_E_PARA;
    }
    return HCCL_SUCCESS;
}

/**
 * Derive the HCCL element count for an op from its tensor sizes.
 * For HcomReceive the count comes from the declared output shape; for all other
 * ops it is the sum of (aligned) input sizes divided by the data type size.
 * @param op              op descriptor to read tensors/attrs from
 * @param sCollectiveType op type string, selects the sizing rule
 * @param dataType        HCCL data type used to convert bytes to elements
 * @param count           [out] element count
 */
HcclResult HcomOpsKernelBuilder::GetCountFromOpDesc(const ge::OpDescPtr &op, const std::string &sCollectiveType, \
    HcclDataType dataType, u64 &count)
{
    HcclResult ret;
    u64 totalSize = 0;
    u32 dataTypeSize = 0;
    CHK_RET(SalGetDataTypeSize(dataType, dataTypeSize));
    CHK_PRT_RET(dataTypeSize == 0, HCCL_ERROR("[Get][CountFromOpDesc]dataType size is zero."), HCCL_E_PARA);

    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_RECEIVE) {
        // Receive has no input tensor; size comes from the shape attribute instead.
        ret = GetHcomReceiveOpOutputSize(op, dataTypeSize, totalSize);
        CHK_PRT_RET(ret != HCCL_SUCCESS,
            HCCL_ERROR("[Get][Count]get op[%s] output size failed. ret[%d]", sCollectiveType.c_str(), ret), ret);
    } else {
        for (u64 i  = 0; i < op->GetInputsSize(); i++) {
            u64 blockSize;
            CHK_SMART_PTR_NULL(op->GetInputDescPtr(i));
            if (sCollectiveType == HCCL_KERNEL_OP_TYPE_REDUCESCATTER) {
                // For ReduceScatter, count is the number of OUTPUT elements:
                // count = (input size / rank_size) / dataTypeSize
                s32 rankSize = 0;
                CHK_PRT_RET((!ge::AttrUtils::GetInt(op, HCOM_ATTR_RANK_SIZE, rankSize)),
                    HCCL_ERROR("[Get][Count]op[%s] get "\
                    "attr[%s] failed.", sCollectiveType.c_str(), HCOM_ATTR_RANK_SIZE.c_str()), HCCL_E_PARA);

                CHK_PRT_RET((rankSize <= 0), HCCL_ERROR("[Get][Count]errNo[0x%016llx] in reducescatter op,"
                    "rank_size[%d] should be greater than 0.", HCOM_ERROR_CODE(HCCL_E_PARA), rankSize), HCCL_E_PARA);

                u64 shapeSize = 0;
                // A scalar shape counts as one element.
                if ((u64)op->GetInputDescPtr(i)->GetShape().IsScalar()) {
                    shapeSize = 1;
                } else {
                    shapeSize = (u64)op->GetInputDescPtr(i)->GetShape().GetShapeSize();
                }
                CHK_PRT_RET((shapeSize > INVALID_U64 / dataTypeSize), HCCL_ERROR("[Get][Count]op[%s] shape size[%llu]"
                    "is overflow.", sCollectiveType.c_str(), shapeSize), HCCL_E_PARA);
                // Fused reduce-scatter: every input tensor of the op is padded by the
                // framework. MindSpore padding rule: (size + 32 - 1 + 512) / 512 * 512.
                // Therefore each input requests an extra 1024 bytes of workspace memory.
                const u32 paddingLen = 1024; // extra 1024 bytes of workspace per input
                blockSize = (shapeSize * dataTypeSize + paddingLen) / rankSize;
            } else {
                const u32 alignSize = 512; // align to 512 bytes
                int64_t inputSize = 0;
                CHK_PRT_RET((ge::GRAPH_SUCCESS != ge::TensorUtils::GetSize(*op->GetInputDescPtr(i), inputSize)), \
                    HCCL_ERROR("[Get][Count]errNo[0x%016llx] get workspace bytes failed. get size from TensorDesc"
                        "failed, op : %s"\
                        ", input index : %llu", HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str(), i), HCCL_E_PARA);
                CHK_PRT_RET(((u64)inputSize > INVALID_U64 - alignSize), HCCL_ERROR("op[%s] input size[%llu] is "\
                    "overflow.", sCollectiveType.c_str(), (u64)inputSize), HCCL_E_PARA);
                blockSize = ((u64)inputSize + alignSize - 1) / alignSize * alignSize;
            }
            totalSize = totalSize + blockSize;
        }
    }
    count = totalSize / dataTypeSize;
    HCCL_INFO("op[%s] get count[%llu] success.", sCollectiveType.c_str(), count);
    return HCCL_SUCCESS;
}

// Compute the (512-byte aligned) output byte size of a HcomReceive op from its shape attribute.
HcclResult HcomOpsKernelBuilder::GetHcomReceiveOpOutputSize(const ge::OpDescPtr &op, u32 dataTypeSize, \
    u64 &outputSize)
{
    CHK_PRT_RET(dataTypeSize == 0, HCCL_ERROR("[Get][ReceiveOpOutputSize]dataType size is zero."), HCCL_E_PARA);

    const std::string sCollectiveType = op->GetType();
    CHK_PRT_RET((!ge::AttrUtils::HasAttr(op, HCOM_ATTR_SHAPE)), \
        HCCL_ERROR("[Get][ReceiveOpOutputSize]op[%s] has no attr[%s].", sCollectiveType.c_str(),
            HCOM_ATTR_SHAPE.c_str()), HCCL_E_PARA);

    vector<int64_t> dims;
    CHK_PRT_RET((!ge::AttrUtils::GetListInt(op, HCOM_ATTR_SHAPE, dims)), \
        HCCL_ERROR("[Get][ReceiveOpOutputSize]op[%s] get attr[%s] failed.", sCollectiveType.c_str(),
            HCOM_ATTR_SHAPE.c_str()), HCCL_E_PARA);

    // An empty dim list denotes a scalar, which counts as one element.
    u64 elemCount = dims.empty() ? 1 : static_cast<u64>(ge::Shape(dims).GetShapeSize());

    const u32 alignSize = 512; // align to 512 bytes
    CHK_PRT_RET((elemCount > (INVALID_U64 - alignSize) / dataTypeSize), \
        HCCL_ERROR("[Get][ReceiveOpOutputSize]op[%s] shape size[%llu] is overflow.", sCollectiveType.c_str(),
            elemCount), HCCL_E_PARA);
    outputSize = (static_cast<u64>(elemCount * dataTypeSize) + alignSize - 1) / alignSize * alignSize;
    return HCCL_SUCCESS;
}

// 返回HCCL的入参:srTag
// Returns the HCCL input arg srTag; required for send/receive ops, 0 otherwise.
HcclResult HcomOpsKernelBuilder::GetSrTagFromDesc(const ge::OpDescPtr &op, u32 &srTag,
    const std::string &sCollectiveType)
{
    srTag = 0;
    bool isSendRecv = (sCollectiveType == HCCL_KERNEL_OP_TYPE_RECEIVE) ||
        (sCollectiveType == HCCL_KERNEL_OP_TYPE_SEND);
    if (isSendRecv) {
        if (!ge::AttrUtils::HasAttr(op, "sr_tag")) {
            HCCL_ERROR("[Get][SrTag]errNo[0x%016llx] op[%s]: get srTag failed. no \"sr_tag\" in opDesc", \
                HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str());
            return HCCL_E_PARA;
        }
        if (ge::AttrUtils::GetInt(op, "sr_tag", srTag) == false) {
            HCCL_ERROR("[Get][SrTag]errNo[0x%016llx] op[%s]: get srTag failed. get \"sr_tag\" from opDesc failed",
                HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str());
            return HCCL_E_PARA;
        }
    }
    HCCL_INFO("get srTag[%u] success.", srTag);
    return HCCL_SUCCESS;
}

// Read the original-graph shape type; absent attribute defaults to known-shape.
HcclResult HcomOpsKernelBuilder::GetOriginalGraphShapeTypeFromDesc(const ge::OpDescPtr &op, u32 &shapeType)
{
    if (!ge::AttrUtils::HasAttr(op, ORIGINAL_GRAPH_SHAPE_TYPE)) {
        // Attribute absent: treat the graph as known-shape by default.
        shapeType = (u32)ORIGINAL_GRAPH_KNOWNSHAPE_TYPE;
        HCCL_INFO("get shapeType [%u] success.", shapeType);
        return HCCL_SUCCESS;
    }
    bool gotAttr = ge::AttrUtils::GetInt(op, ORIGINAL_GRAPH_SHAPE_TYPE, shapeType);
    if (!gotAttr) {
        HCCL_ERROR("[Get][OriginalGraphShapeType]errNo[0x%016llx]: get shapeType failed. get \"shapeType\" from"
            "opDesc failed", HCOM_ERROR_CODE(HCCL_E_PARA));
        return HCCL_E_PARA;
    }
    HCCL_INFO("get shapeType [%u] success.", shapeType);
    return HCCL_SUCCESS;
}

// 返回HCCL的入参：destRank
// Returns the HCCL input arg destRank; required for HcomSend, 0 otherwise.
HcclResult HcomOpsKernelBuilder::GetDestRankFromDesc(const ge::OpDescPtr &op, u32 &destRank,
    const std::string &sCollectiveType)
{
    destRank = 0;
    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_SEND) {
        if (!ge::AttrUtils::HasAttr(op, "dest_rank")) {
            HCCL_ERROR("[Get][DestRank]errNo[0x%016llx] op[%s]: get dest rank failed. no \"dest_rank\" in opDesc", \
                HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str());
            return HCCL_E_PARA;
        }
        if (ge::AttrUtils::GetInt(op, "dest_rank", destRank) == false) {
            HCCL_ERROR("[Get][DestRank]errNo[0x%016llx] op[%s]: get dest rank failed. get \"dest_rank\" from"
                "opDesc failed", HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str());
            return HCCL_E_PARA;
        }
    }
    HCCL_INFO("get dest rank[%u] success.", destRank);
    return HCCL_SUCCESS;
}

// Returns the HCCL input parameter: srcRank (only meaningful for Receive ops).
HcclResult HcomOpsKernelBuilder::GetSrcRankFromDesc(const ge::OpDescPtr &op, u32 &srcRank,
    const std::string &sCollectiveType)
{
    srcRank = 0;
    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_RECEIVE) {
        if (ge::AttrUtils::HasAttr(op, "src_rank")) {
            if (!ge::AttrUtils::GetInt(op, "src_rank", srcRank)) {
                // Fix: this branch means GetInt failed, not that the attribute is absent;
                // the old message was copy-pasted from the "no attr" branch below.
                HCCL_ERROR("[Get][SrcRank]errNo[0x%016llx] op[%s]: get src rank failed. get \"src_rank\" from "
                    "opDesc failed", HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str());
                return HCCL_E_PARA;
            }
        } else {
            HCCL_ERROR("[Get][SrcRank]errNo[0x%016llx] op[%s]: get src rank failed. no \"src_rank\" in opDesc", \
                HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str());
            return HCCL_E_PARA;
        }
    }
    HCCL_INFO("get src rank[%u] success.", srcRank);
    return HCCL_SUCCESS;
}

// Tag remote-access ops with the RDMA_HBM memory type so GE places the
// corresponding input/output buffers in RDMA-reachable HBM.
HcclResult HcomOpsKernelBuilder::MarkRemoteAccessMemoryType(ge::Node& node)
{
    // Cache the descriptor: the original fetched it on every use.
    ge::OpDescPtr opDesc = node.GetOpDesc();
    // Fix: the concatenated literals previously produced "nullptr." where "null ptr." was meant.
    CHK_PRT_RET(!opDesc, HCCL_ERROR("[Mark][RemoteAccessMemoryType]errNo[0x%016llx] GetOpDesc failed. null "
        "ptr.", HCOM_ERROR_CODE(HCCL_E_PTR)), HCCL_E_PTR);

    const std::string sCollectiveType = opDesc->GetType();
    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_REMOTE_READ) {
        // RemoteRead writes its result locally -> mark the output side.
        if (!ge::AttrUtils::SetInt(opDesc, "_output_memory_type", ge::RDMA_HBM)) {
            HCCL_ERROR("[Mark][RemoteAccessMemoryType]errNo[0x%016llx] op[%s]: set MEMORY TYPE to OpDesc failed.",
                       HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str());
            return HCCL_E_PARA;
        }
        HCCL_INFO("Mark HcomRemoteRead output memory RDMA_HBM.");
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_REMOTE_WRITE ||
               sCollectiveType == HCCL_KERNEL_OP_TYPE_REMOTE_SCATTER_WRITE) {
        // RemoteWrite reads local data to push out -> mark the input side.
        if (!ge::AttrUtils::SetInt(opDesc, "_input_memory_type", ge::RDMA_HBM)) {
            HCCL_ERROR("[Mark][RemoteAccessMemoryType]errNo[0x%016llx] op[%s]: set MEMORY TYPE to OpDesc failed.",
                       HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str());
            return HCCL_E_PARA;
        }
        HCCL_INFO("Mark HcomRemoteWrite input memory RDMA_HBM.");
    }
    return HCCL_SUCCESS;
}

// Fetch the four alltoallv layout attributes (counts/displacements for both
// directions) from the op descriptor. Fails with HCCL_E_PARA on the first
// attribute that cannot be read.
HcclResult HcomOpsKernelBuilder::GetAlltoAllCountsDispl(const ge::OpDescPtr &op, std::vector<int64_t> &sendCounts,
    std::vector<int64_t> &sendDispls, std::vector<int64_t> &recvCounts, std::vector<int64_t> &recvDispls)
{
    struct AttrSpec {
        const char *name;               // attribute key on the op descriptor
        std::vector<int64_t> *dst;      // output vector to fill
    };
    const AttrSpec attrs[] = {
        {"send_counts", &sendCounts},
        {"send_displacements", &sendDispls},
        {"recv_counts", &recvCounts},
        {"recv_displacements", &recvDispls},
    };

    for (const AttrSpec &attr : attrs) {
        CHK_PRT_RET((!ge::AttrUtils::GetListInt(op, attr.name, *attr.dst)),
            HCCL_ERROR("[Set][AlltoAllVParams]op[%s] get attr[%s] failed.",
                HCCL_KERNEL_OP_TYPE_ALLTOALLV.c_str(), attr.name),
            HCCL_E_PARA);
    }
    return HCCL_SUCCESS;
}

// Node-based variant: when the counts/displacements are graph inputs rather
// than const attributes, they are carried on the node as a tensor list named
// "alltoallvInputVec" in the fixed order send_counts, send_displacements,
// recv_counts, recv_displacements.
HcclResult HcomOpsKernelBuilder::GetAlltoAllCountsDispl(ge::Node& node, std::vector<int64_t> &sendCounts,
    std::vector<int64_t> &sendDispls, std::vector<int64_t>& recvCounts, std::vector<int64_t>& recvDispls)
{
    auto op = ge::OpDescUtils::CreateOperatorFromNode(node.shared_from_this());

    std::vector<ge::ConstGeTensorPtr> alltoallvInputVec;
    const auto& op_desc = ge::OpDescUtils::GetOpDescFromOperator(op);
    // Fix: the return value was silently ignored; a missing attribute now fails fast.
    CHK_PRT_RET(!ge::AttrUtils::GetListTensor(op_desc, "alltoallvInputVec", alltoallvInputVec),
        HCCL_ERROR("get attr[alltoallvInputVec] failed"), HCCL_E_PARA);

    u32 vecSize = ALLTOALLV_INPUT_VEC_SIZE;
    // Fix: the message hard-coded "4" while the code compares against the named
    // constant; report both expected and actual sizes instead.
    CHK_PRT_RET(alltoallvInputVec.size() != vecSize,
        HCCL_ERROR("alltoallvInputVec size[%zu] must be equal to %u", alltoallvInputVec.size(), vecSize),
        HCCL_E_PARA);

    auto sendCountsTensor = alltoallvInputVec[0U].get();
    auto sendDisplsTensor = alltoallvInputVec[1U].get();
    auto recvCountsTensor = alltoallvInputVec[2U].get();
    auto recvDisplsTensor = alltoallvInputVec[3U].get();

    CHK_RET(HcomOpUtils::GetVectorFromTensor(sendCountsTensor, sendCounts));
    CHK_RET(HcomOpUtils::GetVectorFromTensor(sendDisplsTensor, sendDispls));
    CHK_RET(HcomOpUtils::GetVectorFromTensor(recvCountsTensor, recvCounts));
    CHK_RET(HcomOpUtils::GetVectorFromTensor(recvDisplsTensor, recvDispls));

    return HCCL_SUCCESS;
}

// Read the reduce-scatter-v counts/displacements carried on the node as a
// tensor list named "vInputVec" in the fixed order recvCount, sendCounts,
// sendDispls.
HcclResult HcomOpsKernelBuilder::GetReduceScatterVCountsDispl(ge::Node& node, std::vector<int64_t> &sendCounts,
    std::vector<int64_t> &sendDispls, std::vector<int64_t>& recvCount)
{
    auto op = ge::OpDescUtils::CreateOperatorFromNode(node.shared_from_this());

    std::vector<ge::ConstGeTensorPtr> vInputVec;
    const auto& op_desc = ge::OpDescUtils::GetOpDescFromOperator(op);
    // Fix: check the return value instead of silently ignoring a missing attribute.
    CHK_PRT_RET(!ge::AttrUtils::GetListTensor(op_desc, "vInputVec", vInputVec),
        HCCL_ERROR("get attr[vInputVec] failed"), HCCL_E_PARA);

    // Fix: the original indexed [0..2] without any size check, reading out of
    // bounds when the attribute was missing or short (the alltoallv variant checks).
    const u32 expectedVecSize = 3; // recvCount + sendCounts + sendDispls
    CHK_PRT_RET(vInputVec.size() != expectedVecSize,
        HCCL_ERROR("vInputVec size[%zu] must be equal to %u", vInputVec.size(), expectedVecSize),
        HCCL_E_PARA);

    auto recvCountTensor = vInputVec[0U].get();
    auto sendCountsTensor = vInputVec[1U].get();
    auto sendDisplsTensor = vInputVec[2U].get();

    CHK_RET(HcomOpUtils::GetVectorFromTensor(recvCountTensor, recvCount));
    CHK_RET(HcomOpUtils::GetVectorFromTensor(sendCountsTensor, sendCounts));
    CHK_RET(HcomOpUtils::GetVectorFromTensor(sendDisplsTensor, sendDispls));

    return HCCL_SUCCESS;
}

// Read the all-gather-v counts/displacements carried on the node as a tensor
// list named "vInputVec" in the fixed order recvCounts, recvDispls, sendCount.
HcclResult HcomOpsKernelBuilder::GetAllGatherVCountsDispl(ge::Node& node, std::vector<int64_t> &sendCount,
    std::vector<int64_t> &recvCounts, std::vector<int64_t>& recvDispls)
{
    auto op = ge::OpDescUtils::CreateOperatorFromNode(node.shared_from_this());

    std::vector<ge::ConstGeTensorPtr> vInputVec;
    const auto& op_desc = ge::OpDescUtils::GetOpDescFromOperator(op);
    // Fix: check the return value instead of silently ignoring a missing attribute.
    CHK_PRT_RET(!ge::AttrUtils::GetListTensor(op_desc, "vInputVec", vInputVec),
        HCCL_ERROR("get attr[vInputVec] failed"), HCCL_E_PARA);

    // Fix: the original indexed [0..2] without any size check, reading out of
    // bounds when the attribute was missing or short.
    const u32 expectedVecSize = 3; // recvCounts + recvDispls + sendCount
    CHK_PRT_RET(vInputVec.size() != expectedVecSize,
        HCCL_ERROR("vInputVec size[%zu] must be equal to %u", vInputVec.size(), expectedVecSize),
        HCCL_E_PARA);

    auto recvCountsTensor = vInputVec[0U].get();
    auto recvDisplsTensor = vInputVec[1U].get();
    auto sendCountTensor = vInputVec[2U].get();

    CHK_RET(HcomOpUtils::GetVectorFromTensor(sendCountTensor, sendCount));
    CHK_RET(HcomOpUtils::GetVectorFromTensor(recvCountsTensor, recvCounts));
    CHK_RET(HcomOpUtils::GetVectorFromTensor(recvDisplsTensor, recvDispls));

    return HCCL_SUCCESS;
}

// Fill the alltoallv private-def buffer: data types first, then counts and
// displacements. Dispatches on alltoallv vs alltoallvc; other op types are a no-op.
HcclResult HcomOpsKernelBuilder::SetAlltoAllVParams(const ge::Node& node,
    HCCL_ALLTOALLV_KERNEL_INFO_PRIVATE_DEF &privateDefBuf, const std::string &sCollectiveType)
{
    const auto opDesc = node.GetOpDesc();
    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALLV) {
        CHK_RET(SetAlltoAllVDataTypeToDef(opDesc, privateDefBuf));
        CHK_RET(CopyAlltoAllVParamsToDef(node, privateDefBuf));
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALLVC) {
        CHK_RET(SetAlltoAllVCDataTypeToDef(opDesc, privateDefBuf));
        CHK_RET(CopyAlltoAllVCParamsToDef(node, privateDefBuf));
    }
    return HCCL_SUCCESS;
}

// Thin wrapper: all reduce-scatter-v parameter handling lives in the copy helper.
HcclResult HcomOpsKernelBuilder::SetReduceScatterVParams(const ge::Node& node,
    HCCL_REDUCESCATTERV_KERNEL_INFO_PRIVATE_DEF &privateDefBuf)
{
    // CHK_RET is kept (rather than a direct return) for its failure logging.
    CHK_RET(CopyReduceScatterVParamsToDef(node, privateDefBuf));
    return HCCL_SUCCESS;
}

// Pull the reduce-scatter-v counts/displacements off the node and copy them
// into the fixed-size arrays of the private-def buffer. memcpy_s rejects any
// source longer than ALLTOALLV_RANK_MAX_NUM entries.
HcclResult HcomOpsKernelBuilder::CopyReduceScatterVParamsToDef(const ge::Node& node,
    HCCL_REDUCESCATTERV_KERNEL_INFO_PRIVATE_DEF &privateDefBuf)
{
    std::vector<int64_t> sendCounts;
    std::vector<int64_t> sendDispls;
    std::vector<int64_t> recvCount;
    CHK_RET(GetReduceScatterVCountsDispl(const_cast<ge::Node&>(node), sendCounts, sendDispls, recvCount));

    // Common destination capacity for all three fixed-size arrays.
    const u64 destMax = ALLTOALLV_RANK_MAX_NUM * sizeof(u64);
    CHK_SAFETY_FUNC_RET(memcpy_s(privateDefBuf.paramsInfo.sendCounts, destMax,
        sendCounts.data(), sendCounts.size() * sizeof(u64)));
    CHK_SAFETY_FUNC_RET(memcpy_s(privateDefBuf.paramsInfo.sendDispls, destMax,
        sendDispls.data(), sendDispls.size() * sizeof(u64)));
    CHK_SAFETY_FUNC_RET(memcpy_s(privateDefBuf.paramsInfo.recvCounts, destMax,
        recvCount.data(), recvCount.size() * sizeof(u64)));

    return HCCL_SUCCESS;
}

// Thin wrapper: all all-gather-v parameter handling lives in the copy helper.
HcclResult HcomOpsKernelBuilder::SetAllGatherVParams(const ge::Node& node,
    HCCL_ALLGATHERV_KERNEL_INFO_PRIVATE_DEF &privateDefBuf)
{
    // CHK_RET is kept (rather than a direct return) for its failure logging.
    CHK_RET(CopyAllGatherVParamsToDef(node, privateDefBuf));
    return HCCL_SUCCESS;
}

// Collect the alltoallv counts/displacements — either from const attributes on
// the op descriptor or from the node's input tensors — and copy them into the
// fixed-size arrays of the private-def buffer.
HcclResult HcomOpsKernelBuilder::CopyAlltoAllVParamsToDef(const ge::Node& node,
    HCCL_ALLTOALLV_KERNEL_INFO_PRIVATE_DEF &privateDefBuf)
{
    std::vector<int64_t> sendCounts;
    std::vector<int64_t> sendDispls;
    std::vector<int64_t> recvCounts;
    std::vector<int64_t> recvDispls;

    auto opDesc = node.GetOpDesc();
    if (!ge::AttrUtils::HasAttr(opDesc, "send_counts")) {
        // Counts arrive as graph-input tensors attached to the node.
        CHK_RET(GetAlltoAllCountsDispl(const_cast<ge::Node&>(node), sendCounts, sendDispls, recvCounts,
            recvDispls));
    } else {
        // Counts are stored as const attributes on the op descriptor.
        CHK_RET(GetAlltoAllCountsDispl(opDesc, sendCounts, sendDispls, recvCounts, recvDispls));
    }

    // Common destination capacity for all four fixed-size arrays.
    const u64 destMax = ALLTOALLV_RANK_MAX_NUM * sizeof(u64);
    CHK_SAFETY_FUNC_RET(memcpy_s(privateDefBuf.paramsInfo.sendCounts, destMax,
        sendCounts.data(), sendCounts.size() * sizeof(u64)));
    CHK_SAFETY_FUNC_RET(memcpy_s(privateDefBuf.paramsInfo.sendDispls, destMax,
        sendDispls.data(), sendDispls.size() * sizeof(u64)));
    CHK_SAFETY_FUNC_RET(memcpy_s(privateDefBuf.paramsInfo.recvCounts, destMax,
        recvCounts.data(), recvCounts.size() * sizeof(u64)));
    CHK_SAFETY_FUNC_RET(memcpy_s(privateDefBuf.paramsInfo.recvDispls, destMax,
        recvDispls.data(), recvDispls.size() * sizeof(u64)));

    return HCCL_SUCCESS;
}

// Pull the all-gather-v counts/displacements off the node and copy them into
// the fixed-size arrays of the private-def buffer.
HcclResult HcomOpsKernelBuilder::CopyAllGatherVParamsToDef(const ge::Node& node,
    HCCL_ALLGATHERV_KERNEL_INFO_PRIVATE_DEF &privateDefBuf)
{
    std::vector<int64_t> sendCount;
    std::vector<int64_t> recvCounts;
    std::vector<int64_t> recvDispls;
    CHK_RET(GetAllGatherVCountsDispl(const_cast<ge::Node&>(node), sendCount, recvCounts, recvDispls));

    // Common destination capacity for all three fixed-size arrays.
    const u64 destMax = ALLTOALLV_RANK_MAX_NUM * sizeof(u64);
    CHK_SAFETY_FUNC_RET(memcpy_s(privateDefBuf.paramsInfo.sendCount, destMax,
        sendCount.data(), sendCount.size() * sizeof(u64)));
    CHK_SAFETY_FUNC_RET(memcpy_s(privateDefBuf.paramsInfo.recvCounts, destMax,
        recvCounts.data(), recvCounts.size() * sizeof(u64)));
    CHK_SAFETY_FUNC_RET(memcpy_s(privateDefBuf.paramsInfo.recvDispls, destMax,
        recvDispls.data(), recvDispls.size() * sizeof(u64)));

    return HCCL_SUCCESS;
}

// Collect the alltoallvc send-count matrix — from a const attribute when
// present, otherwise from the node's input tensors — and copy it into the
// fixed-size matrix of the private-def buffer.
HcclResult HcomOpsKernelBuilder::CopyAlltoAllVCParamsToDef(const ge::Node& node,
    HCCL_ALLTOALLV_KERNEL_INFO_PRIVATE_DEF &privateDefBuf)
{
    std::vector<int64_t> sendCountMatrix;
    auto opDesc = node.GetOpDesc();
    if (!ge::AttrUtils::HasAttr(opDesc, "send_count_matrix")) {
        // Matrix arrives as graph-input tensors attached to the node.
        CHK_RET(HcomOpUtils::GetAlltoAllCountMatrix(const_cast<ge::Node&>(node), sendCountMatrix));
    } else {
        // Matrix is stored as a const attribute on the op descriptor.
        CHK_RET(HcomOpUtils::GetAlltoAllCountMatrix(opDesc, sendCountMatrix));
    }

    // Destination is a rank x rank matrix of u64 counts.
    const u64 destMax = ALLTOALLVC_RANK_MAX_NUM * ALLTOALLVC_RANK_MAX_NUM * sizeof(u64);
    CHK_SAFETY_FUNC_RET(memcpy_s(privateDefBuf.cparamsInfo.sendCountMatrix, destMax,
        sendCountMatrix.data(), sendCountMatrix.size() * sizeof(u64)));
    return HCCL_SUCCESS;
}

// Resolve the alltoallv send/recv data types from the op descriptor and store
// them into the (non-matrix) paramsInfo section of the private-def buffer.
HcclResult HcomOpsKernelBuilder::SetAlltoAllVDataTypeToDef(const ge::OpDescPtr &op,
    HCCL_ALLTOALLV_KERNEL_INFO_PRIVATE_DEF &privateDefBuf)
{
    CHK_RET(HcomOpUtils::GetAlltoAllDataType(
        op, privateDefBuf.paramsInfo.sendType, privateDefBuf.paramsInfo.recvType));
    return HCCL_SUCCESS;
}

// Resolve the alltoallvc send/recv data types from the op descriptor and store
// them into the matrix (cparamsInfo) section of the private-def buffer.
HcclResult HcomOpsKernelBuilder::SetAlltoAllVCDataTypeToDef(const ge::OpDescPtr &op,
    HCCL_ALLTOALLV_KERNEL_INFO_PRIVATE_DEF &privateDefBuf)
{
    CHK_RET(HcomOpUtils::GetAlltoAllDataType(
        op, privateDefBuf.cparamsInfo.sendType, privateDefBuf.cparamsInfo.recvType));
    return HCCL_SUCCESS;
}

// Resolve the rank size either via the group name (when no explicit comm
// handle is given, i.e. COMM_VALUE_DEFAULT) or via the graph comm handle.
HcclResult HcomOpsKernelBuilder::GetRanksize(const int64_t &hcomComm, const string &sGroup, s32 &rankSize)
{
    // Both HCCL APIs take a u32*; rankSize is an s32 by caller contract.
    u32 *rankSizePtr = reinterpret_cast<u32 *>(&rankSize);
    if (hcomComm == static_cast<int64_t>(CommNumHcom::COMM_VALUE_DEFAULT)) {
        CHK_RET(HcomGetRankSize(sGroup.c_str(), rankSizePtr));
    } else {
        CHK_RET(HcclCommGraphGetRankSize(hcomComm, rankSizePtr));
    }
    return HCCL_SUCCESS;
}

// Extract the embedding-service attributes for the remote-lookup / remote-update
// op families. Each op family carries a slightly different attribute set (note
// "insert_flag" vs "insert_option"), so the names are selected per collective type.
// Also derives a unique tag for the node and copies it into esInfo.uniqueTag.
HcclResult HcomOpsKernelBuilder::GetRemoteLookupAttr(const ge::Node &node, EmbeddingServiceParams &esInfo,
    const std::string &sCollectiveType)
{
    ge::OpDescPtr op = node.GetOpDesc();
    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_REMOTE_LOOKUP) {
        CHK_RET(GetOpIntAttr(op, "tag", esInfo.tag));
        CHK_RET(GetOpIntAttr(op, "insert_flag", esInfo.insertFlag));
        CHK_RET(GetOpIntAttr(op, "max_num", esInfo.maxNum));
        CHK_RET(GetOpIntAttr(op, "embedding_dim", esInfo.embeddingDim));
        CHK_RET(GetOpIntAttr(op, "_embedding_dim", esInfo.maxEmbeddingDim));
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_UPDATE ||
        sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_UPDATE_PAIRED) {
        // Update ops have no insert flag; only tag/sizes are read.
        CHK_RET(GetOpIntAttr(op, "tag", esInfo.tag));
        CHK_RET(GetOpIntAttr(op, "max_num", esInfo.maxNum));
        CHK_RET(GetOpIntAttr(op, "embedding_dim", esInfo.embeddingDim));
        CHK_RET(GetOpIntAttr(op, "_embedding_dim", esInfo.maxEmbeddingDim));
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP ||
        sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_PAIRED ||
        sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_UNIQUED_PAIRED) {
        // Collective lookup ops use "insert_option" (not "insert_flag") and add "flags".
        CHK_RET(GetOpIntAttr(op, "tag", esInfo.tag));
        CHK_RET(GetOpIntAttr(op, "insert_option", esInfo.insertFlag));
        CHK_RET(GetOpIntAttr(op, "max_num", esInfo.maxNum));
        CHK_RET(GetOpIntAttr(op, "embedding_dim", esInfo.embeddingDim));
        CHK_RET(GetOpIntAttr(op, "flags", esInfo.flags));
        CHK_RET(GetOpIntAttr(op, "_embedding_dim", esInfo.maxEmbeddingDim));
    }
    string uniqueTag{};
    // NOTE(review): the return value of GetUniqueTagFromNode is ignored here,
    // unlike the CHK_RET-wrapped calls above — confirm it cannot fail.
    GetUniqueTagFromNode(node, esInfo.tag, uniqueTag);
    // NOTE(review): uniqueTag.size() bytes are copied without a trailing '\0';
    // the %s print below assumes esInfo.uniqueTag was zero-initialized — confirm.
    CHK_SAFETY_FUNC_RET(memcpy_s(esInfo.uniqueTag, UNIQUE_TAG_MAX_LEN, uniqueTag.c_str(), uniqueTag.size()));

    HCCL_INFO("GetRemoteLookupAttr get maxEmbeddingDim[%d] uniqueTag[%s]",
        esInfo.maxEmbeddingDim, esInfo.uniqueTag);
    return HCCL_SUCCESS;
}

// Compute the op's total data size in bytes: element count times the size of
// the op's HCCL data type.
HcclResult HcomOpsKernelBuilder::GetOpDataSize(const ge::Node &node, u64 &opDataSize)
{
    const std::string opType = node.GetOpDesc()->GetType();
    CHK_RET(CheckSupportedOP(opType));

    HcclDataType dataType = HCCL_DATA_TYPE_RESERVED;
    CHK_RET(HcomOpUtils::ConversionOpDataType(node.GetOpDesc(), opType, dataType));

    u64 count = 0;
    CHK_RET(GetCountFromOpDesc(node.GetOpDesc(), opType, dataType, count));

    opDataSize = count * SIZE_TABLE[dataType];
    return HCCL_SUCCESS;
}

// Declare the attached (slave) stream this node would need, so GE can allocate
// it. Currently short-circuited: attached streams are not requested at all
// (see the early return below); everything after it is intentionally kept as
// dead code for when the workaround is removed.
HcclResult HcomOpsKernelBuilder::SetAttachedStreamInfoList(ge::Node &node, const string &group)
{
    // NOTE(review): streamSeq is currently unused because of the early return below.
    static std::atomic<int> streamSeq{0};
    ge::GeAttrValue::NAMED_ATTRS attachedStream;
    std::string attachedStreamName = "hccl_attached_stream_";
    // HCCL must ensure different main streams map to different attached streams,
    // so the group name is used as the reuse key.
    std::string reuseKey = attachedStreamName + group;
    // Whether the attached stream is mandatory: true means the op fails if
    // the attached-stream allocation fails.
    bool required = true;

    // Temporary workaround: skip attached-stream allocation entirely.
    HCCL_INFO("[HcomOpsKernelBuilder][SetAttachedStreamInfoList] don't use aicpu mode, so don't alloc attachedStream");
    return HCCL_SUCCESS;

    // ---- Unreachable below this point (kept deliberately for re-enabling) ----
    CHK_RET(InitExternalInput());
    if (!GetExternalInputHcclAicpuUnfold()) {
        HCCL_INFO("[HcomOpsKernelBuilder][SetAttachedStreamInfoList] don't use aicpu mode, so don't alloc attachedStream");
        return HCCL_SUCCESS;
    }

    (void)ge::AttrUtils::SetStr(attachedStream, ge::ATTR_NAME_ATTACHED_RESOURCE_NAME, attachedStreamName);
    (void)ge::AttrUtils::SetStr(attachedStream, ge::ATTR_NAME_ATTACHED_RESOURCE_REUSE_KEY, reuseKey);
    // ge::ATTR_NAME_ATTACHED_RESOURCE_DEPEND_VALUE_LIST_INT is left unset for now; the default is used.
    (void)ge::AttrUtils::SetBool(attachedStream, ge::ATTR_NAME_ATTACHED_RESOURCE_REQUIRED_FLAG, required);
    HCCL_INFO("[HcomOpsKernelBuilder][SetAttachedStreamInfoList] name[%s], reuse_key[%s], required[%d].",
        attachedStreamName.c_str(), reuseKey.c_str(), required);

    std::vector<ge::GeAttrValue::NAMED_ATTRS> attachedStreamInfo;
    attachedStreamInfo.emplace_back(attachedStream);

    ge::AttrUtils::SetListNamedAttrs(node.GetOpDesc(), ge::ATTR_NAME_ATTACHED_STREAM_INFO_LIST, attachedStreamInfo);
    return HCCL_SUCCESS;
}
}