/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2022. All rights reserved.
 * Description: 集合通信算子图优化
 * Author: lilianlin
 * Create: 2019-12-7
 */

#include <nlohmann/json.hpp>
#include "hcom_graph_optimizer.h"
#include "hcom_all_reduce_fusion.h"
#include "hcom_broadcast_fusion.h"
#include "hcom_reduce_fusion.h"
#include "hcom_ops_kernel_info_store.h"
#include "hcom_op_utils.h"
#include "comm.h"
#include "hcom_pub.h"
#include "hcom.h"
#include "graph/utils/node_utils.h"
#include "graph/utils/tensor_utils.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/ge_local_context.h"
#include "framework/memory/memory_api.h"
#include "external/ge/ge_api_types.h" // ge对内options
#include "framework/common/ge_types.h" // ge对外options
#include "adapter/adapter_rts.h"
#include "trace_manager.h"
#include "offline_build_config_parse.h"
#include "graph/ge_context.h"
#include "workflow_pub.h"
#include "coll_alg_utils.h"
#include "transport_heterog_def.h"

using namespace std;

namespace hccl {
// Marker suffix/attribute value for ops that need no calculation — usage not visible in this chunk; confirm against callers.
const std::string NO_CALCULATION = "_NO_CALCULATION";
// Presumably serializes updates to the task-number calculation mode (lock user is elsewhere in this file) — TODO confirm.
std::mutex g_setTaskNumCalModeLock;
// Default-construct with all numeric configuration knobs zeroed; the real
// values are filled in later by Initialize() from the GE options map.
HcomGraphOptimizer::HcomGraphOptimizer()
    : fusionTensorSizeLimit_(0),
      hcomMultiMode_(0),
      optionFeatureBaseRefreshable_(0)
{
}

// No resources are owned directly; the defaulted destructor is sufficient.
HcomGraphOptimizer::~HcomGraphOptimizer() = default;

// Embedding-service (ES) op types: the remote lookup/update kernel op types
// that this optimizer treats specially (see GetOpWorkspaceMemSize and the
// unknown-shape handling below).
const unordered_set<string> HcomGraphOptimizer::ES_OPS = { HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_PAIRED,
    HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP, HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_UNIQUED_PAIRED,
    HCCL_KERNEL_OP_TYPE_COLL_REMOTE_UPDATE, HCCL_KERNEL_OP_TYPE_COLL_REMOTE_UPDATE_PAIRED};

/**
 * @brief GE entry point: initialize the HCCL graph optimizer from the option map.
 * @param options          GE option key/value pairs (fusion size, multi mode, ...).
 * @param optimizeUtility  GE-provided utility handle, forwarded as-is.
 * @return ge::SUCCESS on success, ge::INTERNAL_ERROR if HCCL-side init fails.
 */
ge::Status HcomGraphOptimizer::Initialize(const std::map<std::string, std::string>& options,
                                          ge::OptimizeUtility *const optimizeUtility)
{
    HCCL_INFO("init hccl graph optimizer.");

    const HcclResult initRet = HcomGraphOptimizeInitialize(options, optimizeUtility);
    if (initRet != HCCL_SUCCESS) {
        HCCL_ERROR("[Init][HcomGraphOptimizer] hcom init failed");
        return ge::INTERNAL_ERROR;
    }
    return ge::SUCCESS;
}

/**
 * @brief Parse optimizer configuration from the GE option map.
 *
 * Reads ge::FUSION_TENSOR_SIZE into fusionTensorSizeLimit_ (defaulting to
 * 500 MB when absent) and ge::HCOM_MULTI_MODE into hcomMultiMode_
 * (defaulting to 0; only 0 and 1 are accepted).
 *
 * @param options          GE option key/value pairs.
 * @param optimizeUtility  Unused here; kept for interface symmetry with Initialize().
 * @return HCCL_SUCCESS, or HCCL_E_PARA on malformed/out-of-range option values.
 */
HcclResult HcomGraphOptimizer::HcomGraphOptimizeInitialize(const map<std::string, std::string>& options,
                                                           ge::OptimizeUtility *const optimizeUtility)
{
    auto iter = options.find(ge::FUSION_TENSOR_SIZE);
    if (iter != options.end()) {
        u64 value = 0;
        HcclResult ret = SalStrToULonglong(iter->second, HCCL_BASE_DECIMAL, value);
        // Fixed log typo: "interger" -> "integer".
        CHK_PRT_RET(ret != HCCL_SUCCESS,
            HCCL_ERROR("[Init][HcomGraphOptimizer]FUSION_TENSOR_SIZE[%s] is not a valid integer",
                iter->second.c_str()), HCCL_E_PARA);
        fusionTensorSizeLimit_ = value;
        HCCL_INFO("Initialize: FUSION_TENSOR_SIZE[%llu] is set.", fusionTensorSizeLimit_);
    } else {
        // Default fusion tensor size limit: 500 MB (524288000 bytes).
        constexpr u64 defaultFusionTensorSize = 500ULL * 1024ULL * 1024ULL;
        fusionTensorSizeLimit_ = defaultFusionTensorSize;
        HCCL_INFO("Initialize: FUSION_TENSOR_SIZE is not set, default[%llu].", fusionTensorSizeLimit_);
    }

    auto iterMultiMode = options.find(ge::HCOM_MULTI_MODE);
    if (iterMultiMode != options.end()) {
        HcclResult ret = SalStrToInt(iterMultiMode->second, HCCL_BASE_DECIMAL, hcomMultiMode_);
        CHK_PRT_RET(ret != HCCL_SUCCESS,
            HCCL_ERROR("[Init][HcomGraphOptimizer]HCOM_MULTI_MODE[%s] is not a valid integer",
                iterMultiMode->second.c_str()), HCCL_E_PARA);
        // Only modes 0 and 1 are supported.
        if ((hcomMultiMode_ < 0) || (hcomMultiMode_ > 1)) {
            HCCL_ERROR("[Init][HcomGraphOptimizer]Initialize: HCOM_MULTI_MODE[%d] is invalid.", hcomMultiMode_);
            return HCCL_E_PARA;
        }
        HCCL_INFO("Initialize: HCOM_MULTI_MODE is %d.", hcomMultiMode_);
    } else {
        hcomMultiMode_ = 0;
        HCCL_INFO("Initialize: HCOM_MULTI_MODE is not set, default[%d].", hcomMultiMode_);
    }

    return HCCL_SUCCESS;
}

/**
 * @brief Prepare pass: stamp HCCL-specific attributes onto every supported
 *        collective-communication node in the graph.
 *
 * For each supported hcom op this sets common attrs (SetHcomOpAttrs) and a
 * per-group parallel label ("hcom_op_<n>"), so that ops in the same comm
 * group share one label. Dumps the graph afterwards for debugging.
 *
 * @param graph  compute graph to prepare (all nodes, including subgraphs).
 * @return ge::SUCCESS, or ge::INTERNAL_ERROR if attr setting fails.
 */
ge::Status HcomGraphOptimizer::OptimizeGraphPrepare(ge::ComputeGraph& graph)
{
    HCCL_INFO("start hccl graph optimizer prepare.");

    GroupParaLabel groupLabels;
    std::string group;
    HcclResult ret;
    int label = 0;  // monotonically increasing suffix for newly seen groups
    ge::TraceManager::GetInstance().SetTraceOwner("HCCL", "OptimizeGraphPrepare", graph.GetName());
    for (auto nodePtr : graph.GetAllNodes()) {
        if (!nodePtr) {
            HCCL_WARNING("OptimizeGraphPrepare: null node exists.");
            continue;
        }
        auto opDescPtr = nodePtr->GetOpDesc();
        if (!opDescPtr) {
            HCCL_WARNING("OptimizeGraphPrepare: desc of node[%s] is null.", nodePtr->GetName().c_str());
            continue;
        }

        // Collective-communication ops get format_agnostic-related attrs:
        // all are treated as format-insensitive.
        if (CheckSupportedOP(opDescPtr->GetType()) == HCCL_SUCCESS) {
            // Set the other hcom-related attributes.
            ret = SetHcomOpAttrs(opDescPtr);
            CHK_PRT_RET(ret != HCCL_SUCCESS,
                HCCL_ERROR("[Optimize][Graph]node[%s] set hcom op attrs failed, op type[%s]",
                    opDescPtr->GetName().c_str(), opDescPtr->GetType().c_str()), ge::INTERNAL_ERROR);
            // If the node carries no "group" attr, fall back to the world group.
            bool bRet = ge::AttrUtils::HasAttr(opDescPtr, "group");
            if (!bRet) {
                group = "hccl_world_group";
            } else {
                bRet = ge::AttrUtils::GetStr(opDescPtr, "group", group);
                CHK_PRT_RET(!bRet, HCCL_ERROR("[Optimize][Graph]node[%s] get attr \"group\" failed. ",
                    nodePtr->GetName().c_str()), ge::INTERNAL_ERROR);
                CHK_PRT_RET(group.empty(), HCCL_ERROR("[Optimize][Graph]node[%s] get group is empty.", \
                    nodePtr->GetName().c_str()), ge::INTERNAL_ERROR);
            }
            // One parallel label per comm group; reuse the label on repeat groups.
            std::string setLabel = "hcom_op_";
            auto iterNodeLabel = groupLabels.find(group);
            if (iterNodeLabel == groupLabels.end()) {
                label++;
                setLabel += std::to_string(label);
                groupLabels.insert(std::make_pair(group, setLabel));
            } else {
                setLabel = iterNodeLabel->second;
            }
            ret = SetHcomOpParallelLabel(*nodePtr, setLabel);
            CHK_PRT_RET(ret != HCCL_SUCCESS,
                HCCL_ERROR("[Optimize][Graph]node[%s] set group para label attr failed, op type[%s]",
                    opDescPtr->GetName().c_str(), opDescPtr->GetType().c_str()), ge::INTERNAL_ERROR);
        }
    }
    ge::TraceManager::GetInstance().ClearTraceOwner();
    ge::GraphUtils::DumpGEGraph(graph.shared_from_this(), "HcclAfterOptimizeGraphPrepare");
    ge::GraphUtils::DumpGEGraphToOnnx(graph, "HcclAfterOptimizeGraphPrepare");
    HCCL_INFO("end hccl graph optimizer prepare.");
    return ge::SUCCESS;
}

/**
 * @brief GE teardown hook. Nothing to release here; always succeeds.
 */
ge::Status HcomGraphOptimizer::Finalize()
{
    HCCL_INFO("finalize hccl graph optimizer.");
    return ge::SUCCESS;
}

/**
 * @brief Cap the fusion tensor size for unknown-shape graphs.
 *
 * Static-shape graphs keep the caller's value untouched. For unknown-shape
 * graphs the fused tensor must fit into the available CCL buffer, so the
 * output is lowered to that buffer size when the configured limit exceeds it.
 *
 * @param unknownShape      true if the graph contains unknown-shape nodes.
 * @param fusionTensorSize  in/out: effective fusion size limit.
 */
HcclResult HcomGraphOptimizer::UpdateFusionTensorSizeLimit(bool unknownShape, u64 &fusionTensorSize)
{
    if (!unknownShape) {
        return HCCL_SUCCESS;
    }
    u64 cclBufSize = 0;
    CHK_RET(HcomGetCCLBufferAvailableSize(cclBufSize));
    if (static_cast<u64>(fusionTensorSizeLimit_) > cclBufSize) {
        fusionTensorSize = cclBufSize;
    }
    return HCCL_SUCCESS;
}

/**
 * @brief Run the three HCCL fusion passes (AllReduce, Broadcast, Reduce) on
 *        the original graph.
 *
 * The fusion tensor size is refreshed first: static graphs keep the configured
 * limit, dynamic graphs are capped by the CCL buffer size.
 *
 * @param graph             graph to optimize.
 * @param uknownShapeGraph  whether the graph was classified as unknown-shape.
 * @return HCCL_SUCCESS, or HCCL_E_PARA if any pass fails.
 */
HcclResult HcomGraphOptimizer::HcomOptimizeOriginalGraph(ge::ComputeGraph& graph, bool& uknownShapeGraph)
{
    HcclResult ret;
    // Refresh fusionTensorSize with the live maximum: static graphs keep the
    // configured value, dynamic graphs are updated from the CCL buffer size.
    u64 fusionTensorSize = fusionTensorSizeLimit_;
    ret = UpdateFusionTensorSizeLimit(uknownShapeGraph, fusionTensorSize);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Optimize][OriginalGraph]graph[%s]: UpdateFusionTensorSizeLimit graph"
            "failed. ret[%d]", graph.GetName().c_str(), ret), HCCL_E_PARA);

    // Fix: each error message now names the pass that actually failed
    // (previously AllReduce reported "HcomBroadcast" and Broadcast reported "HcomReduce").
    ret = FuseHcomAllReduceNode(graph);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Optimize][OriginalGraph]graph[%s]: fuse HcomAllReduce node failed. ret[%d]",
            graph.GetName().c_str(), ret), HCCL_E_PARA);

    ret = FuseHcomBroadcastNode(graph, fusionTensorSize);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Optimize][OriginalGraph]graph[%s]: fuse HcomBroadcast node failed. ret[%d]",
            graph.GetName().c_str(), ret), HCCL_E_PARA);

    ret = FuseHcomReduceNode(graph);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Optimize][OriginalGraph]graph[%s]: fuse HcomReduce node failed. ret[%d]",
            graph.GetName().c_str(), ret), HCCL_E_PARA);

    return HCCL_SUCCESS;
}

/**
 * @brief GE entry point: optimize the original (pre-fusion) graph.
 *
 * Classifies the graph as known/unknown shape, stamps unknown-shape attrs,
 * then runs the HCCL fusion passes. Graph dumps are emitted before and after
 * for debugging.
 *
 * @param graph  compute graph to optimize.
 * @return ge::SUCCESS, or ge::INTERNAL_ERROR on any sub-step failure.
 */
ge::Status HcomGraphOptimizer::OptimizeOriginalGraph(ge::ComputeGraph& graph)
{
    HcclResult ret;
    bool uknownShapeGraph = false;

    ge::GraphUtils::DumpGEGraph(graph.shared_from_this(), "HcclBeforeOptimizeOriginalGraph");
    ge::GraphUtils::DumpGEGraphToOnnx(graph, "HcclBeforeOptimizeOriginalGraph");
    ge::TraceManager::GetInstance().SetTraceOwner("HCCL", "OptimizeOriginalGraph", graph.GetName());
    ret = OriginalGraphShapeTypeCfg(graph, uknownShapeGraph);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Optimize][OriginalGraph]graph[%s]: OriginalGraphShapeTypeCfg failed. ret[%d]",
            graph.GetName().c_str(), ret), ge::INTERNAL_ERROR);
    ret = SetUnknownShapeAttr(graph, uknownShapeGraph);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Optimize][OriginalGraph]graph[%s]: SetUnknownShapeAttr failed. ret[%d]",
            graph.GetName().c_str(), ret), ge::INTERNAL_ERROR);
    ret = HcomOptimizeOriginalGraph(graph, uknownShapeGraph);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Optimize][OriginalGraph]graph[%s]: Original Optimize failed. ret[%d]",
            graph.GetName().c_str(), ret), ge::INTERNAL_ERROR);
    ge::TraceManager::GetInstance().ClearTraceOwner();
    // Fixed dump stage-name typo: "HcclAfaterOptimizeOriginalGraph" -> "HcclAfterOptimizeOriginalGraph".
    ge::GraphUtils::DumpGEGraph(graph.shared_from_this(), "HcclAfterOptimizeOriginalGraph");
    ge::GraphUtils::DumpGEGraphToOnnx(graph, "HcclAfterOptimizeOriginalGraph");
    // Fixed copy-pasted log text that previously claimed "end fusion HcomReduce Op".
    HCCL_INFO("graph[%s] end optimize original graph.", graph.GetName().c_str());
    return ge::SUCCESS;
}

/**
 * @brief GE entry point: after fusion, compute running parameters for every
 *        supported hcom op on the graph's direct nodes.
 *
 * @param graph  fused compute graph.
 * @return ge::SUCCESS, or ge::INTERNAL_ERROR on failure.
 */
ge::Status HcomGraphOptimizer::OptimizeFusedGraph(ge::ComputeGraph& graph)
{
    bool uknownShapeGraph = false;

    HcclResult ret = OriginalGraphShapeTypeCfg(graph, uknownShapeGraph);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Optimize][OptimizeFusedGraph]graph[%s]: OriginalGraphShapeTypeCfg failed. ret[%d]",
            graph.GetName().c_str(), ret), ge::INTERNAL_ERROR);

    ge::TraceManager::GetInstance().SetTraceOwner("HCCL", "OptimizeFusedGraph", graph.GetName());
    for (auto nodePtr : graph.GetDirectNode()) {
        if (!nodePtr) {
            HCCL_WARNING("null node exists.");
            continue;
        }
        auto opDescPtr = nodePtr->GetOpDesc();
        if (!opDescPtr) {
            HCCL_WARNING("desc of node[%s] is null.", nodePtr->GetName().c_str());
            continue;
        }

        if (CheckSupportedOP(opDescPtr->GetType()) != HCCL_SUCCESS) {
            continue;
        }
        // Reuse the outer ret instead of shadowing it with a second declaration.
        ret = CalcOpRunningParam(*nodePtr, uknownShapeGraph);
        CHK_PRT_RET(ret != HCCL_SUCCESS,
            HCCL_ERROR("[Optimize][FusedGraph]errNo[0x%016llx] Calc Op Running Params failed.",
                HCOM_ERROR_CODE(ret)), ge::INTERNAL_ERROR);
    }
    ge::TraceManager::GetInstance().ClearTraceOwner();
    return ge::SUCCESS;
}

/**
 * @brief Whole-graph optimization hook required by the GE optimizer interface.
 *        HCCL performs no whole-graph optimization, so this is a no-op.
 */
ge::Status HcomGraphOptimizer::OptimizeWholeGraph(ge::ComputeGraph& graph)
{
    return ge::SUCCESS;
}

/**
 * @brief Report this optimizer's identity to GE: the HCCL engine name with
 *        UNIT scope.
 * @param attrs  out: filled with engine name and scope.
 */
ge::Status HcomGraphOptimizer::GetAttributes(ge::GraphOptimizerAttribute& attrs) const
{
    attrs.scope = ge::UNIT;
    attrs.engineName = HCCL_OPS_ENGIN;
    HCCL_DEBUG("hccl graph optimizer get attr success. engine[%s] scope[%d]", attrs.engineName.c_str(), attrs.scope);
    return ge::SUCCESS;
}

/**
 * @brief Run the HcomAllReduce fusion pass on the root graph and every subgraph.
 * @param graph  root compute graph.
 * @return HCCL_SUCCESS, or the failing pass's error code.
 */
HcclResult HcomGraphOptimizer::FuseHcomAllReduceNode(ge::ComputeGraph& graph)
{
    HcomAllReduceFusion fusionHcomAllReduceOp;
    HCCL_INFO("graph[%s] start fusion HcomAllReduce node.", graph.GetName().c_str());
    HcclResult ret = fusionHcomAllReduceOp.Run(graph);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Fuse][HcomAllReduceNode]graph[%s]: fuse HcomAllReduce node failed. ret[%d]",
            graph.GetName().c_str(), ret), ret);

    // Apply the same fusion to each subgraph with a fresh pass object.
    std::vector<std::shared_ptr<ge::ComputeGraph>> subgraph = graph.GetAllSubgraphs();
    for (const auto &sub : subgraph) {
        HcomAllReduceFusion fusionSubGraphOp;
        ret = fusionSubGraphOp.Run(*sub);
        CHK_PRT_RET(ret != HCCL_SUCCESS,
            HCCL_ERROR("[Fuse][HcomAllReduceNode]fuse HcomAllReduce op failed in subgraph[%s]. ret[%d]",
                sub->GetName().c_str(), ret), ret);
    }

    // Fix: subgraph.size() is size_t, so %zu (not %d) is the correct specifier.
    HCCL_INFO("graph[%s] with[%zu] subgraphs: end fusion HcomAllReduce node.", graph.GetName().c_str(),
        subgraph.size());
    return HCCL_SUCCESS;
}

/**
 * @brief Run the HcomBroadcast fusion pass on the root graph and every subgraph.
 * @param graph             root compute graph.
 * @param fusionTensorSize  size limit used by the broadcast fusion pass.
 * @return HCCL_SUCCESS, or the failing pass's error code.
 */
HcclResult HcomGraphOptimizer::FuseHcomBroadcastNode(ge::ComputeGraph& graph, u64 fusionTensorSize)
{
    HcomBroadcastFusion fusionHcomBroadcastOp;
    HCCL_INFO("graph[%s] start fusion HcomBroadcast node.", graph.GetName().c_str());
    HcclResult ret = fusionHcomBroadcastOp.Run(graph, fusionTensorSize);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Fuse][HcomBroadcastNode]graph[%s]: fuse HcomBroadcast node failed. ret[%d]",
            graph.GetName().c_str(), ret), ret);

    // Subgraph failures now log which subgraph failed, matching the
    // AllReduce/Reduce fusion functions (previously a bare CHK_RET).
    std::vector<std::shared_ptr<ge::ComputeGraph>> subgraph = graph.GetAllSubgraphs();
    for (const auto &sub : subgraph) {
        HcomBroadcastFusion fusionSubGraphOp;
        ret = fusionSubGraphOp.Run(*sub, fusionTensorSize);
        CHK_PRT_RET(ret != HCCL_SUCCESS,
            HCCL_ERROR("[Fuse][HcomBroadcastNode]fuse HcomBroadcast op failed in subgraph[%s]. ret[%d]",
                sub->GetName().c_str(), ret), ret);
    }

    // Fix: subgraph.size() is size_t, so %zu (not %d) is the correct specifier.
    HCCL_INFO("graph[%s] with [%zu] subgraphs: end fusion HcomBroadcast node.",
        graph.GetName().c_str(), subgraph.size());
    return HCCL_SUCCESS;
}

/**
 * @brief Set the GE attributes HCCL collective ops require on an op desc.
 *
 * Depending on op type this sets: continuous input/output memory (fusable
 * ops), the "_input_mutable" flag, overflow-detection global-workspace attrs,
 * and finally the op format via SetHcomOpFormat().
 *
 * @param opDescPtr  op description to decorate (must be non-null).
 * @return HCCL_SUCCESS, or HCCL_E_INTERNAL if any attr set fails.
 */
HcclResult HcomGraphOptimizer::SetHcomOpAttrs(ge::OpDescPtr &opDescPtr)
{
    CHK_SMART_PTR_NULL(opDescPtr);
    // Hoist GetType(): the original re-queried it for every comparison.
    const std::string opType = opDescPtr->GetType();
    bool bRet = false;

    // Continuous-memory attrs were moved from graph compilation to graph optimization.
    if (opType == HCCL_KERNEL_OP_TYPE_ALLREDUCE ||
        opType == HCCL_KERNEL_OP_TYPE_BROADCAST ||
        opType == HCCL_KERNEL_OP_TYPE_REDUCE) {
        // Note: ops eligible for fusion need contiguous input/output memory.
        bRet = ge::AttrUtils::SetBool(opDescPtr, ge::ATTR_NAME_CONTINUOUS_INPUT, true);
        CHK_PRT_RET(!bRet, HCCL_ERROR("[Set][OpAttrs]node[%s] set continuous input attr to OpDesc failed", \
            opDescPtr->GetName().c_str()), HCCL_E_INTERNAL);
        bRet = ge::AttrUtils::SetBool(opDescPtr, ge::ATTR_NAME_CONTINUOUS_OUTPUT, true);
        CHK_PRT_RET(!bRet, HCCL_ERROR("[Set][OpAttrs]node[%s] set continuous output attr to OpDesc failed.", \
            opDescPtr->GetName().c_str()), HCCL_E_INTERNAL);
    }

    if (opType == HCCL_KERNEL_OP_TYPE_ALLREDUCE ||
        opType == HCCL_KERNEL_OP_TYPE_ALLGATHER ||
        opType == HCCL_KERNEL_OP_TYPE_REDUCESCATTER ||
        opType == HCCL_KERNEL_OP_TYPE_REDUCE) {
        bRet = ge::AttrUtils::SetBool(opDescPtr, "_input_mutable", true);
        HCCL_DEBUG("node[%s] op type [%s] input mutable attr is set",
            opDescPtr->GetName().c_str(), opType.c_str());
        CHK_PRT_RET(!bRet, HCCL_ERROR("[Set][OpAttrs]node[%s] SetBool _input_mutable failed, op type[%s]",
            opDescPtr->GetName().c_str(), opType.c_str()),
            HCCL_E_INTERNAL);
    }

    if (opType == HCCL_KERNEL_OP_TYPE_ALLREDUCE ||
        opType == HCCL_KERNEL_OP_TYPE_REDUCE ||
        opType == HCCL_KERNEL_OP_TYPE_REDUCESCATTER) {
        // Overflow-detection attributes.
        const int globalWorkSpaceSize = 1;
        const int globalWorkSpaceType = static_cast<int>(GlobalWorkSpaceType::OVERFLOW_DETECT_MODE);
        bRet = ge::AttrUtils::SetInt(opDescPtr, "globalworkspace_size", globalWorkSpaceSize);
        CHK_PRT_RET(!bRet, HCCL_ERROR("[Set][OpAttrs]node[%s] set globalworkspace_size failed", \
            opDescPtr->GetName().c_str()), HCCL_E_INTERNAL);
        bRet = ge::AttrUtils::SetInt(opDescPtr, "globalworkspace_type", globalWorkSpaceType);
        CHK_PRT_RET(!bRet, HCCL_ERROR("[Set][OpAttrs]node[%s] set globalworkspace_type failed", \
            opDescPtr->GetName().c_str()), HCCL_E_INTERNAL);
    }

    HcclResult ret = SetHcomOpFormat(opDescPtr);
    CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[Set][HcomOpAttrs]node[%s] set format failed, op type[%s]",
        opDescPtr->GetName().c_str(), opType.c_str()), HCCL_E_INTERNAL);

    return HCCL_SUCCESS;
}

/**
 * @brief Run the HcomReduce fusion pass on the root graph and every subgraph.
 * @param graph  root compute graph.
 * @return HCCL_SUCCESS, or the failing pass's error code.
 */
HcclResult HcomGraphOptimizer::FuseHcomReduceNode(ge::ComputeGraph& graph)
{
    HcomReduceFusion fusionHcomReduceOp;
    HCCL_INFO("graph[%s] start fusion HcomReduce node.", graph.GetName().c_str());
    HcclResult ret = fusionHcomReduceOp.Run(graph);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Fuse][HcomReduceNode]graph[%s]: fuse HcomReduce node failed. ret[%d]",
            graph.GetName().c_str(), ret), ret);

    // Apply the same fusion to each subgraph with a fresh pass object.
    std::vector<std::shared_ptr<ge::ComputeGraph>> subgraph = graph.GetAllSubgraphs();
    for (const auto &sub : subgraph) {
        HcomReduceFusion fusionSubGraphOp;
        ret = fusionSubGraphOp.Run(*sub);
        CHK_PRT_RET(ret != HCCL_SUCCESS,
            HCCL_ERROR("[Fuse][HcomReduceNode]fuse HcomReduce op failed in subgraph[%s]. ret[%d]",
                sub->GetName().c_str(), ret), ret);
    }

    // Fix: subgraph.size() is size_t, so %zu (not %d) is the correct specifier.
    HCCL_INFO("graph[%s] with [%zu] subgraphs: end fusion HcomReduce node.", graph.GetName().c_str(),
        subgraph.size());
    return HCCL_SUCCESS;
}

/**
 * @brief Check whether an op type is a supported HCOM collective op.
 * @param sCollectiveType  op type string to look up.
 * @return HCCL_SUCCESS if listed in HCOM_SUPPORTED_OP_TYPE, else HCCL_E_PARA.
 */
HcclResult HcomGraphOptimizer::CheckSupportedOP(const std::string &sCollectiveType) const
{
    const bool supported = std::find(HCOM_SUPPORTED_OP_TYPE.cbegin(), HCOM_SUPPORTED_OP_TYPE.cend(),
        sCollectiveType) != HCOM_SUPPORTED_OP_TYPE.cend();
    return supported ? HCCL_SUCCESS : HCCL_E_PARA;
}

/**
 * @brief Detect auto multi-batch ("auto gear") graphs.
 *
 * A graph counts as multi-batch as soon as any node carries the
 * _subgraph_multi_dims_input_shape attribute.
 *
 * @param graph  graph to scan (all nodes, including subgraphs).
 * @return true if any node has the attribute, false otherwise.
 */
bool HcomGraphOptimizer::IsSubgraphMultiBatch(ge::ComputeGraph& graph)
{
    for (const auto &nodePtr : graph.GetAllNodes()) {
        if (!nodePtr) {
            HCCL_WARNING("null node exists.");
            continue;
        }
        auto descPtr = nodePtr->GetOpDesc();
        if (!descPtr) {
            HCCL_WARNING("desc of node[%s] is null.", nodePtr->GetName().c_str());
            continue;
        }
        if (!ge::AttrUtils::HasAttr(descPtr, ge::ATTR_NAME_SUBGRAPH_MULTI_DIMS_INPUT_SHAPE)) {
            continue;
        }
        HCCL_INFO("graph[%s] node [%s] has attr _subgraph_multi_dims_input_shape",
            graph.GetName().c_str(), nodePtr->GetName().c_str());
        return true;
    }
    return false;
}

/**
 * @brief Classify the original graph as known- or unknown-shape.
 *
 * Auto multi-batch graphs are treated as known-shape (uknownShapeGraph stays
 * false). Otherwise, the graph is unknown-shape as soon as any node reports
 * unknown-shape status.
 *
 * @param graph             graph to classify.
 * @param uknownShapeGraph  out: set true if any node is unknown-shape.
 * @return HCCL_SUCCESS, or HCCL_E_PARA if node status cannot be queried.
 */
HcclResult HcomGraphOptimizer::OriginalGraphShapeTypeCfg(ge::ComputeGraph& graph, bool &uknownShapeGraph)
{
    // Auto multi-batch ("auto gear") graphs short-circuit the scan.
    // NOTE(review): the original comment said such graphs default to dynamic
    // shape, but the code marks them as known-shape (false) — confirm intent.
    if (IsSubgraphMultiBatch(graph)) {
        uknownShapeGraph = false;
        return HCCL_SUCCESS;
    }

    /* Traverse all ops of the original graph. */
    for (auto nodePtr : graph.GetAllNodes()) {
        if (!nodePtr) {
            HCCL_WARNING("null node exists.");
            continue;
        }
        auto opDescPtr = nodePtr->GetOpDesc();
        if (!opDescPtr) {
            HCCL_WARNING("desc of node[%s] is null.", nodePtr->GetName().c_str());
            continue;
        }

        bool unknownShapeNode = false;
        /* If any single op is unknown-shape, the whole original graph is unknown-shape. */
        CHK_PRT_RET((ge::NodeUtils::GetNodeUnknownShapeStatus(*nodePtr, unknownShapeNode) != ge::GRAPH_SUCCESS),
            HCCL_ERROR("[Original][GraphShapeTypeCfg]node[%s] get node unknown status failed",
                nodePtr->GetName().c_str()), HCCL_E_PARA);
        if (unknownShapeNode) {
            uknownShapeGraph = true;
            break;
        }
    }

    return HCCL_SUCCESS;
}

/**
 * @brief Stamp unknown-shape attributes onto supported hcom ops.
 *
 * Reads OPTION_FEATURE_BASE_REFRESHABLE from the GE context (0/1 only).
 * If the graph is known-shape and the option is 0, nothing is done.
 * Otherwise every supported hcom op gets ORIGINAL_GRAPH_SHAPE_TYPE set to
 * unknown-shape; known-shape ops are additionally forced to unknown shape
 * (ATTR_NAME_FORCE_UNKNOWN_SHAPE) when they belong to op types that do not
 * support dynamic-shape sinking, or when their tensor/workspace memory
 * exceeds the available CCL buffer.
 *
 * @param graph             graph whose nodes are decorated.
 * @param uknownShapeGraph  classification from OriginalGraphShapeTypeCfg.
 * @return HCCL_SUCCESS, or HCCL_E_PARA on attr/query failures.
 */
HcclResult HcomGraphOptimizer::SetUnknownShapeAttr(ge::ComputeGraph &graph, bool uknownShapeGraph)
{
    std::string iterRefreShable = "0";
    ge::graphStatus status = ge::GetContext().GetOption(ge::OPTION_FEATURE_BASE_REFRESHABLE, iterRefreShable);

    bool statusFlag = (status == ge::GRAPH_SUCCESS) && (iterRefreShable.compare("1") == 0);
    if (statusFlag) {
        HcclResult ret = SalStrToInt(iterRefreShable, HCCL_BASE_DECIMAL, optionFeatureBaseRefreshable_);
        CHK_PRT_RET(ret != HCCL_SUCCESS,
            HCCL_ERROR("[Init][HcomGraphOptimizer]OPTION_FEATURE_BASE_REFRESHABLE[%s] is not a valid interger",
                iterRefreShable.c_str()), HCCL_E_PARA);
        if ((optionFeatureBaseRefreshable_ < 0) || (optionFeatureBaseRefreshable_ > 1)) {
            HCCL_ERROR("[Init][HcomGraphOptimizer]Initialize: OPTION_FEATURE_BASE_REFRESHABLE[%d] is invaild.",
                optionFeatureBaseRefreshable_);
            return HCCL_E_PARA;
        }
        HCCL_INFO("Initialize: OPTION_FEATURE_BASE_REFRESHABLE is %d.", optionFeatureBaseRefreshable_);
    } else {
        optionFeatureBaseRefreshable_ = 0;
        HCCL_INFO("Initialize: OPTION_FEATURE_BASE_REFRESHABLE is unsetted, default[%d].",
            optionFeatureBaseRefreshable_);
    }

    // Known-shape graph with no refreshable feature base: nothing to stamp.
    if (!uknownShapeGraph && optionFeatureBaseRefreshable_ == 0) {
        HCCL_DEBUG("graph[%s] is known shap", graph.GetName().c_str());
        return HCCL_SUCCESS;
    }
    /* Traverse all ops of the original graph. */
    for (auto nodePtr : graph.GetAllNodes()) {
        if (!nodePtr) {
            HCCL_WARNING("null node exists.");
            continue;
        }
        auto opDescPtr = nodePtr->GetOpDesc();
        if (!opDescPtr) {
            HCCL_WARNING("desc of node[%s] is null.", nodePtr->GetName().c_str());
            continue;
        }

        if (CheckSupportedOP(opDescPtr->GetType()) != HCCL_SUCCESS) {
            continue;
        }

        bool bRet = false;
        bRet = ge::AttrUtils::SetInt(opDescPtr, ORIGINAL_GRAPH_SHAPE_TYPE, ORIGINAL_GRAPH_UNKNOWNSHAPE_TYPE);
        CHK_PRT_RET(!bRet,
            HCCL_ERROR("[Set][UnknownShapeAttr]graph[%s]: node [%s] SetInt unkown shap failed, op type[%s]",
                graph.GetName().c_str(), nodePtr->GetName().c_str(), opDescPtr->GetType().c_str()), HCCL_E_PARA);

        bool unknownShapeNode = false;
        /* Check whether this op is already an unknown-shape node. */
        CHK_PRT_RET((ge::NodeUtils::GetNodeUnknownShapeStatus(*nodePtr, unknownShapeNode) != ge::GRAPH_SUCCESS),
            HCCL_ERROR("[Set][UnknownShapeAttr]node[%s] get node unknown status failed",
                nodePtr->GetName().c_str()), HCCL_E_PARA);

        // Already unknown-shape: no forcing required.
        if (unknownShapeNode) {
            continue;
        }

        // AllToAllV / AllToAllVC / AllToAll (and the ES / V-variant ops below)
        // do not yet support dynamic-shape sinking, so force them to unknown shape.
        if (opDescPtr->GetType() == HCCL_KERNEL_OP_TYPE_ALLTOALLV ||
            opDescPtr->GetType() == HCCL_KERNEL_OP_TYPE_ALLTOALL ||
            opDescPtr->GetType() == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP ||
            opDescPtr->GetType() == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_UPDATE ||
            opDescPtr->GetType() == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_PAIRED ||
            opDescPtr->GetType() == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_UNIQUED_PAIRED ||
            opDescPtr->GetType() == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_UPDATE_PAIRED ||
            opDescPtr->GetType() == HCCL_KERNEL_OP_TYPE_ALLGATHERV ||
            opDescPtr->GetType() == HCCL_KERNEL_OP_TYPE_REDUCESCATTERV) {
            bRet = ge::AttrUtils::SetBool(opDescPtr, ge::ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
            HCCL_DEBUG("graph[%s]: node [%s] op type [%s] unkown shap value is set", graph.GetName().c_str(),
                nodePtr->GetName().c_str(), opDescPtr->GetType().c_str());
            CHK_PRT_RET(!bRet,
                HCCL_ERROR("[Set][UnknownShapeAttr]graph[%s]: node [%s] SetBool unkown shap failed, op type[%s]",
                    graph.GetName().c_str(), nodePtr->GetName().c_str(), opDescPtr->GetType().c_str()), HCCL_E_PARA);
            continue;
        }

        DevType devType = DevType::DEV_TYPE_COUNT;
        CHK_RET(GetOffDeviceTypeWithoutDev(devType));

        /* Check the memory of known-shape collective ops in a dynamic-shape
         * graph: if it exceeds the CCL buffer, mark the op unknown-shape so
         * GE invokes it in single-op mode. */
        uint64_t memSize = 0;
        if (opDescPtr->GetType() == HCCL_KERNEL_OP_TYPE_ALLGATHER ||
            opDescPtr->GetType() == HCCL_KERNEL_OP_TYPE_RECEIVE ||
            opDescPtr->GetType() == HCCL_KERNEL_OP_TYPE_GATHER) {
            CHK_RET(HcomOpUtils::GetAllOutputsTensorMemSize(opDescPtr, memSize));
        } else {
            CHK_RET(HcomOpUtils::GetAllInputsTensorMemSize(opDescPtr, memSize));
        }

        u64 maxSize = 0;
        CHK_RET(HcomGetCCLBufferAvailableSize(maxSize));
        u64 opMemSize = 0;
        CHK_RET(GetOpWorkspaceMemSize(*nodePtr, opDescPtr->GetType(), opMemSize));

        // Adaptation for package 1/2: RTS does not support level-2 address offset copy.
        bool notSupportSecAddrCopyWithOffset = HcomGetSecAddrCopyFlag();
        HCCL_INFO("[SetUnknownShapeAttr] notSupportSecAddrCopyWithOffset %d", notSupportSecAddrCopyWithOffset);
        if (static_cast<u64>(memSize) > maxSize) {
            if ((devType == DevType::DEV_TYPE_910B || devType == DevType::DEV_TYPE_910) && \
                !notSupportSecAddrCopyWithOffset) {
                HCCL_DEBUG("Supports level-2 address offset copy.");
            } else {
                bRet = ge::AttrUtils::SetBool(opDescPtr, ge::ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
                HCCL_DEBUG("graph[%s]: node [%s] op type [%s] unkown shap value is set", graph.GetName().c_str(),
                    nodePtr->GetName().c_str(), opDescPtr->GetType().c_str());
                CHK_PRT_RET(!bRet,
                    HCCL_ERROR("[Set][UnknownShapeAttr]graph[%s]: node [%s] SetBool unkown shap failed, op type[%s]",
                        graph.GetName().c_str(), nodePtr->GetName().c_str(), opDescPtr->GetType().c_str()),
                            HCCL_E_PARA);
            }
        }

        // AllToAllVC whose staged scratch workspace exceeds the CCL buffer is
        // likewise forced to unknown shape.
        if (opDescPtr->GetType() == HCCL_KERNEL_OP_TYPE_ALLTOALLVC && opMemSize > maxSize) {
            bRet = ge::AttrUtils::SetBool(opDescPtr, ge::ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
            HCCL_DEBUG("graph[%s]: node [%s] op type [%s] unkown shap value is set", graph.GetName().c_str(),
                nodePtr->GetName().c_str(), opDescPtr->GetType().c_str());
            CHK_PRT_RET(!bRet,
                HCCL_ERROR("[Set][UnknownShapeAttr]graph[%s]: node [%s] SetBool unkown shap failed, op type[%s]",
                    graph.GetName().c_str(), nodePtr->GetName().c_str(), opDescPtr->GetType().c_str()), HCCL_E_PARA);
        }
    }

    return HCCL_SUCCESS;
}

/**
 * @brief Configure format handling for an hcom op.
 *
 * In single mode (hcomMultiMode_ == 0), AllReduce/Broadcast are marked
 * format-agnostic. In multi mode, every input/output tensor desc is forced to
 * NHWC so all ranks use the same physical format.
 *
 * @param opDescPtr  op description to modify (must be non-null).
 * @return HCCL_SUCCESS, or HCCL_E_PARA / null-pointer error on failure.
 */
HcclResult HcomGraphOptimizer::SetHcomOpFormat(ge::OpDescPtr &opDescPtr)
{
    CHK_SMART_PTR_NULL(opDescPtr);
    bool bRet = false;
    if (hcomMultiMode_ == 0) {
        if (opDescPtr->GetType() == HCCL_KERNEL_OP_TYPE_ALLREDUCE ||
            opDescPtr->GetType() == HCCL_KERNEL_OP_TYPE_BROADCAST) {
            bRet = ge::AttrUtils::SetInt(opDescPtr, "_format_agnostic", HCCL_FORMAT_PAIRED_INPUT_OUTPUT);
            HCCL_DEBUG("op type[%s] format agnostic value is set", opDescPtr->GetType().c_str());
            CHK_PRT_RET(!bRet, HCCL_ERROR("[Set][OpFormat]SetBool format_Agnostic failed, op type[%s]",
                opDescPtr->GetType().c_str()), HCCL_E_PARA);
        }
    } else {
        // To accelerate computation, the NPU may convert an op's logical format
        // to different physical formats on different ranks; the same tensor can
        // then have different memory layout and length per rank. To avoid
        // inconsistent physical formats across ranks corrupting collective
        // results, all communication ops are fixed to NHWC for now; an
        // inter-node format negotiation mechanism is planned.
        size_t inputSize = opDescPtr->GetAllInputsSize();
        for (size_t i = 0; i < inputSize; ++i) {
            auto inTensorDescPtr = opDescPtr->MutableInputDesc(i);
            // Fix: guard against a null tensor desc before dereferencing.
            CHK_SMART_PTR_NULL(inTensorDescPtr);
            inTensorDescPtr->SetFormat(ge::FORMAT_NHWC);
            HCCL_DEBUG("input[%zu / %zu] has been set format with NHWC.", i, inputSize);
        }
        size_t outputSize = opDescPtr->GetOutputsSize();
        for (size_t i = 0; i < outputSize; ++i) {
            auto outTensorDescPtr = opDescPtr->MutableOutputDesc(i);
            // Fix: guard against a null tensor desc before dereferencing.
            CHK_SMART_PTR_NULL(outTensorDescPtr);
            outTensorDescPtr->SetFormat(ge::FORMAT_NHWC);
            HCCL_DEBUG("output[%zu / %zu] has been set format with NHWC.", i, outputSize);
        }
    }
    return HCCL_SUCCESS;
}

/**
 * @brief Compute the workspace memory size for embedding-service remote
 *        lookup/update ops.
 *
 * Sizes are derived from the node's "max_num" and "embedding_dim" attrs:
 * a key buffer (doubled for staging), a value buffer, an optional counter
 * buffer when ES_FLAGS_ENABLE_COUNTER is set, and an RDMA envelope region.
 * All regions are rounded up to IPC_MEM_ALIGNMENT_BYTE.
 *
 * @param node       node carrying max_num/embedding_dim attrs.
 * @param opMemSize  out: total workspace bytes.
 * @param flags      ES flags; counters are included when ES_FLAGS_ENABLE_COUNTER.
 * @return HCCL_SUCCESS, or an error if the attrs cannot be read.
 */
HcclResult HcomGraphOptimizer::GetLookupUpdateWorkspace(ge::Node& node, u64 &opMemSize, s32 flags)
{
    s32 maxNum = 0;
    ge::OpDescPtr op = node.GetOpDesc();
    CHK_RET(GetOpIntAttr(op, "max_num", maxNum));

    s32 embeddingDim = 0;
    CHK_RET(GetOpIntAttr(op, "embedding_dim", embeddingDim));
    s32 valueItemSize = embeddingDim * sizeof(float);

    // The +1 reserves memory for the table id.
    s64 keyMemSize = (((maxNum + 1) * SIZE_TABLE[HCCL_DATA_TYPE_INT64] / IPC_MEM_ALIGNMENT_BYTE) + 1) *
        IPC_MEM_ALIGNMENT_BYTE;
    // Double the key staging memory: one copy for shard multi-thread dedup,
    // one for the send key buffer.
    keyMemSize += keyMemSize;
    s64 valueMemSize = ((maxNum * valueItemSize / IPC_MEM_ALIGNMENT_BYTE) + 1) * IPC_MEM_ALIGNMENT_BYTE;

    opMemSize = keyMemSize + valueMemSize;

    if (flags == ES_FLAGS_ENABLE_COUNTER) {
        // Round the per-key counter area up to the IPC alignment.
        u64 counterSize = ((static_cast<u64>(maxNum) * ES_KEY_COUNTER_MEM_BYTES_SIZE + IPC_MEM_ALIGNMENT_BYTE - 1) /
            IPC_MEM_ALIGNMENT_BYTE) * IPC_MEM_ALIGNMENT_BYTE;
        opMemSize += counterSize;
    }

    u64 rdmaEnvelopeMemSize = Align<u64>(sizeof(HcclEsRdmaInfo) * ES_MAX_PS_NUM, IPC_MEM_ALIGNMENT_BYTE);
    opMemSize += rdmaEnvelopeMemSize;

    HCCL_INFO("keyMemSize[%lld], valueMemSize[%lld], rdmaEnvelopeMemSize[%llu], opMemSize[%llu]",
        keyMemSize, valueMemSize, rdmaEnvelopeMemSize, opMemSize);

    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::GetOpWorkspaceMemSize(ge::Node& node, const std::string &sCollectiveType,
    u64 &opMemSize)
{
    HcclResult ret;
    u64 count = 0;
    std::string sGroup;
    int64_t hcomComm = 0;
    u32 dataTypeSize;
    HcclDataType dataType = HCCL_DATA_TYPE_RESERVED;
    const u32 alignSize = HCCL_ALIGN_SIZE;
    u64 getMemSize = 0;
    s32 flags = 0;
    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP ||
        sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_PAIRED ||
        sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_UNIQUED_PAIRED) {
        CHK_RET(GetOpIntAttr(node.GetOpDesc(), "flags", flags));
        CHK_RET(GetLookupUpdateWorkspace(node, opMemSize, flags));
        HCCL_INFO("hccl embedding service workspace mem size %llu.", opMemSize);
        return HCCL_SUCCESS;
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_UPDATE ||
        sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_UPDATE_PAIRED) {
        CHK_RET(GetLookupUpdateWorkspace(node, opMemSize, flags));
        HCCL_INFO("hccl embedding service workspace mem size %llu.", opMemSize);
        return HCCL_SUCCESS;
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_REMOTE_LOOKUP) {
        CHK_RET(GetLookupUpdateWorkspace(node, opMemSize, flags));
        HCCL_INFO("hccl embedding service workspace mem size %llu.", opMemSize);
        return HCCL_SUCCESS;
    }

    ret = HcomOpUtils::ConversionOpDataType(node.GetOpDesc(), sCollectiveType, dataType);
    CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[Get][OpWorkspaceMemSize]op[%s]: get data type failed. ret[%d]",
        sCollectiveType.c_str(), ret), ret);

    ret = GetCountFromOpDesc(node.GetOpDesc(), sCollectiveType, dataType, count);
    CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[Get][OpWorkspaceMemSize]op[%s]: get count failed. ret[%d]",
        sCollectiveType.c_str(), ret), ret);

    ret = GetCommFromOpDesc(node.GetOpDesc(), hcomComm, sGroup);
    CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[Get][OpWorkspaceMemSize]op[%s]: GetGroupFromOpDesc failed. ret[%d]",
        sCollectiveType.c_str(), ret), ret);

    ret = SalGetDataTypeSize(dataType, dataTypeSize);
    CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[Get][OpWorkspaceMemSize]op[%s]: get data size failed. ret[%d]",
        sCollectiveType.c_str(), ret), ret);

    s32 rankSize = 0;
    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALLVC) {
        if (hcomComm == static_cast<int64_t>(CommNumHcom::COMM_VALUE_DEFAULT)) {
            CHK_RET(HcomGetRankSize(sGroup.c_str(), reinterpret_cast<u32 *>(&rankSize)));
        } else {
            CHK_RET(HcclCommGraphGetRankSize(hcomComm, reinterpret_cast<u32 *>(&rankSize)));
        }
        CHK_RET(HcomOpUtils::CheckAlltoAllvcRank(node, hcomComm, sGroup));
    }

    if ((sCollectiveType == HCCL_KERNEL_OP_TYPE_REDUCESCATTER) || (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLGATHER)) {
        CHK_PRT_RET((!ge::AttrUtils::GetInt(node.GetOpDesc(), HCOM_ATTR_RANK_SIZE, rankSize)),
            HCCL_ERROR("[Get][OpWorkspaceMemSize]op[%s] get "\
            "attr[%s] failed.", sCollectiveType.c_str(), HCOM_ATTR_RANK_SIZE.c_str()), HCCL_E_PARA);

        CHK_PRT_RET((rankSize <= 0), HCCL_ERROR("[Get][OpWorkspaceMemSize]op[%s]: rank_size[%d] should be "\
            "greater than 0.", sCollectiveType.c_str(), rankSize), HCCL_E_PARA);
    }

    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_REDUCESCATTER) {
        // ReduceScatter 所需workspace memory: count * 单个数据的size * rank_size
        getMemSize = count * dataTypeSize * rankSize;
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALL) {
        // alltoall 所需workspace memory ：input mem size
        getMemSize += count * dataTypeSize;
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLGATHER) {
        getMemSize = 0;
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_BROADCAST) {
        if (count * dataTypeSize <= HCCL_MID_COUNT_32_MB) {
            opMemSize += count * dataTypeSize * HCCL_MEMSIZE_HD_FACTOR;
        }
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALLVC) {
        CHK_RET(HcomOpUtils::GetAlltoAllvcStagedScratchMemSize(node, hcomComm, sGroup, rankSize, opMemSize));
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLREDUCE) {
        // 尝试获取serverNum
        bool isDeterministic = false;
        CHK_RET(GetDeterministic(isDeterministic));
        if (isDeterministic) {
            s32 deviceNumPerServer = 0;
            s32 serverNum = 0;
            bool multiModuleDiffDeviceNumMode = false;
            std::string groupListString;
            if (ge::GetThreadLocalContext().GetOption(ge::OPTION_EXEC_HCOM_GROUPLIST, groupListString) ==
                ge::GRAPH_SUCCESS) {
                std::string curGroup;
                CHK_RET(HcomOpUtils::GetGroupFromOpDesc(node.GetOpDesc(), curGroup));
                CHK_RET(GetDeviceAndServerNumFromGroupList(groupListString, curGroup, serverNum, deviceNumPerServer,
                    multiModuleDiffDeviceNumMode));
            }

            // 计算scratch大小
            CHK_RET(HcomOpUtils::GetAllReduceScratchMemSize(sGroup, node, serverNum, rankSize, count,
                dataType, getMemSize));
        } else {
            if (count * dataTypeSize <= HCCL_MID_COUNT_16_MB) {
                opMemSize += count * dataTypeSize * HCCL_MEMSIZE_HD_FACTOR;
            }
        }
    } else {
        getMemSize = 0;
    }

    // 算子所需的内存大小，加上固定32kb长度，并按对齐后回传
    opMemSize = (HCCL_WORKSPACE_MEM_32_KB + getMemSize + alignSize - 1) / alignSize * alignSize;

    HCCL_INFO("workspace memory size: node Name[%s], op[%s], data type[%s], count[%llu], comm[%lld], group[%s],"\
        "rank size[%u], size[%llu], mem size[%llu].", node.GetName().c_str(), sCollectiveType.c_str(),
        GetDataTypeEnumStr(dataType).c_str(), count, hcomComm, sGroup.c_str(), rankSize, getMemSize, opMemSize);

    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::SetHcomOpParallelLabel(ge::Node &node, std::string groupLabel)
{
    auto opDesc = node.GetOpDesc();

    // If the framework has already assigned a parallel group to this op,
    // HCCL must not override it.
    if (ge::AttrUtils::HasAttr(opDesc, ge::ATTR_NAME_PARALLEL_GROUP)) {
        string currentLabel;
        const bool gotLabel = ge::AttrUtils::GetStr(opDesc, ge::ATTR_NAME_PARALLEL_GROUP, currentLabel);
        if (gotLabel) {
            HCCL_INFO("[Set][HcomOpParallelLabel] attr \"_parallel_group\" (%s) has existed.", currentLabel.c_str());
        } else {
            HCCL_INFO("[Set][HcomOpParallelLabel] attr \"_parallel_group\" has existed.");
        }
        return HCCL_SUCCESS;
    }

    // Embedding-service ops carry a "tag" attribute; fold it into the label so
    // ops with different tags end up in different parallel groups.
    auto opType = opDesc->GetType();
    if (ES_OPS.count(opType) != 0) {
        string tagName = "tag";
        s32 tag{};
        const bool hasTag = ge::AttrUtils::HasAttr(opDesc, tagName) && ge::AttrUtils::GetInt(opDesc, tagName, tag);
        if (hasTag) {
            groupLabel.append(tagName).append(to_string(tag));
            HCCL_INFO("[Set][HcomOpParallelLabel] optype[%s] groupLabel with tag is \"%s\".", opType.c_str(),
                groupLabel.c_str());
        } else {
            HCCL_WARNING("[Set][HcomOpParallelLabel] optype[%s] tag is not exist. groupLabel"
                " is \"%s\"", opType.c_str(), groupLabel.c_str());
        }
    }

    ge::graphStatus geRet = ge::NodeUtils::SetNodeParallelGroup(node, groupLabel.c_str());
    CHK_PRT_RET(geRet, HCCL_ERROR("[Set][OpParallelLabel]errNo[0x%016llx] node[%s] op[%s] set para label[%s] failed",
        HCCL_ERROR_CODE(HCCL_E_INTERNAL), opDesc->GetName().c_str(), opDesc->GetType().c_str(),
        groupLabel.c_str()), HCCL_E_INTERNAL);
    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::GetCountFromOpDesc(const ge::OpDescPtr &op, const std::string &sCollectiveType, \
    HcclDataType dataType, u64 &count)
{
    // Derive the element count of the op's communication payload: sum the
    // (aligned / padded) byte sizes of the relevant tensors, then divide by the
    // per-element size of dataType. The result is returned through `count`.
    HcclResult ret;
    u64 totalSize = 0;
    u32 dataTypeSize = 0;

    CHK_RET(SalGetDataTypeSize(dataType, dataTypeSize));
    CHK_PRT_RET(dataTypeSize == 0, HCCL_ERROR("[Get][CountFromOpDesc]dataType size is zero."), HCCL_E_PARA);

    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_RECEIVE) {
        // HcomReceive's size comes from its shape attribute, not input tensors.
        ret = GetHcomReceiveOpOutputSize(op, dataTypeSize, totalSize);
        CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[Get][Count]get op[%s] output size failed. ret[%d]",
            sCollectiveType.c_str(), ret), ret);
    } else {
        // All other ops: accumulate over every input tensor.
        for (u64 i  = 0; i < op->GetInputsSize(); i++) {
            u64 blockSize;
            CHK_SMART_PTR_NULL(op->GetInputDescPtr(i));
            if (sCollectiveType == HCCL_KERNEL_OP_TYPE_REDUCESCATTER) {
                s32 rankSize = 0;
                // For ReduceScatter, count is the number of OUTPUT elements:
                // count = (input size / rank_size) / dataTypeSize.
                CHK_PRT_RET((!ge::AttrUtils::GetInt(op, HCOM_ATTR_RANK_SIZE, rankSize)),
                    HCCL_ERROR("[Get][Count]op[%s] get attr[%s] failed.", sCollectiveType.c_str(),
                        HCOM_ATTR_RANK_SIZE.c_str()), HCCL_E_PARA),
                CHK_PRT_RET((rankSize <= 0), HCCL_ERROR("[Get][Count]errNo[0x%016llx] in reducescatter op,"
                    "rank_size[%d] should be greater than 0.", HCOM_ERROR_CODE(HCCL_E_PARA), rankSize), HCCL_E_PARA);
                u64 shapeSize = 0;
                if ((u64)op->GetInputDescPtr(i)->GetShape().IsScalar()) {
                    shapeSize = 1;
                } else {
                    shapeSize = (u64)op->GetInputDescPtr(i)->GetShape().GetShapeSize();
                }
                CHK_PRT_RET((shapeSize > INVALID_U64 / dataTypeSize), HCCL_ERROR("[Get][Count]op[%s] shape size[%llu]"
                    "is overflow.", sCollectiveType.c_str(), shapeSize), HCCL_E_PARA);
                // Reduce-scatter fusion: each input tensor of the fused op is padded.
                // MindSpore padding rule: (size + 32 - 1 + 512) / 512 * 512.
                // Therefore reserve an extra 1024 bytes of workspace per input here.
                const u32 paddingLen = 1024; // extra 1024 bytes of workspace per input
                blockSize = (shapeSize * dataTypeSize + paddingLen) / rankSize;
            } else {
                const u32 alignSize = 512; // align to a 512-byte boundary
                int64_t inputSize = 0;
                CHK_PRT_RET((ge::GRAPH_SUCCESS != ge::TensorUtils::GetSize(*op->GetInputDescPtr(i), inputSize)), \
                    HCCL_ERROR("[Get][Count]errNo[0x%016llx] get workspace bytes failed. get size from TensorDesc"
                        "failed, op : %s"\
                        ", input index : %llu", HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str(), i), HCCL_E_PARA);
                CHK_PRT_RET(((u64)inputSize > INVALID_U64 - alignSize), HCCL_ERROR("[Get][Count]op[%s] input"
                    "size[%llu] is overflow.", sCollectiveType.c_str(), (u64)inputSize), HCCL_E_PARA);
                blockSize = ((u64)inputSize + alignSize - 1) / alignSize * alignSize;
            }
            totalSize = totalSize + blockSize;
        }
    }
    count = totalSize / dataTypeSize;
    HCCL_INFO("op[%s] get count[%llu] success.", sCollectiveType.c_str(), count);
    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::GetCommFromOpDesc(const ge::OpDescPtr &op, int64_t hcomComm, std::string &sGroup)
{
    // Resolve which communicator the op targets: prefer the "comm" attribute;
    // when it is absent, or holds the default value, fall back to the group
    // name read into sGroup.
    //
    // NOTE(review): hcomComm is taken BY VALUE, so the value written by
    // GetInt(op, "comm", hcomComm) below never propagates back to the caller.
    // GetOpWorkspaceMemSize passes hcomComm here and then branches on it,
    // which means it always sees the caller-side default. This looks like the
    // parameter should be `int64_t &hcomComm` — confirm against the header
    // declaration before changing.
    if (ge::AttrUtils::HasAttr(op, "comm")) {
        if (ge::AttrUtils::GetInt(op, "comm", hcomComm) == false) {
            HCCL_ERROR("[GetComm][OpDesc]errNo[0x%016llx]: get comm failed. get \"comm\" from opDesc failed", \
                HCOM_ERROR_CODE(HCCL_E_PARA));
            return HCCL_E_PARA;
        } else if (hcomComm == static_cast<int64_t>(CommNumHcom::COMM_VALUE_DEFAULT)) {
            // Default comm value: the op is addressed by group name instead.
            HCCL_INFO("[HcclCommGraph][Type]get comm equal to 0, should get group.");
            CHK_RET(HcomOpUtils::GetGroupFromOpDesc(op, sGroup));
        } else {
            HCCL_INFO("[HcclCommGraph][Type]get comm name[%lld] success.", hcomComm);
        }
    } else {
        // No "comm" attribute at all: group name is the only identifier.
        CHK_RET(HcomOpUtils::GetGroupFromOpDesc(op, sGroup));
    }
    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::SetOpOutputMemSize(ge::Node& node, const std::string &sCollectiveType)
{
    // Recompute the memory size of every output tensor according to HCCL's
    // alignment rule and write the result back into the op description.
    ge::OpDescPtr opDesc = node.GetOpDesc();
    for (u32 idx = 0; idx < opDesc->GetOutputsSize(); idx++) {
        ge::GeTensorDesc outTensor = opDesc->GetOutputDesc(idx);
        ge::GeShape outShape = outTensor.GetShape();
        ge::Format outFormat = outTensor.GetFormat();
        ge::DataType outDataType = outTensor.GetDataType();

        // Ask GE for the raw tensor memory size.
        int64_t memSize = 0;
        if (ge::TensorUtils::CalcTensorMemSize(outShape, outFormat, outDataType, memSize) != ge::GRAPH_SUCCESS) {
            HCCL_ERROR("[Set][OpOutputMemSize]In get output mem size, error outputSize because no"
                "know shape, Format[%d], dataType[%d], outputSize[%lld], index[%u]", outFormat,
                outDataType, memSize, idx);
            return HCCL_E_PARA;
        }

        // A size of -1 signals an invalid/unknown shape.
        if (memSize == -1) {
            HCCL_ERROR("[Set][OpOutputMemSize]In get output mem size, error outputSize because unknow shape,"
                "Format[%d], dataType[%d], outputSize[%lld], index[%u]", outFormat,
                outDataType, memSize, idx);
            return HCCL_E_PARA;
        }

        // Re-derive the size with the HCCL-specific alignment rule.
        CHK_RET(CalcHCCLOutputMemSize(sCollectiveType, memSize));

        // Hand the adjusted size back to the framework.
        ge::TensorUtils::SetSize(outTensor, memSize);

        // Persist the updated output tensor descriptor.
        if (opDesc->UpdateOutputDesc(idx, outTensor)) {
            HCCL_ERROR("[Set][OpOutputMemSize]In get output mem size, update output desc error,"
                "Format[%d], dataType[%d], outputSize[%lld], index[%u]", outFormat, outDataType, memSize, idx);
            return HCCL_E_PARA;
        }
        HCCL_INFO("In set output MemSize, sCollectiveType[%s], opMemSize[%lld]", sCollectiveType.c_str(),
            memSize);
    }
    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::CalcHCCLOutputMemSize(const std::string &sCollectiveType, int64_t& memSize)
{
    // GE requires the size to be padded by one extra alignment unit and then
    // rounded up to the alignment boundary: out = (in + 2 * 32 - 1) / 32 * 32.
    // NOTE(review): the original comment speaks of "32KB" but the constant is
    // 32 — presumably the unit is KB elsewhere; confirm before relying on it.
    const u32 padUnits = 2;
    const u32 alignUnit = 32;
    memSize = (memSize + padUnits * alignUnit - 1) / alignUnit * alignUnit;
    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::SetOpMemAttr(ge::Node& node, const std::string &sCollectiveType, const u64 &opMemSize)
{
    // Configure memory-related node attributes: output-reuses-input for
    // broadcast, and P2P-DDR memory types for 310P form factors when needed.
    bool bRet =  false;

    // ATTENTION: when an op's IR definition gives an input and an output the
    //   same name (see HcomRemoteRefRead), the reference attribute is set to
    //   TRUE implicitly. The explicit reuse configuration below is only for
    //   ops whose IR input/output names differ but must still share memory.
    //   New ops of this kind should preferably use identical input/output
    //   names in their IR definitions instead.
    // Broadcast uses the same buffer for input and output, so Ref is true.
    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_BROADCAST) {
        bRet = ge::AttrUtils::SetBool(node.GetOpDesc(), ge::ATTR_NAME_REFERENCE, true);
        CHK_PRT_RET(!bRet, HCCL_ERROR("[Set][OpMemAttr]errNo[0x%016llx] op[%s]: set  reference attr[%d] to OpDesc"
            "failed.", HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str(), true), HCCL_E_PARA);
        bRet = node.GetOpDesc()->UpdateOutputName(node.GetOpDesc()->GetAllInputName());
        CHK_PRT_RET(!bRet, HCCL_ERROR("[Set][OpMemAttr]errNo[0x%016llx] op[%s]: update output name failed.", \
            HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str()), HCCL_E_PARA);
        HCCL_INFO("node[%s] set attr [reference]: %u", node.GetName().c_str(), true);

        // Once the op is marked reference, let each output reuse the input
        // buffer with the same index to reduce GE memory allocation.
        for (uint32_t i = 0; i < static_cast<uint32_t>(node.GetOpDesc()->GetOutputsSize()); i++) {
            auto outDescPtr = node.GetOpDesc()->MutableOutputDesc(i);
            CHK_SMART_PTR_NULL(outDescPtr);
            ge::TensorUtils::SetReuseInput(*outDescPtr, true);
            ge::TensorUtils::SetReuseInputIndex(*outDescPtr, i);
        }
    } else {
        HCCL_INFO("node[%s] set attr [reference]: skip", node.GetName().c_str());
    }
    u32 heterogeneousFlag;
    CHK_RET(hrtGetIsHeterogenous(heterogeneousFlag));
    if (heterogeneousFlag == 0) {
        // If a rank mapping is available, take the offline-compilation path to
        // determine the device type.
        DevType devType;
        string rankMappingString;
        s32 groupSize = 0;
        std::vector<u32> groupRanks;
        const u32 NUM_SIZE_TWO = 2;
        std::string sGroup;
        bool isHcomInit = false;
        CHK_RET(HcomOpUtils::GetGroupFromOpDesc(node.GetOpDesc(), sGroup));
        bool withRemoteOp = (sCollectiveType == HCCL_KERNEL_OP_TYPE_REMOTE_LOOKUP ||
            sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_UPDATE ||
            sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP ||
            sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_UPDATE_PAIRED ||
            sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_PAIRED ||
            sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_UNIQUED_PAIRED);
        bool withoutImplCompile = IsOfflineCompilation() || withRemoteOp ||
            (ge::GetThreadLocalContext().GetOption(ge::OPTION_EXEC_HCOM_GROUPLIST, rankMappingString) ==
            ge::GRAPH_SUCCESS) ||
            ((HcomGetbackloggedByGroup(sGroup.c_str(), groupRanks, groupSize) == HCCL_SUCCESS) && (groupSize != 0));
        if (IsOfflineCompilation()) {
            CHK_RET(GetOffDeviceTypeWithoutDev(devType));
            if (devType == DevType::DEV_TYPE_310P3 || devType == DevType::DEV_TYPE_310P1) {
                // Offline compilation + heterogeneous scenario: no need to set
                // ATTR_NAME_INPUT_MEM_TYPE_LIST.
                HCCL_INFO("cur is offline heterogeneous");
                return HCCL_SUCCESS;
            }
        } else if (ge::GetThreadLocalContext().GetOption(ge::OPTION_EXEC_HCOM_RANK_MAPPING, rankMappingString) ==
            ge::GRAPH_SUCCESS) {
            CHK_RET(GetOffDeviceTypeWithoutDev(devType));
        } else {
            CHK_RET(GetHcclCommInitStatus(isHcomInit));
            if (isHcomInit || withoutImplCompile) {
                devType = HcomGetDeviceType();
            } else {
                CHK_RET(GetDeviceType(sGroup.c_str(), devType));
            }
        }
        if (devType == DevType::DEV_TYPE_310P3 || devType == DevType::DEV_TYPE_310P1) {
            u32 numHccsLink = 0;
            u32 rankSize = 0;
            if (!withoutImplCompile) {
                CHK_RET(GetRankSize(sGroup.c_str(), &rankSize));
                CHK_RET(GetHccsLinkNum(sGroup.c_str(), numHccsLink));
            }
            // Only when HELPER_RES_FILE_PATH is set is this the 1951 SoC form;
            // its presence distinguishes SoC from the standalone board form.
            // (SalGetEnv presumably returns "EmptyString" when the variable is
            // unset, so isRemoteBoard means "env NOT set AND remote op" — confirm.)
            bool isRemoteBoard = (SalGetEnv("HELPER_RES_FILE_PATH") == "EmptyString") && withRemoteOp;
            HCCL_INFO("[Set][OpMemAttr]: rankSize is [%u], numHccsLink is [%u] isRemoteBoard[%u]",
                rankSize, numHccsLink, isRemoteBoard);
            // For the 310P duo-card 2P case ordinary memory is used and nothing
            // needs to be set; every other case must request P2P memory.
            // Board-form inference does not request P2P memory either.
            if ((withoutImplCompile || !(rankSize == NUM_SIZE_TWO  && numHccsLink == NUM_SIZE_TWO)) && !isRemoteBoard) {
                vector<int64_t> memTypeInput(node.GetOpDesc()->GetInputsSize(), RT_MEMORY_P2P_DDR);
                vector<int64_t> memTypeOutput(node.GetOpDesc()->GetOutputsSize(), RT_MEMORY_P2P_DDR);
                vector<int64_t> memTypeWorkSpace(1, RT_MEMORY_P2P_DDR);
                bool ret = ge::AttrUtils::SetListInt(node.GetOpDesc(), ge::ATTR_NAME_INPUT_MEM_TYPE_LIST, memTypeInput);
                CHK_PRT_RET(!ret,
                    HCCL_ERROR("[Set][OpMemAttr]errNo[0x%016llx]: Set input mem addr failed. op[%s]",
                    HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str()), HCCL_E_PARA);

                ret = ge::AttrUtils::SetListInt(node.GetOpDesc(), ge::ATTR_NAME_OUTPUT_MEM_TYPE_LIST, memTypeOutput);
                CHK_PRT_RET(!ret,
                    HCCL_ERROR("[Set][OpMemAttr]errNo[0x%016llx]: Set output mem addr failed. op[%s]",
                    HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str()), HCCL_E_PARA);

                if (opMemSize != 0) {
                    ret = ge::AttrUtils::SetListInt(node.GetOpDesc(), ge::ATTR_NAME_WORKSPACE_TYPE_LIST,
                        memTypeWorkSpace);
                    CHK_PRT_RET(!ret,
                        HCCL_ERROR("[Set][OpMemAttr]errNo[0x%016llx]: Set workspace mem addr failed. op[%s]",
                        HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str()), HCCL_E_PARA);
                }
                HCCL_INFO("[Set][OpMemAttr] Set memType RT_MEMORY_P2P_DDR");
            }
        }
    } else {
        // Helper (heterogeneous) scenario.
        // GE does not distinguish DDR/HBM; in practice 310 uses DDR, 910 uses HBM.
    }

    return HCCL_SUCCESS;
}
HcclResult HcomGraphOptimizer::SetOpAtomicInputIndex(ge::Node& node, const std::string &sCollectiveType)
{
    // Only allreduce and reduce carry the atomic-input-index attribute.
    const bool needAtomicAttr = (sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLREDUCE) ||
        (sCollectiveType == HCCL_KERNEL_OP_TYPE_REDUCE);
    if (!needAtomicAttr) {
        return HCCL_SUCCESS;
    }

    // A single -1 entry acts as a marker value for the framework.
    vector<int64_t> atomicInputIndex{-1};
    bool attrSet = ge::AttrUtils::SetListInt(node.GetOpDesc(), "atomic_input_index", atomicInputIndex);
    if (!attrSet) {
        HCCL_ERROR("[Set][OpAtomicInputIndex]errNo[0x%016llx]: set op[%s] atomic index failed.",
            HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str());
        return HCCL_E_PARA;
    }
    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::CalcOpRunningParam(ge::Node& node, bool uknownShapeGraph)
{
    // Compute the op's running parameters (stream count, workspace, output
    // sizes) under the OPS_KERNEL_INFO_LIB workflow mode.
    //
    // The previous workflow mode is restored via a scope guard on EVERY return
    // path. The original code only restored it on the success paths, so any
    // failure inside the CHK_* macros below leaked the overridden mode.
    struct WorkflowModeGuard {
        HcclWorkflowMode savedMode;
        explicit WorkflowModeGuard(HcclWorkflowMode mode) : savedMode(mode) {}
        ~WorkflowModeGuard()
        {
            SetWorkflowMode(savedMode);
        }
    };
    WorkflowModeGuard modeGuard(GetWorkflowMode());
    SetWorkflowMode(HcclWorkflowMode::HCCL_WORKFLOW_MODE_OPS_KERNEL_INFO_LIB);

    CHK_PRT_RET(!node.GetOpDesc(), HCCL_ERROR("[Calc][OpRunningParam]errNo[0x%016llx] GetOpDesc failed. null ptr.", \
        HCOM_ERROR_CODE(HCCL_E_PTR)), HCCL_E_PTR);
    CHK_RET(MarkRemoteAccessMemoryType(node));

    // Unknown-shape nodes are sized at execution time; nothing to compute here.
    bool unknownShapeNode = false;
    CHK_PRT_RET((ge::NodeUtils::GetNodeUnknownShapeStatus(node, unknownShapeNode) != ge::GRAPH_SUCCESS),
        HCCL_ERROR("[Calc][OpRunningParam]node[%s] get node unknown status failed", node.GetName().c_str()),
        HCCL_E_PARA);
    if (unknownShapeNode) {
        HCCL_INFO("node:%s is unknown shape, does not need to Calc Op Running Param", node.GetName().c_str());
        return HCCL_SUCCESS;
    }

    CHK_RET(HcomCalcOpRunningParam(node, uknownShapeGraph));
    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::GetHcomReceiveOpOutputSize(const ge::OpDescPtr &op, u32 dataTypeSize, \
    u64 &outputSize)
{
    // Compute the 512-byte-aligned output size of an HcomReceive op from its
    // shape attribute; dataTypeSize is the byte width of one element.
    CHK_PRT_RET(dataTypeSize == 0, HCCL_ERROR("[Receive][OpOutputSize]dataType size is zero."), HCCL_E_PARA);

    std::string sCollectiveType = op->GetType();
    CHK_PRT_RET((!ge::AttrUtils::HasAttr(op, HCOM_ATTR_SHAPE)), \
        HCCL_ERROR("[Receive][OpOutputSize]op[%s] has no attr[%s].", sCollectiveType.c_str(),
            HCOM_ATTR_SHAPE.c_str()), HCCL_E_PARA);

    vector<int64_t> shapeDims;
    CHK_PRT_RET((!ge::AttrUtils::GetListInt(op, HCOM_ATTR_SHAPE, shapeDims)), \
        HCCL_ERROR("[Receive][OpOutputSize]op[%s] get attr[%s] failed.", sCollectiveType.c_str(),
            HCOM_ATTR_SHAPE.c_str()), HCCL_E_PARA);

    // An empty dim list means the tensor is a scalar: one element.
    const u64 elemCount = shapeDims.empty() ? 1 : static_cast<u64>(ge::Shape(shapeDims).GetShapeSize());

    const u32 alignSize = 512; // round up to a 512-byte boundary
    CHK_PRT_RET((elemCount > (INVALID_U64 - alignSize) / dataTypeSize), \
        HCCL_ERROR("[Receive][OpOutputSize]op[%s] shape size[%llu] is overflow.", sCollectiveType.c_str(),
            elemCount), HCCL_E_PARA);
    outputSize = (static_cast<u64>(elemCount * dataTypeSize) + alignSize - 1) / alignSize * alignSize;
    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::CalAndSetOpWorkerSpaceForKnowShape(ge::Node& node, const std::string &sCollectiveType,
    u64 &opMemSize)
{
    // Only known-shape graphs get a precomputed workspace; unknown-shape
    // graphs are sized at execution time, so skip them.
    u32 shapeType = ORIGINAL_GRAPH_KNOWNSHAPE_TYPE;
    CHK_RET(GetOriginalGraphShapeTypeFromDesc(node.GetOpDesc(), shapeType));
    if (shapeType != ORIGINAL_GRAPH_KNOWNSHAPE_TYPE) {
        return HCCL_SUCCESS;
    }

    // Query the workspace requirement and record it on the op description.
    HcclResult wsRet = GetOpWorkspaceMemSize(node, sCollectiveType, opMemSize);
    CHK_PRT_RET(wsRet != HCCL_SUCCESS,
        HCCL_ERROR("[Calc][OpRunningParam]errNo[0x%016llx] get op[%s] workspace size failed.",
            HCOM_ERROR_CODE(wsRet),  sCollectiveType.c_str()), HCCL_E_INTERNAL);

    std::vector<int64_t> workspaceBytes{static_cast<int64_t>(opMemSize)};
    node.GetOpDesc()->SetWorkspaceBytes(workspaceBytes);
    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::HcomCalcOpRunningParam(ge::Node& node, bool uknownShapeGraph)
{
    // Compute and record the op's running parameters on its OpDesc:
    // "used_stream_num", workspace bytes, output sizes, the atomic input index
    // attribute and memory attributes. For dynamic graphs, additionally checks
    // whether the estimated task count forces unknown-shape execution.
    // Values reported back to the framework.
    u64 streamNum = 0;
    u64 opMemSize = 0;
    HcclResult ret;
    HCCL_INFO("calculate hccl runing parameters start.");

    std::string sCollectiveType = node.GetOpDesc()->GetType();
    std::string nodeName = node.GetName();
    // Broadcast/gather nodes tagged _NO_CALCULATION skip the full computation:
    // zero streams, workspace only.
    if ((sCollectiveType == HCCL_KERNEL_OP_TYPE_BROADCAST || sCollectiveType == HCCL_KERNEL_OP_TYPE_GATHER) &&
        nodeName.find(NO_CALCULATION) != std::string::npos) {
        if (ge::AttrUtils::SetInt(node.GetOpDesc(), "used_stream_num", streamNum) == false) {
            HCCL_ERROR("[Calc][OpRunningParam]errNo[0x%016llx] op[%s]: set stream number[%llu] to OpDesc failed.",
                    HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str(), streamNum);
            return HCCL_E_INTERNAL;
        }
        CHK_RET(CalAndSetOpWorkerSpaceForKnowShape(node, sCollectiveType, opMemSize));
        HCCL_INFO("node[%s] no need calculate hccl runing parameters. stream num:[%llu], workspace size:[%llu]bytes",
            nodeName.c_str(), streamNum, opMemSize);
        return HCCL_SUCCESS;
    }

    ret = CheckSupportedOP(sCollectiveType);
    CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[Calc][OpRunningParam]errNo[0x%016llx] op type[%s] is not supported.",
        HCOM_ERROR_CODE(ret), sCollectiveType.c_str()), HCCL_E_NOT_SUPPORT);

    std::string groupListString;
    std::string curGroup;
    CHK_RET(HcomOpUtils::GetGroupFromOpDesc(node.GetOpDesc(), curGroup));

    // Get the op's communication data size, used to decide whether
    // inter-server pipelining is supported.
    auto iter = HCCL_OPTYPE_NAME_MAP.find(sCollectiveType);
    HcclCMDType opType = (iter != HCCL_OPTYPE_NAME_MAP.end()) ? iter->second : HcclCMDType::HCCL_CMD_INVALID;
    u64 opDataSize = 0;
    CHK_RET(GetOpDataSize(node, opDataSize));

    // Determine and record the slave-stream count.
    // When a group list is available, take the offline-compilation path to
    // obtain the slave-stream count.
    if (IsOfflineCompilation()) {
        CHK_RET(GetStreamNumOfflineComp(sCollectiveType, curGroup, streamNum));
    } else if (ge::GetThreadLocalContext().GetOption(ge::OPTION_EXEC_HCOM_GROUPLIST,
        groupListString) == ge::GRAPH_SUCCESS) {
        s32 deviceNumPerServer = 0;
        s32 serverNum = 0;
        bool multiModuleDiffDeviceNumMode = false;
        CHK_RET(GetDeviceAndServerNumFromGroupList(groupListString, curGroup, serverNum, deviceNumPerServer,
            multiModuleDiffDeviceNumMode));
        CHK_RET(GetSubStreamNum(deviceNumPerServer, streamNum, serverNum));
    } else if (ge::AttrUtils::HasAttr(node.GetOpDesc(), "comm")) {
        int64_t hcomComm = 0;
        bool bRet = ge::AttrUtils::GetInt(node.GetOpDesc(), "comm", hcomComm);
        CHK_PRT_RET(!bRet, HCCL_ERROR("errNo[0x%016llx] get attr \"comm\" failed. ", HCOM_ERROR_CODE(HCCL_E_PARA)),\
            HCCL_E_PARA);
        if (hcomComm != static_cast<int64_t>(CommNumHcom::COMM_VALUE_DEFAULT)) {
            CHK_RET(HcclCommGraphGetWorkspaceSubStreamNum(hcomComm, streamNum, opDataSize, opType));
            // NOTE(review): hcomComm is int64_t but is logged with %d — consider %lld.
            HCCL_INFO("[HcomCalcOpRunningParam][GetComm][%d]", hcomComm);
        } else {
            CHK_RET(HcomGetWorkspaceSubStreamNum(curGroup.c_str(), streamNum, opDataSize, opType));
        }
    } else {
        CHK_RET(HcomGetWorkspaceSubStreamNum(curGroup.c_str(), streamNum, opDataSize, opType));
    }

    // Embedding service: pipelined lookup execution needs 3 slave streams
    // once max_num exceeds the pipeline threshold.
    if (sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP ||
        sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_UNIQUED_PAIRED ||
        sCollectiveType == HCCL_KERNEL_OP_TYPE_COLL_REMOTE_LOOKUP_PAIRED) {
        s32 maxNum = 0;
        CHK_RET(GetOpIntAttr(node.GetOpDesc(), "max_num", maxNum));
        if (maxNum > ES_PIPELINE_THRESHOLD) {
            streamNum = HCCL_SUB_STREAM_ES_LOOKUP;
        }
    } else if (sCollectiveType == HCCL_KERNEL_OP_TYPE_SEND ||
               sCollectiveType == HCCL_KERNEL_OP_TYPE_RECEIVE) {
        // send/recv ops need no slave streams.
        streamNum = 0;
    }

    if (ge::AttrUtils::SetInt(node.GetOpDesc(), "used_stream_num", streamNum) == false) {
        HCCL_ERROR("[Calc][OpRunningParam]errNo[0x%016llx] op[%s]: set stream number[%llu] to OpDesc failed.",
                   HCOM_ERROR_CODE(HCCL_E_PARA), sCollectiveType.c_str(), streamNum);
        return HCCL_E_INTERNAL;
    }
    HCCL_INFO("[Calc][OpRunningParam]set ge attribute \"used_stream_num\"[%llu].", streamNum);

    CHK_RET(CalAndSetOpWorkerSpaceForKnowShape(node, sCollectiveType, opMemSize));
    ret = SetOpMemAttr(node, node.GetOpDesc()->GetType(), opMemSize);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
                HCCL_ERROR("[Calc][OpRunningParam]errNo[0x%016llx] set node[%s] mem attr failed.",
                    HCOM_ERROR_CODE(ret), node.GetName().c_str()),
                HCCL_E_INTERNAL);

    // Set the output sizes.
    ret = SetOpOutputMemSize(node, sCollectiveType);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Calc][OpRunningParam]errNo[0x%016llx] set op[%s] output size failed.",
            HCOM_ERROR_CODE(ret),  sCollectiveType.c_str()), HCCL_E_INTERNAL);

    // Set the atomic input index attribute.
    ret = SetOpAtomicInputIndex(node, sCollectiveType);
    CHK_PRT_RET(ret != HCCL_SUCCESS,
        HCCL_ERROR("[Calc][OpRunningParam]errNo[0x%016llx] set op[%s] atomic input index failed.",
            HCOM_ERROR_CODE(ret),  sCollectiveType.c_str()), HCCL_E_INTERNAL);

    HCCL_INFO("node[%s] calcute hccl runing parameters completed. stream num:[%llu], workspace size:[%llu]bytes",
        node.GetName().c_str(), streamNum, opMemSize);

    // Ops whose task count is estimated for the force-unknown check.
    bool getTaskNumFlag = (sCollectiveType == HCCL_KERNEL_OP_TYPE_SEND ||\
                           sCollectiveType == HCCL_KERNEL_OP_TYPE_RECEIVE ||\
                           sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLREDUCE ||\
                           sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLGATHER ||\
                           sCollectiveType == HCCL_KERNEL_OP_TYPE_REDUCESCATTER ||\
                           sCollectiveType == HCCL_KERNEL_OP_TYPE_BROADCAST ||\
                           sCollectiveType == HCCL_KERNEL_OP_TYPE_REDUCE ||\
                           sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALL ||\
                           sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALLV ||\
                           sCollectiveType == HCCL_KERNEL_OP_TYPE_ALLTOALLVC) ? true : false;

    if (uknownShapeGraph && getTaskNumFlag) { // dynamic graph + collective op (incl. send/recv)
        CHK_RET(GetTaskNumAndCheckForceUnknown(node, sCollectiveType, streamNum));
    }

    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::GetTaskNumAndCheckForceUnknown(const ge::Node &node, const std::string &sCollectiveType,
    const u64 &streamNum)
{
    // Estimate the task count this op will launch, then decide whether the
    // node must be forced to unknown-shape (single-op) execution.
    HCCL_DEBUG("[HcomGraphOptimizer] Entry-GetTaskNum sCollectiveType[%s] streamNum[%llu]",
        sCollectiveType.c_str(), streamNum);

    u32 masterTaskNum = 0;
    u32 slaveTaskNum = 0;
    u32 piplineTaskNum = 0;
    u32 taskNum = 0;
    if (HcomOpUtils::IsNeedCalTaskNum(sCollectiveType)) {
        CHK_RET(CalTaskNum(node, sCollectiveType, streamNum, masterTaskNum, slaveTaskNum, piplineTaskNum, taskNum));
    } else {
        // Ops without a dedicated estimator fall back to fixed defaults.
        taskNum = (sCollectiveType == HCCL_KERNEL_OP_TYPE_SEND || sCollectiveType == HCCL_KERNEL_OP_TYPE_RECEIVE)
            ? SEND_RECEIVE_TASK_NUM : OP_DEFAULT_TASK_NUM;
    }

    // When the estimator reported per-stream counts only, take the largest.
    if (taskNum == 0) {
        taskNum = std::max({masterTaskNum, slaveTaskNum, piplineTaskNum});
    }

    CHK_RET(CheckForceUnknown(node, taskNum));

    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::CalTaskNum(const ge::Node &node, const std::string &sCollectiveType,
    const u64 &streamNum, u32 &masterTaskNum, u32 &slaveTaskNum, u32 &piplineTaskNum, u32 &taskNum)
{
    // Estimate how many tasks the op launches on the master stream, the slave
    // streams and the inter-server pipeline. `taskNum` is set directly when no
    // topology information is available; otherwise the per-role counters are
    // accumulated and the caller reduces them.
    s32 deviceNumPerServer = 0;
    s32 serverNum = 0;
    bool multiModuleDiffDeviceNumMode = false;
    AlgType algType;

    // Query the device/server topology with the global "task-num calculation"
    // mode switched on. Capture the result instead of returning from inside
    // the critical section so the mode flag is ALWAYS switched back off —
    // the previous implementation returned via CHK_RET before
    // SetTaskNumCalMode(false) when the query failed, leaking the mode.
    HcclResult topoRet;
    {
        std::lock_guard<std::mutex> calModeLock(g_setTaskNumCalModeLock);
        SetTaskNumCalMode(true);
        topoRet = HcomOpUtils::GetDeviceAndServerNum(node, deviceNumPerServer, serverNum,
            multiModuleDiffDeviceNumMode);
        SetTaskNumCalMode(false);
    }
    CHK_RET(topoRet);

    // Map the op type string to a command type, then resolve the algorithm.
    auto iter = HCCL_OPTYPE_NAME_MAP.find(sCollectiveType);
    HcclCMDType opType = (iter != HCCL_OPTYPE_NAME_MAP.end()) ? iter->second : HcclCMDType::HCCL_CMD_INVALID;
    auto cmdStrIter = HCOM_CMD_TYPE_STR_MAP.find(opType);
    std::string opTypeStr = (cmdStrIter != HCOM_CMD_TYPE_STR_MAP.end()) ? cmdStrIter->second : "invalid";
    CHK_RET(HcomOpUtils::GetAlgType(node, deviceNumPerServer, serverNum, opTypeStr, algType));

    // Online compilation without a ranktable file: return the default count.
    if ((deviceNumPerServer == 0) && (serverNum == 0)) {
        taskNum = OP_DEFAULT_TASK_NUM;
    } else {
        // Number of inter-server pipeline slices.
        u64 opDataSize = 0;
        CHK_RET(GetOpDataSize(node, opDataSize));

        DevType devType;
        CHK_RET(GetOffDeviceTypeWithoutDev(devType));
        u64 piplineSliceNum = CalculatePiplineSliceNum(opType, opDataSize, algType,
            devType, deviceNumPerServer, serverNum);

        // Tasks for tensor zeroing.
        CHK_RET(HcomOpUtils::GetTensorCleanTaskNum(node, sCollectiveType, masterTaskNum));
        // Tasks for DFX verification.
        CHK_RET(HcomOpUtils::GetDfxTaskNum(sCollectiveType, masterTaskNum));
        // Master-stream tasks synchronizing with the slave streams.
        CHK_RET(HcomOpUtils::GetToSlaveStreamTaskNum(sCollectiveType, streamNum, piplineSliceNum, masterTaskNum));
        // Slave-stream tasks synchronizing with the master stream.
        CHK_RET(HcomOpUtils::GetToMasterStreamTaskNum(sCollectiveType, slaveTaskNum));
        // Inter-server pipeline sync tasks between slave and master streams.
        piplineTaskNum += (piplineSliceNum >= MIN_PIPLINE_SLICE_NUM) ?
            piplineSliceNum * PIPLINE_STREAM_EVENT_NUM * COM_STEP_NUM : 0;

        u32 intraTaskNum = 0;
        u32 interTaskNum = 0;
        // The intra-server task count depends on the payload size, so derive
        // the element count and data-type size first.
        u64 count = 0;
        u64 totalSize = 0;
        u32 dataTypeSize;
        HcclDataType dataType = HCCL_DATA_TYPE_RESERVED;
        HcclResult ret = HcomOpUtils::ConversionOpDataType(node.GetOpDesc(), sCollectiveType, dataType);
        CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[Get][OpWorkspaceMemSize]op[%s]: get data type failed. ret[%d]",
            sCollectiveType.c_str(), ret), ret);

        ret = GetCountFromOpDesc(node.GetOpDesc(), sCollectiveType, dataType, count);
        CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[Get][OpWorkspaceMemSize]op[%s]: get count failed. ret[%d]",
            sCollectiveType.c_str(), ret), ret);

        ret = SalGetDataTypeSize(dataType, dataTypeSize);
        CHK_PRT_RET(ret != HCCL_SUCCESS, HCCL_ERROR("[Get][OpWorkspaceMemSize]op[%s]: get data size failed. ret[%d]",
            sCollectiveType.c_str(), ret), ret);

        if (multiModuleDiffDeviceNumMode) {
            // Flattened-topology communication task count.
            CHK_RET(HcomOpUtils::GetCombineComTaskNum(sCollectiveType, serverNum, deviceNumPerServer, intraTaskNum,
                interTaskNum));
        } else {
            totalSize = count * dataTypeSize;
            CHK_RET(HcomOpUtils::GetIntraComTaskNum(sCollectiveType, deviceNumPerServer, streamNum,
                algType, intraTaskNum, totalSize));
            // Inter-server communication tasks; slave streams have none.
            CHK_RET(HcomOpUtils::GetInterComTaskNum(sCollectiveType, serverNum, deviceNumPerServer, devType,
                interTaskNum));
        }

        // Total communication tasks per stream role.
        if (piplineSliceNum >= MIN_PIPLINE_SLICE_NUM) {
            masterTaskNum += intraTaskNum * piplineSliceNum;
            slaveTaskNum += intraTaskNum * piplineSliceNum;
            piplineTaskNum += interTaskNum * piplineSliceNum;
        } else {
            masterTaskNum += intraTaskNum + interTaskNum;
            slaveTaskNum += intraTaskNum;
        }
    }
    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::CheckForceUnknown(const ge::Node &node, u32 &taskNum)
{
    /* For known-shape collective ops under dynamic shape, compare the op's tensor memory
     * against the CCL buffer; if the resulting task count exceeds the device limit, mark
     * the op unknown-shape so GE launches it in single-op mode. */
    uint64_t memSize = 0;
    // AllGather / Receive / Gather are sized by their outputs; other collectives by inputs.
    if (node.GetOpDesc()->GetType() == HCCL_KERNEL_OP_TYPE_ALLGATHER ||
        node.GetOpDesc()->GetType() == HCCL_KERNEL_OP_TYPE_RECEIVE ||
        node.GetOpDesc()->GetType() == HCCL_KERNEL_OP_TYPE_GATHER) {
        CHK_RET(HcomOpUtils::GetAllOutputsTensorMemSize(node.GetOpDesc(), memSize));
    } else {
        CHK_RET(HcomOpUtils::GetAllInputsTensorMemSize(node.GetOpDesc(), memSize));
    }

    // Query the available CCL buffer size; guard against zero to avoid division by zero below.
    u64 cclBuffSize = 0;
    CHK_RET(HcomGetCCLBufferAvailableSize(cclBuffSize));
    CHK_PRT_RET(cclBuffSize == 0,
        HCCL_ERROR("[Check][ForceUnknown]errNo[0x%016llx] ccl buffer available size is zero.",
            HCOM_ERROR_CODE(HCCL_E_PARA)), HCCL_E_PARA);

    // When user memory exceeds the CCL buffer, the op loops multiple times (ceil division).
    // Clamp to at least one loop so a zero-sized tensor does not zero out the caller's task count.
    u32 loopTimes = (memSize + cclBuffSize - 1) / cclBuffSize;
    loopTimes = (loopTimes == 0) ? 1 : loopTimes;

    taskNum *= loopTimes;

    // Task-number ceiling depends on the device generation.
    u32 taskMaxNum = TASK_MAX_NUM_DEV_TYPE_V80;
    DevType devType = HcomGetDeviceType();
    if (devType == DevType::DEV_TYPE_910B) {
        taskMaxNum = TASK_MAX_NUM_DEV_TYPE_V71;
    }

    if (taskNum >= taskMaxNum) {
        HCCL_WARNING("[HcomGraphOptimizer][CheckForceUnknown] taskNum >= taskMaxNum set opbase mode");
        bool bRet = ge::AttrUtils::SetBool(node.GetOpDesc(), ge::ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
        CHK_PRT_RET(!bRet,
            HCCL_ERROR("[Set][UnknownShapeAttr]SetBool unkown shap failed, op type[%s]",
                node.GetOpDesc()->GetType().c_str()), HCCL_E_PARA);
    }

    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::MarkRemoteAccessMemoryType(ge::Node& node)
{
    // Tag remote-read outputs / remote-write inputs as RDMA_HBM memory so downstream
    // passes allocate them in RDMA-capable HBM. Other op types pass through untouched.
    CHK_PRT_RET(!node.GetOpDesc(), HCCL_ERROR("[Mark][RemoteAccessMemoryType]errNo[0x%016llx] GetOpDesc"
        "failed. null ptr.", HCOM_ERROR_CODE(HCCL_E_PTR)), HCCL_E_PTR);

    const std::string opType = node.GetOpDesc()->GetType();
    const bool isRemoteRead = (opType == HCCL_KERNEL_OP_TYPE_REMOTE_READ);
    const bool isRemoteWrite = (opType == HCCL_KERNEL_OP_TYPE_REMOTE_WRITE) ||
        (opType == HCCL_KERNEL_OP_TYPE_REMOTE_SCATTER_WRITE);
    if (!isRemoteRead && !isRemoteWrite) {
        return HCCL_SUCCESS;
    }

    // Reads publish data on the output side; writes consume it on the input side.
    const std::string attrName = isRemoteRead ? "_output_memory_type" : "_input_memory_type";
    if (!ge::AttrUtils::SetInt(node.GetOpDesc(), attrName, ge::RDMA_HBM)) {
        HCCL_ERROR("[Mark][RemoteAccessMemoryType]errNo[0x%016llx] op[%s]: set MEMORY TYPE to OpDesc failed.",
            HCOM_ERROR_CODE(HCCL_E_PARA), opType.c_str());
        return HCCL_E_PARA;
    }
    if (isRemoteRead) {
        HCCL_INFO("Mark HcomRemoteRead output memory RDMA_HBM.");
    } else {
        HCCL_INFO("Mark HcomRemoteWrite input memory RDMA_HBM.");
    }
    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::GetOpDataSize(const ge::Node &node, u64 &opDataSize)
{
    // Compute the op's payload size in bytes: element count * element size.
    CHK_PRT_RET(!node.GetOpDesc(), HCCL_ERROR("[Get][OpDataSize]errNo[0x%016llx] GetOpDesc"
        "failed. null ptr.", HCOM_ERROR_CODE(HCCL_E_PTR)), HCCL_E_PTR);

    u64 count = 0;
    HcclDataType dataType = HCCL_DATA_TYPE_RESERVED;
    std::string sCollectiveType = node.GetOpDesc()->GetType();
    CHK_RET(CheckSupportedOP(sCollectiveType));
    CHK_RET(HcomOpUtils::ConversionOpDataType(node.GetOpDesc(), sCollectiveType, dataType));
    CHK_RET(GetCountFromOpDesc(node.GetOpDesc(), sCollectiveType, dataType, count));

    // Use the checked size lookup (consistent with the workspace-size path) instead of
    // indexing SIZE_TABLE directly, which is unchecked against unexpected enum values.
    u32 dataTypeSize = 0;
    CHK_RET(SalGetDataTypeSize(dataType, dataTypeSize));
    opDataSize = count * dataTypeSize;
    return HCCL_SUCCESS;
}

HcclResult HcomGraphOptimizer::GetOriginalGraphShapeTypeFromDesc(const ge::OpDescPtr &op, u32 &shapeType)
{
    // Read the original-graph shape type attribute from the op descriptor; fall back to
    // the known-shape default when the attribute is absent.
    if (ge::AttrUtils::HasAttr(op, ORIGINAL_GRAPH_SHAPE_TYPE)) {
        if (ge::AttrUtils::GetInt(op, ORIGINAL_GRAPH_SHAPE_TYPE, shapeType) == false) {
            // Fixed message: adjacent literals previously concatenated into "fromopDesc".
            HCCL_ERROR("[Get][OriginalGraphShapeType]errNo[0x%016llx]: get shapeType failed. get \"shapeType\" from "
                "opDesc failed", HCOM_ERROR_CODE(HCCL_E_PARA));
            return HCCL_E_PARA;
        }
    } else {
        shapeType = (u32)ORIGINAL_GRAPH_KNOWNSHAPE_TYPE;
    }
    HCCL_INFO("get shapeType [%u] success.", shapeType);
    return HCCL_SUCCESS;
}
}
