/**
 * @file acl_resource_manager.cpp
 *
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2022. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

#include "acl_resource_manager.h"
#include "awatchdog.h"
#include "runtime/dev.h"
#include "runtime/stream.h"
#include "runtime/context.h"
#include "framework/runtime/gert_api.h"
#include "framework/common/profiling_definitions.h"
#include "framework/memory/allocator_desc.h"
#include "framework/executor/ge_executor.h"
#include "types/tensor_desc_internal.h"
#include "types/op_attr.h"
#include "mmpa/mmpa_api.h"
#include "error_codes_inner.h"
#include "common/log_inner.h"
#include "common/common_inner.h"
#include "common/json_parser.h"
#include "single_op/op_model_parser.h"
#include "single_op/shape_range_utils.h"
#include "single_op/executor/stream_executor.h"
#include "single_op/compile/op_compile_service.h"
#include "utils/attr_utils.h"
#include "utils/file_utils.h"
#include "framework/runtime/subscriber/global_profiler.h"
#include "external/ge/ge_allocator.h"
#include "register/stream_manage_func_registry.h"

namespace {
// Process-wide generator of unique op model ids, shared by every load path
// (file scan, memory load, online compile).
std::atomic<std::uint64_t> atomicModelId(0UL);
}
namespace acl {
namespace {
constexpr int32_t OM_FILE_SUFFIX_LEN = 3;  // length of the ".om" file suffix
constexpr int32_t OM_DIR_MAX_DEPTH = 3;    // max directory depth when scanning for .om files
constexpr int32_t DECIMAL = 10;            // numeric base passed to strtol
// json key holding the op-model queue capacity in the config file
const std::string ACL_MAX_OPQUEUE_NUM = "max_opqueue_num";
}

void AclResourceManager::AddBundleInfo(const uint32_t bundleId, const std::vector<BundleInfo> &bundleInfos)
{
    const std::lock_guard<std::mutex> locker(mutex_);
    bundleInfos_[bundleId] = bundleInfos;
    for (const auto &info : bundleInfos) {
        (void)bundleInnerIds_.insert(info.modelId);
    }
}

aclError AclResourceManager::GetBundleInfo(const uint32_t bundleId, std::vector<BundleInfo> &bundleInfos)
{
    // Look up the sub-model list previously registered via AddBundleInfo().
    const std::lock_guard<std::mutex> locker(mutex_);
    const auto found = bundleInfos_.find(bundleId);
    if (found == bundleInfos_.end()) {
        ACL_LOG_ERROR("This model %u is not bundle model, can not get bundle info.", bundleId);
        return ACL_ERROR_INVALID_BUNDLE_MODEL_ID;
    }
    bundleInfos = found->second;
    return ACL_SUCCESS;
}

bool AclResourceManager::IsBundleInnerId(const uint32_t modelId)
{
    // True when the id belongs to a sub-model of some registered bundle.
    const std::lock_guard<std::mutex> locker(mutex_);
    return bundleInnerIds_.find(modelId) != bundleInnerIds_.end();
}

void AclResourceManager::DeleteBundleInfo(const uint32_t bundleId)
{
    // Drop the bundle entry (if any) together with its inner model ids.
    const std::lock_guard<std::mutex> locker(mutex_);
    const auto found = bundleInfos_.find(bundleId);
    if (found == bundleInfos_.end()) {
        return;
    }
    for (const auto &bundleInfo : found->second) {
        (void)bundleInnerIds_.erase(bundleInfo.modelId);
    }
    (void)bundleInfos_.erase(found);
}

void AclResourceManager::AddExecutor(uint32_t &modelId, std::unique_ptr<gert::ModelV2Executor> &&executor,
                                     const std::shared_ptr<gert::RtSession> &rtSession)
{
    // Assign a fresh model id (out-param) and take ownership of the executor
    // together with its runtime session.
    const std::lock_guard<std::mutex> locker(mutex_);
    modelId = ++modelIdGenerator_;
    executorMap_[modelId] = std::move(executor);
    rtSessionMap_[modelId] = rtSession;
}

aclError AclResourceManager::DeleteExecutor(const uint32_t modelId)
{
    // Remove the executor and runtime session registered by AddExecutor().
    // Returns ACL_ERROR_GE_EXEC_MODEL_ID_INVALID when the id is unknown.
    // NOTE(review): if the executor exists but the session entry is missing,
    // the executor has already been erased by the time the error is returned —
    // presumably both maps are always updated together; confirm with AddExecutor's callers.
    const std::lock_guard<std::mutex> locker(mutex_);
    const auto iter = executorMap_.find(modelId);
    if (iter == executorMap_.end()) {
        ACL_LOG_ERROR("model is not loaded, modelId is %u", modelId);
        return static_cast<aclError>(ACL_ERROR_GE_EXEC_MODEL_ID_INVALID);
    }
    (void)executorMap_.erase(iter);

    const auto it = rtSessionMap_.find(modelId);
    if (it == rtSessionMap_.end()) {
        ACL_LOG_ERROR("model is not loaded, modelId is %u", modelId);
        return static_cast<aclError>(ACL_ERROR_GE_EXEC_MODEL_ID_INVALID);
    }
    (void)rtSessionMap_.erase(it);
    return ACL_SUCCESS;
}

void AclResourceManager::GetRuntimeV2Env()
{
    // Read the ENABLE_RUNTIME_V2 environment variable and derive the two
    // runtime-v2 switches. When the variable is unset the flags keep their
    // previous values; the effective state is always logged.
    char_t enableRuntimeV2Flag[MMPA_MAX_PATH] = {};
    if (mmGetEnv("ENABLE_RUNTIME_V2", static_cast<char_t *>(enableRuntimeV2Flag),
                 static_cast<uint32_t>(sizeof(enableRuntimeV2Flag))) == EN_OK) {
        switch (enableRuntimeV2Flag[0]) {
            case '0': // 0: both model and singleOp disable
                enableRuntimeV2ForModel_ = false;
                enableRuntimeV2ForSingleOp_ = false;
                break;
            case '2': // 2: model enable, singleOp disable
                enableRuntimeV2ForModel_ = true;
                enableRuntimeV2ForSingleOp_ = false;
                break;
            default: // any other value: both enable
                enableRuntimeV2ForModel_ = true;
                enableRuntimeV2ForSingleOp_ = true;
                break;
        }
    }
    ACL_LOG_EVENT("runtime v2 flag : model flag = %d, singleOp flag = %d",
                  static_cast<int32_t>(enableRuntimeV2ForModel_),
                  static_cast<int32_t>(enableRuntimeV2ForSingleOp_));
}

std::shared_ptr<gert::ModelV2Executor> AclResourceManager::GetExecutor(const uint32_t modelId)
{
    // Return the executor registered under modelId, or nullptr when unknown.
    const std::lock_guard<std::mutex> locker(mutex_);
    const auto found = executorMap_.find(modelId);
    if (found != executorMap_.end()) {
        return found->second;
    }
    return nullptr;
}

std::shared_ptr<gert::RtSession> AclResourceManager::CreateRtSession()
{
    // Allocate a monotonically increasing session id and wrap it in a new session.
    const std::lock_guard<std::mutex> locker(mutex_);
    ++sessionIdGenerator_;
    const auto newSessionId = sessionIdGenerator_.load();
    return std::make_shared<gert::RtSession>(newSessionId);
}

std::shared_ptr<gert::RtSession> AclResourceManager::GetRtSession(const uint32_t rtSessionId)
{
    // Return the session registered under rtSessionId, or nullptr when absent.
    const std::lock_guard<std::mutex> locker(mutex_);
    const auto found = rtSessionMap_.find(rtSessionId);
    if (found != rtSessionMap_.end()) {
        return found->second;
    }
    return nullptr;
}

void *AclResourceManager::GetKeyByStreamOrDefaultStream(const aclrtStream stream)
{
    if (stream != nullptr) {
        return stream;
    }
    // get current context default stream
    rtStream_t curCtxDefaultStream = nullptr;
    const rtError_t rtErr = rtCtxGetCurrentDefaultStream(&curCtxDefaultStream);
    if (rtErr != RT_ERROR_NONE) {
        ACL_LOG_CALL_ERROR("get current default stream failed, ret:%d", static_cast<int32_t>(rtErr));
        return nullptr;
    }
    return curCtxDefaultStream;
}

gert::Allocators *AclResourceManager::GetAllocators(const aclrtStream stream, bool createDefaultAllocator)
{
    // Return the cached allocator set for the stream (or for the current
    // context's default stream when stream is null), creating and caching it
    // on first use. Returns nullptr if the key cannot be resolved or the
    // allocators cannot be created.
    // GetKeyByStreamOrDefaultStream already returns the stream unchanged when
    // it is non-null, so the previous redundant pre-check is dropped.
    void *const cacheKey = GetKeyByStreamOrDefaultStream(stream);
    if (cacheKey == nullptr) {
        return nullptr;
    }

    const std::unique_lock<std::recursive_mutex> lk(streamAllocatorMutex_);
    // try using exist allocator
    const auto iter = streamAllocator_.find(cacheKey);
    if (iter != streamAllocator_.end()) {
        return iter->second.get();
    }

    std::shared_ptr<ge::Allocator> deviceAllocator = GetDeviceAllocator(stream, createDefaultAllocator);
    if (deviceAllocator == nullptr) {
        return nullptr;
    }
    std::shared_ptr<ge::Allocator> hostAllocator(gert::AllocatorFactory::Create(gert::kOnHost).release());
    if (hostAllocator == nullptr) {
        ACL_LOG_ERROR("hostAllocator is nullptr");
        return nullptr;
    }

    // create a new allocator; new (std::nothrow) may yield nullptr, which the
    // previous code dereferenced unchecked below.
    std::shared_ptr<gert::Allocators> allocators(new (std::nothrow) gert::Allocators());
    if (allocators == nullptr) {
        ACL_LOG_ERROR("allocators is nullptr");
        return nullptr;
    }
    // only support allocator with placement kOnDeviceHbm, kOnHost, kFollowing
    for (size_t i = 0U; i < static_cast<size_t>(gert::AllocatorUsage::kEnd); ++i) {
        (void)allocators->SetAllocator(static_cast<gert::TensorPlacement>(gert::kOnDeviceHbm), i, deviceAllocator);
        (void)allocators->SetAllocator(static_cast<gert::TensorPlacement>(gert::kOnHost), i, hostAllocator);
        (void)allocators->SetAllocator(static_cast<gert::TensorPlacement>(gert::kFollowing), i, hostAllocator);
    }
    (void)streamAllocator_.insert({cacheKey, allocators});
    return allocators.get();
}

void AclResourceManager::CleanAllocators(const void * const cacheKey)
{
    // Drop the cached per-stream allocator set; a null key means nothing to do.
    if (cacheKey != nullptr) {
        const std::unique_lock<std::recursive_mutex> lk(streamAllocatorMutex_);
        (void)streamAllocator_.erase(cacheKey);
    }
}

void AclResourceManager::SetCompileFlag(const int32_t flag)
{
    // Forward the compile-mode flag to the process-wide setting.
    SetGlobalCompileFlag(flag);
}

void AclResourceManager::SetJitCompileFlag(const int32_t flag)
{
    // Forward the jit-compile flag to the process-wide setting.
    SetGlobalJitCompileFlag(flag);
}

void AclResourceManager::SetMaxOpNum(const uint64_t maxOpNum)
{
    // Set the capacity of the op-model map directly (no validation here;
    // HandleMaxOpQueueConfig performs the validated, config-driven path).
    opModels_.SetMaxOpNum(maxOpNum);
}

aclError AclResourceManager::HandleMaxOpQueueConfig(const char_t *const configPath)
{
    // Parse the optional "max_opqueue_num" entry from the json config file and
    // apply it as the op-model map capacity; absent key keeps the default.
    ACL_LOG_INFO("start to execute HandleMaxOpQueueConfig");
    std::string maxOpNumStr;
    bool found = false;
    const aclError ret = acl::JsonParser::GetJsonCtxByKey(configPath, maxOpNumStr, ACL_MAX_OPQUEUE_NUM, found);
    if (ret != ACL_SUCCESS) {
        ACL_LOG_INNER_ERROR("[Parse][Config]parse max_opqueue_num config from file[%s] failed, errorCode = %d",
                            configPath, ret);
        return ret;
    }

    if (!found) {
        ACL_LOG_INFO("no max_opqueue_num found, set default DEFAULT_MAX_OPQUEUE_NUM[%lu].",
                     DEFAULT_MAX_OPQUEUE_NUM);
        ACL_LOG_INFO("HandleMaxOpQueueConfig end in HandleMaxOpQueueConfig");
        return ACL_SUCCESS;
    }

    StringUtils::Strip(maxOpNumStr, "\"");
    // if parse failed, maxOpNum is zero
    const int64_t maxOpNum = strtol(maxOpNumStr.c_str(), nullptr, DECIMAL);
    ACL_LOG_INFO("max_opqueue_num is set [%ld].", maxOpNum);
    if (maxOpNum <= 0) {
        ACL_LOG_INNER_ERROR("[Check][MaxOpNum]max_opqueue_num [%s] is invalid from file[%s], "
                            "it should be larger than 0.", maxOpNumStr.c_str(), configPath);
        return ACL_ERROR_INVALID_PARAM;
    }
    if (static_cast<uint64_t>(maxOpNum) < DEFAULT_MAX_OPQUEUE_NUM) {
        // A value below the default still applies, but is likely a misconfiguration.
        ACL_LOG_WARN("max_opqueue_num [%lu] is less than default value DEFAULT_MAX_OPQUEUE_NUM[%lu], "
                     "it may be low performance.", static_cast<uint64_t>(maxOpNum), DEFAULT_MAX_OPQUEUE_NUM);
    }
    opModels_.SetMaxOpNum(static_cast<uint64_t>(maxOpNum));
    ACL_LOG_INFO("HandleMaxOpQueueConfig end in HandleMaxOpQueueConfig");
    return ACL_SUCCESS;
}

bool AclResourceManager::OmFileFilterFn(const std::string &fileName)
{
    // Accept only names ending with the ".om" suffix.
    const size_t suffixLen = static_cast<size_t>(OM_FILE_SUFFIX_LEN);
    if (fileName.size() < suffixLen) {
        return false;
    }
    return fileName.compare(fileName.size() - suffixLen, suffixLen, ".om") == 0;
}

bool AclResourceManager::IsDynamicOpModel(const OpModelDef &modelDef)
{
    // Decide whether the registered model must be treated as dynamic-shape.
    if (modelDef.isDynamicModel) {
        ACL_LOG_INFO("the model is dynamic directly");
        return true;
    }
    if (modelDef.isStaticModelWithFuzzCompile == 1U) {
        // 1:ACL_OP_COMPILE_FUZZ mode but model is static
        ACL_LOG_INFO("the model is static model with fuzz compile");
        return false;
    }
    if (modelDef.isStaticModelWithFuzzCompile != 0U) {
        // 2:ACL_OP_COMPILE_FUZZ mode and model is dynamic
        ACL_LOG_INFO("the model is dynamic model with fuzz compile");
        return true;
    }
    // 0: ACL_OP_COMPILE_DEFAULT mode - dynamic iff any tensor desc is dynamic
    ACL_LOG_INFO("the model is compiled by exact compile");
    for (const auto &inputDesc : modelDef.inputDescArr) {
        if (inputDesc.IsDynamicTensor()) {
            return true;
        }
    }
    for (const auto &outputDesc : modelDef.outputDescArr) {
        if (outputDesc.IsDynamicTensor()) {
            return true;
        }
    }
    return false;
}

bool AclResourceManager::IsDynamicOpModel(const AclOp &aclOp)
{
    for (int32_t i = 0; i < aclOp.numInputs; ++i) {
        if (aclOp.inputDesc[i]->IsDynamicTensor()) {
            return true;
        }
    }
    for (int32_t i = 0; i < aclOp.numOutputs; ++i) {
        if (aclOp.outputDesc[i]->IsDynamicTensor()) {
            return true;
        }
    }
    return false;
}

aclError AclResourceManager::LoadAllModels(const std::string &modelDir)
{
    // Scan modelDir for .om files, parse each one and register it statically
    // (static registrations never age out of the cache).
    ACL_LOG_INFO("LoadAllModels begin. config path = %s", modelDir.c_str());
    std::vector<OpModelDef> defList;
    ACL_REQUIRES_OK(ReadModelDefs(modelDir, defList));

    if (defList.empty()) {
        ACL_LOG_WARN("No model is loaded.");
        return ACL_SUCCESS;
    }
    ACL_LOG_INFO("Found %zu model config", defList.size());
    for (auto &modelDef : defList) {
        const bool dynamicModel = IsDynamicOpModel(modelDef);
        // it is static load
        bool registered = false;
        ACL_REQUIRES_OK(RegisterModel(std::move(modelDef), opModels_, dynamicModel, registered, true));
    }
    return ACL_SUCCESS;
}

aclError AclResourceManager::LoadModelFromMem(const void *const model, const size_t modelSize, const bool isStatic)
{
    // Take a private copy of the caller's buffer and hand it over to
    // LoadModelFromSharedMem() under shared ownership.
    ACL_LOG_INFO("Load op model begin. modelSize = %zu", modelSize);
    ACL_REQUIRES_NOT_NULL(model);
    ACL_REQUIRES_POSITIVE(modelSize);
    char_t *const bufferCopy = new (std::nothrow) char_t[modelSize];
    ACL_REQUIRES_NOT_NULL(bufferCopy);
    const std::shared_ptr<void> modelData(bufferCopy, [](const char_t *const p) { delete[] p; });
    if (memcpy_s(bufferCopy, modelSize, model, modelSize) != EOK) {
        ACL_LOG_INNER_ERROR("[Copy][Data]Copy model data failed. size = %zu", modelSize);
        return ACL_ERROR_FAILURE;
    }
    return LoadModelFromSharedMem(modelData, modelSize, nullptr, isStatic);
}

aclError AclResourceManager::LoadModelFromSharedMem(const std::shared_ptr<void> &model, const size_t modelSize,
                                                    const AclOp *const aclOp, const bool isStatic)
{
    // Parse an in-memory op model and register it; the raw model data is
    // cached only when this exact model was not registered before.
    // @param model      shared buffer holding the serialized op model
    // @param modelSize  buffer size in bytes, must be > 0
    // @param aclOp      optional op whose profiling index is refreshed; may be nullptr
    // @param isStatic   true = static load (no aging), false = dynamic load
    ACL_LOG_INFO("Load inner op model begin. modelSize = %zu", modelSize);
    ACL_REQUIRES_NOT_NULL(model);
    ACL_REQUIRES_POSITIVE(modelSize);
    OpModel opModel;
    opModel.data = model;
    if (aclOp != nullptr) {
        // Register the op type string with the profiler and propagate the
        // resulting index to both the caller's op and the new model.
        const_cast<AclOp *>(aclOp)->opModel.profilingIndex =
            static_cast<int64_t>(gert::GlobalProfilingWrapper::GetInstance()->RegisterString(aclOp->opType));
        opModel.profilingIndex = aclOp->opModel.profilingIndex;
    }
    opModel.size = static_cast<uint32_t>(modelSize);
    // The buffer address doubles as a unique model name.
    opModel.name = std::to_string(reinterpret_cast<uintptr_t>(model.get()));
    ACL_LOG_INFO("opModel.name = %s", opModel.name.c_str());

    OpModelDef modelDef;
    modelDef.modelPath = opModel.name;
    modelDef.opModelId = atomicModelId++;

    const auto ret = OpModelParser::ParseOpModel(opModel, modelDef);
    if (ret != ACL_SUCCESS) {
        ACL_LOG_INNER_ERROR("parse model failed. errorCode = %d. skip it", ret);
        return ret;
    }
    const bool isDynamic = IsDynamicOpModel(modelDef);
    ACL_LOG_INFO("The name of opModel is %s", opModel.name.c_str());
    bool isRegistered = false;
    // Capture the id before modelDef is moved into RegisterModel.
    const uint64_t opId = modelDef.opModelId;
    ACL_REQUIRES_OK(RegisterModel(std::move(modelDef), opModels_, isDynamic, isRegistered, isStatic));
    if (!isRegistered) {
        // First registration for this model: keep the raw data in the cache.
        ACL_REQUIRES_OK(modelCache_.Add(opId, opModel));
    } else {
        ACL_LOG_INFO("current model %s is registered", opModel.name.c_str());
    }
    return ACL_SUCCESS;
}

aclError AclResourceManager::RegisterModel(OpModelDef &&modelConfig,
                                           ModelMap &opModelDefs,
                                           const bool isDynamic,
                                           bool &isRegistered,
                                           const bool isStaticRegister)
{
    // Insert a parsed model definition into the static or dynamic model map.
    // @param modelConfig       definition to register; consumed (moved from)
    // @param opModelDefs       target map
    // @param isDynamic         true -> dynamic-shape insertion path
    // @param isRegistered      out: true when an equivalent model already existed
    // @param isStaticRegister  static loads never age; dynamic loads are timestamped
    //
    // Build a temporary AclOp that points at the modelConfig's tensor descs so
    // the map can compute the lookup key for this model.
    const auto numInputs = modelConfig.inputDescArr.size();
    std::vector<const aclTensorDesc *> inputDesc;
    for (size_t i = 0U; i < numInputs; ++i) {
        inputDesc.emplace_back(&modelConfig.inputDescArr[i]);
    }
    std::vector<const aclTensorDesc *> outputDesc;
    const auto numOutputs = modelConfig.outputDescArr.size();
    for (size_t i = 0U; i < numOutputs; ++i) {
        outputDesc.emplace_back(&modelConfig.outputDescArr[i]);
    }

    AclOp aclOp;
    aclOp.opType = modelConfig.opType;
    aclOp.opAttr = &modelConfig.opAttr;
    aclOp.numInputs = static_cast<int32_t>(numInputs);
    aclOp.numOutputs = static_cast<int32_t>(numOutputs);
    aclOp.inputDesc = inputDesc.data();
    aclOp.outputDesc = outputDesc.data();
    if (!isStaticRegister) {
        // only dynamic load need update timestamp, static load is ULLONG_MAX default no need aging
        modelConfig.timestamp = attr_utils::GetCurrentTimestamp();
    }

    const auto modelDefPtr = std::make_shared<OpModelDef>(std::move(modelConfig));
    ACL_REQUIRES_NOT_NULL(modelDefPtr);
    // An insertion may evict an aged model; its cached data is deleted below.
    std::shared_ptr<OpModelDef> agingModelDef = nullptr;
    if (isDynamic) {
        ACL_LOG_INFO("The model is dynamic shape");
        // Record the shape-range info so dynamic matching can find this model.
        uint64_t seq = 0U;
        ShapeRangeUtils::GetInstance().SetTensorShapeInfo(aclOp, seq);
        modelDefPtr->seq = seq;
        ACL_REQUIRES_OK(opModelDefs.InsertDynamic(aclOp, modelDefPtr, agingModelDef, isRegistered));
    } else {
        ACL_LOG_INFO("Insert modeldef to hash map");
        ACL_REQUIRES_OK(opModelDefs.Insert(aclOp, modelDefPtr, agingModelDef, isRegistered));
    }

    if (agingModelDef != nullptr) {
        ACL_REQUIRES_OK(modelCache_.Delete(*agingModelDef, isDynamic));
    }
    // Remember globally whether any Cast op carries a "truncate" attribute.
    const bool castHasTruncate = ((!GetIfCastHasTruncateAttr()) && (aclOp.opType == "Cast")) &&
                                 ((aclOp.opAttr != nullptr) && (aclOp.opAttr->HasAttr("truncate")));
    if (castHasTruncate) {
        ACL_LOG_INFO("Find cast op whose attr contains truncate");
        SetCastHasTruncateAttr(true);
    }
    ACL_LOG_INFO("Register model. OpModelDef = %s", modelDefPtr->DebugString().c_str());
    return ACL_SUCCESS;
}

aclError AclResourceManager::SetTensorConst(aclTensorDesc *const desc, const aclDataBuffer *const dataBuffer)
{
    // Mark desc as a const tensor; for non-empty buffers, alias the host
    // memory as the const data (no-op deleter: the caller keeps ownership).
    ACL_REQUIRES_NOT_NULL(desc);
    ACL_REQUIRES_NOT_NULL(dataBuffer);
    desc->isConst = true;
    const size_t bufLen = dataBuffer->length;
    if (bufLen != 0U) {
        desc->constDataBuf.reset(reinterpret_cast<char_t *>(dataBuffer->data), [](const char_t *const) {});
        desc->constDataLen = bufLen;
    }
    return ACL_SUCCESS;
}

aclError AclResourceManager::SetHostMemToConst(const AclOp &aclopHostMemToConst, bool &isExistConst) const
{
    // Convert every ACL_MEMTYPE_HOST tensor that is not yet const into a const
    // tensor backed by its host buffer; isExistConst flags whether any
    // conversion happened so callers can later invoke RecoverConst().
    const auto convert = [this, &isExistConst](const aclTensorDesc *const *descs,
                                               const aclDataBuffer *const *buffers,
                                               const int32_t num) -> aclError {
        for (int32_t idx = 0; idx < num; ++idx) {
            if ((descs[idx]->memtype == ACL_MEMTYPE_HOST) && (!descs[idx]->isConst)) {
                isExistConst = true;
                // HostMem needs to be constructed as constTensor
                ACL_REQUIRES_OK(SetTensorConst(const_cast<aclTensorDesc *>(descs[idx]), buffers[idx]));
            }
        }
        return ACL_SUCCESS;
    };
    ACL_REQUIRES_OK(convert(aclopHostMemToConst.inputDesc, aclopHostMemToConst.inputs,
                            aclopHostMemToConst.numInputs));
    ACL_REQUIRES_OK(convert(aclopHostMemToConst.outputDesc, aclopHostMemToConst.outputs,
                            aclopHostMemToConst.numOutputs));
    return ACL_SUCCESS;
}

aclError AclResourceManager::GetOpModel(AclOp &aclOp)
{
    // Fast path: reuse an already-registered model that matches this op.
    bool isDynamic = false;
    aclError ret = MatchOpModel(aclOp, aclOp.opModel, isDynamic);
    if (ret == ACL_SUCCESS) {
        aclOp.isMatched = true;
        aclOp.isDynamic = isDynamic;
        ACL_LOG_INFO("operator %s is already registered, isDynamicModel = %d", aclOp.opType.c_str(),
                     static_cast<int32_t>(isDynamic));
        return ret;
    }
    // Slow path: compile the op online, guarded by a watchdog so a hung
    // compilation is reported after the timeout.
    constexpr uint32_t awdTimeout = 300U; // 300 seconds, 5 min
    const AwdHandle hdl = AwdCreateThreadWatchdog(static_cast<uint32_t>(ACL_MODE_ID), awdTimeout, nullptr);
    (void)AwdStartThreadWatchdog(hdl);
    ret = BuildOpModel(aclOp);
    // Stop/destroy the watchdog exactly once for both outcomes
    // (the cleanup was previously duplicated in each branch).
    (void)AwdStopThreadWatchdog(hdl);
    AwdDestroyThreadWatchdog(hdl);
    if (ret != ACL_SUCCESS) {
        ACL_LOG_INNER_ERROR("[Build][Op]Fail to build op model");
    }
    return ret;
}

aclError AclResourceManager::MatchStaticOpModel(const AclOp &aclOp, OpModel &opModel,
                                                bool &isDynamic, bool &isNeedMatchDymaic)
{
    // Try to find a matching static-shape model for aclOp.
    // Lookup order: with host-memory tensors converted to const descs first,
    // then (after RecoverConst) with the original descs. When no static model
    // matches, isNeedMatchDymaic is set so the caller falls back to
    // MatchDynamicOpModel().
    RT2_PROFILING_SCOPE(gert::profiling::kUnknownName, gert::profiling::kAclMatchStaticOpModel);
    std::shared_ptr<OpModelDef> modelDef;
    aclError ret = ACL_SUCCESS;
    isNeedMatchDymaic = false;
    bool isExistConst = false;
    ACL_REQUIRES_OK(SetHostMemToConst(aclOp, isExistConst));
    if (isExistConst) {
        // First lookup: descs carry the const conversion applied above.
        ret = opModels_.Get(aclOp, modelDef, true);
        aclOp.RecoverConst();
        if (ret == ACL_SUCCESS) {
            isDynamic = false;
            ACL_LOG_INFO("Match static model with const memory successfully. opType = %s, opModel = %s",
                         aclOp.opType.c_str(), modelDef->modelPath.c_str());
            ret = modelCache_.GetOpModel(*modelDef, opModel);
            return ret;
        } else {
            // Retry after RecoverConst: this lookup sees the original
            // (non-const) descs, so it is not a duplicate of the call above.
            ret = opModels_.Get(aclOp, modelDef, true);
            if (ret == ACL_SUCCESS) {
                isDynamic = false;
                ACL_LOG_INFO("Match static model successfully. opType = %s, opModel = %s",
                             aclOp.opType.c_str(), modelDef->modelPath.c_str());
                ret = modelCache_.GetOpModel(*modelDef, opModel);
                return ret;
            }
        }
    } else {
        ret = opModels_.Get(aclOp, modelDef, true);
        if (ret == ACL_SUCCESS) {
            isDynamic = false;
            ACL_LOG_INFO("Match static model successfully. opType = %s, opModel = %s",
                         aclOp.opType.c_str(), modelDef->modelPath.c_str());
            ret = modelCache_.GetOpModel(*modelDef, opModel);
            return ret;
        }
    }
    // No static model matched; the caller should try the dynamic map.
    isNeedMatchDymaic = true;

    return ret;
}

aclError AclResourceManager::MatchDynamicOpModel(const AclOp &aclOp, OpModel &opModel, bool &isDynamic)
{
    // Try to find a registered dynamic-shape model whose recorded shape range
    // covers aclOp's actual shapes. Returns ACL_ERROR_OP_NOT_FOUND when no
    // candidate matches.
    RT2_PROFILING_SCOPE(gert::profiling::kUnknownName, gert::profiling::kAclMatchDynamicOpModel);
    std::shared_ptr<OpModelDef> modelDef;
    aclError ret = ACL_SUCCESS;
    // check the input shape must be static when executing
    if (!aclOp.isCompile) {
        ACL_CHECK_WITH_INNER_MESSAGE_AND_RETURN(!IsDynamicOpModel(aclOp), ACL_ERROR_INVALID_PARAM,
                                                "[Check][Op]TensorDesc must be static when executing, "
                                                "tensorDesc is %s", aclOp.DebugString().c_str());
    }
    // Need to refresh the shape(-1/-2) and go to map to find model when executing
    std::vector<OpRangeInfo> *opRangeInfoVec;
    // Read lock guards the shared shape-range table for the whole scan.
    const MmRDLockGuard lk(&ShapeRangeUtils::GetInstance().shapeInfoMutex_);
    opRangeInfoVec = ShapeRangeUtils::GetInstance().GetTensorShapeInfo(aclOp);
    if (opRangeInfoVec == nullptr) {
        if (!aclOp.isCompile) {
            ACL_LOG_INFO("No match dynamic model, opName = %s .", aclOp.opType.c_str());
        }
        return ACL_ERROR_OP_NOT_FOUND;
    }
    // Scan every recorded range; each candidate is tried as-is first, then
    // with host-memory tensors converted to const descs.
    for (size_t i = 0U; i < opRangeInfoVec->size(); ++i) {
        if (!ShapeRangeUtils::GetInstance().CheckShapeRange(aclOp, (*opRangeInfoVec)[i])) {
            continue;
        }
        ret = opModels_.GetDynamic(aclOp, modelDef, (*opRangeInfoVec)[i].seq, true);
        if (ret == ACL_SUCCESS) {
            ACL_LOG_INFO("Match dynamic model success. opType = %s, opModel = %s",
                         aclOp.opType.c_str(), modelDef->modelPath.c_str());
            isDynamic = true;
            ret = modelCache_.GetOpModel(*modelDef, opModel);
            return ret;
        } else {
            // aicpu is -2 but need to set const
            bool isExistConst = false;
            ACL_REQUIRES_OK(SetHostMemToConst(aclOp, isExistConst));
            if (!isExistConst) {
                continue;
            }
            ret = opModels_.GetDynamic(aclOp, modelDef, (*opRangeInfoVec)[i].seq, true);
            aclOp.RecoverConst();
            if (ret == ACL_SUCCESS) {
                ACL_LOG_INFO("Match dynamic model success. opType = %s, opModel = %s",
                             aclOp.opType.c_str(), modelDef->modelPath.c_str());
                isDynamic = true;
                ret = modelCache_.GetOpModel(*modelDef, opModel);
                return ret;
            }
        }
    }
    return ACL_ERROR_OP_NOT_FOUND;
}

aclError AclResourceManager::MatchOpModel(const AclOp &aclOp, OpModel &opModel, bool &isDynamic)
{
    // Match aclOp against the registered models. The search order depends on
    // the global compile flags: with jit disabled (or full compile forced),
    // dynamic models are preferred; otherwise static models are tried first
    // and the dynamic map is only consulted when the static match signals it.
    aclError ret;
    RT2_PROFILING_SCOPE(gert::profiling::kUnknownName, gert::profiling::kAclMatchOpModel);
    // Snapshot const state so the const-conversion done during matching can be undone.
    aclOp.BackupConst();
    if ((GetGlobalJitCompileFlag() == 0) || (GetGlobalCompileFlag() == 1)) {
        ret = MatchDynamicOpModel(aclOp, opModel, isDynamic);
        if (ret != ACL_SUCCESS) {
            bool isNeedMatchDymaic = false;
            ret = MatchStaticOpModel(aclOp, opModel, isDynamic, isNeedMatchDymaic);
        }
    } else {
        bool isNeedMatchDymaic = false;
        ret = MatchStaticOpModel(aclOp, opModel, isDynamic, isNeedMatchDymaic);
        if (!isNeedMatchDymaic) {
            return ret;
        }
        ret = MatchDynamicOpModel(aclOp, opModel, isDynamic);
    }

    if (UNLIKELY((ret != ACL_SUCCESS) && (!aclOp.isCompile))) {
        ACL_LOG_INNER_ERROR("[Match][OpModel]MatchOpModel opName = %s fail from static map or dynamic map. "
                            "Please make sure the op executed and the op compiled is matched, "
                            "you can check the op type, op inputs number, outputs number, "
                            "input format, origin format, datatype, memtype, attr, dim range, "
                            "and so on.", aclOp.opType.c_str());
    }
    return ret;
}

aclError AclResourceManager::UpdateRT2Executor(const uint64_t id,
                                               const std::shared_ptr<gert::StreamExecutor> &executor)
{
    // Forward to the model cache: attach/refresh the stream executor cached for id.
    return modelCache_.UpdateCachedExecutor(id, executor);
}

std::shared_ptr<std::mutex> AclResourceManager::GetCacheMutex(const uint64_t id)
{
    // Forward to the model cache: per-entry mutex used to serialize access to id.
    return modelCache_.GetCacheMutex(id);
}

std::shared_ptr<gert::StreamExecutor> AclResourceManager::GetRT2Executor(const uint64_t id)
{
    // Forward to the model cache: fetch the stream executor cached for id.
    return modelCache_.GetRT2Executor(id);
}

aclError AclResourceManager::UnloadModelData(const uint64_t id)
{
    // Forward to the model cache: drop the cached model data for id.
    return modelCache_.UnloadCachedModelData(id);
}

aclError AclResourceManager::CleanRT2Executor(rtStream_t stream)
{
    // Forward to the model cache: release all executors bound to this stream.
    return modelCache_.CleanCachedExecutor(stream);
}

aclError AclResourceManager::ReadModelDefs(const std::string &configPath,
                                           std::vector<OpModelDef> &configList)
{
    // Collect all .om files under configPath (up to OM_DIR_MAX_DEPTH levels),
    // parse each one into an OpModelDef and cache its raw model data.
    // Unparseable files are skipped with a warning.
    std::vector<std::string> modelFilePaths;
    ACL_REQUIRES_OK(file_utils::ListFiles(configPath, &OmFileFilterFn, modelFilePaths, OM_DIR_MAX_DEPTH));

    for (auto &modelPath : modelFilePaths) {
        OpModel opModel;
        ACL_REQUIRES_OK(ReadOpModelFromFile(modelPath, opModel));
        OpModelDef modelDef;
        modelDef.modelPath = modelPath;
        modelDef.opModelId = atomicModelId++;
        const auto parseRet = OpModelParser::ParseOpModel(opModel, modelDef);
        if (parseRet != ACL_SUCCESS) {
            ACL_LOG_WARN("can not parse model, errorCode = %d, model = %s, skip it",
                         parseRet, modelDef.modelPath.c_str());
            continue;
        }

        configList.emplace_back(std::move(modelDef));
        ACL_LOG_INFO("Add model: %s", configList.back().modelPath.c_str());
        ACL_REQUIRES_OK(modelCache_.Add(configList.back().opModelId, opModel));
    }

    return ACL_SUCCESS;
}

aclError AclResourceManager::BuildOpModel(const AclOp &aclOp)
{
    // Compile the op online and register the resulting model (dynamic load).
    RT2_PROFILING_SCOPE(gert::profiling::kUnknownName, gert::profiling::kAclBuildOpModel);
    std::shared_ptr<void> modelData;
    size_t modelSize;
    const aclError compileRet = OpCompileService::GetInstance().CompileOp(aclOp, modelData, modelSize);
    if (compileRet != ACL_SUCCESS) {
        return compileRet;
    }
    // it is dynamic load
    const aclError loadRet = LoadModelFromSharedMem(modelData, modelSize, &aclOp, false);
    if (loadRet != ACL_SUCCESS) {
        ACL_LOG_INNER_ERROR("[Load][Model]load model from mem failed.");
        return loadRet;
    }

    ACL_LOG_INFO("Compile operator %s success", aclOp.opType.c_str());
    return ACL_SUCCESS;
}

aclError AclResourceManager::CreateExternalAllocator(const aclrtStream stream, void *allocatorDesc)
{
    // Register a user-supplied allocator for the given stream. The descriptor
    // must be fully populated by the aclrtAllocatorSet*ToDesc calls first.
    // The descriptor itself was dereferenced without a check before; validate it too.
    ACL_REQUIRES_NOT_NULL(allocatorDesc);
    auto geAllocatorDesc = static_cast<ge::AllocatorDesc *>(allocatorDesc);
    if (geAllocatorDesc->obj == nullptr) {
        ACL_LOG_INNER_ERROR("Should call aclrtAllocatorSetObjToDesc first");
        return ACL_ERROR_INVALID_PARAM;
    }
    if (geAllocatorDesc->alloc_func == nullptr) {
        ACL_LOG_INNER_ERROR("Should call aclrtAllocatorSetAllocFuncToDesc first");
        return ACL_ERROR_INVALID_PARAM;
    }
    if (geAllocatorDesc->free_func == nullptr) {
        ACL_LOG_INNER_ERROR("Should call aclrtAllocatorSetFreeFuncToDesc first");
        return ACL_ERROR_INVALID_PARAM;
    }
    if (geAllocatorDesc->get_addr_from_block_func == nullptr) {
        ACL_LOG_INNER_ERROR("Should call aclrtAllocatorSetGetAddrFromBlockFuncToDesc first");
        return ACL_ERROR_INVALID_PARAM;
    }
    std::shared_ptr<ge::Allocator> deviceAllocator(gert::CreateExternalAllocator(geAllocatorDesc).release());
    ACL_REQUIRES_NOT_NULL_WITH_INNER_REPORT(deviceAllocator);
    // Replaces any allocator previously registered for this stream.
    const std::unique_lock<std::mutex> lk(streamExternalAllocatorMutex_);
    streamExternalAllocator_[stream] = deviceAllocator;
    ACL_LOG_INFO("Create external allocator success, stream %p, allocatorDesc %p", stream, allocatorDesc);
    return ACL_SUCCESS;
}

aclError AclResourceManager::DeleteExternalAllocator(const aclrtStream stream)
{
    // Unregister the external allocator of this stream (if any), then drop the
    // cached allocator set that may have been built on top of it.
    {
        const std::unique_lock<std::mutex> lk(streamExternalAllocatorMutex_);
        (void)streamExternalAllocator_.erase(stream);
    }
    CleanAllocators(stream);
    return ACL_SUCCESS;
}

std::shared_ptr<ge::Allocator> AclResourceManager::GetDeviceAllocator(const aclrtStream stream, bool createDefaultAllocator)
{
    // the external allocator registered for this stream is preferred
    const std::unique_lock<std::mutex> lk(streamExternalAllocatorMutex_);
    const auto found = streamExternalAllocator_.find(stream);
    if (found != streamExternalAllocator_.end()) {
        return found->second;
    }

    // For model inference (dynamic shape), the default Allocator is not created to improve performance.
    if (!createDefaultAllocator) {
        ACL_LOG_INFO("The default deviceAllocator is not created.");
        return nullptr;
    }

    // Fall back to a factory-created device (HBM) allocator.
    std::shared_ptr<ge::Allocator> defaultAllocator(
        gert::AllocatorFactory::Create(gert::kOnDeviceHbm).release());
    if (defaultAllocator == nullptr) {
        ACL_LOG_ERROR("deviceAllocator is nullptr");
    }
    return defaultAllocator;
}

AclResourceManager::~AclResourceManager()
{
    // note: releasing an op_model's executor depends on its allocator, so the
    // executors must be released first, then the allocators.
    modelCache_.CleanCachedModels();
    streamAllocator_.clear();
    streamExternalAllocator_.clear();
}

void AclResourceManager::HandleReleaseSourceByDevice(uint32_t devId, bool isReset) const
{
    // Device lifecycle callback: release GE resources only on device reset;
    // plain set-device notifications require no action.
    ACL_LOG_INFO("start to execute HandleReleaseSourceByDevice, devId:%u.", devId);
    if (!isReset) {
        // fixed log typo: "calback" -> "callback"
        ACL_LOG_INFO("it's set device callback, currently do nothing.");
        return;
    }
    (void)ge::GeExecutor::ReleaseResource();
    ACL_LOG_INFO("successfully execute HandleReleaseSourceByDevice, devId:%u.", devId);
}

void AclResourceManager::HandleReleaseSourceByStream(aclrtStream stream, bool isCreate)
{
    // Stream lifecycle callback: on stream destruction, release every
    // per-stream resource (GE single-op resources, cached executors,
    // allocator caches); stream creation requires no action.
    ACL_LOG_INFO("start to execute HandleReleaseSourceByStream.");
    if (isCreate) {
        // fixed log typo: "calback" -> "callback"
        ACL_LOG_INFO("it's create stream callback, currently do nothing.");
        return;
    }
    (void)ge::GeExecutor::ReleaseSingleOpResource(stream);
    (void)acl::Executors::RemoveExecutor(stream);
    (void)CleanRT2Executor(stream);
    (void)CleanAllocators(stream);
    ACL_LOG_INFO("successfully execute HandleReleaseSourceByStream.");
}

aclError AclResourceManager::CreateRT2Executor(std::shared_ptr<gert::StreamExecutor> &streamExecutor, rtStream_t stream,
                                               const gert::ModelExecuteArg &arg, gert::ModelV2Executor *&executor)
{
    // Forward to the model cache: create (or reuse) a cached executor for the stream.
    return modelCache_.CreateCachedExecutor(streamExecutor, stream, arg, executor);
}

}

