/**
* @file op_model_manager.cpp
*
* Copyright (C) Huawei Technologies Co., Ltd. 2019-2020. All Rights Reserved.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/

#include "op_model_manager.h"

#include "framework/common/profiling_definitions.h"
#include "types/tensor_desc_internal.h"
#include "common/log_inner.h"
#include "common/common_inner.h"
#include "types/op_attr.h"
#include "json_parser.h"
#include "op_model_parser.h"
#include "shape_range_utils.h"
#include "utils/attr_utils.h"
#include "utils/file_utils.h"
#include "compile/op_compile_service.h"

namespace acl {
namespace {
// Length of the ".om" filename suffix matched by OmFileFilterFn.
constexpr int32_t OM_FILE_SUFFIX_LEN = 3;
// Maximum directory depth passed to file_utils::ListFiles when scanning for models.
constexpr int32_t OM_DIR_MAX_DEPTH = 3;
// Numeric base used when parsing max_opqueue_num with strtol.
constexpr int32_t DECIMAL = 10;
// JSON key looked up in the op-queue configuration file.
const std::string ACL_MAX_OPQUEUE_NUM = "max_opqueue_num";
}

// Sets the process-wide operator compile flag by forwarding to SetGlobalCompileFlag.
// @param flag compile flag value, passed through unchanged
void OpModelManager::SetCompileFlag(const int32_t flag)
{
    SetGlobalCompileFlag(flag);
}

// Reads the JSON config at configPath and, if a "max_opqueue_num" entry is present,
// applies it as the capacity limit of the op-model map.
// @param configPath path to the JSON configuration file
// @return ACL_SUCCESS on success (including when the key is absent);
//         ACL_ERROR_INVALID_PARAM for a non-positive or malformed value;
//         ACL_ERROR_INVALID_MAX_OPQUEUE_NUM_CONFIG on a JSON access error
aclError OpModelManager::HandleMaxOpQueueConfig(const char_t *const configPath)
{
    ACL_LOG_INFO("start to execute HandleMaxOpQueueConfig");
    nlohmann::json js;
    const aclError ret = acl::JsonParser::ParseJsonFromFile(configPath, js, nullptr, nullptr);
    if (ret != ACL_SUCCESS) {
        ACL_LOG_INNER_ERROR("[Parse][Config]parse max_opqueue_num config from file[%s] failed, errorCode = %d",
            configPath, ret);
        return ret;
    }
    try {
        if (js.find(ACL_MAX_OPQUEUE_NUM) != js.end()) {
            const std::string maxOpNumStr = js.at(ACL_MAX_OPQUEUE_NUM).get<std::string>();
            // Parse with an end pointer so values like "" or "12ab" are rejected;
            // strtol with a null end pointer would silently accept trailing garbage.
            char *parseEnd = nullptr;
            const int64_t maxOpNum = strtol(maxOpNumStr.c_str(), &parseEnd, DECIMAL);
            ACL_LOG_INFO("max_opqueue_num is set [%ld].", maxOpNum);
            const bool wholeStringParsed = (parseEnd != maxOpNumStr.c_str()) && (*parseEnd == '\0');
            if ((!wholeStringParsed) || (maxOpNum <= 0)) {
                ACL_LOG_INNER_ERROR("[Check][MaxOpNum]max_opqueue_num [%s] is invalid from file[%s], "
                    "it should be larger than 0.", maxOpNumStr.c_str(), configPath);
                return ACL_ERROR_INVALID_PARAM;
            }
            if (static_cast<uint64_t>(maxOpNum) < DEFAULT_MAX_OPQUEUE_NUM) {
                // A smaller-than-default queue is allowed but may hurt performance.
                ACL_LOG_WARN("max_opqueue_num [%lu] is less than default value DEFAULT_MAX_OPQUEUE_NUM[%lu], "
                             "it may be low performance.", static_cast<uint64_t>(maxOpNum), DEFAULT_MAX_OPQUEUE_NUM);
            }
            opModels_.SetMaxOpNum(static_cast<uint64_t>(maxOpNum));
        } else {
            ACL_LOG_INFO("no max_opqueue_num found, set default DEFAULT_MAX_OPQUEUE_NUM[%lu].",
                DEFAULT_MAX_OPQUEUE_NUM);
        }
    } catch (const nlohmann::json::exception &e) {
        ACL_LOG_INNER_ERROR("[Parse][Json]parse json for max_opqueue_num config failed, exception:%s.", e.what());
        return ACL_ERROR_INVALID_MAX_OPQUEUE_NUM_CONFIG;
    }
    ACL_LOG_INFO("HandleMaxOpQueueConfig end in HandleMaxOpQueueConfig");
    return ACL_SUCCESS;
}

// File filter used while scanning model directories: accepts a file name
// if and only if it ends with the ".om" suffix.
bool OpModelManager::OmFileFilterFn(const std::string &fileName)
{
    const size_t suffixLen = static_cast<size_t>(OM_FILE_SUFFIX_LEN);
    if (fileName.size() < suffixLen) {
        return false;
    }
    // Compare the trailing characters against ".om" directly.
    return fileName.compare(fileName.size() - suffixLen, suffixLen, ".om") == 0;
}

// Decides whether a parsed model definition describes a dynamic-shape model.
// isStaticModelWithFuzzCompile encodes the compile mode:
//   0 -> exact compile: inspect every tensor descriptor for dynamic shape;
//   1 -> fuzz compile, model is static;
//   otherwise (2) -> fuzz compile, model is dynamic.
bool OpModelManager::IsDynamicOpModel(const OpModelDef &modelDef)
{
    if (modelDef.isStaticModelWithFuzzCompile == 1U) {
        // 1:ACL_OP_COMPILE_FUZZ mode but model is static
        ACL_LOG_INFO("the model is static model with fuzz compile");
        return false;
    }
    if (modelDef.isStaticModelWithFuzzCompile != 0U) {
        // 2:ACL_OP_COMPILE_FUZZ mode and model is dynamic
        ACL_LOG_INFO("the model is dynamic model with fuzz compile");
        return true;
    }
    // 0: ACL_OP_COMPILE_DEFAULT mode
    ACL_LOG_INFO("the model is compiled by exact compile");
    for (const auto &inputDesc : modelDef.inputDescArr) {
        if (inputDesc.IsDynamicTensor()) {
            return true;
        }
    }
    for (const auto &outputDesc : modelDef.outputDescArr) {
        if (outputDesc.IsDynamicTensor()) {
            return true;
        }
    }
    return false;
}

// Returns true when any input or output tensor descriptor of the op is dynamic.
bool OpModelManager::IsDynamicOpModel(const AclOp &aclOp)
{
    int32_t idx = 0;
    while (idx < aclOp.numInputs) {
        if (aclOp.inputDesc[idx]->IsDynamicTensor()) {
            return true;
        }
        ++idx;
    }
    idx = 0;
    while (idx < aclOp.numOutputs) {
        if (aclOp.outputDesc[idx]->IsDynamicTensor()) {
            return true;
        }
        ++idx;
    }
    return false;
}

// Loads every model definition found under modelDir and registers each one.
// An empty directory is not an error; registration entries are static loads
// (they do not participate in aging).
aclError OpModelManager::LoadAllModels(const std::string &modelDir)
{
    ACL_LOG_INFO("LoadAllModels begin. config path = %s", modelDir.c_str());
    std::vector<OpModelDef> defList;
    ACL_REQUIRES_OK(ReadModelDefs(modelDir, defList));

    if (defList.empty()) {
        ACL_LOG_WARN("No model is loaded.");
        return ACL_SUCCESS;
    }

    ACL_LOG_INFO("Found %zu model config", defList.size());
    for (auto &def : defList) {
        const bool dynamicModel = IsDynamicOpModel(def);
        // it is static load
        ACL_REQUIRES_OK(RegisterModel(std::move(def), opModels_, dynamicModel, true));
    }
    return ACL_SUCCESS;
}

// Copies a caller-owned model buffer into private memory and loads it.
// @param model     source buffer (must not be null)
// @param modelSize size of the buffer in bytes (must be positive)
// @param isStatic  whether the model is registered as a static (non-aging) entry
aclError OpModelManager::LoadModelFromMem(const void *const model, const size_t modelSize, const bool isStatic)
{
    ACL_LOG_INFO("Load op model begin. modelSize = %zu", modelSize);
    ACL_REQUIRES_NOT_NULL(model);
    ACL_REQUIRES_POSITIVE(modelSize);
    // Own the copy via shared_ptr so the buffer outlives this call.
    char_t *const buffer = new (std::nothrow) char_t[modelSize];
    ACL_REQUIRES_NOT_NULL(buffer);
    const std::shared_ptr<void> modelData(buffer, [](const char_t *const ptr) { delete[] ptr; });
    if (memcpy_s(buffer, modelSize, model, modelSize) != EOK) {
        ACL_LOG_INNER_ERROR("[Copy][Data]Copy model data failed. size = %zu", modelSize);
        return ACL_ERROR_FAILURE;
    }
    return LoadModelFromSharedMem(modelData, modelSize, nullptr, isStatic);
}

// Parses a model held in shared memory, caches it, and registers it in the
// static or dynamic model map.
// @param model     shared buffer holding the serialized model (must not be null)
// @param modelSize buffer size in bytes (must be positive)
// @param aclOp     optional op context; when non-null its profiling index is refreshed
// @param isStatic  true for static registration (no aging), false for dynamic load
aclError OpModelManager::LoadModelFromSharedMem(const std::shared_ptr<void> &model, const size_t modelSize,
    const AclOp *const aclOp, const bool isStatic)
{
    ACL_LOG_INFO("Load inner op model begin. modelSize = %zu", modelSize);
    ACL_REQUIRES_NOT_NULL(model);
    ACL_REQUIRES_POSITIVE(modelSize);
    OpModel opModel;
    opModel.data = model;
    if (aclOp != nullptr) {
        // Register the op type string for profiling and mirror the index on both copies.
        const_cast<AclOp *>(aclOp)->opModel.profilingIndex =
            ge::profiling::ProfilingContext::GetInstance().RegisterString(aclOp->opType);
        opModel.profilingIndex = aclOp->opModel.profilingIndex;
    }
    opModel.size = static_cast<uint32_t>(modelSize);
    // The buffer address doubles as a unique model name.
    opModel.name = std::to_string(reinterpret_cast<uintptr_t>(model.get()));
    ACL_LOG_INFO("opModel.name = %s", opModel.name.c_str());

    OpModelDef modelDef;
    modelDef.modelPath = opModel.name;

    const auto ret = OpModelParser::ParseOpModel(opModel, modelDef);
    if (ret != ACL_SUCCESS) {
        ACL_LOG_INNER_ERROR("parse model failed. errorCode = %d. skip it", ret);
        return ret;
    }
    const bool isDynamic = IsDynamicOpModel(modelDef);
    // Fix: the previous log claimed "static shape" unconditionally, even for dynamic models.
    ACL_LOG_INFO("The model is %s shape, the name of opModel is %s",
        isDynamic ? "dynamic" : "static", opModel.name.c_str());
    ACL_REQUIRES_OK(modelCache_.Add(modelDef, opModel));
    ACL_REQUIRES_OK(RegisterModel(std::move(modelDef), opModels_, isDynamic, isStatic));

    return ACL_SUCCESS;
}

// Registers a model definition in the static or dynamic model map; modelConfig
// is consumed (moved into the shared OpModelDef). Entries evicted by the insert
// (aging) are also removed from the model cache.
// NOTE(review): aclOp stores raw pointers into modelConfig's tensor vectors and
// its opAttr; the std::move below transfers the vectors' heap buffers into
// modelDefPtr, so the tensor pointers remain valid afterwards. Whether opAttr
// remains valid depends on OpModelDef's move semantics — confirm before reordering.
aclError OpModelManager::RegisterModel(OpModelDef &&modelConfig,
                                       ModelMap &opModelDefs,
                                       const bool isDynamic,
                                       const bool isStaticRegister)
{
    // Build a lookup key (AclOp) from the model definition's tensors and attrs.
    const auto numInputs = modelConfig.inputDescArr.size();
    std::vector<const aclTensorDesc *> input_desc;
    for (size_t i = 0U; i < numInputs; ++i) {
        input_desc.emplace_back(&modelConfig.inputDescArr[i]);
    }
    std::vector<const aclTensorDesc *> output_desc;
    const auto numOutputs = modelConfig.outputDescArr.size();
    for (size_t i = 0U; i < numOutputs; ++i) {
        output_desc.emplace_back(&modelConfig.outputDescArr[i]);
    }

    AclOp aclOp;
    aclOp.opType = modelConfig.opType;
    aclOp.opAttr = &modelConfig.opAttr;
    aclOp.numInputs = static_cast<int32_t>(numInputs);
    aclOp.numOutputs = static_cast<int32_t>(numOutputs);
    aclOp.inputDesc = input_desc.data();
    aclOp.outputDesc = output_desc.data();
    if (!isStaticRegister) {
        // only dynamic load need update timestamp, static load is ULLONG_MAX default no need aging
        modelConfig.timestamp = attr_utils::GetCurrentTimestamp();
    }

    const auto modelDefPtr = std::shared_ptr<OpModelDef>(new (std::nothrow)OpModelDef(std::move(modelConfig)));
    ACL_REQUIRES_NOT_NULL(modelDefPtr);
    // agingModelDef receives the entry displaced by the insert, if any.
    std::shared_ptr<OpModelDef> agingModelDef = nullptr;
    if (isDynamic) {
        ACL_LOG_INFO("The model is dynamic shape");
        // Record the op's shape-range info and tag the definition with its sequence id.
        uint64_t seq = 0U;
        ShapeRangeUtils::GetInstance().SetTensorShapeInfo(aclOp, seq);
        modelDefPtr->seq = seq;
        ACL_REQUIRES_OK(opModelDefs.InsertDynamic(aclOp, modelDefPtr, agingModelDef));
    } else {
        ACL_LOG_INFO("Insert modeldef to hash map");
        ACL_REQUIRES_OK(opModelDefs.Insert(aclOp, modelDefPtr, agingModelDef));
    }

    if (agingModelDef != nullptr) {
        // Keep the cache consistent with the map: drop the evicted entry.
        ACL_REQUIRES_OK(modelCache_.Delete(*agingModelDef, isDynamic));
    }
    // Remember globally that some Cast op carries a "truncate" attribute.
    const bool castHasTruncate = ((!GetIfCastHasTruncateAttr()) && (aclOp.opType == "Cast")) &&
                           ((aclOp.opAttr != nullptr) && (aclOp.opAttr->HasAttr("truncate")));
    if (castHasTruncate) {
        ACL_LOG_INFO("Find cast op whose attr contains truncate");
        SetCastHasTruncateAttr(true);
    }
    ACL_LOG_INFO("Register model. OpModelDef = %s", modelDefPtr->DebugString().c_str());
    return ACL_SUCCESS;
}

// Marks a tensor descriptor as const and, when the buffer is non-empty, attaches
// a non-owning view of the caller's host memory as its const data.
// @param desc       descriptor to mutate (must not be null)
// @param dataBuffer host buffer providing the const data (must not be null)
aclError OpModelManager::SetTensorConst(aclTensorDesc *const desc, const aclDataBuffer *const dataBuffer)
{
    ACL_REQUIRES_NOT_NULL(desc);
    ACL_REQUIRES_NOT_NULL(dataBuffer);
    desc->isConst = true;
    const size_t bufLen = dataBuffer->length;
    if (bufLen == 0U) {
        // Empty buffer: tag as const but leave the data fields untouched.
        return ACL_SUCCESS;
    }

    // Non-owning reference: the no-op deleter leaves ownership with the caller.
    desc->constDataBuf.reset(reinterpret_cast<char_t *>(dataBuffer->data), [](const char_t *const) {});
    desc->constDataLen = bufLen;
    return ACL_SUCCESS;
}

// Promotes every non-const host-memory input/output tensor of the op to a const
// tensor backed by its data buffer. Sets isExistConst to true when at least one
// descriptor was promoted (it is never reset to false here).
aclError OpModelManager::SetHostMemToConst(const AclOp &aclopHostMemToConst, bool &isExistConst) const
{
    for (int32_t idx = 0; idx < aclopHostMemToConst.numInputs; ++idx) {
        const aclTensorDesc *const inDesc = aclopHostMemToConst.inputDesc[idx];
        if ((inDesc->memtype != ACL_MEMTYPE_HOST) || inDesc->isConst) {
            continue;
        }
        isExistConst = true;
        // HostMem needs to be constructed as constTensor
        ACL_REQUIRES_OK(SetTensorConst(const_cast<aclTensorDesc *>(inDesc),
            aclopHostMemToConst.inputs[idx]));
    }
    for (int32_t idx = 0; idx < aclopHostMemToConst.numOutputs; ++idx) {
        const aclTensorDesc *const outDesc = aclopHostMemToConst.outputDesc[idx];
        if ((outDesc->memtype != ACL_MEMTYPE_HOST) || outDesc->isConst) {
            continue;
        }
        isExistConst = true;
        // HostMem needs to be constructed as constTensor
        ACL_REQUIRES_OK(SetTensorConst(const_cast<aclTensorDesc *>(outDesc),
            aclopHostMemToConst.outputs[idx]));
    }
    return ACL_SUCCESS;
}

// Finds a model for the op: first tries to match an already-registered model;
// on a miss, compiles the op on demand and registers the result.
aclError OpModelManager::GetOpModel(AclOp &aclOp)
{
    bool dynamicModel = false;
    const aclError matchRet = MatchOpModel(aclOp, aclOp.opModel, dynamicModel);
    if (matchRet == ACL_SUCCESS) {
        aclOp.isMatched = true;
        aclOp.isDynamic = dynamicModel;
        ACL_LOG_INFO("operator %s is already registered, isDynamicModel = %d", aclOp.opType.c_str(),
                     static_cast<int32_t>(dynamicModel));
        return matchRet;
    }

    // No cached model matched: compile on demand.
    const aclError buildRet = BuildOpModel(aclOp);
    if (buildRet != ACL_SUCCESS) {
        ACL_LOG_INNER_ERROR("[Build][Op]Fail to build op model");
        return buildRet;
    }

    return ACL_SUCCESS;
}

// Tries to match the op against the static-shape model map.
// When host-memory tensors were promoted to const, two lookups are performed:
// first with the const-marked descriptors, then (after RecoverConst) with the
// original descriptors. isNeedMatchDymaic is set only when no static model
// matched, signalling the caller to fall back to the dynamic map.
// (Name typo "Dymaic" matches the declaration; kept for consistency.)
aclError OpModelManager::MatchStaticOpModel(const AclOp &aclOp, OpModel &opModel,
    bool &isDynamic, bool &isNeedMatchDymaic)
{
    PROFILING_SCOPE(-1, ge::profiling::kAclMatchStaticOpModel);
    std::shared_ptr<OpModelDef> modelDef;
    aclError ret = ACL_SUCCESS;
    isNeedMatchDymaic = false;
    bool isExistConst = false;
    // Promote host-memory tensors to const so the key can match const-folded models.
    ACL_REQUIRES_OK(SetHostMemToConst(aclOp, isExistConst));
    if (isExistConst) {
        // Lookup with the const-promoted descriptors.
        ret = opModels_.Get(aclOp, modelDef, true);
        // Restore original descriptors before any further lookup or return.
        aclOp.RecoverConst();
        if (ret == ACL_SUCCESS) {
            isDynamic = false;
            ACL_LOG_INFO("Match static model with const memory successfully. opType = %s, opModel = %s",
                aclOp.opType.c_str(), modelDef->modelPath.c_str());
            ret = modelCache_.GetOpModel(*modelDef, opModel);
            return ret;
        } else {
            // Second attempt: descriptors are back to their non-const state.
            ret = opModels_.Get(aclOp, modelDef, true);
            if (ret == ACL_SUCCESS) {
                isDynamic = false;
                ACL_LOG_INFO("Match static model successfully. opType = %s, opModel = %s",
                    aclOp.opType.c_str(), modelDef->modelPath.c_str());
                ret = modelCache_.GetOpModel(*modelDef, opModel);
                return ret;
            }
        }
    } else {
        // No const promotion happened: single lookup with the original descriptors.
        ret = opModels_.Get(aclOp, modelDef, true);
        if (ret == ACL_SUCCESS) {
            isDynamic = false;
            ACL_LOG_INFO("Match static model successfully. opType = %s, opModel = %s",
                aclOp.opType.c_str(), modelDef->modelPath.c_str());
            ret = modelCache_.GetOpModel(*modelDef, opModel);
            return ret;
        }
    }
    // Nothing matched in the static map: ask the caller to try the dynamic map.
    isNeedMatchDymaic = true;

    return ret;
}

// Tries to match the op against the dynamic-shape model map by walking every
// recorded shape-range entry for the op type. For each candidate range, a plain
// lookup is tried first; on a miss, host-memory tensors are promoted to const
// and the lookup is retried (then the descriptors are restored).
aclError OpModelManager::MatchDynamicOpModel(const AclOp &aclOp, OpModel &opModel, bool &isDynamic)
{
    PROFILING_SCOPE(-1, ge::profiling::kAclMatchDynamicOpModel);
    std::shared_ptr<OpModelDef> modelDef;
    aclError ret = ACL_SUCCESS;
    // check the input shape must be static when executing
    if (!aclOp.isCompile) {
        if (IsDynamicOpModel(aclOp)) {
            ACL_LOG_INNER_ERROR("[Check][Op]TensorDesc must be static when executing, "
                "tensorDesc is %s", aclOp.DebugString().c_str());
            return ACL_ERROR_INVALID_PARAM;
        }
    }
    // Need to refresh the shape(-1/-2) and go to map to find model when executing
    std::vector<OpRangeInfo> *opRangeInfoVec;
    // Read-lock guards the shared shape-range table for the whole matching loop.
    const MmRDLockGuard lk(&ShapeRangeUtils::GetInstance().shapeInfoMutex_);
    opRangeInfoVec = ShapeRangeUtils::GetInstance().GetTensorShapeInfo(aclOp);
    if (opRangeInfoVec == nullptr) {
        // No range info recorded for this op; only an error when executing
        // (at compile time a miss simply means "not compiled yet").
        if (!aclOp.isCompile) {
            ACL_LOG_INNER_ERROR("[Match][Model]failed to match model, opName = %s has not been compiled or"
                " loaded, Please make sure the op executed and the op compiled is matched, you can check"
                " the op type, opinputs number, outputs number, input format, origin format, datatype, memtype,"
                " attr, dim range and so on.", aclOp.opType.c_str());
        }
        return ACL_ERROR_OP_NOT_FOUND;
    }
    for (size_t i = 0U; i < opRangeInfoVec->size(); ++i) {
        // Skip range entries whose recorded shape ranges do not cover this op's shapes.
        if (!ShapeRangeUtils::GetInstance().CheckShapeRange(aclOp, (*opRangeInfoVec)[i])) {
            continue;
        }
        ret = opModels_.GetDynamic(aclOp, modelDef, (*opRangeInfoVec)[i].seq, true);
        if (ret == ACL_SUCCESS) {
            ACL_LOG_INFO("Match dynamic model success. opType = %s, opModel = %s",
                aclOp.opType.c_str(), modelDef->modelPath.c_str());
            isDynamic = true;
            ret = modelCache_.GetOpModel(*modelDef, opModel);
            return ret;
        } else {
            // aicpu is -2 but need to set const
            bool isExistConst = false;
            ACL_REQUIRES_OK(SetHostMemToConst(aclOp, isExistConst));
            if (!isExistConst) {
                // Nothing was promoted, so retrying would repeat the same lookup.
                continue;
            }
            // Retry with const-promoted descriptors, then restore them.
            ret = opModels_.GetDynamic(aclOp, modelDef, (*opRangeInfoVec)[i].seq, true);
            aclOp.RecoverConst();
            if (ret == ACL_SUCCESS) {
                ACL_LOG_INFO("Match dynamic model success. opType = %s, opModel = %s",
                    aclOp.opType.c_str(), modelDef->modelPath.c_str());
                isDynamic = true;
                ret = modelCache_.GetOpModel(*modelDef, opModel);
                return ret;
            }
        }
    }
    ACL_CHECK_WITH_MESSAGE_AND_NO_RETURN(aclOp.isCompile, "[Match][OpModel]MatchOpModel fail from static"
        " map or dynamic map. Please make sure the op executed and the op compiled is matched, you can check the op"
        " type, op inputs number, outputs number, input format, origin format, datatype, memtype, attr, dim range, "
        " and so on.");
    return ACL_ERROR_OP_NOT_FOUND;
}

// Matches the op against registered models: static-shape map first, then the
// dynamic-shape map only when the static pass explicitly requests the fallback.
aclError OpModelManager::MatchOpModel(const AclOp &aclOp, OpModel &opModel, bool &isDynamic)
{
    PROFILING_SCOPE(-1, ge::profiling::kAclMatchOpModel);
    // Snapshot const state so lookups can promote/restore descriptors safely.
    aclOp.BackupConst();
    bool needDynamicMatch = false;
    const aclError staticRet = MatchStaticOpModel(aclOp, opModel, isDynamic, needDynamicMatch);
    if (!needDynamicMatch) {
        return staticRet;
    }
    return MatchDynamicOpModel(aclOp, opModel, isDynamic);
}

// Scans configPath (up to OM_DIR_MAX_DEPTH levels) for .om files, parses each
// into an OpModelDef, appends parseable ones to configList, and caches their
// raw model data. Unparseable files are skipped with a warning.
aclError OpModelManager::ReadModelDefs(const std::string &configPath,
    std::vector<OpModelDef> &configList)
{
    std::vector<std::string> modelFilePaths;
    ACL_REQUIRES_OK(file_utils::ListFiles(configPath, &OmFileFilterFn, modelFilePaths, OM_DIR_MAX_DEPTH));

    for (auto &filePath : modelFilePaths) {
        OpModel opModel;
        ACL_REQUIRES_OK(ReadOpModelFromFile(filePath, opModel));
        OpModelDef modelDef;
        modelDef.modelPath = filePath;
        const aclError parseRet = OpModelParser::ParseOpModel(opModel, modelDef);
        if (parseRet != ACL_SUCCESS) {
            // A bad file does not abort the whole scan.
            ACL_LOG_WARN("can not parse model, errorCode = %d, model = %s, skip it",
                parseRet, modelDef.modelPath.c_str());
            continue;
        }

        // Cache against the stored element so the cache sees the final object.
        configList.emplace_back(std::move(modelDef));
        ACL_LOG_INFO("Add model: %s", configList.back().modelPath.c_str());
        ACL_REQUIRES_OK(modelCache_.Add(configList.back(), opModel));
    }

    return ACL_SUCCESS;
}

// Compiles the op into an in-memory model and registers it as a dynamic load
// (compiled-on-demand models participate in aging).
aclError OpModelManager::BuildOpModel(const AclOp &aclOp)
{
    PROFILING_SCOPE(-1, ge::profiling::kAclBuildOpModel);
    std::shared_ptr<void> compiledData;
    size_t compiledSize;
    const aclError compileRet = OpCompileService::GetInstance().CompileOp(aclOp, compiledData, compiledSize);
    if (compileRet != ACL_SUCCESS) {
        return compileRet;
    }
    // it is dynamic load
    const aclError loadRet = LoadModelFromSharedMem(compiledData, compiledSize, &aclOp, false);
    if (loadRet != ACL_SUCCESS) {
        ACL_LOG_INNER_ERROR("[Load][Model]load model from mem failed.");
        return loadRet;
    }

    ACL_LOG_INFO("Compile operator %s success", aclOp.opType.c_str());
    return ACL_SUCCESS;
}
}
