/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2017-2020. All rights reserved.
 * Description: The description of the ai model executor manager class
 */

#include "ai_model_executor_manager.h"
#include "ai_base_executor_manager.h"
#include <mutex>
#include "debug/engine_log.h"
#include "infra/base/securestl.h"
#include "framework/common/types.h"
#include "framework/common/fmk_error_codes.h"
#include "framework/common/hcs_types.h"
#include "framework/common/memory_allocator_factory.h"
#include "framework/model/base_buffer.h"
#include "framework/graph/core/cgraph/graph_list_walker.h"
#include "framework/graph/core/node/node_functor.h"
#include "framework/graph/core/node/node_spec.h"
#include "framework/graph/utils/attr_utils.h"
#include "compiler/compiled_model.h"
#include "base/common/cl_manager/initializer.h"
#include "executor/model_executor_factory.h"

#if defined(HIAI_DDK)
#include "general_compute/model_buffer_helper.h"
#include "model/model_type_util.h"
#include "base/common/cl_manager/ops_kernel_store_manager.h"
#endif

using namespace std;
using namespace hiai;

namespace ge {
// Guards the lazy creation path inside ExecutorManager::Instance().
std::mutex ExecutorManager::instanceLock_;
// Singleton pointer; set by Instance() to the address of its function-local static.
ExecutorManager* ExecutorManager::instance_ = nullptr;

// Model ids handed out by this manager start at 1.
ExecutorManager::ExecutorManager() : currentModelId_(1)
{
}

ExecutorManager::~ExecutorManager()
{
    // Explicitly drop all cached model executors on teardown.
    modelExecutorMap_.clear();
}

/*
 * Returns the process-wide singleton ExecutorManager.
 *
 * The original code used a broken double-checked locking pattern: instance_
 * was read without synchronization, and after taking the lock there was no
 * re-check, so two racing threads could both run the init path (calling
 * hiai::Initializer::Init() twice) and the unlocked read of instance_ was a
 * data race. A C++11 function-local static is guaranteed to be initialized
 * exactly once in a thread-safe way, so we route first-time initialization
 * through one.
 */
ExecutorManager* ExecutorManager::Instance()
{
    static ExecutorManager* const initializedInstance = []() -> ExecutorManager* {
        std::lock_guard<std::mutex> lck(instanceLock_);
        // Initialize the computing library before the manager is first used.
#ifndef __AICP_TA__
        hiai::Initializer::Instance()->Init();
#endif
        static ExecutorManager instance;
        instance_ = &instance;
        return instance_;
    }();
    return initializedInstance;
}

// Stub: performs no actual model loading and reports success unconditionally.
uint32_t ExecutorManager::LoadModelProc(ModelType type, const LoadModelOptions& options, uint32_t& modelId,
    std::shared_ptr<hiai::ICompiledModel>& compiledModel)
{
    return HCS_SUCCESS;
}

// Stub: does not load the model buffer; modelId is left untouched and success is returned.
uint32_t ExecutorManager::LoadModel(const LoadModelOptions& options, const BaseBuffer& model, uint32_t& modelId)
{
    return HCS_SUCCESS;
}

// Stub: no unload work is performed; always reports success.
uint32_t ExecutorManager::UnloadModel(uint32_t modelId)
{
    return HCS_SUCCESS;
}

/*
 * Reshapes the inputs of a loaded model and retrieves the resulting output
 * shapes. Looks up the IR-graph executor for the given model id and delegates
 * the request to it; fails when no executor exists or the executor rejects
 * the new shapes.
 */
uint32_t ExecutorManager::Reshape(uint32_t modelId, const std::vector<std::vector<int64_t>>& inputShape,
    std::vector<std::vector<int64_t>>& outputShape)
{
    auto modelExecutor =
        aicp::ModelExecutorFactory::GetInstance().Create(hiai::ModelType::STANDARD_IR_GRAPH_MODEL, modelId);
    if (modelExecutor == nullptr) {
        return HCS_FAILED;
    }

    const auto ret = modelExecutor->Reshape(inputShape, outputShape);
    return (ret == HCS_SUCCESS) ? HCS_SUCCESS : HCS_FAILED;
}

// Stub: no inference is run; output buffers/shapes are untouched and success is returned.
uint32_t ExecutorManager::Execute(uint32_t modelId, const std::vector<BaseBuffer>& input,
    std::vector<BaseBuffer>& output, const std::vector<std::vector<int64_t>>& inputShapes,
    std::vector<std::vector<int64_t>>& outputShapes)
{
    return HCS_SUCCESS;
}

uint32_t ExecutorManager::ExecuteAsync(uint32_t modelId, uint32_t taskId, const std::vector<BaseBuffer>& input,
    std::vector<BaseBuffer>& output, const std::vector<std::vector<int64_t>>& inputShapes,
    std::vector<std::vector<int64_t>>& outputShapes, std::shared_ptr<ExecutionCallback> callback)
{
    (*callback)(modelId, taskId, HCS_FAILED);
    return HCS_SUCCESS;
}

// Stub: TensorBuffer-based execute; no inference is performed, success is returned.
uint32_t ExecutorManager::Execute(
    uint32_t modelId, const vector<ge::TensorBuffer>& input, vector<ge::TensorBuffer>& output)
{
    return HCS_SUCCESS;
}

// Stub: BaseBuffer-based execute; no inference is performed, success is returned.
uint32_t ExecutorManager::Execute(uint32_t modelId, const vector<ge::BaseBuffer>& input, vector<ge::BaseBuffer>& output)
{
    return HCS_SUCCESS;
}

// Stub: asynchronous TensorBuffer execute; nothing is scheduled and the
// callback is never invoked — returns success immediately.
uint32_t ExecutorManager::ExecuteAsync(uint32_t modelId, uint32_t taskId, const std::vector<ge::TensorBuffer>& input,
    std::vector<ge::TensorBuffer>& output, std::shared_ptr<ExecutionCallback> callback)
{
    return HCS_SUCCESS;
}

// Stub: asynchronous BaseBuffer execute; nothing is scheduled and the
// callback is never invoked — returns success immediately.
uint32_t ExecutorManager::ExecuteAsync(uint32_t modelId, uint32_t taskId, const std::vector<ge::BaseBuffer>& input,
    std::vector<ge::BaseBuffer>& output, std::shared_ptr<ExecutionCallback> callback)
{
    return HCS_SUCCESS;
}

// Stub: no task bookkeeping exists to cancel against; always reports success.
uint32_t ExecutorManager::CancelTask(uint32_t modelId, uint32_t taskId)
{
    return HCS_SUCCESS;
}

uint32_t ExecutorManager::GetInputDescInfos(uint32_t modelId, vector<hiai::InputOutputDescInfo>& inputDimVec)
{
    std::cout << __func__ << std::endl;
    hiai::InputOutputDescInfo info1;
    info1.name = "input1";
    info1.size = 10;
    info1.dataType = 2;
    info1.format = 1;
    info1.shapeInfo.num = 10;
    info1.shapeInfo.channel = 3;
    info1.shapeInfo.height = 125;
    info1.shapeInfo.width = 130;
    info1.shapeInfo.dims = {10, 3, 125, 130};

    hiai::InputOutputDescInfo info2;
    info2.name = "input2";
    info2.size = 20;
    info2.dataType = 2;
    info2.format = 1;
    info2.shapeInfo.num = 20;
    info2.shapeInfo.channel = 4;
    info2.shapeInfo.height = 255;
    info2.shapeInfo.width = 526;
    info2.shapeInfo.dims = {20, 4, 255, 526};

    inputDimVec.push_back(info1);
    inputDimVec.push_back(info2);
    return HCS_SUCCESS;
}

uint32_t ExecutorManager::GetOutputDescInfos(uint32_t modelId, vector<hiai::InputOutputDescInfo>& outputDimVec)
{
    std::cout << __func__ << std::endl;
    hiai::InputOutputDescInfo info;
    info.name = "output";
    info.size = 15;
    info.dataType = 3;
    info.format = 2;
    info.shapeInfo.num = 11;
    info.shapeInfo.channel = 5;
    info.shapeInfo.height = 150;
    info.shapeInfo.width = 160;
    info.shapeInfo.dims = {11, 5, 150, 160};

    outputDimVec.push_back(info);
    return HCS_SUCCESS;
}

// Stub: batchNum and aippNodesCount are left untouched; always reports success.
uint32_t ExecutorManager::GetTensorAippInfo(
    uint32_t modelId, uint32_t inputIndex, uint32_t& batchNum, uint32_t& aippNodesCount)
{
    return HCS_SUCCESS;
}

// Stub: no AIPP parameter buffers are produced; always reports success.
uint32_t ExecutorManager::GetTensorAippParas(
    uint32_t modelId, uint32_t inputIndex, std::vector<BaseBuffer>& aippParaBuffers)
{
    return HCS_SUCCESS;
}

// Stub: treats every model buffer as compatible without inspecting it.
uint32_t ExecutorManager::CheckModelCompatibility(const BaseBuffer& modelBuffer)
{
    return HCS_SUCCESS;
}

// Stub: exe is left untouched (not populated with an executor); always reports success.
uint32_t ExecutorManager::GetExecutor(uint32_t modelId, std::shared_ptr<IModelExecutor>& exe)
{
    return HCS_SUCCESS;
}

// Stub: the priority value is ignored; always reports success.
uint32_t ExecutorManager::SetModelPriority(uint32_t modelId, int32_t priority)
{
    return HCS_SUCCESS;
}

// Stub: cancellation is not implemented in this mock; intentionally a no-op.
void ExecutorManager::Cancel(uint32_t modelId)
{
}

#if defined(HIAI_DDK)
// Classifies how the DDK should obtain/execute a model.
enum DdkGetModelType {
    DDK_MODEL_USE_C_INTF = 0, // FP16 IR models, legacy models, and single-segment partitioned heterogeneous models whose compute library is tagged NPU
    DDK_MODEL_USE_HCS_INTF, // FP32 IR models, one-sided quantized models, and single-segment partitioned heterogeneous models whose compute library is tagged CPU_APP
    DDK_MODEL_USE_DIRECT_C_INTF // Calls the C interface directly; supports third-party models
};

// Stub: IR-model build overload; outputBuffer is left untouched and success is returned.
uint32_t ExecutorManager::BuildModel(
    const BuildOptions& buildOptions, const Model& irModel, ge::BaseBuffer& outputBuffer)
{
    return HCS_SUCCESS;
}

uint32_t ExecutorManager::BuildModel(
    const BuildOptions& buildOptions, const BaseBuffer& modelBuffer, ge::BaseBuffer& outputBuffer)
{
    uint8_t* basePtr = new (std::nothrow) uint8_t[100 * 1024]();
    outputBuffer.SetData(basePtr);
    outputBuffer.SetSize(100 * 1024);
    return HCS_SUCCESS;
}
#endif
} // namespace ge
