/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "gtest/gtest.h"

#include "runtime/rt_model.h"
#include "external/runtime/rt_error_codes.h"
#include "graph/ge_tensor.h"
#include "graph/op_desc.h"
#include "graph/compute_graph.h"
#include "graph/utils/op_desc_utils.h"
#include "graph/utils/attr_utils.h"
#include "graph/utils/tensor_utils.h"
#include "graph/utils/graph_utils.h"
#include "common/l2_stream_info.h"
#include "common/resource_def.h"
#include "common/util/op_info_util.h"
#include "common/comm_error_codes.h"
#include "../fe_test_utils.h"
#include "securec.h"


#define protected public
#define private public
#include "common/fe_log.h"
#include "adapter/tbe_adapter/tbe_task_builder_adapter.h"
#include "adapter/adapter_itf/task_builder_adapter.h"
#include "adapter/adapter_itf/op_store_adapter.h"
#include "adapter/common/op_store_adapter_manager.h"
#include "adapter/tbe_adapter/tbe_op_store_adapter.h"
#include "fusion_manager/fusion_manager.h"
#include "task_builder/task_builder.h"
#include "graph_optimizer/stream_graph_optimizer/l2_optimizer/l2_optimizer.h"
#undef private
#undef protected
// Fixed byte size stamped onto every tensor descriptor in these tests
// (see STEST_TaskBuilder::SetOpDecSize). A typed constexpr instead of a
// macro: scoped, type-checked and visible in a debugger.
constexpr int64_t SET_SIZE = 128;

using namespace std;
using namespace testing;
using namespace ge;
using namespace domi;
using namespace fe;

using OpStoreAdapterManagerPtr = std::shared_ptr<fe::OpStoreAdapterManager>;

// Ops-store descriptors injected into Configuration in SetUp() so the
// TaskBuilder resolves kernels from the test fixture directories.
// Aggregate fields appear to be: {priority/id, store name, implementation
// type, op-info config path, extra path (unused here)}.
// NOTE(review): confirm field order against the FEOpsStoreInfo declaration.
FEOpsStoreInfo cce_custom_opinfo_adapter {
      0,
      "cce-custom",
      EN_IMPL_CUSTOM_TBE,
      "./air/test/engines/nneng/ut/testcase/fusion_engine/ops_kernel_store/fe_config/cce_custom_opinfo",
      ""
};
FEOpsStoreInfo tik_custom_opinfo_adapter  {
      1,
      "tik-custom",
      EN_IMPL_CUSTOM_TIK,
      "./air/test/engines/nneng/ut/testcase/fusion_engine/ops_kernel_store/fe_config/tik_custom_opinfo",
      ""
};
FEOpsStoreInfo tbe_custom_opinfo_adapter  {
      2,
      "tbe-custom",
      EN_IMPL_CUSTOM_TBE,
      "./air/test/engines/nneng/ut/testcase/fusion_engine/ops_kernel_store/fe_config/tbe_custom_opinfo",
      ""
};
FEOpsStoreInfo cce_constant_opinfo_adapter  {
      3,
      "cce-constant",
      EN_IMPL_CUSTOM_TBE,
      "./air/test/engines/nneng/ut/testcase/fusion_engine/ops_kernel_store/fe_config/cce_constant_opinfo",
      ""
};
FEOpsStoreInfo cce_general_opinfo_adapter  {
      4,
      "cce-general",
      EN_IMPL_CUSTOM_TBE,
      "./air/test/engines/nneng/ut/testcase/fusion_engine/ops_kernel_store/fe_config/cce_general_opinfo",
      ""
};
FEOpsStoreInfo tik_opinfo_adapter  {
      5,
      "tik-builtin",
      EN_IMPL_HW_TIK,
      "./air/test/engines/nneng/ut/testcase/fusion_engine/ops_kernel_store/fe_config/tik_opinfo",
      ""
};
FEOpsStoreInfo tbe_opinfo_adapter  {
      6,
      "tbe-builtin",
      EN_IMPL_HW_TBE,
      "./air/test/engines/nneng/ut/testcase/fusion_engine/ops_kernel_store/fe_config/tbe_opinfo",
      ""
};
FEOpsStoreInfo rl_opinfo_adapter  {
      7,
      "rl-builtin",
      EN_IMPL_RL,
      "./air/test/engines/nneng/ut/testcase/fusion_engine/ops_kernel_store/fe_config/rl_opinfo",
      ""
};

// Complete store list handed to Configuration::Instance(...) in SetUp().
std::vector<FEOpsStoreInfo> all_fe_ops_store_info_adapter{
      cce_custom_opinfo_adapter ,
      tik_custom_opinfo_adapter ,
      tbe_custom_opinfo_adapter ,
      cce_constant_opinfo_adapter ,
      cce_general_opinfo_adapter ,
      tik_opinfo_adapter ,
      tbe_opinfo_adapter ,
      rl_opinfo_adapter
};

class STEST_TaskBuilder: public testing::Test {
protected:

    static void SetOpDecSize(NodePtr& node){
        OpDesc::Vistor<GeTensorDesc> tensors = node->GetOpDesc()->GetAllInputsDesc();
        for (int i = 0; i < node->GetOpDesc()->GetAllInputsDesc().size(); i++){
            ge::GeTensorDesc tensor = node->GetOpDesc()->GetAllInputsDesc().at(i);
            ge::TensorUtils::SetSize(tensor, SET_SIZE);
            node->GetOpDesc()->UpdateInputDesc(i, tensor);
        }
        OpDesc::Vistor<GeTensorDesc> tensors_output = node->GetOpDesc()->GetAllOutputsDesc();
        for (int i = 0; i < tensors_output.size(); i++){
            ge::GeTensorDesc tensor_output = tensors_output.at(i);
            ge::TensorUtils::SetSize(tensor_output, SET_SIZE);
            node->GetOpDesc()->UpdateOutputDesc(i, tensor_output);
        }
    }

    void SetUp()
    {
        rtContext_t rt_context;
        assert(rtCtxCreate(&rt_context, RT_CTX_GEN_MODE, 0) == ACL_RT_SUCCESS);
        assert(rtCtxSetCurrent(rt_context) == ACL_RT_SUCCESS);
        //cce::cceSysInit();

        Configuration::Instance(fe::AI_CORE_NAME).ops_store_info_vector_ = (all_fe_ops_store_info_adapter);
        context_ = CreateContext();
        FusionManager::Instance(fe::AI_CORE_NAME).op_store_adapter_manager_ = make_shared<fe::OpStoreAdapterManager>();
        task_builder_ = shared_ptr <TaskBuilder> (new (nothrow) TaskBuilder());
        FusionManager::Instance(fe::AI_CORE_NAME).ops_kernel_info_store_ = make_shared<fe::FEOpsKernelInfoStore>(FusionManager::Instance(fe::AI_CORE_NAME).op_store_adapter_manager_);
        FusionManager::Instance(fe::AI_CORE_NAME).graph_opt_ = make_shared<fe::FEGraphOptimizer>(FusionManager::Instance(fe::AI_CORE_NAME).ops_kernel_info_store_,
                                                                                FusionManager::Instance(fe::AI_CORE_NAME).op_store_adapter_manager_);
        TbeOpStoreAdapter tbe_adapter;
        std:: map<string, string> options;
        FusionManager::Instance(fe::AI_CORE_NAME).op_store_adapter_manager_->Initialize(options, fe::AI_CORE_NAME);
        ops_adapter_manage_ptr = make_shared<OpStoreAdapterManager>();
    }

    void TearDown()
    {
        task_builder_.reset();
        FusionManager::Instance(fe::AI_CORE_NAME).Finalize();
        DestroyContext(context_);

        rtContext_t rt_context;
        assert(rtCtxGetCurrent(&rt_context) == ACL_RT_SUCCESS);
        assert(rtCtxDestroy(rt_context) == ACL_RT_SUCCESS);


    }

    static NodePtr CreateNodeWithoutAttrs(bool hasWeight = false)
    {
        FeTestOpDescBuilder builder;
        builder.SetName("test_tvm");
        builder.SetType("conv");
        builder.SetInputs( { 1 });
        builder.SetOutputs( { 1 });
        builder.AddInputDesc( { 1, 1, 1, 1 }, ge::FORMAT_NCHW, ge::DT_FLOAT);
        builder.AddOutputDesc( { 1, 1, 1, 1 }, ge::FORMAT_NCHW, ge::DT_FLOAT);
        if (hasWeight) {
            size_t len = 10;
            unique_ptr<float[]> buf(new float[len]);
            auto weight = builder.AddWeight((uint8_t*) buf.get(), len * sizeof(float), { 1, 1, 2, 5 }, ge::FORMAT_NCHW,
                    ge::DT_FLOAT);
            ge::TensorUtils::SetWeightSize(weight->MutableTensorDesc(), len * sizeof(float));
        }

        return builder.Finish();
    }

    static NodePtr CreateNode(bool hasWeight = false)
    {
        NodePtr node = CreateNodeWithoutAttrs(hasWeight);

        const char* bin_file = "./air/test/engines/nneng/stub/cce_reductionLayer_1_10_float16__1_SUMSQ_1_0.o";
        vector<char> buffer;
        assert(ReadBytesFromBinaryFile(bin_file, buffer));
        OpKernelBinPtr tbe_kernel_ptr = std::make_shared<OpKernelBin>(node->GetName(), std::move(buffer));
        node->GetOpDesc()->SetExtAttr(OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel_ptr);

        ge::AttrUtils::SetInt(node->GetOpDesc(), "_fe_imply_type", (int64_t) EN_IMPL_CUSTOM_TBE);
        ge::AttrUtils::SetStr(node->GetOpDesc(), node->GetName() + "_kernelname", "cce_reductionLayer_1_10_float16__1_SUMSQ_1_0__kernel0");
        ge::AttrUtils::SetInt(node->GetOpDesc(), "tvm_blockdim", 1);
        ge::AttrUtils::SetStr(node->GetOpDesc(), "tvm_magic", "RT_DEV_BINARY_MAGIC_ELF");
        ge::AttrUtils::SetBool(node->GetOpDesc(), "is_first_node", true);
        ge::AttrUtils::SetBool(node->GetOpDesc(), "is_last_node", true);
        std::string meta_data = "";
        meta_data.append("cce_reductionLayer_1_10_float16__1_SUMSQ_1_0"); // binFileName
        meta_data.append(".o");  // binFileSuffix
        meta_data.append(",version,");
        meta_data.append("c53fcf5403daaf993a95a4aeea228eae30196565d8b287bae9a4ca6a52e58c2b");    // sha256
        meta_data.append(",shared");
        ge::AttrUtils::SetStr(node->GetOpDesc(), "tvm_metadata", meta_data);
        SetOpDecSize(node);
        return node;
    }

    static RunContext CreateContext()
    {
        rtStream_t stream = nullptr;
        rtModel_t model = nullptr;

        assert(rtStreamCreate(&stream, 0) == ACL_RT_SUCCESS);
        assert(rtModelCreate(&model, 0) == ACL_RT_SUCCESS);
        assert(rtModelBindStream(model, stream, 0) == ACL_RT_SUCCESS);

        RunContext context;
        context.model = model;
        context.stream = stream;
        context.dataMemSize = 100;
        context.dataMemBase = (uint8_t *) (intptr_t) 1000;
        context.weightMemSize = 200;
        context.weightMemBase = (uint8_t *) (intptr_t) 1100;
        context.weightsBuffer = Buffer(20);

        return context;
    }

    static void DestroyContext(RunContext& context)
    {
        assert(rtModelUnbindStream(context.model, context.stream) == ACL_RT_SUCCESS);
        assert(rtModelDestroy(context.model) == ACL_RT_SUCCESS);
        assert(rtStreamDestroy(context.stream) == ACL_RT_SUCCESS);
    }

    static bool ReadBytesFromBinaryFile(const char* path, std::vector<char>& buffer)
    {
        if (path == nullptr)
            return false;

        std::ifstream file(path, std::ios::binary | std::ios::ate);
        if(!file.is_open())
            return false;

        std::streamsize size = file.tellg();

        if(size <= 0) {
            file.close();
            return false;
        }

        if (size > INT_MAX) {
            file.close();
            return false;
        }

        file.seekg(0, std::ios::beg);

        buffer.resize(size);
        file.read(buffer.data(), size);
        file.close();

        return true;
    }

    static Status DestroyHandle(ccHandle_t *handle)
    {
        if (NULL == handle || *handle == NULL)
        {
            FE_LOGE("handle is NULL!");
            return TASK_BUILDER_STATUS_BAD_PARAM;
        }
        ccClearOpMap(*handle);
        rtError_t ret;
        ret = rtFreeHost(*handle);
        if (ret != ACL_RT_SUCCESS)
        {
            FE_LOGE("free handler failed!");
            return fe::FAILED;
        }
        *handle = NULL;
        return fe::SUCCESS;
    };

    static void ReleaseGlobalResouce() {
        for (auto stubFunc : kStubFuncs) {
        delete[] stubFunc;
        }
        kStubFuncs.clear();
    }
protected:
    RunContext context_;
    std::shared_ptr<TaskBuilder> task_builder_;
    OpStoreAdapterManagerPtr ops_adapter_manage_ptr;
};

// Intentionally empty: verifies that the fixture's SetUp()/TearDown()
// (runtime context, model/stream and FusionManager wiring) succeed on
// their own before any TaskBuilder behavior is exercised.
TEST_F(STEST_TaskBuilder, case_prerequisite) {

}


// Happy path: a fully-attributed TBE node must generate tasks successfully.
TEST_F(STEST_TaskBuilder, case_all_success)
{
    NodePtr conv_node = CreateNode();
    std::vector<TaskDef> generated_tasks;

    Status ret = task_builder_->GenerateTask(*conv_node, context_, generated_tasks);
    EXPECT_EQ(ret, fe::SUCCESS);

    ReleaseGlobalResouce();
}

// Task generation must still succeed when L2 fusion info is registered
// for the stream and the FUSION_L2 feature is switched on.
TEST_F(STEST_TaskBuilder, l2with_confirm)
{
    fusion::TaskL2Info_t l2_info;
    l2_info.nodeName = "1";
    fusion::TaskL2InfoFEMap_t l2_info_map;
    l2_info_map["0"] = l2_info;

    StreamL2Info::Instance().SetStreamL2Info(context_.stream, l2_info_map, "Batch_-1");
    setFuncState(FUSION_L2, true);

    NodePtr node = CreateNode();
    std::vector<TaskDef> generated_tasks;
    EXPECT_EQ(task_builder_->GenerateTask(*node, context_, generated_tasks), fe::SUCCESS);

    ReleaseGlobalResouce();
}

// TaskGenCallback must accept an RT_MODEL_TASK_KERNEL_EX task for a model
// that has a builder registered in task_builder_map_.
TEST_F(STEST_TaskBuilder, case_TaskGenCallback_task_kernel_ex_success)
{
    // Zero-initialize: the callback may read union fields this test does
    // not set explicitly (was left uninitialized before).
    rtTaskInfo_t task_info = {};
    task_info.type = RT_MODEL_TASK_KERNEL_EX;

    int args = 0;  // was uninitialized; the callback copies argsSize bytes from it
    task_info.u.kernelTaskEx.argsSize = sizeof(args);
    task_info.u.kernelTaskEx.args = (uint8_t*)&args;

    std::vector<TaskDef> task_defs;
    task_builder_->task_defs_ = &task_defs;
    task_builder_->orig_op_indexes_.push_back(0);

    TaskBuilder::task_builder_map_[context_.model] = task_builder_.get();
    EXPECT_EQ(TaskBuilder::TaskGenCallback(context_.model, &task_info), fe::SUCCESS);
    TaskBuilder::task_builder_map_.erase(context_.model);
    // Don't leave the builder pointing at this test's local vector.
    task_builder_->task_defs_ = nullptr;
}

// FillKernelContextMultiKernel must succeed for both the AI_CORE and
// AI_CPU kernel types when given a two-entry args-offset table.
TEST_F(STEST_TaskBuilder, case_FillKernelContextMultiKernel_success)
{
    vector<uint32_t> orig_op_indexes = {1};
    // Zero-initialize: only argsOffset/argsCount are set explicitly and
    // the remaining union fields were previously uninitialized.
    rtTaskInfo_t task_info = {};
    rtAllKernelTaskInfo_t& kernel_task = task_info.u.allKernelTask;

    uint16_t args_offset[] = {0, sizeof(uint64_t)};
    kernel_task.argsOffset = args_offset;
    kernel_task.argsCount = 2;

    ccOpContext op_context = {};
    op_context.kernelType = ccKernelType::CCE_AI_CORE;
    op_context.isFlowtable = true;

    KernelContext kernel_context;
    // AI core path.
    EXPECT_EQ(TaskBuilder::FillKernelContextMultiKernel(orig_op_indexes,
              kernel_task, op_context, kernel_context), fe::SUCCESS);

    // AI cpu path.
    op_context.kernelType = ccKernelType::CCE_AI_CPU;
    EXPECT_EQ(TaskBuilder::FillKernelContextMultiKernel(orig_op_indexes,
              kernel_task, op_context, kernel_context), fe::SUCCESS);
}


// FillKernelContext must succeed for an AI_CORE flow-table kernel with a
// two-entry args-offset table.
TEST_F(STEST_TaskBuilder, case_FillKernelContext_kernel_type_cce_ai_core)
{
    vector<uint32_t> orig_op_indexes = {1};
    // Zero-initialize: fields other than argsOffset/argsCount were
    // previously left uninitialized.
    rtKernelTaskInfo_t kernel_task = {};

    uint16_t args_offset[] = {0, sizeof(uint64_t)};
    kernel_task.argsOffset = args_offset;
    kernel_task.argsCount = 2;

    ccOpContext op_context = {};
    op_context.kernelType = ccKernelType::CCE_AI_CORE;
    op_context.isFlowtable = true;

    KernelContext kernel_context;
    EXPECT_EQ(TaskBuilder::FillKernelContext(orig_op_indexes, kernel_task, op_context, kernel_context), fe::SUCCESS);
}

// FillFlowtable must copy the flow-table {pointer, size} pair encoded in
// the kernel task args into the kernel definition.
TEST_F(STEST_TaskBuilder, case_FillFlowtable_success)
{
    KernelContext kernel_context;
    rtKernelTaskInfo_t kernel_task = {};  // zero-init fields this test does not set
    KernelDef kernel_Def;

    kernel_context.set_is_flowtable(true);
    kernel_task.argsCount = 2;

    uint64_t value = 0;  // was uninitialized
    // args layout: [8-byte pointer to the flow table][4-byte table size].
    // alignas fixes the unaligned uint64_t store below (UB on the plain
    // uint8_t array the original used).
    alignas(uint64_t) uint8_t args[sizeof(uint64_t) + sizeof(uint32_t)] = {};
    *(uint64_t *)args = (uintptr_t)&value;  // uintptr_t, not intptr_t, for a pointer bit-pattern
    *(uint32_t *)(args + sizeof(uint64_t)) = sizeof(value);
    kernel_task.args = args;  // array decays; same address the original got from (uint8_t*)&args

    uint16_t args_offset[] = {0, sizeof(uint64_t)};
    kernel_task.argsOffset = args_offset;

    EXPECT_EQ(TaskBuilder::FillFlowtable(kernel_context, kernel_task, kernel_Def), fe::SUCCESS);
}


