/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>


#define protected public
#define private public
#include "platform_info.h"
#include "graph_optimizer/op_compiler/op_compiler.h"
#include "graph/utils/graph_utils.h"
#include "ops_kernel_store/sub_ops_store.h"
#include "fusion_manager/fusion_manager.h"
#include "adapter/tbe_adapter/tbe_op_store_adapter.h"
#include "common/configuration.h"
#include "ops_store/sub_op_info_store.h"
#include "ops_store/ops_kernel_manager.h"
#include "common/fe_type_utils.h"
#undef private
#undef protected

using namespace testing;
using namespace fe;
using namespace ge;
class UTEST_fusion_engine_op_compiler : public testing::Test
{
    friend class OpCompiler;
  protected:
    void SetUp()
    {
        op_store_adapter_manager_ptr_ = std::make_shared<OpStoreAdapterManager>();

        FEOpsStoreInfo ops_store_info;
        SubOpInfoStorePtr sub_ops_kernel_ptr = std::make_shared<SubOpInfoStore>(ops_store_info);

        OpContent op_content;
        op_content.op_type_ = "otherNode";
        map<std::string, std::string> in_info_map;
        in_info_map.emplace(std::make_pair("format", "NC1HWC0,NCHW"));
        in_info_map.emplace(std::make_pair("dtype", "float"));
        op_content.map_kernel_info_.emplace(std::make_pair("input0", in_info_map));
        op_content.map_kernel_info_.emplace(std::make_pair("input1", in_info_map));
        op_content.map_kernel_info_.emplace(std::make_pair("output0", in_info_map));
        op_content.map_kernel_info_.emplace(std::make_pair("output1", in_info_map));
        sub_ops_kernel_ptr->op_content_map_.emplace(std::make_pair("otherNode", op_content));

        OpKernelInfoPtr info_ptr = std::make_shared<OpKernelInfo>("otherNode");
        info_ptr->impl_type_ = EN_IMPL_HW_TBE;
        sub_ops_kernel_ptr->op_kernel_info_map_.emplace(std::make_pair("otherNode", info_ptr));

        ops_kernel_info_store_ptr_ = std::make_shared<FEOpsKernelInfoStore>(op_store_adapter_manager_ptr_, fe::AI_CORE_NAME);
        SubOpsStorePtr sub_ops_store_ptr = std::make_shared<SubOpsStore>(op_store_adapter_manager_ptr_);
        sub_ops_store_ptr->format_dtype_querier_ptr_ =
                std::make_shared<FormatDtypeQuerier>(op_store_adapter_manager_ptr_);
        ops_kernel_info_store_ptr_->map_all_sub_store_info_.emplace(std::make_pair("tbe-builtin", sub_ops_store_ptr));
        OpsKernelManager::Instance(AI_CORE_NAME).sub_ops_kernel_map_["tbe-builtin"] = sub_ops_kernel_ptr;

        graph_ = CreateTestGraph();
        graph_cce_ = CreateCceGraph();
        graph_mix_ = CreateMixGraph();
    }

    void TearDown()
    {

    }

    static NodePtr CreateCceNode(string name, GeTensorDescPtr tensor_desc_ptr, ComputeGraphPtr graph)
    {
        OpDescPtr other_desc_ptr = std::make_shared<OpDesc>(name, "otherNode");
        //set OpDesc
        auto local_tensor_desc = tensor_desc_ptr->Clone();
        // add two input desc
        for (int i = 0; i < 2; ++i) {
            AttrUtils::SetStr(local_tensor_desc, "name", name + "In" + std::to_string(i));
            other_desc_ptr->AddInputDesc(local_tensor_desc);
        }
        // add two output desc
        for (int i = 0; i < 2; ++i) {
            AttrUtils::SetStr(local_tensor_desc, "name", name + "Out" + std::to_string(i));
            other_desc_ptr->AddOutputDesc(local_tensor_desc);
        }
        // add node from other_desc_ptr to graph
        // set attr
        AttrUtils::SetInt(other_desc_ptr, "T", DT_FLOAT);
        AttrUtils::SetInt(other_desc_ptr, "_fe_imply_type", EN_IMPL_HW_GENERAL_CCE);

        NodePtr node_other = graph->AddNode(other_desc_ptr);

        return node_other;
    }

    static NodePtr CreateOtherNode(string name, GeTensorDescPtr tensor_desc_ptr, ComputeGraphPtr graph)
    {
        OpDescPtr other_desc_ptr = std::make_shared<OpDesc>(name, "otherNode");
        //set OpDesc
        auto local_tensor_desc = tensor_desc_ptr->Clone();
        // add two input desc
        for (int i = 0; i < 2; ++i) {
            AttrUtils::SetStr(local_tensor_desc, "name", name + "In" + std::to_string(i));
            other_desc_ptr->AddInputDesc(local_tensor_desc);
        }
        // add two output desc
        for (int i = 0; i < 2; ++i) {
            AttrUtils::SetStr(local_tensor_desc, "name", name + "Out" + std::to_string(i));
            other_desc_ptr->AddOutputDesc(local_tensor_desc);
        }
        // add node from other_desc_ptr to graph
        // set attr
        AttrUtils::SetInt(other_desc_ptr, "T", DT_FLOAT);
        AttrUtils::SetInt(other_desc_ptr, "_fe_imply_type", EN_IMPL_HW_TBE);

        NodePtr node_other = graph->AddNode(other_desc_ptr);

        return node_other;
    }

    static ComputeGraphPtr CreateCceGraph()
    {
        ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
         // new a output GeTensorDesc
        GeTensorDescPtr general_ge_tensor_desc = std::make_shared<GeTensorDesc>();
        general_ge_tensor_desc->SetFormat(FORMAT_NCHW);
        general_ge_tensor_desc->SetDataType(DT_FLOAT);

        int total_node_num = 4;
        vector<NodePtr> nodes;
        for (int i = 0; i < total_node_num; ++i) {
            nodes.push_back(CreateCceNode("test/other" + std::to_string(i), general_ge_tensor_desc, graph));
        }
         /* add link of anchors */
        std::vector<OutDataAnchorPtr> srcs;
        std::vector<InDataAnchorPtr> dsts;
        for (int i = 0; i < total_node_num - 1; ++i) {
            srcs.push_back(nodes[i]->GetOutDataAnchor(0));
            dsts.push_back(nodes[i + 1]->GetInDataAnchor(0));
            srcs.push_back(nodes[i]->GetOutDataAnchor(1));
            dsts.push_back(nodes[i + 1]->GetInDataAnchor(1));
        }

        // add edges
        for (int i = 0; i < srcs.size(); ++i)
        {
            GraphUtils::AddEdge(srcs[i], dsts[i]);
        }

        return graph;
    }

    static ComputeGraphPtr CreateMixGraph()
    {
        ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
         // new a output GeTensorDesc
        GeTensorDescPtr general_ge_tensor_desc = std::make_shared<GeTensorDesc>();
        general_ge_tensor_desc->SetFormat(FORMAT_NCHW);
        general_ge_tensor_desc->SetDataType(DT_FLOAT);

        int total_node_num = 4;
        vector<NodePtr> nodes;
        for (int i = 0; i < 2; ++i) {
            nodes.push_back(CreateOtherNode("test/other" + std::to_string(i), general_ge_tensor_desc, graph));
        }
        for (int i = 2; i < total_node_num; ++i) {
            nodes.push_back(CreateCceNode("test/other" + std::to_string(i), general_ge_tensor_desc, graph));
        }
         /* add link of anchors */
        std::vector<OutDataAnchorPtr> srcs;
        std::vector<InDataAnchorPtr> dsts;
        for (int i = 0; i < total_node_num - 1; ++i) {
            srcs.push_back(nodes[i]->GetOutDataAnchor(0));
            dsts.push_back(nodes[i + 1]->GetInDataAnchor(0));
            srcs.push_back(nodes[i]->GetOutDataAnchor(1));
            dsts.push_back(nodes[i + 1]->GetInDataAnchor(1));
        }

        // add edges
        for (int i = 0; i < srcs.size(); ++i)
        {
            GraphUtils::AddEdge(srcs[i], dsts[i]);
        }

        return graph;
    }

    static ComputeGraphPtr CreateTestGraph()
    {
        ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
         // new a output GeTensorDesc
        GeTensorDescPtr general_ge_tensor_desc = std::make_shared<GeTensorDesc>();
        general_ge_tensor_desc->SetFormat(FORMAT_NCHW);
        general_ge_tensor_desc->SetDataType(DT_FLOAT);

        int total_node_num = 4;
        vector<NodePtr> nodes;
        for (int i = 0; i < total_node_num; ++i) {
            if (i == 0) {
              NodePtr node_other = CreateOtherNode("test/other" + std::to_string(i), general_ge_tensor_desc, graph);
              node_other->GetOpDesc()->SetType("Cast");
              nodes.push_back(node_other);
            } else {
              nodes.push_back(CreateOtherNode("test/other" + std::to_string(i), general_ge_tensor_desc, graph));
            }
        }
         /* add link of anchors */
        std::vector<OutDataAnchorPtr> srcs;
        std::vector<InDataAnchorPtr> dsts;
        for (int i = 0; i < total_node_num - 1; ++i) {
            srcs.push_back(nodes[i]->GetOutDataAnchor(0));
            dsts.push_back(nodes[i + 1]->GetInDataAnchor(0));
            srcs.push_back(nodes[i]->GetOutDataAnchor(1));
            dsts.push_back(nodes[i + 1]->GetInDataAnchor(1));
        }

        // add edges
        for (int i = 0; i < srcs.size(); ++i)
        {
            GraphUtils::AddEdge(srcs[i], dsts[i]);
        }

        return graph;
    }

    static ComputeGraphPtr  BuildTestGraph(const int32_t &strategy) {
      std::shared_ptr<ScopeAllocator> scope_allocator_ptr = std::make_shared<ScopeAllocator>();
      OpDescPtr conv1 = std::make_shared<OpDesc>("conv1", "Conv2D");
      OpDescPtr conv2 = std::make_shared<OpDesc>("conv2", "Conv2D");
      OpDescPtr relu1 = std::make_shared<OpDesc>("relu1", "RelU");
      OpDescPtr relu2 = std::make_shared<OpDesc>("relu2", "RelU");

      int64_t scope_id_1 = scope_allocator_ptr->AllocateScopeId();
      int64_t scope_id_2 = scope_allocator_ptr->AllocateScopeId();
      int64_t scope_id_3 = scope_allocator_ptr->AllocateScopeId();
      switch (strategy) {
        case 1:
          scope_allocator_ptr->SetScopeAttr(conv1, scope_id_1);
          scope_allocator_ptr->SetScopeAttr(relu1, scope_id_1);
          scope_allocator_ptr->SetScopeAttr(conv2, scope_id_2);
          scope_allocator_ptr->SetScopeAttr(relu2, scope_id_2);

          scope_allocator_ptr->SetL1ScopeAttr(conv1, scope_id_3);
          scope_allocator_ptr->SetL1ScopeAttr(relu1, scope_id_3);
          scope_allocator_ptr->SetL1ScopeAttr(conv2, scope_id_3);
          scope_allocator_ptr->SetL1ScopeAttr(relu2, scope_id_3);
          break;
        case 2:
          scope_allocator_ptr->SetScopeAttr(conv1, scope_id_1);
          scope_allocator_ptr->SetScopeAttr(relu1, scope_id_1);
          scope_allocator_ptr->SetScopeAttr(conv2, scope_id_2);
          scope_allocator_ptr->SetScopeAttr(relu2, scope_id_2);
          break;
        case 3:
          scope_allocator_ptr->SetScopeAttr(conv1, scope_id_1);
          scope_allocator_ptr->SetScopeAttr(relu1, scope_id_1);
          scope_allocator_ptr->SetScopeAttr(conv2, scope_id_2);
          scope_allocator_ptr->SetScopeAttr(relu2, scope_id_2);
          scope_allocator_ptr->SetL1ScopeAttr(conv1, scope_id_3);
          scope_allocator_ptr->SetL1ScopeAttr(relu1, scope_id_3);
          break;
        case 4:
          scope_allocator_ptr->SetScopeAttr(conv1, scope_id_1);
          scope_allocator_ptr->SetScopeAttr(relu1, scope_id_1);
          scope_allocator_ptr->SetScopeAttr(conv2, scope_id_2);
          scope_allocator_ptr->SetScopeAttr(relu2, scope_id_2);

          scope_allocator_ptr->SetL1ScopeAttr(conv2, scope_id_3);
          scope_allocator_ptr->SetL1ScopeAttr(relu2, scope_id_3);
          break;
        case 5:
          scope_allocator_ptr->SetScopeAttr(conv1, scope_id_1);
          scope_allocator_ptr->SetScopeAttr(relu1, scope_id_1);
          scope_allocator_ptr->SetScopeAttr(conv2, scope_id_2);
          scope_allocator_ptr->SetScopeAttr(relu2, scope_id_2);

          scope_allocator_ptr->SetL1ScopeAttr(conv2, scope_id_1);
          scope_allocator_ptr->SetL1ScopeAttr(relu2, scope_id_1);
          break;
        default:
          scope_allocator_ptr->SetScopeAttr(conv1, scope_id_1);
          scope_allocator_ptr->SetScopeAttr(relu1, scope_id_1);
          scope_allocator_ptr->SetScopeAttr(conv2, scope_id_2);
          scope_allocator_ptr->SetScopeAttr(relu2, scope_id_2);

          scope_allocator_ptr->SetL1ScopeAttr(conv1, scope_id_3);
          scope_allocator_ptr->SetL1ScopeAttr(relu1, scope_id_3);
          scope_allocator_ptr->SetL1ScopeAttr(conv2, scope_id_3);
          scope_allocator_ptr->SetL1ScopeAttr(relu2, scope_id_3);
      }

      AttrUtils::SetInt(conv1, FE_IMPLY_TYPE, fe::EN_IMPL_HW_TBE);
      AttrUtils::SetInt(conv2, FE_IMPLY_TYPE, fe::EN_IMPL_HW_TBE);
      AttrUtils::SetInt(relu1, FE_IMPLY_TYPE, fe::EN_IMPL_HW_TBE);
      AttrUtils::SetInt(relu2, FE_IMPLY_TYPE, fe::EN_IMPL_HW_TBE);

      // add descriptor
      vector<int64_t> dim = {4, 4, 1, 4};
      GeShape shape(dim);
      GeTensorDesc tenosr_desc(shape);

      conv1->AddInputDesc(tenosr_desc);
      conv1->AddInputDesc(tenosr_desc);
      conv1->AddOutputDesc(tenosr_desc);

      conv2->AddInputDesc(tenosr_desc);
      conv2->AddInputDesc(tenosr_desc);
      conv2->AddOutputDesc(tenosr_desc);

      relu1->AddInputDesc(tenosr_desc);
      relu1->AddOutputDesc(tenosr_desc);
      relu2->AddInputDesc(tenosr_desc);
      relu2->AddOutputDesc(tenosr_desc);

      ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
      NodePtr conv1_node = graph->AddNode(conv1);
      NodePtr conv2_node = graph->AddNode(conv2);
      NodePtr relu1_node = graph->AddNode(relu1);
      NodePtr relu2_node = graph->AddNode(relu2);

      GraphUtils::AddEdge(conv1_node->GetOutDataAnchor(0), relu1_node->GetInDataAnchor(0));
      GraphUtils::AddEdge(relu1_node->GetOutDataAnchor(0), conv2_node->GetInDataAnchor(1));
      GraphUtils::AddEdge(conv2_node->GetOutDataAnchor(0), relu2_node->GetInDataAnchor(0));

      return graph;
    }

    FEOpsKernelInfoStorePtr ops_kernel_info_store_ptr_;
    OpStoreAdapterManagerPtr op_store_adapter_manager_ptr_;
    ComputeGraphPtr graph_;
    ComputeGraphPtr graph_cce_;
    ComputeGraphPtr graph_mix_;
};

// Node 0 carries fusion scope 1 and the map already has an entry for that
// scope, so AddNodeToFusionMap appends the node to the existing list.
TEST_F(UTEST_fusion_engine_op_compiler, save_fusion_node_found)
{
    auto compiler = std::make_shared<OpCompiler>("normal compiler", AI_CORE_NAME, op_store_adapter_manager_ptr_);
    auto first_node = graph_->GetDirectNode().at(0);
    AttrUtils::SetInt(first_node->GetOpDesc(), "fusion_scope", 1);

    std::vector<ge::Node*> scope_nodes;
    ScopeNodeIdMap fusion_node_map;
    fusion_node_map.emplace(1, scope_nodes);

    int64_t scope_id = 1;
    EXPECT_EQ(fe::SUCCESS, compiler->AddNodeToFusionMap(*first_node, scope_id, fusion_node_map));
}

// The map holds only scope 2 while the node belongs to scope 1; the call
// still succeeds because a new map entry is created on demand.
TEST_F(UTEST_fusion_engine_op_compiler, save_fusion_node_not_found)
{
    auto compiler = std::make_shared<OpCompiler>("normal compiler", AI_CORE_NAME, op_store_adapter_manager_ptr_);
    auto first_node = graph_->GetDirectNode().at(0);
    AttrUtils::SetInt(first_node->GetOpDesc(), "fusion_scope", 1);

    std::vector<ge::Node*> scope_nodes;
    ScopeNodeIdMap fusion_node_map;
    fusion_node_map.emplace(2, scope_nodes);

    int64_t scope_id = 1;
    EXPECT_EQ(fe::SUCCESS, compiler->AddNodeToFusionMap(*first_node, scope_id, fusion_node_map));
}


// Stub for Configuration::GetOpStoreInfoByImplType: reports a TBE store that
// needs both pre-compilation and compilation, with no impl file path.
Status GetOpStoreInfoByImplTypeStub(Configuration *This, OpImplType op_impl_type, FEOpsStoreInfo& op_store_info)
{
    op_store_info.need_pre_compile = true;
    op_store_info.need_compile = true;
    op_store_info.op_impl_file_path = "";
    op_store_info.op_impl_type = EN_IMPL_HW_TBE;
    return fe::SUCCESS;
}

// Variant stub: pre-compile only (need_compile left untouched) and a
// non-empty impl file path.
Status GetOpStoreInfoByImplTypeStub2(Configuration *This, OpImplType op_impl_type, FEOpsStoreInfo& op_store_info)
{
    op_store_info.need_pre_compile = true;
    op_store_info.op_impl_file_path = "xxx";
    op_store_info.op_impl_type = EN_IMPL_HW_TBE;
    return fe::SUCCESS;
}


/**
 * Stub returning a process-wide adapter manager.
 *
 * Fix: the original returned a const reference to a function-local
 * shared_ptr, which dangled as soon as the function returned (undefined
 * behavior for every caller). Making the local `static` gives the referent
 * program lifetime while keeping the signature unchanged.
 */
const OpStoreAdapterManagerPtr& GetOpStoreAdapterManageStub()
{
    static const OpStoreAdapterManagerPtr op_store_adapter_manager_ptr =
            std::make_shared<OpStoreAdapterManager>();
    return op_store_adapter_manager_ptr;
}

// Stub for OpStoreAdapterManager::GetOpStoreAdapter: always hands back a
// fresh TBE adapter, regardless of the requested impl type.
Status GetOpStoreAdapterStub(OpStoreAdapterManager *This, const OpImplType &op_impl_type, OpStoreAdapterPtr &adapter_ptr)
{
    adapter_ptr = std::shared_ptr<TbeOpStoreAdapter>(new (std::nothrow) TbeOpStoreAdapter());
    return fe::SUCCESS;
}

// SetMemoryTypeForOutput must mark every REQUIRED output with a memory type
// and leave OPTIONAL outputs untouched, for both a node that has the optional
// output ("actual_rois_num") and one that omits it.
//
// Fix: the verification loop iterated over all graph nodes but always
// inspected op_desc1's output descriptors (copy-paste bug), so
// proposal_node2's descriptors were never actually checked. It now checks
// each node's own descriptor; the loop index is also size_t to avoid the
// signed/unsigned comparison.
TEST_F(UTEST_fusion_engine_op_compiler, pre_compile_op_success) {
  auto op_compiler_ptr = std::make_shared<OpCompiler>("normal compiler", AI_CORE_NAME, op_store_adapter_manager_ptr_);
  Status status = op_compiler_ptr->Initialize();
  EXPECT_EQ(fe::SUCCESS, status);

  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  auto op_type = "ProposalD";
  OpDescPtr op_desc1 = std::make_shared<OpDesc>("test1", op_type);
  OpDescPtr op_desc2 = std::make_shared<OpDesc>("test2", op_type);
  // add descriptor
  vector<int64_t> dim(4, 1);
  GeShape shape(dim);
  GeTensorDesc out_desc(shape);
  out_desc.SetOriginFormat(FORMAT_NCHW);
  out_desc.SetFormat(FORMAT_NCHW);
  out_desc.SetDataType(DT_FLOAT16);
  op_desc1->AddInputDesc("cls_prob", out_desc);
  op_desc1->AddInputDesc("bbox_delta", out_desc);
  op_desc1->AddInputDesc("im_info", out_desc);
  op_desc1->AddInputDesc("rpn_bbox", out_desc);
  op_desc1->AddOutputDesc("rois", out_desc);
  op_desc1->AddOutputDesc("actual_rois_num", out_desc);

  // op_desc2 deliberately omits the optional "actual_rois_num" output.
  op_desc2->AddInputDesc("cls_prob", out_desc);
  op_desc2->AddInputDesc("bbox_delta", out_desc);
  op_desc2->AddInputDesc("im_info", out_desc);
  op_desc2->AddInputDesc("rpn_bbox", out_desc);
  op_desc2->AddOutputDesc("rois", out_desc);
  auto proposal_node1 = graph->AddNode(op_desc1);
  auto proposal_node2 = graph->AddNode(op_desc2);

  // Kernel info: output 0 ("rois") REQUIRED, output 1 ("actual_rois_num") OPTIONAL.
  OpKernelInfoPtr op_kernel_info = std::shared_ptr<OpKernelInfo>(new (std::nothrow) OpKernelInfo(op_type));
  InputOrOutputInfoPtr output_info_ptr = std::make_shared<InputOrOutputInfo>("rois");
  output_info_ptr->op_param_type_ = OpParamType::REQUIRED;
  op_kernel_info->output_infos_.push_back(output_info_ptr);
  InputOrOutputInfoPtr output_info_ptr1 = std::make_shared<InputOrOutputInfo>("actual_rois_num");
  output_info_ptr1->op_param_type_ = OpParamType::OPTIONAL;
  op_kernel_info->output_infos_.push_back(output_info_ptr1);
  Status res = op_compiler_ptr->SetMemoryTypeForOutput(proposal_node1, op_kernel_info);
  EXPECT_EQ(fe::SUCCESS, res);
  res = op_compiler_ptr->SetMemoryTypeForOutput(proposal_node2, op_kernel_info);
  EXPECT_EQ(fe::SUCCESS, res);

  for (const auto &node : graph->GetDirectNode()) {
    const auto &node_op_desc = node->GetOpDesc();
    const size_t output_num = node_op_desc->GetAllOutputsDesc().size();
    for (size_t i = 0; i != output_num; ++i) {
      // Only the OPTIONAL output (index 1) must remain without a memory type.
      bool is_empty = IsMemoryEmpty(node_op_desc->GetOutputDesc(i));
      if (i == 1) {
        EXPECT_EQ(true, is_empty);
      } else {
        EXPECT_EQ(false, is_empty);
      }
    }

    // Inputs are never assigned a memory type here.
    for (const auto &tensor_desc : node_op_desc->GetAllInputsDesc()) {
      EXPECT_EQ(false, IsMemoryEmpty(tensor_desc));
    }
  }
}

// On a SoC loaded from the platform config (Hi3796CV300ES) the Conv2D node
// must NOT receive the weight-compress attribute.
TEST_F(UTEST_fusion_engine_op_compiler, setcompressweightattr_03)
{
  // Build an int8 / NCHW tensor descriptor for the given dims.
  auto make_int8_nchw_desc = [](const vector<int64_t> &dims) {
    GeShape shape(dims);
    GeTensorDesc desc(shape);
    desc.SetOriginDataType(DT_INT8);
    desc.SetDataType(DT_INT8);
    desc.SetFormat(FORMAT_NCHW);
    desc.SetOriginFormat(FORMAT_NCHW);
    desc.SetOriginShape(shape);
    return desc;
  };

  OpDescPtr op_desc = std::make_shared<OpDesc>("conv2d2", "Conv2D");
  op_desc->AddInputDesc("input", make_int8_nchw_desc({1, 64, 56, 56}));
  op_desc->AddInputDesc("filter", make_int8_nchw_desc({256, 64, 1, 1}));
  op_desc->AddInputDesc("bias", make_int8_nchw_desc({256}));
  op_desc->AddOutputDesc("out", make_int8_nchw_desc({1, 256, 56, 56}));

  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  NodePtr node = graph->AddNode(op_desc);

  // Load the platform description and pin the SoC version under test.
  string path = "./air/test/engines/nneng/config/data/platform_config";
  string real_path = RealPath(path);
  PlatformInfoManager::Instance().platform_info_map_.clear();
  PlatformInfoManager::Instance().platform_infos_map_.clear();
  PlatformInfoManager::Instance().LoadConfigFile(real_path);
  Configuration::Instance(AI_CORE_NAME).soc_version_ = "Hi3796CV300ES";

  auto op_compiler_ptr = std::make_shared<OpCompiler>("normal compiler", AI_CORE_NAME, op_store_adapter_manager_ptr_);
  EXPECT_EQ(fe::SUCCESS, op_compiler_ptr->SetCompressWeightAttr(node));
  EXPECT_EQ(ge::AttrUtils::HasAttr(op_desc, ATTR_NAME_FE_WEIGHT_COMPRESS), false);

  // Leave the global platform state clean for the next test.
  PlatformInfoManager::Instance().platform_info_map_.clear();
  PlatformInfoManager::Instance().platform_infos_map_.clear();
}

// A FullyConnection node qualifies for weight compression: the attribute must
// be present and set to true.
TEST_F(UTEST_fusion_engine_op_compiler, setcompressweightattr_04)
{
    GeShape shape1(vector<int64_t>(4, 2));
    GeTensorDesc tensor_desc1(shape1);
    tensor_desc1.SetOriginDataType(DT_INT8);
    tensor_desc1.SetDataType(DT_INT8);
    tensor_desc1.SetFormat(FORMAT_NCHW);
    tensor_desc1.SetOriginFormat(FORMAT_NCHW);
    tensor_desc1.SetOriginShape(shape1);

    OpDescPtr op_desc = std::make_shared<OpDesc>("fc", "FullyConnection");
    for (const string &input_name : {"input", "filter", "bias"}) {
        op_desc->AddInputDesc(input_name, tensor_desc1);
    }
    op_desc->AddOutputDesc("out", tensor_desc1);

    ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
    NodePtr node = graph->AddNode(op_desc);

    auto op_compiler_ptr = std::make_shared<OpCompiler>("normal compiler", AI_CORE_NAME, op_store_adapter_manager_ptr_);
    EXPECT_EQ(fe::SUCCESS, op_compiler_ptr->SetCompressWeightAttr(node));
    EXPECT_EQ(ge::AttrUtils::HasAttr(op_desc, ATTR_NAME_FE_WEIGHT_COMPRESS), true);

    bool fe_weight_compress = false;
    ge::AttrUtils::GetBool(op_desc, ATTR_NAME_FE_WEIGHT_COMPRESS, fe_weight_compress);
    EXPECT_EQ(fe_weight_compress, true);
}

// A plain Relu node is not eligible for weight compression: the attribute
// must not be set at all.
TEST_F(UTEST_fusion_engine_op_compiler, setcompressweightattr_05)
{
    GeShape shape1(vector<int64_t>(4, 2));
    GeTensorDesc tensor_desc1(shape1);
    tensor_desc1.SetOriginDataType(DT_INT8);
    tensor_desc1.SetDataType(DT_INT8);
    tensor_desc1.SetFormat(FORMAT_NCHW);
    tensor_desc1.SetOriginFormat(FORMAT_NCHW);
    tensor_desc1.SetOriginShape(shape1);

    OpDescPtr op_desc = std::make_shared<OpDesc>("relu", "Relu");
    op_desc->AddInputDesc("input", tensor_desc1);
    op_desc->AddOutputDesc("out", tensor_desc1);

    ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
    NodePtr node = graph->AddNode(op_desc);

    auto op_compiler_ptr = std::make_shared<OpCompiler>("normal compiler", AI_CORE_NAME, op_store_adapter_manager_ptr_);
    EXPECT_EQ(fe::SUCCESS, op_compiler_ptr->SetCompressWeightAttr(node));
    EXPECT_EQ(ge::AttrUtils::HasAttr(op_desc, ATTR_NAME_FE_WEIGHT_COMPRESS), false);
}

// Strategy 1: one L1 scope (id 3) spans all four nodes, so the fusion map
// entry for scope 3 must contain all of them.
TEST_F(UTEST_fusion_engine_op_compiler, getfusionscope_1)
{
  ComputeGraphPtr graph = BuildTestGraph(1);
  ScopeNodeIdMap fusion_nodes_map;
  vector<ge::NodePtr> nodes_be_compiled;
  std::vector<ge::NodePtr> buff_fus_rollback_nodes;
  auto op_compiler_ptr = std::make_shared<OpCompiler>("normal compiler", AI_CORE_NAME, op_store_adapter_manager_ptr_);
  EXPECT_EQ(fe::SUCCESS,
            op_compiler_ptr->GetFusionScope(*graph, buff_fus_rollback_nodes, fusion_nodes_map, nodes_be_compiled));
  auto scope_iter = fusion_nodes_map.find(3);
  if (scope_iter != fusion_nodes_map.end()) {
    EXPECT_EQ(scope_iter->second.size(), 4);
  }
}

// Strategy 2: only the two L2 scopes exist; each must hold its own pair.
TEST_F(UTEST_fusion_engine_op_compiler, getfusionscope_2)
{
  ComputeGraphPtr graph = BuildTestGraph(2);
  ScopeNodeIdMap fusion_nodes_map;
  vector<ge::NodePtr> nodes_be_compiled;
  std::vector<ge::NodePtr> buff_fus_rollback_nodes;
  auto op_compiler_ptr = std::make_shared<OpCompiler>("normal compiler", AI_CORE_NAME, op_store_adapter_manager_ptr_);
  EXPECT_EQ(fe::SUCCESS,
            op_compiler_ptr->GetFusionScope(*graph, buff_fus_rollback_nodes, fusion_nodes_map, nodes_be_compiled));
  for (int64_t scope_id : {1, 2}) {
    auto scope_iter = fusion_nodes_map.find(scope_id);
    if (scope_iter != fusion_nodes_map.end()) {
      EXPECT_EQ(scope_iter->second.size(), 2);
    }
  }
}

// Strategy 3: L1 scope 3 covers pair 1, L2 scope 2 covers pair 2 — both
// resulting scopes hold two nodes each.
TEST_F(UTEST_fusion_engine_op_compiler, getfusionscope_3)
{
  ComputeGraphPtr graph = BuildTestGraph(3);
  ScopeNodeIdMap fusion_nodes_map;
  vector<ge::NodePtr> nodes_be_compiled;
  std::vector<ge::NodePtr> buff_fus_rollback_nodes;
  auto op_compiler_ptr = std::make_shared<OpCompiler>("normal compiler", AI_CORE_NAME, op_store_adapter_manager_ptr_);
  EXPECT_EQ(fe::SUCCESS,
            op_compiler_ptr->GetFusionScope(*graph, buff_fus_rollback_nodes, fusion_nodes_map, nodes_be_compiled));
  for (int64_t scope_id : {2, 3}) {
    auto scope_iter = fusion_nodes_map.find(scope_id);
    if (scope_iter != fusion_nodes_map.end()) {
      EXPECT_EQ(scope_iter->second.size(), 2);
    }
  }
}

// Strategy 4: L2 scope 1 covers pair 1, L1 scope 3 covers pair 2 — both
// resulting scopes hold two nodes each.
TEST_F(UTEST_fusion_engine_op_compiler, getfusionscope_4)
{
  ComputeGraphPtr graph = BuildTestGraph(4);
  ScopeNodeIdMap fusion_nodes_map;
  vector<ge::NodePtr> nodes_be_compiled;
  std::vector<ge::NodePtr> buff_fus_rollback_nodes;
  auto op_compiler_ptr = std::make_shared<OpCompiler>("normal compiler", AI_CORE_NAME, op_store_adapter_manager_ptr_);
  EXPECT_EQ(fe::SUCCESS,
            op_compiler_ptr->GetFusionScope(*graph, buff_fus_rollback_nodes, fusion_nodes_map, nodes_be_compiled));
  for (int64_t scope_id : {1, 3}) {
    auto scope_iter = fusion_nodes_map.find(scope_id);
    if (scope_iter != fusion_nodes_map.end()) {
      EXPECT_EQ(scope_iter->second.size(), 2);
    }
  }
}

// Strategy 5: the L1 scope id of pair 2 collides with the L2 scope id of
// pair 1 — GetFusionScope must reject the layout.
TEST_F(UTEST_fusion_engine_op_compiler, getfusionscope_5)
{
  ComputeGraphPtr graph = BuildTestGraph(5);
  ScopeNodeIdMap fusion_nodes_map;
  vector<ge::NodePtr> nodes_be_compiled;
  std::vector<ge::NodePtr> buff_fus_rollback_nodes;
  auto op_compiler_ptr = std::make_shared<OpCompiler>("normal compiler", AI_CORE_NAME, op_store_adapter_manager_ptr_);
  EXPECT_EQ(fe::FAILED,
            op_compiler_ptr->GetFusionScope(*graph, buff_fus_rollback_nodes, fusion_nodes_map, nodes_be_compiled));
}