/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef AICPU_TF_KERNEL_BUILDER_H_
#define AICPU_TF_KERNEL_BUILDER_H_

#include <cstdint>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

#include "common/aicpu_ops_kernel_builder/kernel_builder.h"
#include "proto/fwk_adapter.pb.h"

namespace aicpu {
using KernelBuilderPtr = std::shared_ptr<KernelBuilder>;

/**
 * Singleton kernel builder that adapts GE nodes to TensorFlow-style AICPU
 * kernels: it calculates running memory sizes, builds kernel run parameters,
 * and generates runtime tasks for TF-framework operators.
 */
class TfKernelBuilder : public KernelBuilder {
 public:
  /**
   * Destructor
   */
  virtual ~TfKernelBuilder() = default;

  /**
   * Get the singleton instance.
   * @return shared pointer to the kernel builder instance
   */
  static KernelBuilderPtr Instance();

  /**
   * Calculate the running memory size of the operator; GE then allocates
   * that size from the runtime. The size consists of the following parts:
   *   1. Input and output sizes;
   *   2. The TF node_def; 3. The TF func_def.
   * @param node Node information; the task memory size is returned in the node's attributes
   * @return status indicating whether this operation succeeded
   */
  ge::Status CalcOpRunningParam(const ge::Node &node) const override;

  /**
   * Copy the data from host to the device address allocated by GE, then invoke
   * the runtime interface to generate the task.
   * @param node Node information
   * @param run_context GE run context (provides the session id, model, stream, etc.)
   * @param tasks[out] generated task definitions
   * @return status indicating whether the operation succeeded
   */
  ge::Status GenerateTask(const ge::Node &node,
                          const ge::RunContext &run_context,
                          std::vector<domi::TaskDef> &tasks) override;

  /**
   * Generate the task for a single operator.
   * @param node Node information
   * @param task[out] generated framework-op kernel task
   * @param task_info[out] serialized task information
   * @return status indicating whether this operation succeeded
   */
  ge::Status GenSingleOpRunTask(const ge::NodePtr &node, STR_FWK_OP_KERNEL &task, string &task_info) override;

  /**
   * Generate the memory-copy task.
   * @param count the number of memory copies
   * @param task[out] generated framework-op kernel task
   * @param task_info[out] serialized task information
   * @return status indicating whether this operation succeeded
   */
  ge::Status GenMemCopyTask(uint64_t count, STR_FWK_OP_KERNEL &task, string &task_info) override;

  /**
   * Initialize the kernel builder.
   * @return status indicating whether this operation succeeded
   */
  ge::Status Initialize() override;

  // Copy prohibited
  TfKernelBuilder(const TfKernelBuilder &tf_kernel_builder) = delete;

  // Move prohibited
  // NOTE: declared as a non-const rvalue reference; the previous `const &&`
  // form was inconsistent with the deleted move assignment below.
  TfKernelBuilder(TfKernelBuilder &&tf_kernel_builder) = delete;

  // Copy prohibited
  TfKernelBuilder &operator=(const TfKernelBuilder &tf_kernel_builder) = delete;

  // Move prohibited
  TfKernelBuilder &operator=(TfKernelBuilder &&tf_kernel_builder) = delete;
 protected:
  /**
   * Get the data types of inputs and outputs.
   * @param op_desc_ptr pointer storing the op information used by GE
   * @param inputs_type[out] data types of the inputs
   * @param outputs_type[out] data types of the outputs
   */
  void GetInOutPutsDataType(const ge::OpDescPtr &op_desc_ptr,
                            std::vector<uint32_t> &inputs_type,
                            std::vector<uint32_t> &outputs_type) const override;
 private:
  /**
   * Constructor (private: singleton, use Instance())
   */
  TfKernelBuilder() = default;

  // Calculate the workspace size for the node
  ge::Status CalcWorkspaceSize(const ge::Node &node, int64_t &workspace_size) const;

  /**
   * Build the KernelRunParam.
   * @param opDesc Op description
   * @param kernel_run_param[out] fake KernelRunParam (the input and
   *  output data_addr fields are not real)
   * @param skip_dim_check whether to skip the dimension check
   * @return status indicating whether the operation succeeded
   */
  ge::Status BuildKernelRunParam(const ge::OpDesc &opDesc,
                                 ::aicpu::FWKAdapter::KernelRunParam &kernel_run_param,
                                 bool skip_dim_check = false) const;

  /**
   * Calculate the size of the TF node_def: first transform the GE node to a
   * TF node_def, then take the node_def's size; if the operator has a
   * func_def, its size is calculated as well.
   * @param node original GE node info
   * @param node_def_bytes[out] serialized TF node def
   * @param func_def_lib_bytes[out] serialized TF function def library
   * @param node_def_size[out] size of the node def
   * @param func_def_lib_size[out] size of the function def library
   * @return status indicating whether the operation succeeded
   */
  ge::Status ParseNodeDefAndFuncDef(const ge::Node &node,
                                    ge::GeAttrValue::BYTES &node_def_bytes,
                                    ge::GeAttrValue::BYTES &func_def_lib_bytes,
                                    int64_t &node_def_size,
                                    int64_t &func_def_lib_size) const;

  /**
   * Create a TF node def for the GE node.
   * @param node GE node
   * @return status indicating whether the operation succeeded
   */
  ge::Status CreateNodeDef(const ge::Node &node) const;

  /**
   * Fill an aicpu::FWKAdapter::TensorDataInfo from a GE tensor descriptor.
   * @param ge_tensor_desc original GE tensor descriptor
   * @param tensor_data_info[out] the input or output data, defined by protobuf
   * @param is_ref whether the output is a reference
   * @param skip_dim_check whether to skip the dimension check
   * @param is_output whether the tensor is an output
   * @return status indicating whether the operation succeeded
   */
  aicpu::State SetTensorDataInfo(const ge::GeTensorDesc &ge_tensor_desc,
                                 ::aicpu::FWKAdapter::TensorDataInfo *tensor_data_info,
                                 bool is_ref = false,
                                 bool skip_dim_check = false,
                                 bool is_output = false) const;

  /**
   * Task callback: in the Gen model, the client registers this callback with
   * the runtime; after the kernel is launched, the runtime feeds the task
   * info back to the client.
   * @param model runtime model
   * @param task_info runtime task info
   * @return whether the callback was handled successfully
   */
  static rtError_t GetTaskInfoCallback(rtModel_t model, rtTaskInfo_t *task_info);

  /**
   * Build the StrFWKKernel struct, set its real values, then launch the task.
   * @param node original GE node info
   * @param run_context GE run context (the session id is needed)
   * @return status indicating whether the operation succeeded
   */
  ge::Status BuildAndLaunchKernel(const ge::Node &node, const ge::RunContext &run_context) const;

  /**
   * Update the op information in the framework.
   * @param op_desc_ptr Op description
   * @return status indicating whether the operation succeeded
   */
  ge::Status UpdateFmkOpInfo(std::shared_ptr<ge::OpDesc> &op_desc_ptr) const;

  /**
   * Make the task extended info for the node.
   * @param node original GE node info
   * @param task_ext_info[out] serialized task extended info buffer
   * @return status indicating whether the operation succeeded
   */
  ge::Status MakeTaskExtInfo(const ge::Node &node, std::vector<char> &task_ext_info) const;

  /**
   * Check whether the address needs updating.
   * @param node original GE node info
   * @param update_addr_flag[out] flag used to record whether the address needs updating
   * @return whether the address needs updating
   */
  bool NeedUpdateAddr(const ge::Node &node, int32_t &update_addr_flag) const;

  /**
   * Check whether the known node is in a dynamic shape graph.
   * @param node original GE node info
   * @return whether the node is a dynamic shape op
   */
  bool IsKnownNodeDynamic(const ge::Node &node) const;

  /**
   * Generate the task (implementation helper).
   * @param node Node information
   * @param str_tf_kernel[out] framework operate parameters for the TF kernel
   * @param task_info[out] serialized task information
   * @param skip_dim_check whether to skip the dimension check
   * @return status indicating whether this operation succeeded
   */
  ge::Status GenTaskImply(const ge::NodePtr &node,
                          ::aicpu::FWKAdapter::FWKOperateParam *str_tf_kernel,
                          string &task_info,
                          bool skip_dim_check = false);

 private:
  // singleton instance
  static KernelBuilderPtr instance_;
  // guards creation of the singleton instance and other mutable state
  std::mutex mutex_;
};
} // namespace aicpu
#endif // AICPU_TF_KERNEL_BUILDER_H_
