#include <string>
#include <vector>
#include "register/register.h"
#include "graph/operator.h"
#include "graph/graph.h"
#include "graph/operator_factory.h"

namespace domi {
using namespace ge;

static Status AddOptionalPlaceholderForFA(const ge::Operator &tf_op, ge::Graph &graph) {
  // 1. 创建一个FlashAttentionScore算子
  ge::AscendString op_name;
  tf_op.GetName(op_name);
  auto npu_fa_op = OperatorFactory::CreateOperator(op_name.GetString(), "FlashAttentionScore");
  // 2. 将ir中的属性映射到新算子上
  float scale_value = 1.0;
  (void)tf_op.GetAttr("scale_value", scale_value);
  (void)npu_fa_op.SetAttr("scale_value", scale_value);

  float keep_prob = 1.0;
  (void)tf_op.GetAttr("keep_prob", keep_prob);
  (void)npu_fa_op.SetAttr("keep_prob", keep_prob);

  int32_t pre_tockens = 2147483647;
  (void)tf_op.GetAttr("pre_tockens", pre_tockens);
  (void)npu_fa_op.SetAttr("pre_tockens", pre_tockens);

  int32_t next_tockens = 2147483647;
  (void)tf_op.GetAttr("next_tockens", next_tockens);
  (void)npu_fa_op.SetAttr("next_tockens", next_tockens);

  int32_t head_num = 0;
  (void)tf_op.GetAttr("head_num", head_num);
  (void)npu_fa_op.SetAttr("head_num", head_num);

  std::string input_layout;
  (void)tf_op.GetAttr("input_layout", input_layout);
  (void)npu_fa_op.SetAttr("input_layout", input_layout);

  int32_t inner_precise = 0;
  (void)tf_op.GetAttr("inner_precise", inner_precise);
  (void)npu_fa_op.SetAttr("inner_precise", inner_precise);

  int32_t sparse_mode = 0;
  (void)tf_op.GetAttr("sparse_mode", sparse_mode);
  (void)npu_fa_op.SetAttr("sparse_mode", sparse_mode);

  int32_t pse_type = 1;
  (void)tf_op.GetAttr("pse_type", pse_type);
  (void)npu_fa_op.SetAttr("pse_type", pse_type);

  // 3. 创建输入Data
  std::vector<Operator> inputs;
  for (size_t i = 0UL; i < tf_op.GetInputsSize(); i++) {
    const std::string data_name = "Data_" + std::to_string(i);
    Operator data_op = OperatorFactory::CreateOperator(data_name.c_str(), "Data");
    (void)data_op.SetAttr("index", static_cast<int32_t>(i));
    inputs.emplace_back(data_op);
  }

  size_t index = 0UL;
  //4. 必选输入直接设置Data到算子输入
  (void)npu_fa_op.SetInput("query", inputs[index++]);
  (void)npu_fa_op.SetInput("key", inputs[index++]);
  (void)npu_fa_op.SetInput("value", inputs[index++]);

  // 5. 可选输入需要判断type属性的个数是否为0，不为0则表示optionalInput已经使能
  std::vector<DataType> real_shift_type;
  (void)tf_op.GetAttr("real_shift_type", real_shift_type);
  if (!real_shift_type.empty()) {
    (void)npu_fa_op.SetInput("real_shift", inputs[index++]);
  }

  std::vector<DataType> drop_mask_type;
  (void)tf_op.GetAttr("drop_mask_type", drop_mask_type);
  if (!drop_mask_type.empty()) {
    (void)npu_fa_op.SetInput("drop_mask", inputs[index++]);
  }

  std::vector<DataType> padding_mask_type;
  (void)tf_op.GetAttr("padding_mask_type", padding_mask_type);
  if (!padding_mask_type.empty()) {
    (void)npu_fa_op.SetInput("padding_mask", inputs[index++]);
  }
  std::vector<DataType> atten_mask_type;
  (void)tf_op.GetAttr("atten_mask_type", atten_mask_type);
  if (!atten_mask_type.empty()) {
    (void)npu_fa_op.SetInput("atten_mask", inputs[index++]);
  }
  std::vector<DataType> prefix_type;
  (void)tf_op.GetAttr("prefix_type", prefix_type);
  if (!prefix_type.empty()) {
    (void)npu_fa_op.SetInput("prefix", inputs[index++]);
  }
  std::vector<DataType> actual_seq_qlen_type;
  (void)tf_op.GetAttr("actual_seq_qlen_type", actual_seq_qlen_type);
  if (!actual_seq_qlen_type.empty()) {
    (void)npu_fa_op.SetInput("actual_seq_qlen", inputs[index++]);
  }
  std::vector<DataType> actual_seq_kvlen_type;
  (void)tf_op.GetAttr("actual_seq_kvlen_type", actual_seq_kvlen_type);
  if (!actual_seq_kvlen_type.empty()) {
    (void)npu_fa_op.SetInput("actual_seq_kvlen", inputs[index++]);
  }

  std::vector<DataType> q_start_idx_type;
  (void)tf_op.GetAttr("q_start_idx_type", q_start_idx_type);
  if (!q_start_idx_type.empty()) {
    (void)npu_fa_op.SetInput("q_start_idx", inputs[index++]);
  }

  std::vector<DataType> kv_start_idx_type;
  (void)tf_op.GetAttr("kv_start_idx_type", kv_start_idx_type);
  if (!kv_start_idx_type.empty()) {
    (void)npu_fa_op.SetInput("kv_start_idx", inputs[index++]);
  }

  // 6. 使用FA算子的输出构造图的输出。
  std::vector<std::pair<Operator, std::vector<size_t>>> output_indexs;
  std::vector<size_t> node_output_index;
  for (size_t i = 0UL; i < npu_fa_op.GetOutputsSize(); i++) {
    node_output_index.emplace_back(i);
  }
  (void)output_indexs.emplace_back(std::make_pair(npu_fa_op, node_output_index));
  (void)graph.SetInputs(inputs).SetOutputs(output_indexs);
  return SUCCESS;
}

// Maps the attributes of the original TF FlashAttentionScore node onto the GE
// operator. Returns FAILED when the default auto-mapping cannot be applied.
static Status FlashAttentionScoreMapping(const ge::Operator& op_src, ge::Operator& op_dst) {
  // 1. The default auto-mapping handles all straightforward attributes.
  if (AutoMappingByOpFn(op_src, op_dst) != ge::GRAPH_SUCCESS) {
    return FAILED;
  }
  // 2. Record the original TF op type in "original_type"; the later pass that
  //    rewires optional inputs keys off this attribute.
  (void)op_dst.SetAttr("original_type", "FlashAttentionScore");
  return SUCCESS;
}

// Register the TensorFlow FlashAttentionScore custom-op plugin.
REGISTER_CUSTOM_OP("FlashAttentionScore")
    .FrameworkType(TENSORFLOW)
    .OriginOpType({"FlashAttentionScore"})
    .ParseParamsByOperatorFn(FlashAttentionScoreMapping) // Maps the operator's own attributes.
    .ParseOpToGraphFn(AddOptionalPlaceholderForFA) // Converts TF inputs into optional inputs and rewires the edges.
    .ImplyType(ImplyType::TVM);

}  // namespace domi