{# ----------------------------- op maker ----------------------------------- #}
{# Generate the OpMaker class for one op: declares its proto (inputs,
   outputs, attrs and a placeholder AddComment doc) via
   OpProtoAndCheckerMaker::Make. `op` is the parsed op-description dict. #}
{% macro op_maker(op) %}
  {% set op_name = op["op_name"] %}
class {{op_name | to_pascal_case}}OpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
  {% filter indent(4, True) %}
    {% for input in op["inputs"] %}
{{add_input(loop.index0, input, op_name)}};
    {% endfor %}
    {% for output in op["outputs"] %}
{{add_output(loop.index0, output, op_name)}};
    {% endfor %}
    {% for attr in op["attrs"] %}
      {% if attr["fluid_name"] %}{# only attrs that have a fluid-side name #}
{{add_attr(loop.index0, attr, op_name)}};
      {% endif %}
    {% endfor %}
  {% endfilter %}
    AddComment(R"DOC(
TODO: Documentation of {{op_name}} op.
)DOC");
  }
};
{% endmacro %}


{# add input, it could be duplicable or dispensable #}
{% macro add_input(i, input, op_name) %}{# inline #}
  {% set name = input["fluid_name"] %}
  {% set typename = input["typename"] %}
AddInput("{{name}}", "({{typename}}), input {{i}} of {{op_name}} op.")
  {%- if typename is vec %}{# vector-typed inputs are duplicable #}

    .AsDuplicable()
  {%- endif %}
  {%- if input["optional"] %}{# optional inputs become dispensable #}

    .AsDispensable()
  {%- endif %}
  {%- if "is_extra" in input and input["is_extra"] %}{# extra inputs are marked AsExtra #}

    .AsExtra()
  {%- endif %}
{%- endmacro %}

{# add output, it could be duplicable or intermediate, however, optional output is not supported #}
{% macro add_output(i, output, op_name) %}{# inline #}
  {% set name = output["fluid_name"] %}
  {% set typename = output["typename"] %}
  {% set is_intermediate = output["intermediate"] %}
AddOutput("{{name}}", "({{typename}}), output {{i}} of {{op_name}} op.")
  {%- if typename is vec %}{# vector-typed outputs are duplicable #}

    .AsDuplicable()
  {%- endif %}
  {%- if is_intermediate %}

    .AsIntermediate()
  {%- endif %}
  {%- if output["optional"] %}{# optional outputs become dispensable #}

    .AsDispensable()
  {%- endif %}
  {%- if "is_extra" in output and output["is_extra"] %}

    .AsExtra()
  {%- endif %}
{%- endmacro %}

{# add attribute, and process default value if needed #}
{% macro add_attr(i, attr, op_name) %}{# inline #}
  {% set name = attr["fluid_name"] %}
  {% set typename = attr["typename"] %}
  {% if typename is scalar and
  ("is_support_tensor" not in attr or attr["is_support_tensor"] is false) %}{# Scalar attr: dispensable 0D tensor input plus plain attr #}
AddInput("{{attr | to_scalar_tensor_name}}", "attribute {{i}} for {{op_name}} op from 0D Tensor.")
    .AsDispensable();
AddAttr<{{attr["data_type"]}}>("{{name}}", "({{attr["data_type"]}}), attribute {{i}} for {{op_name}} op.")
  {% elif typename == "IntArray" and
  ("is_support_tensor" not in attr or attr["is_support_tensor"] is false) %}{# the type has been renamed #}
    {% if 'tensor_name' in attr or 'manual_flag' not in attr %}
AddInput("{{attr | to_int_array_tensor_name}}", "attribute {{i}} for {{op_name}} op from 1D integer Tensor.")
    .AsDispensable();
    {% endif %}
    {% if 'tensors_name' in attr or 'manual_flag' not in attr %}
AddInput("{{attr | to_int_array_tensors_name}}", "attribute {{i}} for {{op_name}} op from list of 0D integer Tensors.")
    .AsDuplicable()
    .AsDispensable();
    {% endif %}
AddAttr<{{attr["data_type"]}}>("{{name}}", "({{attr["data_type"]}}), attribute {{i}} for {{op_name}} op.")
  {% elif "is_support_tensor" in attr and attr["is_support_tensor"] %}
AddAttr<{{attr["data_type"]}}>("{{name}}", "({{attr["data_type"]}}), attribute {{i}} for {{op_name}} op.")
  {% else %}
AddAttr<{{typename | to_op_attr_type}}>("{{name}}", "({{typename | to_op_attr_type}}), attribute {{i}} for {{op_name}} op.")
  {% endif %}
  {% if "default_value" in attr %}
    .SetDefault({{process_default_value(attr)}})
  {%- endif %}
  {% if "is_support_tensor" in attr and attr["is_support_tensor"] %}

    .SupportTensor()
  {%- endif %}
{%- endmacro %}

{# process default value for attributes, some attribute has different types and different default values in op & opmaker #}
{% macro process_default_value(attr) %}{# inline #}
  {% set default_value = attr["default_value"] %}
  {% set typename = attr["typename"] %}
  {% if typename == "DataType" %}{# convert back to VarType #}
    {% if default_value == "DataType::UNDEFINED" %}{# UNDEFINED maps to the -1 sentinel, not a VarType value #}
-1
    {%- else %}
static_cast<int>(framework::TransToProtoVarType(phi::{{default_value}}))
    {%- endif %}
  {%- elif typename == "DataLayout" %} {# does DataLayout need any processing?#}
static_cast<int>(phi::{{default_value}})
  {%- elif typename == "Place" %}{# construct a Place to get the type #}
static_cast<int>(phi::Place({{"phi::" if not default_value is initializer_list}}{{default_value}}).GetType())
  {%- else %}{# pass through as-is #}
{{default_value}}
  {%- endif %}
{%- endmacro %}


{# Emit one `if (...) { return KernelSignature(...); }` branch selecting
   `kernel_name` by checking each input's tensor type (dense vs.
   selected-rows via assert_dense_or_sr); optional inputs also pass when
   absent from the context. `is_first` switches between `if` / `else if`. #}
{% macro choose_kernel_signature(inputs_type, optional_inputs_name, kernel_args, kernel_name, is_first) %} {#inline#}
  {%- set inputs_len = inputs_type | length -%}
  {%- if is_first -%}
if (
  {%- else -%}
else if (
  {%- endif -%}
      {%- for input_type in inputs_type -%}
      {%- set kernel_arg_name = kernel_args[loop.index0] | to_opmaker_name_cstr -%}
        {%- if loop.index0 != inputs_len - 1%}
          {%- if kernel_args[loop.index0] in optional_inputs_name %}
  ((ctx.HasInput({{kernel_arg_name}}) && {{input_type | assert_dense_or_sr}}({{kernel_arg_name}})) || (!ctx.HasInput({{kernel_arg_name}}))) &&
          {% else %}
  {{input_type | assert_dense_or_sr}}({{kernel_arg_name}}) &&
          {% endif %}
        {% else %} {# the last param #}
          {% if kernel_args[loop.index0] in optional_inputs_name -%}
  ((ctx.HasInput({{kernel_arg_name}}) && {{input_type | assert_dense_or_sr}}({{kernel_arg_name}})) || (!ctx.HasInput({{kernel_arg_name}}))))
          {%- else -%}
  {{input_type | assert_dense_or_sr}}({{kernel_arg_name}}))
          {%- endif %}  {
    return KernelSignature("{{kernel_name}}", std::move(inputs), std::move(attrs), std::move(outputs));
  }
        {% endif %}
      {% endfor %}
{%- endmacro -%}


{# --------------------------------------- name mapping ---------------------------------------------- #}
{# Generate the OpArgumentMapping function: maps the fluid op's arguments
   to a phi KernelSignature, dispatching among up to three kernels
   (e.g. dense vs. selected-rows variants) by input tensor type. #}
{% macro name_map(op) %}
/*
******************************************************************
NOTE: The following codes are for 'get_compat_kernel_signature.py'
All possible KernelSignatures returned by {{op["name"] | to_pascal_case }}OpArgumentMapping:

{{op | cartesian_prod_mapping}}
******************************************************************
*/

KernelSignature {{op["op_name"] | to_pascal_case }}OpArgumentMapping(const ArgumentMappingContext& ctx) {
  {% set kernel_args = op["kernel"]["param"] %}
  {% set optional_inputs_name = op["inputs"]| find_optinal_inputs_name %}
  {{get_input_list(op["inputs"], kernel_args)}};
  paddle::small_vector<const char*> attrs;
  {% for attr in op["attrs"]%}
    {% filter indent(2)%}
  {{get_an_attr(attr, kernel_args)}}
    {% endfilter %}
  {% endfor %}
  {{get_output_list(op["outputs"], kernel_args)}};
  {% set kernel_num = op["kernel"]["func"] | length %}
  {% if kernel_num == 1 %}{# single kernel: return unconditionally #}
  return KernelSignature("{{op["kernel"]["func"][0]}}", std::move(inputs), std::move(attrs), std::move(outputs));
  {% elif kernel_num == 2 %}{# it has kernel for selected rows #}
  {% set fun_name = op["kernel"]["func"][0] %}
    {% set inputs_type = op["kernel"]["dispatch"][fun_name][0] %}
{{choose_kernel_signature(inputs_type, optional_inputs_name, kernel_args, fun_name, true)}}
    {%- set fun_name = op["kernel"]["func"][1] -%}
    {%- set inputs_type = op["kernel"]["dispatch"][fun_name][0] -%}
{{choose_kernel_signature(inputs_type, optional_inputs_name, kernel_args, fun_name, false)-}}
  else { return KernelSignature("unregistered", {}, {}, {}); }
  {% elif kernel_num == 3 %}{# it has kernel for selected rows #}
    {%- set fun_name = op["kernel"]["func"][0] -%}
    {%- set inputs_type = op["kernel"]["dispatch"][fun_name][0] -%}
{{choose_kernel_signature(inputs_type, optional_inputs_name, kernel_args, fun_name, true)}}
    {%- set fun_name = op["kernel"]["func"][1] -%}
    {%- set inputs_type = op["kernel"]["dispatch"][fun_name][0] -%}
{{choose_kernel_signature(inputs_type, optional_inputs_name, kernel_args, fun_name, false)-}}
    {%- set fun_name = op["kernel"]["func"][2] -%}
    {%- set inputs_type = op["kernel"]["dispatch"][fun_name][0] -%}
{{choose_kernel_signature(inputs_type, optional_inputs_name, kernel_args, fun_name, false)-}}
  else { return KernelSignature("unregistered", {}, {}, {}); }
  {% else %} {# only support kernel_num <= 3 #}
  return KernelSignature("unregistered", {}, {}, {});
  {%endif%}
}
{% endmacro %}

{# Build chained `if (ctx.Is...Input(...))` checks that pick a kernel func
   name from the dispatch table; assigns to a `kernel_name` variable the
   caller must already have declared. Inputs not in the kernel's param
   list are skipped (input_idx tracks the dispatch-type-list position). #}
{% macro get_kernel_dispatch(inputs, kernel_config) %}{# inline #}
{%- for kernel_func in kernel_config["func"] %}
  {% set input_idx = namespace(idx=0) %}
  {% set kernel_in_type_list = kernel_config["dispatch"][kernel_func][0] %}

  if ( {%- for input in inputs %}
    {%- if input["fluid_name"] in kernel_config["param"] %}
      {%- if kernel_in_type_list[input_idx.idx] == "dense" %}
ctx.IsDenseTensorInput("{{input["fluid_name"]}}"){{" && " if not loop.last}}
      {%- elif kernel_in_type_list[input_idx.idx] == "selected_rows" %}
ctx.IsSelectedRowsInput("{{input["fluid_name"]}}"){{" && " if not loop.last}}
      {%- elif kernel_in_type_list[input_idx.idx] == "sparse_coo" %}
ctx.IsSparseCooTensorInput("{{input["fluid_name"]}}"){{" && " if not loop.last}}
      {%- elif kernel_in_type_list[input_idx.idx] == "sparse_csr" %}
ctx.IsSparseCsrTensorInput("{{input["fluid_name"]}}"){{" && " if not loop.last}}
      {%- endif %}
      {% set input_idx.idx = input_idx.idx + 1 %}
    {%- endif %}
  {%- endfor %}) {
    kernel_name = "{{kernel_func}}";
  }
{%- endfor %}
{%- endmacro %}

{# Generate the argument-mapping function for a sparse op: same shape as
   name_map, but the kernel is chosen at runtime via get_kernel_dispatch
   over dense / selected-rows / sparse-coo / sparse-csr input types,
   defaulting to "unregistered" when nothing matches. #}
{% macro sparse_op_name_map(op) %}
/*
******************************************************************
NOTE: The following codes are for 'get_compat_kernel_signature.py'
All possible KernelSignatures returned by {{op["name"] | to_pascal_case }}OpArgumentMapping:

{{op | cartesian_prod_mapping}}
******************************************************************
*/

KernelSignature {{op["op_name"] | to_pascal_case }}OpArgumentMapping(const ArgumentMappingContext& ctx) {
  {% set kernel_args = op["kernel"]["param"] %}
  {{get_input_list(op["inputs"], kernel_args)}};
  paddle::small_vector<const char*> attrs;
  {% for attr in op["attrs"]%}
  {% filter indent(2)%}
  {{get_an_attr(attr, kernel_args)}}
  {% endfilter %}
  {% endfor %}
  {{get_output_list(op["outputs"], kernel_args)}};

  const char* kernel_name = "unregistered";
{{get_kernel_dispatch(op["inputs"], op["kernel"])}}
  KernelSignature sig (kernel_name, std::move(inputs), std::move(attrs), std::move(outputs));
  return sig;
}
{% endmacro %}

{# Emit PD_REGISTER_BASE_KERNEL_NAME mapping the fluid op name to the phi op name. #}
{% macro register_base_kernel_name(op) %}
PD_REGISTER_BASE_KERNEL_NAME({{op["op_name"]}}, {{op["name"]}});
{%- endmacro %}

{# Emit PD_REGISTER_ARG_MAPPING_FN registering this op's ArgumentMapping function. #}
{% macro register_name_map(op) %}
PD_REGISTER_ARG_MAPPING_FN({{op["op_name"]}}, phi::{{op["op_name"] | to_pascal_case}}OpArgumentMapping);
{%- endmacro %}

{# Emit the `inputs` small_vector, keeping only fluid input names that
   appear in the kernel's parameter list. #}
{% macro get_input_list(inputs, kernel_args) %}{# inline #}
paddle::small_vector<const char*> inputs {
{%- for input in inputs %}
{%- if input["fluid_name"] in kernel_args %}
{{input["fluid_name"] | to_opmaker_name_cstr}}{{", " if not loop.last}}
{%- endif %}
{%- endfor %}
}
{%- endmacro %}

{# Push one attribute name into `attrs`. For Scalar / IntArray attrs that
   are not plain support-tensor attrs, prefer the auxiliary tensor input
   name when it is actually fed at runtime (ctx.HasInput / ctx.InputSize),
   otherwise fall back to the plain attribute name. #}
{% macro get_an_attr(attr, kernel_args) %}{# inline #}
{% set typename = attr["typename"] %}
{%- if attr["fluid_name"] in kernel_args %}
{% set name = attr["fluid_name"] %}
{% if typename is scalar and
("is_support_tensor" not in attr or attr["is_support_tensor"] is false) %}{# scalar correspond to a dispensable input and an attr in opmaker #}
attrs.emplace_back(ctx.HasInput("{{attr | to_scalar_tensor_name}}") ? "{{attr | to_scalar_tensor_name}}" : "{{name}}");
{%- elif typename == "IntArray" and
("is_support_tensor" not in attr or attr["is_support_tensor"] is false) %}
  {% if 'tensor_name' in attr and  'tensors_name' not in attr %}
attrs.emplace_back(
  ctx.HasInput("{{attr | to_int_array_tensor_name}}")
  ? "{{attr | to_int_array_tensor_name}}"
  : "{{name}}");
  {% elif 'tensor_name' not in attr and  'tensors_name' in attr %}
attrs.emplace_back(
  ctx.InputSize("{{attr | to_int_array_tensors_name}}") > 0
    ? "{{attr | to_int_array_tensors_name}}"
    : "{{name}}");
  {% else %}{# both tensor forms possible: check single tensor, then tensor list #}
attrs.emplace_back(
  ctx.HasInput("{{attr | to_int_array_tensor_name}}")
  ? "{{attr | to_int_array_tensor_name}}"
  : ctx.InputSize("{{attr | to_int_array_tensors_name}}") > 0
    ? "{{attr | to_int_array_tensors_name}}"
    : "{{name}}");
  {%- endif %}
{%- else %}
attrs.emplace_back("{{name}}");
{%- endif %}
{%- endif %}
{%- endmacro %}

{# Emit the `outputs` small_vector with every fluid output name.
   NOTE(review): `kernel_args` is accepted but unused — outputs are not
   filtered the way get_input_list filters inputs; confirm intentional. #}
{% macro get_output_list(outputs, kernel_args) %}{# inline #}
paddle::small_vector<const char*> outputs {
{%- for output in outputs %}
{{output["fluid_name"] | to_opmaker_name_cstr}}{{", " if not loop.last}}
{%- endfor %}
}
{%- endmacro %}

{# Generate GetExpectedKernelType: derives the kernel data type from a
   designated input or attr (or via complex promotion), then applies
   backend overrides (tensor-placed backend, force_cpu). Ops that declare
   "get_expected_kernel_type" delegate to that custom hook instead. #}
{% macro get_expected_kernel(op) %}
{% set kernel = op["kernel"] %}
phi::KernelKey GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const override {
{% if "get_expected_kernel_type" not in op %}
    phi::KernelKey kt;
{%if kernel["data_type"] is not none %}{# data type ---------------------------------#}
  {% if kernel["data_type"]["candidates"] | length == 1 %}
    {% set data_type_arg = kernel["data_type"]["candidates"][0] %}
    {% set inputs = op["inputs"] | map(attribute="fluid_name") | list %}
    {% set attrs = op["attrs"] | map(attribute="fluid_name") | list %}
    {% if data_type_arg in inputs %}
  auto data_type = framework::OperatorWithKernel::IndicateVarDataType(ctx, {{data_type_arg | to_opmaker_name}});
      {% if kernel["data_type"]["to_complex_flag"][0] %}
  data_type = framework::ToComplexType(data_type);
      {% endif %}
    {% elif data_type_arg in attrs %}{# it is an attribute and probably named dtype#}
  auto data_type = framework::proto::VarType::Type(ctx.Attr<int>("{{data_type_arg}}"));
    {% else %}
  auto data_type = framework::TransToProtoVarType(phi::{{data_type_arg}});
    {% endif %}
  {% elif kernel["data_type"]["candidates"] | length == 2 %}{# attr first; fall back to the input when attr is the -1 sentinel #}
    {% set data_type_args = kernel["data_type"]["candidates"] %}
  auto data_type = framework::proto::VarType::Type(ctx.Attr<int>("{{data_type_args[0]}}"));
  if (data_type == static_cast<framework::proto::VarType::Type>(-1)) {
    data_type = framework::OperatorWithKernel::IndicateVarDataType(ctx, {{data_type_args[1] | to_opmaker_name}});
  }
  {% endif %}
  kt = phi::KernelKey(data_type, ctx.GetPlace());
{% elif "complex_promote" in op and "forward" not in op%} {# complex data promote #}
  {% set inputs = op["complex_promote"]%}
  auto data_type =
        OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "{{inputs[0]}}", "{{inputs[1]}}");
  kt = phi::KernelKey(data_type, ctx.GetPlace());
{% endif -%}
{%- if kernel["backend"] is not none %}
    {% if kernel["data_type"] is none %}
    kt = OperatorWithKernel::GetExpectedKernelType(ctx);
    {% endif %}
    kt.set_backend(
        phi::TransToPhiBackend(ctx.Input<phi::DenseTensor>("{{kernel["backend"]["candidates"][0]}}")->place()));
{% endif %}
{% if "force_backend" in op["kernel"] and op["kernel"]["force_backend"] == "force_cpu" %}
  {% if  kernel["backend"] is none and kernel["data_type"] is none %} {# only force_cpu#}
    kt = OperatorWithKernel::GetExpectedKernelType(ctx);
  {% endif %}
    if (ctx.Attr<bool>("force_cpu")) {
      kt.set_backend(phi::Backend::CPU);
    }
{% endif %}
  return kt;
{% else %}{# op provides a custom get_expected_kernel_type hook #}
  return {{op["get_expected_kernel_type"]}}(ctx, this);
{% endif %}
}
{% endmacro -%}

{# Generate GetKernelTypeForVar: per-variable kernel key selection.
   When data_transform is configured, skip_transform / support_trans_dtype
   vars bypass the usual transform; otherwise the complex-promotion path
   is emitted. #}
{% macro get_kernel_for_var(op) %}
{% set skip_args = none %}
{% if op["data_transform"] is not none%}
  {% if "skip_transform" in op["data_transform"] %}
  {# TODO:(lizhiyu) support skip_transform and support_trans_dtype at the same time#}
    {% set skip_args = op["data_transform"]["skip_transform"] %}
  {% elif "support_trans_dtype" in op["data_transform"] %}
    {% set skip_args = op["data_transform"]["support_trans_dtype"] %}
  {% endif %}
{% endif %}
{% set var_name = "var_name" -%}

phi::KernelKey GetKernelTypeForVar(
    const std::string& {{var_name}},
    const phi::DenseTensor& tensor,
    const phi::KernelKey& expected_kernel_type) const override {
{%if skip_args is not none%}{# deal data_transform #}
  {% set skip_args_len = skip_args | length %}
      if (
        {%- for skip_arg in skip_args -%}
          var_name == "{{ skip_arg }}"
          {%- if skip_args_len != 1 and loop.index != skip_args_len %} || {% endif -%}
        {%- endfor -%}
      ) {
    {% if "skip_transform" in op["data_transform"] %}
        return phi::KernelKey(phi::Backend::ALL_BACKEND,
                            expected_kernel_type.layout(),
                            expected_kernel_type.dtype());
    {% elif "support_trans_dtype" in op["data_transform"] %}
       return phi::KernelKey(tensor.place(), tensor.layout(), tensor.dtype());
    {% endif %}
      }
{% else %}{# deal complex_promote #}
      if (framework::IsComplexType(expected_kernel_type.dtype())) {
      // only promote inputs' types when it contains complex input
        return phi::KernelKey(tensor.place(), tensor.layout(), tensor.dtype());
      }
{% endif %}
      else {
          return phi::KernelKey(
            tensor.place(), tensor.layout(), expected_kernel_type.dtype());
      }
    }
{% endmacro %}

{# --------------------------------------- operator  ---------------------------------------------- #}
{# Generate the {{Op}}Op class (OperatorWithKernel) together with its
   optional GetExpectedKernelType / GetKernelTypeForVar overrides, the
   infer-var-type functor, DECLARE_INFER_SHAPE_FUNCTOR, and optional
   inplace / no-need-buffer inferers. #}
{% macro operator(op) %}
class {{op["op_name"] | to_pascal_case}}Op : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  {# ----------- get expected kernel type function -------------------------- #}
  {% set kernel = op["kernel"] %}
  {% if kernel["data_type"] is not none or kernel["backend"] is not none
      or kernel["force_backend"] is not none
      or "complex_promote" in op or "data_transform" in op 
      or "get_expected_kernel_type" in op%}
 protected:
    {% if kernel["data_type"] is not none or kernel["backend"] is not none
      or kernel["force_backend"] is not none or "complex_promote" in op
      or "get_expected_kernel_type" in op%}
      {% filter indent(2, True)%}
{{get_expected_kernel(op)}}
      {% endfilter %}
    {% endif %}
  {% endif %}
  {%- if "data_transform" in op and op["data_transform"] is not none -%}
      {% filter indent(2, True) %}
{{get_kernel_for_var(op)}}
      {% endfilter %}
  {%- elif "complex_promote" in op and op["complex_promote"] is not none -%}
      {% filter indent(2, True) %}
{{get_kernel_for_var(op)}}
        {% endfilter %}
      {%- endif %}
};

{% set infer_var_type_func_str = op["op_name"] | get_infer_var_type_func %}
{% if infer_var_type_func_str is not none %}
{{infer_var_type_func_str}}
{% endif %}

DECLARE_INFER_SHAPE_FUNCTOR({{op["op_name"]}}, {{op["op_name"] | to_pascal_case}}InferShapeFunctor,
                            PD_INFER_META(phi::{{op["infer_meta"]["func"]}}));
{# inplace inferer #}
{% if op["inplace"] is not none and op["inplace"] | length == 1%}
  {% set inplace_map %}
  {% for source, target in op["inplace"].items() %}
{{"{"}}{{target | to_opmaker_name}}, {{source | to_opmaker_name}}{{"}"}}{{", " if not loop.last}}
  {%- endfor %}
  {%- endset %}
DECLARE_INPLACE_OP_INFERER({{op["op_name"] | to_pascal_case}}InplaceInferer,
                           {{inplace_map}});
{% endif %}

{# no_need_buffer inferer #}
{% if op["no_need_buffer"] is not none %}
DECLARE_NO_NEED_BUFFER_VARS_INFERER({{op["op_name"] | to_pascal_case}}NoNeedBufferVarInferer,
                                    {{op["no_need_buffer"] | map("to_opmaker_name") | join(", ")}});
{% endif %}
{% endmacro%}

{# Emit REGISTER_OPERATOR(...) wiring the op class to its OpMaker (forward
   ops only), grad-op makers (or EmptyGradOpMaker), and the optional
   inplace / infer-var-type / composite-grad / no-need-buffer components,
   ending with the infer-shape functor. #}
{% macro register_op_with_components(op) %}
{% set name = op["op_name"] %}
REGISTER_OPERATOR({{name}}, ops::{{name | to_pascal_case}}Op,
{% if not "forward" in op %}{# it is a forward op #}
                  ops::{{name | to_pascal_case}}OpMaker,
{% endif %}
{% if "backward" in op and op["backward"] is not none and op["only_backward_composite"] is false%}{# backward #}
  {% set backward_name = op["backward"] %}
                  ops::{{backward_name | to_pascal_case}}OpMaker<paddle::framework::OpDesc>,
                  ops::{{backward_name | to_pascal_case}}OpMaker<paddle::imperative::OpBase>,
{% elif "forward" not in op %}
                  paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
                  paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
{% else %}
{% endif %}
{% if op is supports_inplace %}{# inplace#}
                  ops::{{name | to_pascal_case}}InplaceInferer,
{% endif %}
{% set infer_var_type_func_str = op["op_name"] | get_infer_var_type_func %}
{% if infer_var_type_func_str is not none %}
                  ops::{{name | to_pascal_case}}InferVarType,
{% endif %}
{% if "backward_composite" in op and op["backward_composite"] is not none %}
                  ops::{{op["backward_composite"] | to_composite_grad_opmaker_name}},
{% endif %}
{% if op is supports_no_need_buffer %}{# no_need_buffer #}
                  ops::{{name | to_pascal_case}}NoNeedBufferVarInferer,
{% endif %}
                  ops::{{name | to_pascal_case}}InferShapeFunctor);
{% endmacro %}

{# Emit REGISTER_OP_VERSION with one AddCheckpoint per yaml "version"
   entry; each action maps to the matching OpVersionDesc builder call.
   The trailing ")" is appended on the last action of a checkpoint. #}
{% macro register_op_version(op) %}
{% if "version" in op %}
{% set name = op["op_name"] %}
REGISTER_OP_VERSION({{name}})
  {% for checkpoint in op["version"]%}
  .AddCheckpoint(
    R"ROC({{checkpoint["checkpoint"]}})ROC",
      paddle::framework::compatible::OpVersionDesc()
    {% for action in checkpoint["action"]%}
      {% if "add_input" in action %}
        .NewInput("{{action["add_input"]}}", "{{action["comment"]}}"){{")" if loop.last}}
      {% endif %}
      {% if "delete_input" in action %}
        .DeleteInput("{{action["delete_input"]}}", "{{action["comment"]}}"){{")" if loop.last}}
      {% endif %}
      {% if "modify_input" in action %}
        .ModifyInput("{{action["modify_input"]}}", "{{action["comment"]}}"){{")" if loop.last}}
      {% endif %}
      {% if "add_output" in action %}
        .NewOutput("{{action["add_output"]}}", "{{action["comment"]}}"){{")" if loop.last}}
      {% endif %}
      {% if "delete_output" in action %}
        .DeleteOutput("{{action["delete_output"]}}", "{{action["comment"]}}"){{")" if loop.last}}
      {% endif %}
      {% if "modify_output" in action %}
        .ModifyOutput("{{action["modify_output"]}}", "{{action["comment"]}}"){{")" if loop.last}}
      {% endif %}
      {% if "add_attr" in action %}
        .NewAttr("{{action["add_attr"]}}", "{{action["comment"]}}", {{action["default"]}}){{")" if loop.last}}
      {% endif %}
      {% if "delete_attr" in action %}
        .DeleteAttr("{{action["delete_attr"]}}", "{{action["comment"]}}"){{")" if loop.last}}
      {% endif %}
      {% if "modify_attr" in action %}
        .ModifyAttr("{{action["modify_attr"]}}", "{{action["comment"]}}", {{action["default"]}}){{")" if loop.last}}
      {% endif %}
      {% if "fix_bug" in action %}
        .BugfixWithBehaviorChanged("{{action["comment"]}}"){{")" if loop.last}}
      {% endif %}
    {% endfor %}
  {% endfor %};
{% endif %}
{% endmacro %}


{# --------------------------------------- backward op maker ---------------------------------------------- #}
{# Generate a SingleGradOpMaker for a backward op: wires grad-op inputs /
   outputs back to the forward op's (original fluid) names, forwards the
   attr map, and re-attaches Scalar / IntArray auxiliary tensor inputs. #}
{% macro backward_op_maker(op, forward_op ) %}
  {% set name = op["op_name"] %}
  {% set forward_input_names = op["forward"]["inputs"] | map(attribute="fluid_name") | list %}
  {% set forward_output_names = op["forward"]["outputs"] | map(attribute="fluid_name") | list %}
  {% set forward_attr_names = op["forward"]["attrs"] | map(attribute="fluid_name") | list %}
  {% set forward_input_orig_names = forward_op["inputs"] | map(attribute="fluid_name") | list %}
  {% set forward_output_orig_names = forward_op["outputs"] | map(attribute="fluid_name") | list %}
  {% set forward_attr_orig_names = forward_op["attrs"] | map(attribute="fluid_name") | list %}
template <typename T>
class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("{{name}}");

  {% for input in op["inputs"] %}
    grad_op->SetInput({{input["fluid_name"] | to_opmaker_name}}, this->{{extract_input_from_forward(
      input["fluid_name"],
      forward_input_names,
      forward_output_names,
      forward_input_orig_names,
      forward_output_orig_names)}});
  {% endfor %}

  {% for output in op["outputs"] %}
    grad_op->SetOutput({{output["fluid_name"] | to_opmaker_name}}, this->{{extract_output_from_forward(
      output["fluid_name"],
      forward_input_names,
      forward_output_names,
      forward_input_orig_names,
      forward_output_orig_names,
      output['drop_empty_grad'])}});
  {% endfor %}

    grad_op->SetAttrMap(this->Attrs());
  {% for attr in op["attrs"] %}
    {% set attr_name = attr["fluid_name"] %}
    {% if attr_name in forward_attr_names %}
      {% if attr["typename"] == "IntArray" %}{# forward the IntArray's auxiliary tensor inputs when present #}
        {% if 'tensor_name' in attr or 'manual_flag' not in attr %}
    if (this->HasInput("{{attr | to_int_array_tensor_name}}")) {
      grad_op->SetInput("{{attr | to_int_array_tensor_name}}", this->Input("{{attr | to_int_array_tensor_name}}"));
    }
        {% endif %}
        {% if 'tensors_name' in attr or 'manual_flag' not in attr %}
    if (this->HasInput("{{attr | to_int_array_tensors_name}}")) {
      grad_op->SetInput("{{attr | to_int_array_tensors_name}}", this->Input("{{attr | to_int_array_tensors_name}}"));
    }
        {% endif %}
      {% elif attr["typename"] is scalar and 
      ("is_support_tensor" not in attr or attr["is_support_tensor"] is false)%}
    if (this->HasInput("{{attr | to_scalar_tensor_name}}")) {
      grad_op->SetInput("{{attr | to_scalar_tensor_name}}", this->Input("{{attr | to_scalar_tensor_name}}"));
    }
      {% endif %}
    {% else %}{# maybe something wrong: backward op has more attrs than the forward one#}
    grad_op->SetAttr("{{attr_name}}", {{process_default_value(attr)}});
    {% endif %}
  {% endfor %}
  }
};
{% endmacro %}

{# Generate a SingleGradOpMaker for a backward op that is implemented by
   invoking another op (`invoke_op["func"]`): maps the invoked op's
   inputs/outputs/attrs from the forward op's original names.
   NOTE(review): at the tensor-name checks below, the `tensor_name` guard
   tests `attr` while the `tensors_name` guard tests `fw_attr`, and the
   is_support_tensor guard also tests `attr` — confirm the asymmetry is
   intentional (invoke attrs are assumed index-aligned with fw attrs). #}
{% macro backward_op_reused_maker(bw_op, forward_op, invoke_op) %}
  {% set name = bw_op["op_name"] %}
  {% set forward_input_names = bw_op["forward"]["inputs"] | map(attribute="fluid_name") | list %}
  {% set forward_output_names = bw_op["forward"]["outputs"] | map(attribute="fluid_name") | list %}
  {% set forward_attr_names = bw_op["forward"]["attrs"] | map(attribute="fluid_name") | list %}
  {% set forward_input_orig_names = forward_op["inputs"] | map(attribute="fluid_name") | list %}
  {% set forward_output_orig_names = forward_op["outputs"] | map(attribute="fluid_name") | list %}
  {% set forward_attr_orig_names = forward_op["attrs"] | map(attribute="fluid_name") | list %}
template <typename T>
class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("{{invoke_op["func"]}}");

  {% for input in invoke_op["inputs"] %}
    grad_op->SetInput({{input["fluid_name"] | to_opmaker_name}}, this->{{extract_input_from_forward(
      input["value"],
      forward_input_names,
      forward_output_names,
      forward_input_orig_names,
      forward_output_orig_names)}});
  {% endfor %}

  {% for output in invoke_op["outputs"] %}
    grad_op->SetOutput({{output["fluid_name"] | to_opmaker_name}}, this->{{extract_output_from_forward(
      output["value"],
      forward_input_names,
      forward_output_names,
      forward_input_orig_names,
      forward_output_orig_names,
      true)}});
  {% endfor %}

  {% for attr in invoke_op["attrs"] %}
    {% set attr_name = attr["fluid_name"] %}
    {% set fw_attrs = forward_op["attrs"] %}
    {% if attr_name in forward_attr_names %}
      {# invoke_op's attrs and fw_attr's attrs must be the same#}
      {% set fw_attr = fw_attrs[loop.index0] %}
      {% if fw_attr["typename"] == "IntArray" %}
        {% if 'tensor_name' in attr or 'manual_flag' not in attr %}
    if (this->HasInput("{{fw_attr | to_int_array_tensor_name}}")) {
      grad_op->SetInput("{{fw_attr | to_int_array_tensor_name}}", this->Input("{{fw_attr | to_int_array_tensor_name}}"));
    }
        {% endif %}
        {% if 'tensors_name' in fw_attr or 'manual_flag' not in fw_attr %}
    if (this->HasInput("{{fw_attr | to_int_array_tensors_name}}")) {
      grad_op->SetInput("{{fw_attr | to_int_array_tensors_name}}", this->Input("{{fw_attr | to_int_array_tensors_name}}"));
    }
        {% endif %}
      {% elif fw_attr["typename"] is scalar and
         ("is_support_tensor" not in attr or attr["is_support_tensor"] is false) %}
    if (this->HasInput("{{fw_attr | to_scalar_tensor_name}}")) {
      grad_op->SetInput("{{fw_attr | to_scalar_tensor_name}}", this->Input("{{fw_attr | to_scalar_tensor_name}}"));
    }
      {% endif %}
    {% endif %}
  {% endfor %}

  {% for attr in invoke_op["attrs"] %}
    grad_op->SetAttr("{{attr["fluid_name"]}}", {{attr["value"]}});
  {% endfor %}
  }
};
{% endmacro %}

{# Generate a CompositeGradOpMakerBase subclass whose Apply() fetches
   forward inputs/attrs/outputs, calls the composite backward function,
   and recovers output names — each step delegated to a helper macro. #}
{% macro composite_grad_op_maker(backward_op, forward_op) %}
  {% set op_name = backward_op["op_name"] %}
  {% set inputs = backward_op["inputs"] | to_variable_names("name")%}
  {% set input_dict = backward_op["input_dict"] %}
  {% set fluid_inputs = backward_op["inputs"] | to_variable_names("fluid_name")%}
  {% set forward_fluid_inputs = backward_op["forward"]["inputs"] | to_variable_names("fluid_name")%}
  {% set forward_fluid_outputs = backward_op["forward"]["outputs"] | to_variable_names("fluid_name")%}
  {% set forward_fluid_orig_inputs = forward_op["inputs"] | map(attribute="fluid_name") | list %}
  {% set forward_fluid_orig_outputs = forward_op["outputs"] | map(attribute="fluid_name") | list %}
  {% set attrs = backward_op["attrs"] | to_variable_names("name") %}
  {% set fluid_attrs = backward_op["attrs"] | to_variable_names("fluid_name") %}
  {% set attr_dict = backward_op["attr_dict"] %}
  {% set outputs = backward_op["outputs"] | to_variable_names("name")%}
  {% set output_dict = backward_op["output_dict"] %}
  {% set fluid_outputs = backward_op["outputs"] | to_variable_names("fluid_name")%}
  {% set composite_func_info = backward_op["composite"] %}
class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradOpMakerBase {
 public:
  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
  void Apply() override {
    //get inputs
{{construct_composite_input(inputs, fluid_inputs, forward_fluid_inputs, forward_fluid_outputs, forward_fluid_orig_inputs, forward_fluid_orig_outputs, input_dict)}}
{{construct_composite_tensor_attr(attrs, fluid_attrs, attr_dict, op_name)}}
    //get attr
{{construct_composite_attr(attrs, fluid_attrs, attr_dict)}}
    //get output
{{construct_composite_output(outputs, fluid_outputs, forward_fluid_inputs, forward_fluid_orig_inputs, output_dict)}}
    //get output ptr
{{construct_composite_output_ptr(outputs, output_dict)}}
    //get output orginal name
{{get_composite_output_orginal_name(outputs, output_dict)}}
    //call composite backward func
{{call_composite_backward_api(composite_func_info)}}
    //recover output name
{{recover_composite_output_name(outputs)}}
  }
};
{%- endmacro %}

{# -------------------- composite grad op: input capture -------------------- #}
{# For each backward input, emit the matching Get*Forward{Input,Output} or   #}
{# Get*OutputGrad call. Resolution order per input (by fluid name):          #}
{#   1. a forward input           -> Get(Optional)(Single|Multi)ForwardInput #}
{#   2. a forward output          -> Get(Optional)(Single|Multi)ForwardOutput#}
{#   3. "<x>_grad" where <x> is a forward output ([:-5] strips "_grad")      #}
{#                                -> Get(Optional)(Single|Multi)OutputGrad   #}
{# "Tensor" selects the Single variants, "Tensor[]" the Multi ones; the      #}
{# optional flag selects the GetOptional* form. Inputs matching none of the  #}
{# three cases emit nothing.                                                 #}
{% macro construct_composite_input(inputs, fluid_inputs, forward_fluid_inputs, forward_fluid_outputs,  forward_fluid_orig_inputs, forward_fluid_orig_outputs, input_dict) %}
  {% set inputs_length = inputs | length %}
  {% for i in range(inputs_length) %}
    {% set input_typename = input_dict[inputs[i]]["typename"] %}
    {% set input_optional_flag = input_dict[inputs[i]]["optional"] %}
    {# case 1: backward input is a forward input #}
    {% if fluid_inputs[i] in forward_fluid_inputs %}
      {% set name_in_forward_orig = forward_fluid_orig_inputs[forward_fluid_inputs.index(fluid_inputs[i])]%}
      {% if input_typename == "Tensor" %}
        {% if input_optional_flag == True %}
    auto {{inputs[i]}} = this->GetOptionalSingleForwardInput({{name_in_forward_orig | to_opmaker_name}});
        {% else %}
    auto {{inputs[i]}} = this->GetSingleForwardInput({{name_in_forward_orig | to_opmaker_name}});
        {% endif %}
      {% elif input_typename == "Tensor[]" %}
        {% if input_optional_flag == True %}
    auto {{inputs[i]}} = this->GetOptionalMultiForwardInput({{name_in_forward_orig | to_opmaker_name}});
        {% else %}
    auto {{inputs[i]}} = this->GetMultiForwardInput({{name_in_forward_orig | to_opmaker_name}});
        {% endif %}
      {% endif %}
    {# case 2: backward input is a forward output #}
    {% elif fluid_inputs[i] in forward_fluid_outputs %}
      {% set name_in_forward_orig = forward_fluid_orig_outputs[forward_fluid_outputs.index(fluid_inputs[i])]%}
      {% if input_typename == "Tensor" %}
        {% if input_optional_flag == True %}
    auto {{inputs[i]}} = this->GetOptionalSingleForwardOutput({{name_in_forward_orig | to_opmaker_name}});
        {% else %}
    auto {{inputs[i]}} = this->GetSingleForwardOutput({{name_in_forward_orig | to_opmaker_name}});
        {% endif %}
      {% elif input_typename == "Tensor[]" %}
        {% if input_optional_flag == True %}
    auto {{inputs[i]}} = this->GetOptionalMultiForwardOutput({{name_in_forward_orig | to_opmaker_name}});
        {% else %}
    auto {{inputs[i]}} = this->GetMultiForwardOutput({{name_in_forward_orig | to_opmaker_name}});
        {% endif %}
      {% endif %}
    {# case 3: backward input is the grad of a forward output ("_grad" suffix) #}
    {% elif fluid_inputs[i][:-5] in forward_fluid_outputs %}
      {% set name_in_forward_orig = forward_fluid_orig_outputs[forward_fluid_outputs.index(fluid_inputs[i][:-5])]%}
      {% if input_typename == "Tensor" %}
        {% if input_optional_flag == True %}
    auto {{inputs[i]}} = this->GetOptionalSingleOutputGrad({{name_in_forward_orig | to_opmaker_name}});
        {% else %}
    auto {{inputs[i]}} = this->GetSingleOutputGrad({{name_in_forward_orig | to_opmaker_name}});
        {% endif %}
      {% elif input_typename == "Tensor[]" %}
        {% if input_optional_flag == True %}
    auto {{inputs[i]}} = this->GetOptionalMultiOutputGrad({{name_in_forward_orig | to_opmaker_name}});
        {% else %}
    auto {{inputs[i]}} = this->GetMultiOutputGrad({{name_in_forward_orig | to_opmaker_name}});
        {%- endif %}
      {%- endif %}
    {%- endif %}
  {%- endfor %}
{%- endmacro %}

{# ------------------- tensor-attribute guard (composite) ------------------- #}
{# For every attribute that may also arrive as an input tensor (entries with #}
{# "tensor_name" / "tensors_name"), emit a runtime check that rejects the    #}
{# tensor form: composite grad rules only handle plain attribute values.     #}
{# fix: the Tensor[] branch previously rendered attr_dict[...]['tensor_name']#}
{# (wrong key) in its error message; it now uses 'tensors_name'.             #}
{# fix: the first message ended in "composite" with no trailing space, so    #}
{# C++ adjacent-literal concatenation produced "compositefor now.".          #}
{% macro construct_composite_tensor_attr(attrs, fluid_attrs, attr_dict, op_name) %}
  {% set attrs_length = attrs | length %}
  {% for i in range(attrs_length) %}
    {% if "tensor_name" in attr_dict[attrs[i]] %}
    auto {{'tensor_' + attrs[i]}} = this->GetOptionalSingleForwardInput("{{attr_dict[attrs[i]]['tensor_name']}}");
    if ({{'tensor_' + attrs[i]}}) {
      PADDLE_THROW(platform::errors::Unimplemented(
          "We don't support dynamic tensor attribute {{attr_dict[attrs[i]]['tensor_name']}} for {{op_name}} composite "
          "for now. "));
    }
    {%- endif %}
    {% if "tensors_name" in attr_dict[attrs[i]] %}
    auto {{'tensors_' + attrs[i]}} = this->GetOptionalMultiForwardInput("{{attr_dict[attrs[i]]['tensors_name']}}");
    if ({{'tensors_' + attrs[i]}}) {
      PADDLE_THROW(platform::errors::Unimplemented(
          "We don't support dynamic tensors attribute {{attr_dict[attrs[i]]['tensors_name']}} for {{op_name}} composite "
          "for now. "));
    }
    {%- endif %}
  {%- endfor %}
{%- endmacro %}

{# -------------------- composite grad op: attr extraction ------------------ #}
{# Emits `const <type> <name> = this->Attr<type>("<fluid_name>");` for each  #}
{# attribute. The C++ type comes from an explicit "data_type" override when  #}
{# present, otherwise from the typename via the to_op_attr_type filter.      #}
{# (Jinja {% if %} does not open a new scope, so the {% set %} inside it is  #}
{# visible on the emitting line below.)                                      #}
{% macro construct_composite_attr(attrs, fluid_attrs, attr_dict) %}
  {% set attrs_length = attrs | length %}
  {% for i in range(attrs_length) %}
    {% if "data_type" in attr_dict[attrs[i]] %}
      {% set attrs_data_type = attr_dict[attrs[i]]["data_type"]%}
    {% else %}
      {% set attrs_data_type = attr_dict[attrs[i]]["typename"] | to_op_attr_type %}
    {%- endif %}
    const {{attrs_data_type}} {{attrs[i]}} = this->Attr<{{attrs_data_type}}>("{{fluid_attrs[i]}}");
  {% endfor %}
{%- endmacro %}

{# ----------------- composite grad op: grad-output declaration ------------- #}
{# Each backward output "<x>_grad" maps back to forward input <x> ([:-5]     #}
{# strips the "_grad" suffix); declare it as <out>_t via GetSingleInputGrad  #}
{# (Tensor) or GetMultiInputGrad (Tensor[]).                                 #}
{# fix: dropped the trailing spaces after the endif tag — trim_blocks only   #}
{# trims a newline immediately following a tag, so those spaces leaked a     #}
{# whitespace-only line into the generated C++ on every iteration.           #}
{% macro construct_composite_output(outputs, fluid_outputs, forward_fluid_inputs, forward_fluid_orig_inputs, output_dict) %}
  {% set outputs_length = outputs | length %}
  {% for i in range(outputs_length) %}
    {% set name_in_forward_orig = forward_fluid_orig_inputs[forward_fluid_inputs.index(fluid_outputs[i][:-5])]%}
    {% set output_typename = output_dict[outputs[i]]["typename"] %}
    {% if output_typename == "Tensor" %}
    auto {{outputs[i] + "_t"}} = this->GetSingleInputGrad({{name_in_forward_orig | to_opmaker_name}});
    {% elif output_typename == "Tensor[]" %}
    auto {{outputs[i] + "_t"}} = this->GetMultiInputGrad({{name_in_forward_orig | to_opmaker_name}});
    {%- endif %}
  {%- endfor %}
{%- endmacro %}

{# --------------- composite grad op: output pointer extraction ------------- #}
{# Converts the declared grad outputs (<out>_t) into what the composite      #}
{# backward rule expects: a pointer via GetOutputPtr for "Tensor", and a     #}
{# std::vector<paddle::Tensor*> built element-by-element for "Tensor[]".     #}
{# fix: generated loop now reads "for (size_t ...) {" to match the spacing   #}
{# style of the rest of the generated code (cf. the "if (" lines).           #}
{# fix: dropped trailing spaces after the endif tag, which defeated          #}
{# trim_blocks' newline trim and leaked whitespace into the output.          #}
{% macro construct_composite_output_ptr(outputs, output_dict) %}
  {% set outputs_length = outputs | length %}
  {% for i in range(outputs_length) %}
    {% set output_typename = output_dict[outputs[i]]["typename"] %}
    {% if output_typename == "Tensor" %}
    auto {{outputs[i]}} = this->GetOutputPtr(&{{outputs[i] + "_t"}});
    {% elif output_typename == "Tensor[]" %}
    std::vector<paddle::Tensor*> {{outputs[i]}}({{outputs[i] + "_t"}}.size());
    for (size_t i = 0; i < {{outputs[i]}}.size(); ++i) {
      {{outputs[i]}}[i] = &{{outputs[i] + "_t"}}[i];
    }
    {{outputs[i]}} = this->GetOutputPtr({{outputs[i]}});
    {%- endif %}
  {%- endfor %}
{%- endmacro %}

{# ------------- composite grad op: capture original output names ----------- #}
{# Saves the framework-assigned name of each grad output (<out>_name) so it  #}
{# can be restored by recover_composite_output_name after the composite rule #}
{# runs. (Macro name keeps the historical "orginal" spelling — callers       #}
{# depend on it.)                                                            #}
{# fix: the "Tensor" and "Tensor[]" branches emitted identical lines, so     #}
{# they are merged into one condition; outputs of any other typename still   #}
{# emit nothing, as before. Also dropped trailing spaces after the endif     #}
{# tag that leaked whitespace into the generated code.                       #}
{% macro get_composite_output_orginal_name(outputs, output_dict) %}
  {% set outputs_length = outputs | length %}
  {% for i in range(outputs_length) %}
    {% set output_typename = output_dict[outputs[i]]["typename"] %}
    {% if output_typename == "Tensor" or output_typename == "Tensor[]" %}
    auto {{outputs[i] + "_name"}} = this->GetOutputName({{outputs[i] + "_t"}});
    {%- endif %}
  {%- endfor %}
{%- endmacro %}

{# Emits the call into the composite backward rule registered under prim.    #}
{# fix: corrected "Runing" -> "Running" in the generated VLOG message.       #}
{% macro call_composite_backward_api(composite_func_info) %}
    VLOG(6) << "Running {{composite_func_info["func_name"]}} composite func";
    prim::{{composite_func_info["func_name"]}}<prim::DescTensor>({{composite_func_info["func_args"]}});
{%- endmacro %}

{# Restores each grad output's framework name (saved as <out>_name) onto the #}
{# corresponding <out>_t after the composite backward rule has run.          #}
{% macro recover_composite_output_name(outputs) %}
  {% for out_name in outputs %}
    this->RecoverOutputName({{out_name + "_t"}}, {{out_name + "_name"}});
  {% endfor %}
{%- endmacro %}

{# ------------------- grad-op maker: resolve a grad input ------------------ #}
{# Inline macro: renders the Input/Output/OutputGrad(...) expression for a   #}
{# backward input named `name`. Resolution order:                            #}
{#   1. `name` is a forward input  -> Input(orig_name)                       #}
{#   2. `name` is a forward output -> Output(orig_name)                      #}
{#   3. `name` ends in "_grad" and its stem ([:-5]) is a forward output      #}
{#                                 -> OutputGrad(orig_name)                  #}
{# Renders nothing when no case matches.                                     #}
{% macro extract_input_from_forward(name,
  input_names, output_names,
  input_orig_names, output_orig_names) %}{# inline #}
  {% if name in input_names %}
    {% set name_in_forward_orig = input_orig_names[input_names.index(name)]%}
Input({{name_in_forward_orig | to_opmaker_name}})
  {%- elif name in output_names %}
    {% set name_in_forward_orig = output_orig_names[output_names.index(name)]%}
Output({{name_in_forward_orig | to_opmaker_name}})
  {%- elif name.endswith("_grad") %}{# output grad#}
    {% set name_in_forward = name[:-5] %}
    {% if name_in_forward in output_names %}
      {% set name_in_forward_orig = output_orig_names[output_names.index(name_in_forward)] %}
OutputGrad({{name_in_forward_orig | to_opmaker_name}})
    {%- endif %}
  {%- endif %}
{%- endmacro %}

{# ------------------ grad-op maker: resolve a grad output ------------------ #}
{# Inline macro: renders the InputGrad/Input(...) expression for a backward  #}
{# output named `name`:                                                      #}
{#   1. "<x>_grad" where <x> is a forward input -> InputGrad(orig_name),     #}
{#      with ", false" appended when drop_empty_grad is not true;            #}
{#   2. `name` itself is a forward input        -> Input(name).              #}
{# Renders nothing when neither case matches.                                #}
{# fix: removed a dead {% set name_in_forward_orig %} in branch 2 (the       #}
{# rendered expression uses `name` directly) and the redundant parentheses   #}
{# around `name` in that branch's condition.                                 #}
{% macro extract_output_from_forward(name, input_names, output_names,
  input_orig_names, output_orig_names, drop_empty_grad) %}{# inline #}
  {% if name[:-5] in input_names %}
    {% set name_in_forward = name[:-5] %}
    {% set name_in_forward_orig = input_orig_names[input_names.index(name_in_forward)]%}
      {%- if drop_empty_grad is true -%}
InputGrad({{name_in_forward_orig | to_opmaker_name}})
      {%- else -%}
InputGrad({{name_in_forward_orig | to_opmaker_name}}, false)
      {%- endif %}
  {%- elif name in input_names %}
Input({{name  | to_opmaker_name}})
  {%- endif %}
{%- endmacro %}

{# ------------------- grad-op maker: resolve a grad attr ------------------- #}
{# Inline macro: renders the expression that forwards attribute `name` from  #}
{# the forward op. NOTE(review): attr_names / attr_origin_names keep the     #}
{# signature parallel with the extract_* macros above but are unused here —  #}
{# presumably reserved for attrs renamed between api and fluid specs.        #}
{% macro extract_attr_from_forward(name, attr_names, attr_origin_names) %}
this->GetAttr("{{name}}")
{%- endmacro %}
