/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/core/meta_tensor.h"

namespace phi {

// Common InferMeta Functions for fusion operators.
// NOTE: The InferMeta Functions in this file are intended to be kept in
// alphabetic order, but several entries currently deviate from that order.

// Shape/dtype inference for the fused_multi_transformer op.
// Produces one meta per entry of `cache_kv_outs` plus the final `out`.
// NOTE(review): per-layer inputs (ln_scales, qkv_weights, ...) are passed as
// vectors, one element per transformer layer — presumably all the same length;
// the inference logic lives in the corresponding .cc, not visible here.
void FusedMultiTransformerInferMeta(
    const MetaTensor& x,
    const std::vector<const MetaTensor*>& ln_scales,
    const paddle::optional<std::vector<const MetaTensor*>>& ln_biases,
    const std::vector<const MetaTensor*>& qkv_weights,
    const paddle::optional<std::vector<const MetaTensor*>>& qkv_biases,
    const paddle::optional<std::vector<const MetaTensor*>>& cache_kvs,
    const paddle::optional<std::vector<const MetaTensor*>>& pre_caches,
    const MetaTensor& rotary_tensor,
    const MetaTensor& beam_offset,
    const MetaTensor& time_step,
    const MetaTensor& seq_lengths,
    const MetaTensor& src_mask,
    const std::vector<const MetaTensor*>& out_linear_weights,
    const paddle::optional<std::vector<const MetaTensor*>>& out_linear_biases,
    const std::vector<const MetaTensor*>& ffn_ln_scales,
    const paddle::optional<std::vector<const MetaTensor*>>& ffn_ln_biases,
    const std::vector<const MetaTensor*>& ffn1_weights,
    const paddle::optional<std::vector<const MetaTensor*>>& ffn1_biases,
    const std::vector<const MetaTensor*>& ffn2_weights,
    const paddle::optional<std::vector<const MetaTensor*>>& ffn2_biases,
    bool pre_layer_norm,
    float epsilon,
    float residual_alpha,
    float dropout_rate,
    int rotary_emb_dims,
    bool is_test,
    const std::string& dropout_implementation,
    const std::string& act_method,
    bool trans_qkvw,
    int ring_id,
    const std::string& norm_type,
    bool use_neox_rotary_style,
    int gqa_group_size,
    std::vector<MetaTensor*> cache_kv_outs,
    MetaTensor* out);

// Shape/dtype inference for the XPU fused add+activation op
// (elementwise x + y followed by the activation selected by `act_type`).
// Outputs: `out` and its quantization max tensor `out_max`.
void AddActXPUInferMeta(const MetaTensor& x,
                        const MetaTensor& x_max,
                        const MetaTensor& y,
                        const MetaTensor& y_max,
                        int act_type,
                        MetaTensor* out,
                        MetaTensor* out_max);

// Shape/dtype inference for the XPU fused add+layer_norm op:
// layer_norm(x + y) with `scale`/`bias`, normalizing from `begin_norm_axis`.
void AddLayernormXPUInferMeta(const MetaTensor& x,
                              const MetaTensor& y,
                              const MetaTensor& scale,
                              const MetaTensor& bias,
                              int begin_norm_axis,
                              float epsilon,
                              MetaTensor* out);

// Shape/dtype inference for the XPU fused group_norm+silu op.
void GroupNormalizeSiluXPUInferMeta(const MetaTensor& x,
                                    const MetaTensor& scale,
                                    const MetaTensor& bias,
                                    int groups,
                                    float epsilon,
                                    MetaTensor* out);

// Shape/dtype inference for the XPU fused layer_norm+relu op.
void LayerNormalizeReluXPUInferMeta(const MetaTensor& x,
                                    const MetaTensor& scale,
                                    const MetaTensor& bias,
                                    int begin_norm_axis,
                                    float epsilon,
                                    MetaTensor* out);

// Shape/dtype inference for the blha_get_max_len op: computes the metas of
// the max encoder/decoder sequence lengths for the current step.
void BlhaGetMaxLenInferMeta(const MetaTensor& seq_lens_encoder,
                            const MetaTensor& seq_lens_decoder,
                            const MetaTensor& batch_size,
                            MetaTensor* max_enc_len_this_time,
                            MetaTensor* max_dec_len_this_time);

// Shape/dtype inference for the block_multihead_attention op (paged/blocked
// KV-cache attention). Outputs: the attention result `fmha_out`, the
// (possibly rewritten) `qkv_out`, and the updated key/value caches.
// NOTE(review): top-level `const` on by-value parameters is ignored by the
// compiler in a function type, so it was dropped here for consistency with
// every other declaration in this file; the signature is unchanged.
void BlockMultiheadAttentionInferMeta(const MetaTensor& qkv,
                                      const MetaTensor& key_cache,
                                      const MetaTensor& value_cache,
                                      const MetaTensor& seq_lens_encoder,
                                      const MetaTensor& seq_lens_decoder,
                                      const MetaTensor& seq_lens_this_time,
                                      const MetaTensor& padding_offsets,
                                      const MetaTensor& cum_offsets,
                                      const MetaTensor& cu_seqlens_q,
                                      const MetaTensor& cu_seqlens_k,
                                      const MetaTensor& block_tables,
                                      const MetaTensor& pre_key_cache,
                                      const MetaTensor& pre_value_cache,
                                      const MetaTensor& rope_emb,
                                      const MetaTensor& mask,
                                      const MetaTensor& tgt_mask,
                                      const MetaTensor& cache_k_quant_scales,
                                      const MetaTensor& cache_v_quant_scales,
                                      const MetaTensor& cache_k_dequant_scales,
                                      const MetaTensor& cache_v_dequant_scales,
                                      const MetaTensor& qkv_out_scale,
                                      const MetaTensor& qkv_bias,
                                      const MetaTensor& out_shift,
                                      const MetaTensor& out_smooth,
                                      const MetaTensor& max_enc_len_this_time,
                                      const MetaTensor& max_dec_len_this_time,
                                      int max_seq_len,
                                      int block_size,
                                      bool use_neox_style,
                                      bool dynamic_cachekv_quant,
                                      int quant_round_type,
                                      float quant_max_bound,
                                      float quant_min_bound,
                                      float out_scale,
                                      const std::string& compute_dtype,
                                      float rope_theta,
                                      MetaTensor* fmha_out,
                                      MetaTensor* qkv_out,
                                      MetaTensor* key_cache_out,
                                      MetaTensor* value_cache_out);

// Shape/dtype inference for the XPU variant of block_multihead_attention.
// Mirrors BlockMultiheadAttentionInferMeta with two extra XPU-only inputs
// (cache_k_per_batch_maxs / cache_v_per_batch_maxs).
// NOTE(review): top-level `const` on by-value parameters is ignored by the
// compiler in a function type, so it was dropped here for consistency with
// every other declaration in this file; the signature is unchanged.
void BlockMultiheadAttentionInferXPUMeta(
    const MetaTensor& qkv,
    const MetaTensor& key_cache,
    const MetaTensor& value_cache,
    const MetaTensor& seq_lens_encoder,
    const MetaTensor& seq_lens_decoder,
    const MetaTensor& seq_lens_this_time,
    const MetaTensor& padding_offsets,
    const MetaTensor& cum_offsets,
    const MetaTensor& cu_seqlens_q,
    const MetaTensor& cu_seqlens_k,
    const MetaTensor& cache_k_per_batch_maxs,
    const MetaTensor& cache_v_per_batch_maxs,
    const MetaTensor& block_tables,
    const MetaTensor& pre_key_cache,
    const MetaTensor& pre_value_cache,
    const MetaTensor& rope_emb,
    const MetaTensor& mask,
    const MetaTensor& tgt_mask,
    const MetaTensor& cache_k_quant_scales,
    const MetaTensor& cache_v_quant_scales,
    const MetaTensor& cache_k_dequant_scales,
    const MetaTensor& cache_v_dequant_scales,
    const MetaTensor& qkv_out_scale,
    const MetaTensor& qkv_bias,
    const MetaTensor& out_shift,
    const MetaTensor& out_smooth,
    const MetaTensor& max_enc_len_this_time,
    const MetaTensor& max_dec_len_this_time,
    int max_seq_len,
    int block_size,
    bool use_neox_style,
    bool dynamic_cachekv_quant,
    int quant_round_type,
    float quant_max_bound,
    float quant_min_bound,
    float out_scale,
    const std::string& compute_dtype,
    float rope_theta,
    MetaTensor* fmha_out,
    MetaTensor* qkv_out,
    MetaTensor* key_cache_out,
    MetaTensor* value_cache_out);

// Shape/dtype inference for the XPU fused conv1d op (conv + bias + optional
// branch add + activation). Unlike the conv2d variant below, `dilations` and
// `strides` are scalars, reflecting the single spatial dimension.
void Conv1dXPUInferMeta(const MetaTensor& x,
                        const MetaTensor& x_max,
                        const MetaTensor& filter,
                        const MetaTensor& filter_max,
                        const MetaTensor& bias,
                        const MetaTensor& branch,
                        const MetaTensor& branch_max,
                        const std::vector<int>& paddings,
                        const std::string& padding_algorithm,
                        int dilations,
                        int strides,
                        int groups,
                        int act_type,
                        float act_param,
                        MetaTensor* out,
                        MetaTensor* out_max);

// Shape/dtype inference for the XPU fused conv2d op (conv + bias + optional
// branch add + activation, with quantization max tensors). `out_dtype`
// selects the output data type explicitly.
void Conv2dXPUInferMeta(const MetaTensor& x,
                        const MetaTensor& x_max,
                        const MetaTensor& filter,
                        const MetaTensor& filter_max,
                        const MetaTensor& bias,
                        const MetaTensor& branch,
                        const MetaTensor& branch_max,
                        const MetaTensor& scale_max,
                        const MetaTensor& out_max_in,
                        const std::vector<int>& paddings,
                        const std::vector<int>& dilations,
                        const std::vector<int>& strides,
                        const std::string& padding_algorithm,
                        int groups,
                        int act_type,
                        float act_param,
                        DataType out_dtype,
                        MetaTensor* out,
                        MetaTensor* out_max);

// Shape/dtype inference for the XPU fused spatial-transformer residual block
// (group_norm + conv stacks; one vector element per fused conv/gn layer).
void SpatialTransformerResblockXPUInferMeta(
    const MetaTensor& x,
    const std::vector<const MetaTensor*>& x_max,
    const std::vector<const MetaTensor*>& conv_bias,
    const std::vector<const MetaTensor*>& conv_filter,
    const std::vector<const MetaTensor*>& conv_filter_max,
    const std::vector<const MetaTensor*>& gn_bias,
    const std::vector<const MetaTensor*>& gn_scale,
    const std::vector<int>& dilations,
    const std::vector<int>& paddings,
    const std::vector<int>& strides,
    const std::vector<float>& gn_eps,
    const std::vector<int>& gn_groups,
    const std::vector<int>& groups,
    bool conv_fix,
    bool has_silu_fc_input,
    bool include_silu,
    MetaTensor* out,
    MetaTensor* out_max);

// Shape/dtype inference for the XPU fused embedding lookup + elementwise add
// op: several id tensors gathered from their tables and summed. Also emits
// sequence LoD info (`seq_lod`, `max_seq_len`) derived from `mask`.
void EmbeddingWithEltwiseAddXPUInferMeta(
    const std::vector<const MetaTensor*>& ids,
    const std::vector<const MetaTensor*>& tables,
    const MetaTensor& mask,
    MetaTensor* out,
    MetaTensor* seq_lod,
    MetaTensor* max_seq_len);

// Shape/dtype inference for the XPU fused fully-connected op
// (alpha * x @ w + beta + bias, then activation `act_type`).
void FcXPUInferMeta(const MetaTensor& x,
                    const MetaTensor& x_max,
                    const MetaTensor& w,
                    const MetaTensor& w_max,
                    const MetaTensor& bias,
                    const MetaTensor& scale_max,
                    const MetaTensor& out_max_in,
                    int in_num_col_dims,
                    bool transpose_x,
                    float alpha,
                    float beta,
                    int act_type,
                    float act_alpha,
                    DataType out_dtype,
                    MetaTensor* out,
                    MetaTensor* out_max);

// Shape/dtype inference for the XPU generate_sequence op; the output meta
// takes its dtype from the `dtype` attribute.
void GenerateSequenceXPUInferMeta(const MetaTensor& x,
                                  DataType dtype,
                                  MetaTensor* out);

// Shape/dtype inference for the XPU fused multi-encoder (stacked transformer
// encoder layers). Per-layer weights arrive as vectors; `x_fp16`/`out_fp16`
// are auxiliary half-precision metas alongside the main `out`.
void MultiEncoderXPUInferMeta(
    const MetaTensor& x,
    const std::vector<const MetaTensor*>& fc_input_max,
    const std::vector<const MetaTensor*>& fc_weight,
    const std::vector<const MetaTensor*>& fc_weight_max,
    const std::vector<const MetaTensor*>& fc_bias,
    const std::vector<const MetaTensor*>& ln_scale,
    const std::vector<const MetaTensor*>& ln_bias,
    const std::vector<const MetaTensor*>& smooth_scale_weight,
    const std::vector<const MetaTensor*>& roformer_embedding,
    const MetaTensor& mask,
    const MetaTensor& seq_lod,
    const MetaTensor& max_seq_len,
    int layer_num,
    bool norm_before,
    int hidden_dim,
    int head_num,
    int size_per_head,
    int ffn_hidden_dim_scale,
    int act_type,
    int relative_type,
    int slice_idx,
    bool is_per_channel,
    int max_pos_len,
    const std::vector<float>& softmax_max_value,
    const std::vector<std::string>& quant_types,
    MetaTensor* out,
    MetaTensor* x_fp16,
    MetaTensor* out_fp16);

// Shape/dtype inference for the fused_attention op. Emits metas for the final
// output plus every intermediate the backward pass needs (layer-norm stats,
// qkv projections, softmax/dropout buffers, etc.).
void FusedAttentionInferMeta(const MetaTensor& x,
                             const MetaTensor& ln_scale,
                             const MetaTensor& ln_bias,
                             const MetaTensor& qkv_weight,
                             const MetaTensor& qkv_bias,
                             const MetaTensor& cache_kv,
                             const MetaTensor& src_mask,
                             const MetaTensor& out_linear_weight,
                             const MetaTensor& out_linear_bias,
                             const MetaTensor& ln_scale_2,
                             const MetaTensor& ln_bias_2,
                             int num_heads,
                             bool transpose_qkv_wb,
                             bool pre_layer_norm,
                             float epsilon,
                             float attn_dropout_rate,
                             bool is_test,
                             bool attn_dropout_fix_seed,
                             int attn_dropout_seed,
                             const std::string& attn_dropout_implementation,
                             float dropout_rate,
                             bool dropout_fix_seed,
                             int dropout_seed,
                             const std::string& dropout_implementation,
                             float ln_epsilon,
                             bool add_residual,
                             int ring_id,
                             MetaTensor* ln_mean,
                             MetaTensor* ln_var,
                             MetaTensor* ln_out,
                             MetaTensor* qkv_out,
                             MetaTensor* qkv_bias_out,
                             MetaTensor* transpose_out_2,
                             MetaTensor* qk_out,
                             MetaTensor* qktv_out,
                             MetaTensor* softmax_out,
                             MetaTensor* attn_dropout_mask_out,
                             MetaTensor* attn_dropout_out,
                             MetaTensor* src_mask_out,
                             MetaTensor* fmha_out,
                             MetaTensor* out_linear_out,
                             MetaTensor* dropout_mask_out,
                             MetaTensor* ln_mean_2,
                             MetaTensor* ln_var_2,
                             MetaTensor* bias_dropout_residual_out,
                             MetaTensor* cache_kv_out,
                             MetaTensor* out,
                             MetaConfig config = MetaConfig());

// Shape/dtype inference for the fused_attention backward op. Consumes the
// forward intermediates and emits a gradient meta for each learnable input
// and forward intermediate; attributes mirror the forward declaration.
void FusedAttentionGradInferMeta(const MetaTensor& out_grad,
                                 const MetaTensor& x,
                                 const MetaTensor& qkv_weight,
                                 const MetaTensor& qkv_bias,
                                 const MetaTensor& qkv_bias_out,
                                 const MetaTensor& src_mask,
                                 const MetaTensor& src_mask_out,
                                 const MetaTensor& out_linear_weight,
                                 const MetaTensor& out_linear_bias,
                                 const MetaTensor& ln_scale,
                                 const MetaTensor& ln_bias,
                                 const MetaTensor& ln_scale_2,
                                 const MetaTensor& ln_bias_2,
                                 const MetaTensor& ln_out,
                                 const MetaTensor& ln_mean,
                                 const MetaTensor& ln_var,
                                 const MetaTensor& ln_mean_2,
                                 const MetaTensor& ln_var_2,
                                 const MetaTensor& bias_dropout_residual_out,
                                 const MetaTensor& qkv_out,
                                 const MetaTensor& transpose_out_2,
                                 const MetaTensor& qk_out,
                                 const MetaTensor& qktv_out,
                                 const MetaTensor& softmax_out,
                                 const MetaTensor& attn_dropout_mask_out,
                                 const MetaTensor& attn_dropout_out,
                                 const MetaTensor& fmha_out,
                                 const MetaTensor& out_linear_out,
                                 const MetaTensor& dropout_mask_out,
                                 int num_heads,
                                 bool transpose_qkv_wb,
                                 bool pre_layer_norm,
                                 float epsilon,
                                 float attn_dropout_rate,
                                 bool is_test,
                                 bool attn_dropout_fix_seed,
                                 int attn_dropout_seed,
                                 const std::string& attn_dropout_implementation,
                                 float dropout_rate,
                                 bool dropout_fix_seed,
                                 int dropout_seed,
                                 const std::string& dropout_implementation,
                                 float ln_epsilon,
                                 bool add_residual,
                                 int ring_id,
                                 MetaTensor* qkv_bias_grad,
                                 MetaTensor* qkv_bias_out_grad,
                                 MetaTensor* src_mask_out_grad,
                                 MetaTensor* out_linear_bias_grad,
                                 MetaTensor* ln_scale_grad,
                                 MetaTensor* ln_bias_grad,
                                 MetaTensor* ln_scale_2_grad,
                                 MetaTensor* ln_bias_2_grad,
                                 MetaTensor* x_grad,
                                 MetaTensor* qkv_weight_grad,
                                 MetaTensor* out_linear_weight_grad,
                                 MetaTensor* ln_out_grad,
                                 MetaTensor* bias_dropout_residual_out_grad,
                                 MetaTensor* qkv_out_grad,
                                 MetaTensor* qktv_out_grad,
                                 MetaTensor* transpose_out_2_grad,
                                 MetaTensor* qk_out_grad,
                                 MetaTensor* softmax_out_grad,
                                 MetaTensor* attn_dropout_out_grad,
                                 MetaTensor* fmha_out_grad,
                                 MetaTensor* out_linear_out_grad);

// Shape/dtype inference for the fused elementwise-add + activation op.
// `functor_list` names the fused functors; `intermediate_out` holds the
// pre-activation result when `save_intermediate_out` is set.
void FusedElemwiseAddActivationInferMeta(
    const MetaTensor& x,
    const MetaTensor& y,
    const std::vector<std::string>& functor_list,
    float scale,
    int axis,
    bool save_intermediate_out,
    MetaTensor* out,
    MetaTensor* intermediate_out);

// Shape/dtype inference for the backward of the fused elementwise-add +
// activation op; attributes mirror the forward declaration above.
void FusedElemwiseAddActivationGradInferMeta(
    const MetaTensor& x,
    const MetaTensor& y,
    const MetaTensor& out,
    const MetaTensor& intermediate_out,
    const MetaTensor& out_grad,
    const std::vector<std::string>& functor_list,
    float scale,
    int axis,
    bool save_intermediate_out,
    MetaTensor* x_grad,
    MetaTensor* y_grad);

// Shape/dtype inference for the fused_feedforward op (linear1 -> activation
// -> dropout1 -> linear2 -> dropout2, with pre- or post-layer-norm).
// Emits the output plus each intermediate needed by the backward pass.
void FusedFeedForwardInferMeta(const MetaTensor& x,
                               const MetaTensor& dropout1_seed,
                               const MetaTensor& dropout2_seed,
                               const MetaTensor& linear1_weight,
                               const MetaTensor& linear1_bias,
                               const MetaTensor& linear2_weight,
                               const MetaTensor& linear2_bias,
                               const MetaTensor& ln1_scale,
                               const MetaTensor& ln1_bias,
                               const MetaTensor& ln2_scale,
                               const MetaTensor& ln2_bias,
                               bool pre_layer_norm,
                               float ln1_epsilon,
                               float ln2_epsilon,
                               const std::string& act_method,
                               float dropout1_prob,
                               float dropout2_prob,
                               const std::string& dropout1_implementation,
                               const std::string& dropout2_implementation,
                               bool is_test,
                               bool dropout1_fix_seed,
                               bool dropout2_fix_seed,
                               int dropout1_seed_val,
                               int dropout2_seed_val,
                               bool add_residual,
                               int ring_id,
                               MetaTensor* out,
                               MetaTensor* dropout1_mask,
                               MetaTensor* dropout2_mask,
                               MetaTensor* ln1_mean,
                               MetaTensor* ln1_variance,
                               MetaTensor* ln2_mean,
                               MetaTensor* ln2_variance,
                               MetaTensor* linear1_out,
                               MetaTensor* ln1_out,
                               MetaTensor* dropout1_out,
                               MetaTensor* dropout2_out);

// Shape/dtype inference for the fused_feedforward backward op; consumes the
// forward intermediates and emits gradient metas for x, both linears, and
// both layer-norm scale/bias pairs. Attributes mirror the forward.
void FusedFeedForwardGradInferMeta(const MetaTensor& out_grad,
                                   const MetaTensor& x,
                                   const MetaTensor& linear1_weight,
                                   const MetaTensor& linear1_bias,
                                   const MetaTensor& linear2_weight,
                                   const MetaTensor& dropout1_mask,
                                   const MetaTensor& dropout2_mask,
                                   const MetaTensor& linear1_out,
                                   const MetaTensor& dropout1_out,
                                   const MetaTensor& dropout2_out,
                                   const MetaTensor& ln1_scale,
                                   const MetaTensor& ln1_bias,
                                   const MetaTensor& ln1_out,
                                   const MetaTensor& ln1_mean,
                                   const MetaTensor& ln1_variance,
                                   const MetaTensor& ln2_scale,
                                   const MetaTensor& ln2_bias,
                                   const MetaTensor& ln2_mean,
                                   const MetaTensor& ln2_variance,
                                   const MetaTensor& linear2_bias,
                                   bool pre_layer_norm,
                                   float ln1_epsilon,
                                   float ln2_epsilon,
                                   const std::string& act_method,
                                   float dropout1_prob,
                                   float dropout2_prob,
                                   const std::string& dropout1_implementation,
                                   const std::string& dropout2_implementation,
                                   bool is_test,
                                   bool dropout1_fix_seed,
                                   bool dropout2_fix_seed,
                                   int dropout1_seed_val,
                                   int dropout2_seed_val,
                                   bool add_residual,
                                   int ring_id,
                                   MetaTensor* x_grad,
                                   MetaTensor* linear1_weight_grad,
                                   MetaTensor* linear1_bias_grad,
                                   MetaTensor* linear2_weight_grad,
                                   MetaTensor* linear2_bias_grad,
                                   MetaTensor* ln1_scale_grad,
                                   MetaTensor* ln1_bias_grad,
                                   MetaTensor* ln2_scale_grad,
                                   MetaTensor* ln2_bias_grad);

// Shape/dtype inference for the fused_gemm_epilogue op
// (GEMM + bias + optional activation named by `activation`); `reserve_space`
// is an auxiliary output consumed by the backward pass.
void FusedGemmEpilogueInferMeta(const MetaTensor& x,
                                const MetaTensor& y,
                                const MetaTensor& bias,
                                bool trans_x,
                                bool trans_y,
                                const std::string& activation,
                                MetaTensor* out,
                                MetaTensor* reserve_space,
                                MetaConfig config = MetaConfig());

// Shape/dtype inference for the fused_gemm_epilogue backward op; emits
// gradient metas for x, y, and bias.
void FusedGemmEpilogueGradInferMeta(const MetaTensor& x,
                                    const MetaTensor& y,
                                    const MetaTensor& reserve_space,
                                    const MetaTensor& out_grad,
                                    bool trans_x,
                                    bool trans_y,
                                    const std::string& activation_grad,
                                    MetaTensor* x_grad,
                                    MetaTensor* y_grad,
                                    MetaTensor* bias_grad);

// Shape/dtype inference for the XPU fused_multi_transformer op. Per-layer
// weights carry companion *_max quantization tensors; `gather_index` /
// `gather_axis` support beam-search style reordering of the caches.
void FusedMultiTransformerXpuInferMeta(
    const MetaTensor& x,
    const std::vector<const MetaTensor*>& ln_scale,
    const std::vector<const MetaTensor*>& ln_bias,
    const std::vector<const MetaTensor*>& qkvw,
    const std::vector<const MetaTensor*>& qkvw_max,
    const std::vector<const MetaTensor*>& qkv_bias,
    const std::vector<const MetaTensor*>& out_linear_w,
    const std::vector<const MetaTensor*>& out_linear_wmax,
    const std::vector<const MetaTensor*>& out_linear_bias,
    const std::vector<const MetaTensor*>& ffn_ln_scale,
    const std::vector<const MetaTensor*>& ffn_ln_bias,
    const std::vector<const MetaTensor*>& ffn1_weight,
    const std::vector<const MetaTensor*>& ffn1_weight_max,
    const std::vector<const MetaTensor*>& ffn1_bias,
    const std::vector<const MetaTensor*>& ffn2_weight,
    const std::vector<const MetaTensor*>& ffn2_weight_max,
    const std::vector<const MetaTensor*>& ffn2_bias,
    const std::vector<const MetaTensor*>& cache_kv,
    const std::vector<const MetaTensor*>& pre_caches,
    const MetaTensor& rotary_pos_emb,
    const MetaTensor& time_step,
    const MetaTensor& seq_lengths,
    const MetaTensor& src_mask,
    const MetaTensor& gather_index,
    const MetaTensor& max_buffer,
    bool pre_layer_norm,
    int rotary_emb_dims,
    float epsilon,
    float dropout_rate,
    bool is_test,
    const std::string& dropout_implementation,
    const std::string& act_method,
    bool trans_qkvw,
    int ring_id,
    int gather_axis,
    MetaTensor* out,
    std::vector<MetaTensor*> cache_kv_out);

// Shape/dtype inference for the int8-quantized XPU fused_multi_transformer
// op. Mirrors FusedMultiTransformerXpuInferMeta but takes per-layer input
// max tensors (*_in_max) and per-layer quantization scales (*_scales).
void FusedMultiTransformerInt8XpuInferMeta(
    const MetaTensor& x,
    const std::vector<const MetaTensor*>& ln_scale,
    const std::vector<const MetaTensor*>& ln_bias,
    const std::vector<const MetaTensor*>& qkv_in_max,
    const std::vector<const MetaTensor*>& qkvw,
    const std::vector<const MetaTensor*>& qkv_bias,
    const std::vector<const MetaTensor*>& qkv_scales,
    const std::vector<const MetaTensor*>& out_linear_in_max,
    const std::vector<const MetaTensor*>& out_linear_w,
    const std::vector<const MetaTensor*>& out_linear_bias,
    const std::vector<const MetaTensor*>& out_linear_scales,
    const std::vector<const MetaTensor*>& ffn_ln_scale,
    const std::vector<const MetaTensor*>& ffn_ln_bias,
    const std::vector<const MetaTensor*>& ffn1_in_max,
    const std::vector<const MetaTensor*>& ffn1_weight,
    const std::vector<const MetaTensor*>& ffn1_bias,
    const std::vector<const MetaTensor*>& ffn1_scales,
    const std::vector<const MetaTensor*>& ffn2_in_max,
    const std::vector<const MetaTensor*>& ffn2_weight,
    const std::vector<const MetaTensor*>& ffn2_bias,
    const std::vector<const MetaTensor*>& ffn2_scales,
    const std::vector<const MetaTensor*>& cache_kv,
    const std::vector<const MetaTensor*>& pre_caches,
    const MetaTensor& rotary_pos_emb,
    const MetaTensor& time_step,
    const MetaTensor& seq_lengths,
    const MetaTensor& src_mask,
    const MetaTensor& gather_index,
    const MetaTensor& max_buffer,
    bool pre_layer_norm,
    int rotary_emb_dims,
    float epsilon,
    float dropout_rate,
    bool is_test,
    const std::string& dropout_implementation,
    const std::string& act_method,
    bool trans_qkvw,
    int ring_id,
    int gather_axis,
    MetaTensor* out,
    std::vector<MetaTensor*> cache_kv_out);

// Shape/dtype inference for the int8-quantized fused_multi_transformer op
// (non-XPU variant). Per-layer dequant scale tensors are optional; input
// quant scales and round/bound attributes are passed as plain attributes.
void FusedMultiTransformerInt8InferMeta(
    const MetaTensor& x,
    const std::vector<const MetaTensor*>& ln_scale,
    const std::vector<const MetaTensor*>& ln_bias,
    const std::vector<const MetaTensor*>& qkv_w,
    const paddle::optional<std::vector<const MetaTensor*>>& qkv_bias,
    const paddle::optional<std::vector<const MetaTensor*>>& cache_kv,
    const MetaTensor& time_step,
    const MetaTensor& src_mask,
    const std::vector<const MetaTensor*>& out_linear_w,
    const paddle::optional<std::vector<const MetaTensor*>>& out_linear_bias,
    const std::vector<const MetaTensor*>& ffn_ln_scale,
    const std::vector<const MetaTensor*>& ffn_ln_bias,
    const std::vector<const MetaTensor*>& ffn1_weight,
    const paddle::optional<std::vector<const MetaTensor*>>& ffn1_bias,
    const std::vector<const MetaTensor*>& ffn2_weight,
    const paddle::optional<std::vector<const MetaTensor*>>& ffn2_bias,
    const paddle::optional<std::vector<const MetaTensor*>>& qkv_out_scale,
    const paddle::optional<std::vector<const MetaTensor*>>&
        out_linear_out_scale,
    const paddle::optional<std::vector<const MetaTensor*>>& ffn1_out_scale,
    const paddle::optional<std::vector<const MetaTensor*>>& ffn2_out_scale,
    bool pre_layer_norm,
    float epsilon,
    float dropout_rate,
    bool is_test,
    const std::string& dropout_implementation,
    const std::string& act_method,
    bool trans_qkvw,
    int ring_id,
    int num_head,
    int dim_head,
    int dim_ffn,
    const std::vector<float>& qkv_in_scale,
    const std::vector<float>& out_linear_in_scale,
    const std::vector<float>& ffn1_in_scale,
    const std::vector<float>& ffn2_in_scale,
    int quant_round_type,
    float quant_max_bound,
    float quant_min_bound,
    std::vector<MetaTensor*> cache_kv_out,
    MetaTensor* out);

// Shape/dtype inference for the XPU fused yolo_box op (box decoding with
// grid/stride/anchor inputs and an `offset` attribute).
void YoloBoxXPUInferMeta(const MetaTensor& x,
                         const MetaTensor& x_max,
                         const MetaTensor& grid,
                         const MetaTensor& stride,
                         const MetaTensor& anchor_grid,
                         float offset,
                         MetaTensor* out,
                         MetaTensor* out_max);

// InferMeta for the XPU fused conv2d_transpose (+ optional bias/activation,
// per `has_bias`/`with_act`/`act_type`): sets meta of `out` and `out_max`.
void Conv2dTransposeXPUInferMeta(const MetaTensor& x,
                                 const MetaTensor& x_max,
                                 const MetaTensor& filter,
                                 const MetaTensor& filter_max,
                                 const MetaTensor& bias,
                                 const std::vector<int>& strides,
                                 const std::vector<int>& paddings,
                                 const std::vector<int>& output_padding,
                                 const IntArray& output_size,
                                 const std::string& padding_algorithm,
                                 int groups,
                                 const std::vector<int>& dilations,
                                 const std::string& data_format,
                                 bool has_bias,
                                 bool with_act,
                                 const std::string& act_type,
                                 MetaTensor* out,
                                 MetaTensor* out_max);

// InferMeta for the XPU fast_where fusion op (condition selects between
// x and y); sets the meta of `out`.
void FastWhereXPUInferMeta(const MetaTensor& condition,
                           const MetaTensor& x,
                           const MetaTensor& y,
                           MetaTensor* out);

// InferMeta for the XPU fast layer_norm fusion op; normalization starts at
// `begin_norm_axis`. Sets the meta of `out`.
void FastLayernormXPUInferMeta(const MetaTensor& x,
                               const MetaTensor& scale,
                               const MetaTensor& bias,
                               int begin_norm_axis,
                               float epsilon,
                               MetaTensor* out);

// InferMeta for the XPU fused batch_norm + activation op (`act_type` is an
// integer activation id). Sets the meta of `y`.
void BNActXPUInferMeta(const MetaTensor& x,
                       const MetaTensor& mean,
                       const MetaTensor& variance,
                       const MetaTensor& scale,
                       const MetaTensor& bias,
                       float momentum,
                       float epsilon,
                       const std::string& data_layout,
                       int act_type,
                       MetaTensor* y,
                       MetaConfig config = MetaConfig());

// InferMeta for the XPU addcmul fusion op over x, y, w; sets the meta of
// `out`.
void AddCMulXPUInferMeta(const MetaTensor& x,
                         const MetaTensor& y,
                         const MetaTensor& w,
                         MetaTensor* out);

// InferMeta for the XPU fused layer_norm + activation op (`act_type` id,
// `act_param` extra activation argument). Sets the meta of `y`.
void LayerNormActXPUInferMeta(const MetaTensor& x,
                              const MetaTensor& scale,
                              const MetaTensor& bias,
                              int begin_norm_axis,
                              float epsilon,
                              int act_type,
                              float act_param,
                              MetaTensor* y);

// InferMeta for the fused scale+bias+relu+conv+batch_norm op. Sets meta for
// the conv output, running/saved BN statistics, and the equivalent
// scale/bias tensors used by downstream fusions.
void FusedScaleBiasReluConvBnInferMeta(const MetaTensor& x,
                                       const MetaTensor& w,
                                       const MetaTensor& scale,
                                       const MetaTensor& bias,
                                       const MetaTensor& bn_scale,
                                       const MetaTensor& bn_bias,
                                       const MetaTensor& input_running_mean,
                                       const MetaTensor& input_running_var,
                                       const std::vector<int>& paddings,
                                       const std::vector<int>& dilations,
                                       const std::vector<int>& strides,
                                       const std::string& padding_algorithm,
                                       int groups,
                                       const std::string& data_format,
                                       float momentum,
                                       float epsilon,
                                       bool fuse_prologue,
                                       bool exhaustive_search,
                                       int64_t accumulation_count,
                                       MetaTensor* out,
                                       MetaTensor* out_running_mean,
                                       MetaTensor* out_running_var,
                                       MetaTensor* saved_mean,
                                       MetaTensor* saved_var,
                                       MetaTensor* eq_scale,
                                       MetaTensor* eq_bias);

// InferMeta for the fused (x1*scale1+bias1) + (x2*scale2+bias2) + relu op;
// sets the meta of `out`.
void FusedScaleBiasAddReluInferMeta(const MetaTensor& x1,
                                    const MetaTensor& scale1,
                                    const MetaTensor& bias1,
                                    const MetaTensor& x2,
                                    const MetaTensor& scale2,
                                    const MetaTensor& bias2,
                                    bool fuse_prologue,
                                    bool exhaustive_search,
                                    MetaTensor* out);

// InferMeta for the fused dConv + dReLU + dBN backward op. Given the
// forward activations/statistics of up to two BN branches (`fuse_dual`,
// `fuse_shortcut`, `fuse_add` select the variant), sets meta of the weight
// gradient and the per-branch BN input/gamma/beta gradients.
void FusedDconvDreluDbnInferMeta(const MetaTensor& grad_output,
                                 const MetaTensor& weight,
                                 const MetaTensor& grad_output_add,
                                 const MetaTensor& residual_input,
                                 const MetaTensor& bn1_eqscale,
                                 const MetaTensor& bn1_eqbias,
                                 const MetaTensor& conv_input,
                                 const MetaTensor& bn1_mean,
                                 const MetaTensor& bn1_inv_std,
                                 const MetaTensor& bn1_gamma,
                                 const MetaTensor& bn1_beta,
                                 const MetaTensor& bn1_input,
                                 const MetaTensor& bn2_mean,
                                 const MetaTensor& bn2_inv_std,
                                 const MetaTensor& bn2_gamma,
                                 const MetaTensor& bn2_beta,
                                 const MetaTensor& bn2_input,
                                 const std::vector<int>& paddings,
                                 const std::vector<int>& dilations,
                                 const std::vector<int>& strides,
                                 const std::string& padding_algorithm,
                                 int groups,
                                 const std::string& data_format,
                                 bool fuse_shortcut,
                                 bool fuse_dual,
                                 bool fuse_add,
                                 bool exhaustive_search,
                                 MetaTensor* grad_weight,
                                 MetaTensor* grad_bn1_input,
                                 MetaTensor* grad_bn1_gamma,
                                 MetaTensor* grad_bn1_beta,
                                 MetaTensor* grad_bn2_input,
                                 MetaTensor* grad_bn2_gamma,
                                 MetaTensor* grad_bn2_beta);

// InferMeta for the fused squeeze-and-excitation op (per-stage activation
// ids/params and filter dims supplied as vectors). Sets the meta of `out`.
void SqueezeExcitationInferMeta(const MetaTensor& x,
                                const MetaTensor& filter,
                                const MetaTensor& filter_max,
                                const MetaTensor& bias,
                                const MetaTensor& branch,
                                const std::vector<int>& act_type,
                                const std::vector<float>& act_param,
                                const std::vector<int>& filter_dims,
                                MetaTensor* out);

// InferMeta for the fused embedding lookup + element-wise add + layer_norm
// op; `ids` and `embs` are parallel lists of lookup tables. Sets meta of
// `out`.
void FusedEmbeddingEltWiseLayerNormInferMeta(
    const std::vector<const MetaTensor*>& ids,
    const std::vector<const MetaTensor*>& embs,
    const MetaTensor& bias,
    const MetaTensor& scale,
    const float epsilon,
    MetaTensor* out);

// InferMeta for the fused transpose + flatten + concat op: each input is
// transposed by `trans_axis`, flattened at `flatten_axis`, then all are
// concatenated along `concat_axis` into `out`.
void FusionTransposeFlattenConcatInferMeta(
    const std::vector<const MetaTensor*>& x,
    const std::vector<int>& trans_axis,
    const int flatten_axis,
    const int concat_axis,
    MetaTensor* out);

// InferMeta for the fused FC + elementwise_add + layer_norm op. Sets the
// meta of `out` plus the layer-norm `mean`/`variance` side outputs.
void FusedFCElementwiseLayerNormInferMeta(const MetaTensor& x,
                                          const MetaTensor& w,
                                          const MetaTensor& y,
                                          const MetaTensor& bias0,
                                          const MetaTensor& scale,
                                          const MetaTensor& bias1,
                                          const int x_num_col_dims,
                                          const std::string& activation_type,
                                          const float epsilon,
                                          const int begin_norm_axis,
                                          MetaTensor* out,
                                          MetaTensor* mean,
                                          MetaTensor* variance,
                                          MetaConfig config = MetaConfig());

// InferMeta for the fused conv2d + residual add + activation op. When
// `split_channels` is non-empty the result is additionally split into the
// `outputs` list; otherwise only `output` is set.
void FusedConv2dAddActInferMeta(const MetaTensor& input,
                                const MetaTensor& filter,
                                const MetaTensor& bias,
                                const MetaTensor& residual_data,
                                const std::vector<int>& strides,
                                const std::vector<int>& paddings,
                                const std::string& padding_algorithm,
                                const std::vector<int>& dilations,
                                int groups,
                                const std::string& data_format,
                                const std::string& activation,
                                const std::vector<int>& split_channels,
                                MetaTensor* output,
                                std::vector<MetaTensor*> outputs,
                                MetaConfig config);
// InferMeta for the fused repeated (FC + relu) chain; `w`/`bias` are
// per-layer parameter lists, `relu_out` receives the intermediate metas.
void FusionRepeatedFCReluInferMeta(const MetaTensor& x,
                                   const std::vector<const MetaTensor*>& w,
                                   const std::vector<const MetaTensor*>& bias,
                                   std::vector<MetaTensor*> relu_out,
                                   MetaTensor* out);

// InferMeta for the fused squared-matmul-subtract op; sets meta for the
// intermediate squared_x/squared_y/squared_xy tensors and the final `out`.
void FusionSquaredMatSubInferMeta(const MetaTensor& x,
                                  const MetaTensor& y,
                                  const float scalar,
                                  MetaTensor* squared_x,
                                  MetaTensor* squared_y,
                                  MetaTensor* squared_xy,
                                  MetaTensor* out);

// InferMeta for the fused GRU op. Sets meta for the reordered initial
// hidden state, the x*Wx projection (`xx`), the batched intermediates and
// the final `hidden` output.
void FusionGRUInferMeta(const MetaTensor& x,
                        const MetaTensor& h0,
                        const MetaTensor& weight_x,
                        const MetaTensor& weight_h,
                        const MetaTensor& bias,
                        const std::string& activation,
                        const std::string& gate_activation,
                        const bool is_reverse,
                        const bool use_seq,
                        const bool origin_mode,
                        const bool force_fp32_output,
                        MetaTensor* reordered_h0,
                        MetaTensor* xx,
                        MetaTensor* batched_input,
                        MetaTensor* batched_out,
                        MetaTensor* hidden);

// InferMeta for the fused sequence_conv + elementwise_add + relu op; sets
// meta of `out` and the im2col buffer `col_mat`.
void FusionSeqConvEltAddReluInferMeta(const MetaTensor& x,
                                      const MetaTensor& filter,
                                      const MetaTensor& bias,
                                      const int context_length,
                                      const int context_start,
                                      const int context_stride,
                                      MetaTensor* out,
                                      MetaTensor* col_mat);

// InferMeta for the fused sequence_expand + concat + FC op; sets meta of
// the final `out` and the intermediate `fc_out`.
void FusionSeqExpandConcatFCInferMeta(const std::vector<const MetaTensor*>& x,
                                      const MetaTensor& fc_weight,
                                      const MetaTensor& fc_bias,
                                      const std::string& fc_activation,
                                      MetaTensor* out,
                                      MetaTensor* fc_out);

// InferMeta for the fused bias + dropout + residual-add + layer_norm op.
// Sets meta for the normalized output `y`, the pre-norm sum, the dropout
// mask and the layer-norm mean/variance.
void FusedBiasDropoutResidualLnInferMeta(
    const MetaTensor& x,
    const MetaTensor& residual,
    const MetaTensor& bias,
    const MetaTensor& ln_scale,
    const MetaTensor& ln_bias,
    const float dropout_rate,
    const bool is_test,
    const bool dropout_fix_seed,
    const int dropout_seed,
    const std::string& dropout_implementation,
    const float ln_epsilon,
    MetaTensor* y,
    MetaTensor* bias_dropout_residual_out,
    MetaTensor* dropout_mask_out,
    MetaTensor* ln_mean,
    MetaTensor* ln_variance);

// Backward InferMeta for FusedBiasDropoutResidualLn: from the forward
// intermediates and `y_grad`, sets meta of the x/residual/bias and
// layer-norm scale/bias gradients.
void FusedBiasDropoutResidualLnGradInferMeta(
    const MetaTensor& x,
    const MetaTensor& residual,
    const MetaTensor& bias,
    const MetaTensor& ln_scale,
    const MetaTensor& ln_bias,
    const MetaTensor& ln_mean,
    const MetaTensor& ln_variance,
    const MetaTensor& bias_dropout_residual_out,
    const MetaTensor& dropout_mask_out,
    const MetaTensor& y_grad,
    const float dropout_rate,
    const bool is_test,
    const bool dropout_fix_seed,
    const int dropout_seed,
    const std::string& dropout_implementation,
    const float ln_epsilon,
    MetaTensor* x_grad,
    MetaTensor* residual_grad,
    MetaTensor* bias_grad,
    MetaTensor* ln_scale_grad,
    MetaTensor* ln_bias_grad);

// InferMeta for the fused dot-product attention forward op; sets meta of
// the attention output, softmax side output and RNG state.
void FusedDotProductAttentionInferMeta(const MetaTensor& q,
                                       const MetaTensor& k,
                                       const MetaTensor& v,
                                       const MetaTensor& bias,
                                       MetaTensor* out,
                                       MetaTensor* softmax_out,
                                       MetaTensor* rng_state);

// Backward InferMeta for fused dot-product attention; sets meta of the
// q/k/v/bias gradients from the forward inputs.
void FusedDotProductAttentionGradInferMeta(const MetaTensor& q,
                                           const MetaTensor& k,
                                           const MetaTensor& v,
                                           const MetaTensor& bias,
                                           MetaTensor* q_grad,
                                           MetaTensor* k_grad,
                                           MetaTensor* v_grad,
                                           MetaTensor* bias_grad);

// InferMeta for the fused skip-connection + layer_norm op (x + y then
// normalize from `begin_norm_axis`); sets the meta of `out`.
void SkipLayerNormInferMeta(const MetaTensor& x,
                            const MetaTensor& y,
                            const MetaTensor& scale,
                            const MetaTensor& bias,
                            const float epsilon,
                            const int begin_norm_axis,
                            MetaTensor* out);

// InferMeta for the fused self dot-product attention op with `head_number`
// heads and scaling factor `alpha`; sets the meta of `out`.
void SelfDPAttenInferMeta(const MetaTensor& x,
                          const float alpha,
                          const int head_number,
                          MetaTensor* out);

// InferMeta for the fused FC (matmul + bias + optional activation) op;
// `in_num_col_dims` flattens the input before the matmul. Sets meta of
// `out`.
void FCInferMeta(const MetaTensor& input,
                 const MetaTensor& w,
                 const MetaTensor& bias,
                 const int in_num_col_dims,
                 const std::string& activation_type,
                 const bool padding_weights,
                 MetaTensor* out);

// oneDNN variant of FCInferMeta; additionally honors a fused reshape2
// output shape (`fused_reshape2_shape`). Sets meta of `out`.
void FCOneDNNInferMeta(const MetaTensor& input,
                       const MetaTensor& w,
                       const MetaTensor& bias,
                       const int in_num_col_dims,
                       const std::string& activation_type,
                       const bool padding_weights,
                       const std::vector<int>& fused_reshape2_shape,
                       MetaTensor* out);

// InferMeta for variable-length memory-efficient attention (per-sequence
// lengths in `seq_lens`/`kv_seq_lens`, optional causal masking and
// pre-cache). Sets the meta of `out`.
void VariableLengthMemoryEfficientAttentionInferMeta(
    const MetaTensor& query,
    const MetaTensor& key,
    const MetaTensor& value,
    const MetaTensor& seq_lens,
    const MetaTensor& kv_seq_lens,
    const MetaTensor& mask,
    float scale,
    bool causal,
    int pre_cache_length,
    MetaTensor* out);

// InferMeta for the XPU fused QKV attention op; the *_max inputs carry
// quantization maxima and `out_dtype` fixes the output dtype. Sets meta of
// `qkv`.
void QKVAttentionXPUInferMeta(const MetaTensor& q,
                              const MetaTensor& k,
                              const MetaTensor& v,
                              const MetaTensor& q_max,
                              const MetaTensor& k_max,
                              const MetaTensor& v_max,
                              const MetaTensor& qk_max,
                              const MetaTensor& qkv_max,
                              float alpha,
                              int head_num,
                              int head_dim,
                              bool qkv_fc_fusion,
                              DataType out_dtype,
                              MetaTensor* qkv);
// InferMeta for the XPU fused sine positional-encoding op; sets the meta
// of `out` from x and y.
void SinePosXPUInferMeta(const MetaTensor& x,
                         const MetaTensor& y,
                         MetaTensor* out);
// InferMeta for the XPU pad2d fusion op (`mode` selects the padding kind,
// `pad_value` fills constant padding). Sets the meta of `out`.
void Pad2dXPUInferMeta(const MetaTensor& x,
                       const std::vector<int>& paddings,
                       const std::string& mode,
                       float pad_value,
                       const std::string& data_format,
                       MetaTensor* out);
// InferMeta for the XPU RoFormer relative-position (rotary embedding) op
// using precomputed sin/cos tables; sets the meta of `out`.
void RoformerRelativePosXPUInferMeta(const MetaTensor& x,
                                     const MetaTensor& sin_emb,
                                     const MetaTensor& cos_emb,
                                     int max_pos_len,
                                     MetaTensor* out);
// InferMeta for the XPU fused cross attention op (separate query and
// key/value inputs, per-projection FC weight/bias lists). Sets meta of
// `qkv` and its quantization max `qkv_max`.
void CrossAttentionXPUInferMeta(
    const MetaTensor& input_q,
    const MetaTensor& input_kv,
    const std::vector<const MetaTensor*>& fc_weight,
    const std::vector<const MetaTensor*>& fc_weight_max,
    const std::vector<const MetaTensor*>& fc_bias,
    const MetaTensor& mask,
    int head_num,
    int head_dim,
    float alpha,
    DataType out_dtype,
    MetaTensor* qkv,
    MetaTensor* qkv_max);

// InferMeta for the fused multi-layer GRU op (`layers` stacked GRUs with
// per-layer weight lists, optional int8 scale weights). Sets meta of
// `hidden`.
void MultiGruInferMeta(
    const MetaTensor& x,
    const std::vector<const MetaTensor*>& weight_x,
    const std::vector<const MetaTensor*>& weight_h,
    const paddle::optional<std::vector<const MetaTensor*>>& bias,
    const paddle::optional<std::vector<const MetaTensor*>>& scale_weights,
    const std::string& activation,
    const std::string& gate_activation,
    int layers,
    bool origin_mode,
    const std::string& mkldnn_data_type,
    float scale_data,
    float shift_data,
    bool force_fp32_output,
    MetaTensor* hidden);

// InferMeta for the XPU mask_adaptive op: from a padding mask, sets meta
// of the per-sample lengths, the sequence LoD and the padded length.
void MaskAdaptiveXPUInferMeta(const MetaTensor& mask,
                              MetaTensor* length,
                              MetaTensor* seq_lod,
                              MetaTensor* pad_seq_len);

// InferMeta for the XPU sequence_unpad op (removes padding using the
// per-sequence `length` tensor); sets the meta of `out`.
void SequenceUnpadXPUInferMeta(const MetaTensor& x,
                               const MetaTensor& length,
                               MetaTensor* out);

// InferMeta for the fused LSTM op. Sets meta for `hidden`/`cell` plus the
// projection (`xx`), batched intermediates, reordered initial states and
// the peephole check buffer.
void FusionLstmInferMeta(const MetaTensor& x,
                         const MetaTensor& weight_x,
                         const MetaTensor& weight_h,
                         const MetaTensor& bias,
                         const MetaTensor& h0,
                         const MetaTensor& c0,
                         const bool use_peepholes,
                         const bool is_reverse,
                         const bool use_seq,
                         const std::string& gate_activation,
                         const std::string& cell_activation,
                         const std::string& candidate_activation,
                         const float scale_data,
                         const float shift_data,
                         const std::vector<float>& scale_weights,
                         const bool force_fp32_output,
                         MetaTensor* hidden,
                         MetaTensor* cell,
                         MetaTensor* xx,
                         MetaTensor* batched_input,
                         MetaTensor* batched_hidden,
                         MetaTensor* batched_cell,
                         MetaTensor* reordered_h0,
                         MetaTensor* reordered_c0,
                         MetaTensor* checked_cell);

// InferMeta for the fused sequence_pool + CVM + concat op (pooling kind in
// `pooltype`, concat along `axis`); sets the meta of `out`.
void FusionSeqpoolCvmConcatInferMeta(const std::vector<const MetaTensor*>& x,
                                     const MetaTensor& cvm,
                                     const std::string& pooltype,
                                     bool use_cvm,
                                     int axis,
                                     MetaTensor* out,
                                     MetaConfig config = MetaConfig());

// InferMeta for the fused token-prune op: drops tokens of `x` based on
// attention scores and masks. Sets meta of the pruned `slimmed_x` and the
// kept-token indices `cls_inds`.
void FusedTokenPruneInferMeta(const MetaTensor& attn,
                              const MetaTensor& x,
                              const MetaTensor& mask,
                              const MetaTensor& new_mask,
                              bool keep_first_token,
                              bool keep_order,
                              MetaTensor* slimmed_x,
                              MetaTensor* cls_inds);

// InferMeta for the fused elementwise + activation op; `functor_list`
// names the composed functors. Sets meta of `out` and, when
// `save_intermediate_out`, of `intermediate_out`.
void FusedElemwiseActivationInferMeta(
    const MetaTensor& x,
    const MetaTensor& y,
    const std::vector<std::string>& functor_list,
    int axis,
    float scale,
    bool save_intermediate_out,
    MetaTensor* out,
    MetaTensor* intermediate_out,
    MetaConfig config = MetaConfig());

// Backward InferMeta for FusedElemwiseActivation; sets meta of the x and y
// gradients from the forward tensors and `out_grad`.
void FusedElemwiseActivationGradInferMeta(
    const MetaTensor& x,
    const MetaTensor& y,
    const MetaTensor& out,
    const MetaTensor& intermediate_out,
    const MetaTensor& out_grad,
    const std::vector<std::string>& functor_list,
    int axis,
    float scale,
    bool save_intermediate_out,
    MetaTensor* x_grad,
    MetaTensor* y_grad,
    MetaConfig config = MetaConfig());

// InferMeta for the fused FP8 GEMM with half-precision output (+ optional
// bias/activation); `output_dtype` names the half type. Sets meta of
// `out`.
void FP8OutHalfGemmFusedInferMeta(
    const MetaTensor& x,
    const MetaTensor& y,
    const MetaTensor& bias,
    const bool trans_x,
    const bool trans_y,
    const float scale,  // only support per-tensor quantization
    const std::string& output_dtype,
    const std::string& activation_type,
    MetaTensor* out);

// InferMeta for the fused embedding-lookup + FC + LSTM op. Sets meta for
// `hidden`/`cell`, the embedded projection `x_x`, batched intermediates
// and the reordered initial states.
void FusedEmbeddingFcLstmInferMeta(const MetaTensor& ids,
                                   const MetaTensor& embeddings,
                                   const MetaTensor& weight_h,
                                   const MetaTensor& bias,
                                   const MetaTensor& h0,
                                   const MetaTensor& c0,
                                   bool use_peepholes,
                                   bool is_reverse,
                                   bool use_seq,
                                   const std::string& gate_activation,
                                   const std::string& cell_activation,
                                   const std::string& candidate_activation,
                                   MetaTensor* hidden,
                                   MetaTensor* cell,
                                   MetaTensor* x_x,
                                   MetaTensor* batched_input,
                                   MetaTensor* batched_hidden,
                                   MetaTensor* batched_cell,
                                   MetaTensor* reordered_h0,
                                   MetaTensor* reordered_c0);

// InferMeta for the fused sequence_pool + CVM op over a list of inputs;
// sets the meta of each tensor in `out` (parallel to `x`).
void FusedSeqpoolCvmInferMeta(const std::vector<const MetaTensor*>& x,
                              const MetaTensor& cvm,
                              const std::string& pooltype,
                              float pad_value,
                              bool use_cvm,
                              int cvm_offset,
                              std::vector<MetaTensor*> out,
                              MetaConfig config = MetaConfig());

// Backward InferMeta for FusedSeqpoolCvm; sets meta of each `x_grad`
// (parallel to `x`) and of `cvm_grad`.
void FusedSeqpoolCvmGradInferMeta(
    const std::vector<const MetaTensor*>& x,
    const MetaTensor& cvm,
    const std::vector<const MetaTensor*>& out_grad,
    const std::string& pooltype,
    float pad_value,
    bool use_cvm,
    int cvm_offset,
    std::vector<MetaTensor*> x_grad,
    MetaTensor* cvm_grad,
    MetaConfig config = MetaConfig());

// InferMeta for the fused sequence_pool + concat op (pool each input with
// `pooltype`, concat along `axis`); sets the meta of `out`.
void FusionSeqpoolConcatInferMeta(const std::vector<const MetaTensor*>& x,
                                  const std::string& pooltype,
                                  int axis,
                                  MetaTensor* out,
                                  MetaConfig config = MetaConfig());

// InferMeta for the fused ResNet unit (conv + BN on the main x branch and,
// per `has_shortcut`, on the z branch, plus residual add + activation).
// Sets meta of the output, the per-branch conv results and BN statistics,
// and the relu bitmask.
void ResnetUnitInferMeta(const MetaTensor& x,
                         const MetaTensor& filter_x,
                         const MetaTensor& scale_x,
                         const MetaTensor& bias_x,
                         const MetaTensor& mean_x,
                         const MetaTensor& var_x,
                         const MetaTensor& z,
                         const MetaTensor& filter_z,
                         const MetaTensor& scale_z,
                         const MetaTensor& bias_z,
                         const MetaTensor& mean_z,
                         const MetaTensor& var_z,
                         int stride,
                         int stride_z,
                         int padding,
                         int dilation,
                         int group,
                         float momentum,
                         float epsilon,
                         const std::string& data_format,
                         bool fuse_add,
                         bool has_shortcut,
                         bool use_global_stats,
                         bool is_test,
                         bool use_addto,
                         const std::string& act_type,
                         MetaTensor* out,
                         MetaTensor* bit_mask,
                         MetaTensor* conv_x,
                         MetaTensor* saved_mean_x,
                         MetaTensor* saved_invstd_x,
                         MetaTensor* running_mean_x,
                         MetaTensor* running_var_x,
                         MetaTensor* conv_z,
                         MetaTensor* saved_mean_z,
                         MetaTensor* saved_invstd_z,
                         MetaTensor* running_mean_z,
                         MetaTensor* running_var_z);

// Backward InferMeta for the fused ResNet unit: from the forward tensors,
// saved BN statistics, bitmask and `out_grad`, sets meta of the input,
// filter and BN scale/bias gradients for both branches.
void ResnetUnitGradInferMeta(const MetaTensor& x,
                             const MetaTensor& filter_x,
                             const MetaTensor& conv_x,
                             const MetaTensor& scale_x,
                             const MetaTensor& bias_x,
                             const MetaTensor& saved_mean_x,
                             const MetaTensor& saved_invstd_x,
                             const MetaTensor& z,
                             const MetaTensor& filter_z,
                             const MetaTensor& conv_z,
                             const MetaTensor& scale_z,
                             const MetaTensor& bias_z,
                             const MetaTensor& saved_mean_z,
                             const MetaTensor& saved_invstd_z,
                             const MetaTensor& out,
                             const MetaTensor& bit_mask,
                             const MetaTensor& out_grad,
                             int stride,
                             int stride_z,
                             int padding,
                             int dilation,
                             int group,
                             float momentum,
                             float epsilon,
                             const std::string& data_format,
                             bool fuse_add,
                             bool has_shortcut,
                             bool use_global_stats,
                             bool is_test,
                             bool use_addto,
                             const std::string& act_type,
                             MetaTensor* x_grad,
                             MetaTensor* filter_x_grad,
                             MetaTensor* scale_x_grad,
                             MetaTensor* bias_x_grad,
                             MetaTensor* z_grad,
                             MetaTensor* filter_z_grad,
                             MetaTensor* scale_z_grad,
                             MetaTensor* bias_z_grad);

// InferMeta for the fused gate-attention forward op (`merge_qkv` selects
// merged vs separate q/k/v projections, `has_gating` enables the gate
// branch, `use_flash_attn` switches the attention backend). Sets meta of
// the transpose/softmax intermediates, the fmha/gate outputs and `out`.
void FusedGateAttentionInferMeta(const MetaTensor& query,
                                 const MetaTensor& key,
                                 const MetaTensor& query_weight,
                                 const MetaTensor& key_weight,
                                 const MetaTensor& value_weight,
                                 const MetaTensor& qkv_weight,
                                 const MetaTensor& nonbatched_bias,
                                 const MetaTensor& src_mask,
                                 const MetaTensor& gate_weight,
                                 const MetaTensor& gate_bias,
                                 const MetaTensor& out_linear_weight,
                                 const MetaTensor& out_linear_bias,
                                 bool has_gating,
                                 bool merge_qkv,
                                 bool use_flash_attn,
                                 MetaTensor* query_transpose_out,
                                 MetaTensor* key_transpose_out,
                                 MetaTensor* value_transpose_out,
                                 MetaTensor* qkv_transpose_out,
                                 MetaTensor* softmax_out,
                                 MetaTensor* softmax_lse,
                                 MetaTensor* fmha_out,
                                 MetaTensor* gate_out,
                                 MetaTensor* out,
                                 MetaConfig config = MetaConfig());

// Backward InferMeta for fused gate-attention: from the forward inputs,
// saved intermediates and `out_grad`, sets meta of the query/key and all
// projection/gate/output-linear weight and bias gradients.
void FusedGateAttentionGradInferMeta(const MetaTensor& query,
                                     const MetaTensor& key,
                                     const MetaTensor& query_weight,
                                     const MetaTensor& key_weight,
                                     const MetaTensor& value_weight,
                                     const MetaTensor& qkv_weight,
                                     const MetaTensor& nonbatched_bias,
                                     const MetaTensor& src_mask,
                                     const MetaTensor& gate_weight,
                                     const MetaTensor& gate_bias,
                                     const MetaTensor& out_linear_weight,
                                     const MetaTensor& out_linear_bias,
                                     const MetaTensor& query_transpose_out,
                                     const MetaTensor& key_transpose_out,
                                     const MetaTensor& value_transpose_out,
                                     const MetaTensor& qkv_transpose_out,
                                     const MetaTensor& softmax_out,
                                     const MetaTensor& softmax_lse,
                                     const MetaTensor& fmha_out,
                                     const MetaTensor& gate_out,
                                     const MetaTensor& out_grad,
                                     bool has_gating,
                                     bool merge_qkv,
                                     bool use_flash_attn,
                                     MetaTensor* query_grad,
                                     MetaTensor* key_grad,
                                     MetaTensor* query_weight_grad,
                                     MetaTensor* key_weight_grad,
                                     MetaTensor* value_weight_grad,
                                     MetaTensor* qkv_weight_grad,
                                     MetaTensor* nonbatched_bias_grad,
                                     MetaTensor* gate_weight_grad,
                                     MetaTensor* gate_bias_grad,
                                     MetaTensor* out_linear_weight_grad,
                                     MetaTensor* out_linear_bias_grad,
                                     MetaConfig config = MetaConfig());

// Shape/dtype inference for the fused ResNet basic-block forward operator.
// The parameter layout (filterN/scaleN/biasN/meanN/varN for N in 1..3 plus
// per-stage stride/padding/dilation) indicates three fused conv + batch-norm
// stages; stage 3 presumably forms the shortcut branch when `has_shortcut`
// is true — confirm against the kernel implementation.
//
// Inputs:
//   x                       - input activation tensor.
//   filterN                 - convolution filter for stage N.
//   scaleN, biasN           - batch-norm scale/bias for stage N.
//   meanN, varN             - batch-norm running mean/variance for stage N.
// Attributes:
//   strideN/paddingN/dilationN - per-stage convolution hyperparameters.
//   group                   - convolution group count.
//   momentum, epsilon       - batch-norm running-stat momentum and
//                             numerical-stability epsilon.
//   data_format             - tensor layout string (e.g. NCHW/NHWC —
//                             TODO confirm accepted values).
//   has_shortcut            - whether the residual shortcut path is present.
//   use_global_stats, is_test, trainable_statistics
//                           - standard batch-norm mode flags.
//   act_type                - activation name applied in the fused block.
//   find_conv_input_max     - when true, the max_* outputs below are
//                             populated (likely for quantization scaling —
//                             verify with the kernel).
// Outputs (meta only — this function sets dims/dtypes, not values):
//   out                     - block output.
//   convN, conv2_input      - intermediate convolution results/inputs.
//   saved_meanN/saved_invstdN, meanN_out/varN_out
//                           - per-stage batch-norm saved and updated stats.
//   max_inputN/max_filterN  - per-stage input/filter max values (see
//                             find_conv_input_max).
void ResnetBasicBlockInferMeta(const MetaTensor& x,
                               const MetaTensor& filter1,
                               const MetaTensor& scale1,
                               const MetaTensor& bias1,
                               const MetaTensor& mean1,
                               const MetaTensor& var1,
                               const MetaTensor& filter2,
                               const MetaTensor& scale2,
                               const MetaTensor& bias2,
                               const MetaTensor& mean2,
                               const MetaTensor& var2,
                               const MetaTensor& filter3,
                               const MetaTensor& scale3,
                               const MetaTensor& bias3,
                               const MetaTensor& mean3,
                               const MetaTensor& var3,
                               int stride1,
                               int stride2,
                               int stride3,
                               int padding1,
                               int padding2,
                               int padding3,
                               int dilation1,
                               int dilation2,
                               int dilation3,
                               int group,
                               float momentum,
                               float epsilon,
                               const std::string& data_format,
                               bool has_shortcut,
                               bool use_global_stats,
                               bool is_test,
                               bool trainable_statistics,
                               const std::string& act_type,
                               bool find_conv_input_max,
                               MetaTensor* out,
                               MetaTensor* conv1,
                               MetaTensor* saved_mean1,
                               MetaTensor* saved_invstd1,
                               MetaTensor* mean1_out,
                               MetaTensor* var1_out,
                               MetaTensor* conv2,
                               MetaTensor* conv2_input,
                               MetaTensor* saved_mean2,
                               MetaTensor* saved_invstd2,
                               MetaTensor* mean2_out,
                               MetaTensor* var2_out,
                               MetaTensor* conv3,
                               MetaTensor* saved_mean3,
                               MetaTensor* saved_invstd3,
                               MetaTensor* mean3_out,
                               MetaTensor* var3_out,
                               MetaTensor* max_input1,
                               MetaTensor* max_filter1,
                               MetaTensor* max_input2,
                               MetaTensor* max_filter2,
                               MetaTensor* max_input3,
                               MetaTensor* max_filter3,
                               MetaConfig config = MetaConfig());

// Shape/dtype inference for the backward pass of the fused ResNet basic
// block. Mirrors ResnetBasicBlockInferMeta: it consumes the forward inputs,
// the saved forward intermediates (convN, saved_meanN/saved_invstdN,
// max_inputN/max_filterN), the forward output `out`, and the incoming
// gradient `out_grad`, and declares the meta for the gradients of the input
// and of every learnable parameter (filters, BN scales/biases).
//
// The attribute list (strideN/paddingN/dilationN, group, momentum, epsilon,
// data_format, mode flags, act_type, find_conv_input_max) must match the
// values used in the forward InferMeta so gradient shapes agree with the
// corresponding forward tensors.
//
// Outputs (meta only):
//   x_grad                       - gradient w.r.t. the block input.
//   filterN_grad                 - gradient w.r.t. stage-N conv filter.
//   scaleN_grad / biasN_grad     - gradients w.r.t. stage-N batch-norm
//                                  scale and bias.
void ResnetBasicBlockGradInferMeta(const MetaTensor& x,
                                   const MetaTensor& filter1,
                                   const MetaTensor& conv1,
                                   const MetaTensor& scale1,
                                   const MetaTensor& bias1,
                                   const MetaTensor& saved_mean1,
                                   const MetaTensor& saved_invstd1,
                                   const MetaTensor& filter2,
                                   const MetaTensor& conv2,
                                   const MetaTensor& conv2_input,
                                   const MetaTensor& scale2,
                                   const MetaTensor& bias2,
                                   const MetaTensor& saved_mean2,
                                   const MetaTensor& saved_invstd2,
                                   const MetaTensor& filter3,
                                   const MetaTensor& conv3,
                                   const MetaTensor& scale3,
                                   const MetaTensor& bias3,
                                   const MetaTensor& saved_mean3,
                                   const MetaTensor& saved_invstd3,
                                   const MetaTensor& max_input1,
                                   const MetaTensor& max_filter1,
                                   const MetaTensor& max_input2,
                                   const MetaTensor& max_filter2,
                                   const MetaTensor& max_input3,
                                   const MetaTensor& max_filter3,
                                   const MetaTensor& out,
                                   const MetaTensor& out_grad,
                                   int stride1,
                                   int stride2,
                                   int stride3,
                                   int padding1,
                                   int padding2,
                                   int padding3,
                                   int dilation1,
                                   int dilation2,
                                   int dilation3,
                                   int group,
                                   float momentum,
                                   float epsilon,
                                   const std::string& data_format,
                                   bool has_shortcut,
                                   bool use_global_stats,
                                   bool is_test,
                                   bool trainable_statistics,
                                   const std::string& act_type,
                                   bool find_conv_input_max,
                                   MetaTensor* x_grad,
                                   MetaTensor* filter1_grad,
                                   MetaTensor* scale1_grad,
                                   MetaTensor* bias1_grad,
                                   MetaTensor* filter2_grad,
                                   MetaTensor* scale2_grad,
                                   MetaTensor* bias2_grad,
                                   MetaTensor* filter3_grad,
                                   MetaTensor* scale3_grad,
                                   MetaTensor* bias3_grad,
                                   MetaConfig config = MetaConfig());

}  // namespace phi
