#ifndef NPU_TRITON_FUSED_GDN_GATING_H
#define NPU_TRITON_FUSED_GDN_GATING_H

#include <torch/torch.h>

namespace TritonTorch {

/**
 * @brief Launch the fused_gdn_gating kernel on NPU device.
 *
 * This kernel fuses the gated-delta-net gating computation into one pass:
 *   g           = -exp(A_log) * softplus(a + dt_bias)
 *   beta_output = sigmoid(b)
 *
 * @param A_log     Log of A parameter, shape: (num_heads), dtype: float32
 * @param a         Input tensor a, shape: (batch, num_heads), dtype: float16
 * @param b         Input tensor b, shape: (batch, num_heads), dtype: float16
 * @param dt_bias   Bias tensor, shape: (num_heads), dtype: float32
 * @param beta      Softplus beta parameter (default: 1.0)
 * @param threshold Softplus threshold: inputs above it use the linear
 *                  approximation softplus(x) ~= x (default: 20.0)
 * @return std::pair<torch::Tensor, torch::Tensor> Pair of (g, beta_output):
 *         g: shape (1, batch, num_heads), dtype: float32
 *         beta_output: shape (1, batch, num_heads), dtype: same as b
 *
 * @note The returned tensors are newly allocated outputs; discarding them
 *       makes the call useless, hence [[nodiscard]].
 * @note Default arguments are specified here only — the out-of-line
 *       definition must not repeat them.
 */
[[nodiscard]] std::pair<torch::Tensor, torch::Tensor> launch_fused_gdn_gating_kernel(
    const torch::Tensor& A_log,
    const torch::Tensor& a,
    const torch::Tensor& b,
    const torch::Tensor& dt_bias,
    float beta = 1.0f,
    float threshold = 20.0f
);

} // namespace TritonTorch

#endif // NPU_TRITON_FUSED_GDN_GATING_H

