// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved

#pragma once

#include <optional>

#include <torch/extension.h>

#include "gating_attention.h"

namespace alphafold {
/// Registers the "alphafold" submodule on @p m and binds:
///  - GatingAttentionWeight: weight container constructed from seven tensors
///    (query_w, key_w, value_w, gate_w, gate_b, output_w, output_b).
///  - gating_attention(q_data, m_data, bias, nonbatched_bias, weights,
///    block_size=None): forwards to the C++ gating_attention implementation;
///    block_size defaults to std::nullopt (Python None) when omitted.
inline void bind(pybind11::module &m)
{
    auto submodule = m.def_submodule("alphafold");
    // Expose the weight bundle; tensors are taken by lvalue reference, so the
    // keyword-argument names below form the Python-visible constructor API.
    py::class_<GatingAttentionWeight>(submodule, "GatingAttentionWeight")
        .def(py::init<at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
                 at::Tensor &>(),
            py::arg("query_w"), py::arg("key_w"), py::arg("value_w"), py::arg("gate_w"), py::arg("gate_b"),
            py::arg("output_w"), py::arg("output_b"));
    // block_size is optional on the Python side; std::nullopt maps to None.
    submodule.def("gating_attention", &gating_attention, py::arg("q_data"), py::arg("m_data"), py::arg("bias"),
        py::arg("nonbatched_bias"), py::arg("weights"), py::arg("block_size") = std::nullopt);
}
}