// my_layer.cpp
#include <cmath>

#include <torch/extension.h>
#include <torch/library.h>

torch::Tensor my_gemv_cuda(const torch::Tensor &A, const torch::Tensor &x);

// Custom autograd node for the gemv kernel: y = A @ x.
// Assumes A is (M, K) and x is (K,) — TODO confirm against the kernel.
class MyGemvAutograd : public torch::autograd::Function<MyGemvAutograd>
{
public:
    // Forward: stash both inputs for the backward pass, then run the kernel.
    static torch::Tensor forward(torch::autograd::AutogradContext *ctx, torch::Tensor A, torch::Tensor x)
    {
        ctx->save_for_backward({A, x});
        return my_gemv_cuda(A, x);  // or a CPU branch
    }

    // Backward: given grad_y (M,), produce gradients for both saved inputs.
    static torch::autograd::tensor_list backward(torch::autograd::AutogradContext *ctx, torch::autograd::tensor_list grad_outputs)
    {
        const auto saved = ctx->get_saved_variables();
        const auto &A = saved[0];
        const auto &x = saved[1];
        const auto &grad_y = grad_outputs[0];  // (M,)

        // dL/dA is the outer product grad_y ⊗ x, shape (M, K).
        auto grad_A = grad_y.unsqueeze(1) * x.unsqueeze(0);
        // dL/dx = A^T @ grad_y, shape (K,).
        auto grad_x = torch::matmul(A.t(), grad_y);
        return {grad_A, grad_x};
    }
};

// Functional interface exposed to Python: y = A @ x, routed through the
// autograd-aware custom kernel wrapper.
torch::Tensor my_gemv(const torch::Tensor &A, const torch::Tensor &x)
{
    return MyGemvAutograd::apply(A, x);
}

/* ========== nn.Module 封装 ========== */
struct MyLinearImpl : public torch::nn::Module
{
    MyLinearImpl(int64_t in_features, int64_t out_features)
        : weight(register_parameter("weight", torch::zeros({out_features, in_features}))),
          bias(register_parameter("bias", torch::zeros({out_features})))
    {
        torch::nn::init::kaiming_uniform_(weight, std::sqrt(5));
    }

    torch::Tensor forward(const torch::Tensor &x)
    {
        // y = x @ weight.t() + bias
        return my_gemv(weight, x) + bias;  // 复用刚才写的 kernel
    }

    torch::Tensor weight, bias;
};

// TORCH_MODULE_IMPL(MyLinear, MyLinearImpl);
// Generates the `MyLinear` holder type (a shared_ptr-like wrapper around
// MyLinearImpl), following the torch::nn naming convention where the user-facing
// module is `MyLinear` and the implementation is `MyLinearImpl`.
TORCH_MODULE(MyLinear);

/* ========== pybind11 绑定 ========== */
#if 1
PYBIND11_MODULE(my_layer, m)
{
    // Free-function entry point (autograd-aware).
    m.def("my_gemv", &my_gemv, "custom gemv");

    // Bind the implementation class directly: TORCH_MODULE does not generate a
    // `MyLinear::Impl` member type, so referring to it fails to compile.
    py::class_<MyLinearImpl, std::shared_ptr<MyLinearImpl>>(m, "MyLinear")
        .def(py::init<int64_t, int64_t>())
        // .def("cuda", &MyLinearImpl::to)
        .def("forward", &MyLinearImpl::forward)
        // pybind11 does not forward C++ default arguments; declare `recurse`
        // here so Python can call parameters() with no arguments.
        .def("parameters", &MyLinearImpl::parameters, py::arg("recurse") = true);
}
#endif

#if 0
// Sketch of an operator registration via the dispatcher (disabled).
// NOTE(review): the previous sketch mixed pybind11 syntax (py::init, m.class_
// with a py-style constructor) into TORCH_LIBRARY, which does not compile.
// TORCH_LIBRARY declares operator schemas; exposing a class here would also
// require MyLinearImpl to derive from torch::CustomClassHolder.
TORCH_LIBRARY(my_layer, m)
{
    // Operator schema only; register the kernel per backend elsewhere, e.g.
    // TORCH_LIBRARY_IMPL(my_layer, CUDA, m) { m.impl("my_gemv", &my_gemv); }
    m.def("my_gemv(Tensor A, Tensor x) -> Tensor");
}
#endif