#include <torch/extension.h>
#include <ATen/ATen.h>
// Argument-validation helpers shared by the bindings below.
// NOTE: the original used x.type().is_cuda(); Tensor::type() is deprecated
// in recent PyTorch — x.is_cuda() is the supported, equivalent check.
#define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor. ")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous. ")
// Intentionally two statements (no do/while wrapper): call sites in this
// file invoke CHECK_INPUT(x) without a trailing semicolon.
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

// Kernel launchers implemented in the companion .cu translation unit.
// Both read `rows * cols` contiguous floats from data_ptr and write the
// same number of floats to output_ptr. The exact grid/block configuration
// and kernel math live in the CUDA file — not visible here.

// Launches the element-wise sigmoid forward kernel.
void sigmoid_forward_launch(const float* const data_ptr,
    float* const output_ptr,
    const int rows,
    const int cols);

// Launches the sigmoid backward kernel.
// NOTE(review): whether data_ptr is expected to hold the forward output
// (sigmoid(x)) or the original input x cannot be determined from this
// file — confirm against the .cu implementation before reusing.
void sigmoid_backward_launch(const float* const data_ptr,
    float* const output_ptr,
    const int rows,
    const int cols);

/// Element-wise sigmoid forward pass on the GPU.
///
/// @param input  contiguous 2-D float32 CUDA tensor.
/// @return       new tensor of the same shape/dtype holding the kernel's output.
/// @throws c10::Error if input is not a contiguous 2-D float32 CUDA tensor.
inline torch::Tensor sigmoid_forward(torch::Tensor input){
    CHECK_INPUT(input)
    // The launch helper takes a (rows, cols) pair, so this binding only
    // supports 2-D tensors; fail with a clear message instead of letting
    // size(1) or data_ptr<float>() throw a generic one.
    TORCH_CHECK(input.dim() == 2, "input must be 2-D, got ", input.dim(), " dim(s)");
    TORCH_CHECK(input.scalar_type() == torch::kFloat32, "input must be float32");
    // size() returns int64_t; the launcher takes int — narrow explicitly.
    const int rows = static_cast<int>(input.size(0));
    const int cols = static_cast<int>(input.size(1));
    torch::Tensor output = torch::zeros_like(input);
    const float* const data_ptr = input.data_ptr<float>();
    float* const output_ptr = output.data_ptr<float>();
    sigmoid_forward_launch(data_ptr, output_ptr, rows, cols);
    return output;
}

/// Sigmoid backward pass on the GPU.
///
/// @param input  contiguous 2-D float32 CUDA tensor fed to the backward kernel.
///               (Whether the kernel expects the forward output or the raw
///               input is defined by the .cu implementation.)
/// @return       new tensor of the same shape/dtype holding the kernel's output.
/// @throws c10::Error if input is not a contiguous 2-D float32 CUDA tensor.
inline torch::Tensor sigmoid_backward(torch::Tensor input){
    CHECK_INPUT(input)
    // Validate shape/dtype up front — mirrors sigmoid_forward; otherwise
    // size(1) / data_ptr<float>() would throw generic errors on bad input.
    TORCH_CHECK(input.dim() == 2, "input must be 2-D, got ", input.dim(), " dim(s)");
    TORCH_CHECK(input.scalar_type() == torch::kFloat32, "input must be float32");
    // size() returns int64_t; the launcher takes int — narrow explicitly.
    const int rows = static_cast<int>(input.size(0));
    const int cols = static_cast<int>(input.size(1));
    torch::Tensor output = torch::zeros_like(input);
    const float* const data_ptr = input.data_ptr<float>();
    float* const output_ptr = output.data_ptr<float>();
    sigmoid_backward_launch(data_ptr, output_ptr, rows, cols);
    return output;
}

// PYBIND11_MODULE creates the Python extension module: the first argument to
// m.def() is the Python-visible function name, the second is the bound C++
// function, the third is the docstring shown by help().
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  // Fixed: both docstrings were the misspelled placeholder "descripition".
  m.def("sigmoid_forward", &sigmoid_forward, "Element-wise sigmoid forward pass (CUDA)");
  m.def("sigmoid_backward", &sigmoid_backward, "Sigmoid backward pass (CUDA)");
}


