#include <torch/extension.h>
// Input-validation helpers for tensors passed to the CUDA kernel launcher.
// Uses x.is_cuda() -- the x.type().is_cuda() form is deprecated in PyTorch.
#define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor. ")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous. ")
// Expands to two statements. Call sites in this file omit the trailing
// semicolon, so this must stay a bare statement sequence (no do-while wrapper).
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

// Forward declaration of the CUDA kernel launcher, defined in the companion
// .cu translation unit. All pointers must reference contiguous float32
// device memory; `data_ptr` is the destination buffer, presumably of size
// rows * cols (the launcher's indexing scheme is not visible here -- verify
// against the .cu implementation).
void test_launch(const float* const data1_ptr,
    const float* const data2_ptr,
    float* const data_ptr,
    const int rows,
    const int cols);

// Validates the three tensors and forwards their raw float buffers to the
// CUDA kernel launcher. `output` is written in place; its 2-D shape
// (rows x cols) drives the launch. Assumes input1/input2 are sized
// compatibly with output (e.g. elementwise) -- that contract is enforced by
// the kernel side, not checked here; confirm against the .cu implementation.
inline void test(torch::Tensor input1, torch::Tensor input2, torch::Tensor output) {
    CHECK_INPUT(input1);
    CHECK_INPUT(input2);
    CHECK_INPUT(output);
    // data_ptr<float>() below requires float32 tensors; fail early with a
    // clear message instead of an opaque dtype error at extraction time.
    TORCH_CHECK(input1.scalar_type() == torch::kFloat32, "input1 must be float32");
    TORCH_CHECK(input2.scalar_type() == torch::kFloat32, "input2 must be float32");
    TORCH_CHECK(output.scalar_type() == torch::kFloat32, "output must be float32");
    // size(0)/size(1) are only meaningful for a 2-D output.
    TORCH_CHECK(output.dim() == 2, "output must be 2-D (rows x cols)");

    // Tensor sizes are int64_t; the launcher takes int, so narrow explicitly.
    const int rows = static_cast<int>(output.size(0));
    const int cols = static_cast<int>(output.size(1));
    const float* const data1_ptr = input1.data_ptr<float>();
    const float* const data2_ptr = input2.data_ptr<float>();
    float* const data_ptr = output.data_ptr<float>();
    test_launch(data1_ptr, data2_ptr, data_ptr, rows, cols);
}

// PYBIND11_MODULE creates the Python extension module. In m.def, the first
// argument is the Python-visible function name, the second is the bound C++
// function, and the third is the docstring shown in Python's help().
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("test", &test, "test description");
}


