#include <torch/extension.h>

// feats:(N,8,F)
// points:(N,3)
// output:(N,F)

// __global__: callable from the host (CPU), executed on the device (GPU).
// __host__ functions run on the CPU; __device__ functions run on the GPU
// and are callable only from device code.
// Trilinear interpolation forward kernel.
//
// Launch layout: 2D grid/block — x indexes points (n in [0, N)),
//                                y indexes feature channels (f in [0, F)).
// Inputs:
//   feats  (N, 8, F): feature vectors at the 8 corners of each unit cube.
//                     Corner order assumed: index bit pattern where the
//                     high bit follows u and the low bits follow (v, w)
//                     — TODO confirm against the data-generation code.
//   points (N, 3)   : query coordinates, each component in [-1, 1].
// Output:
//   feat_interp (N, F): interpolated feature per point.
template <typename scalar_t>
__global__ void trilinear_fw_kernel(
    const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> feats,
    const torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> points,
    torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> feat_interp
){
    // Global thread index: one thread per (point, feature) pair.
    // BUG FIX: original used blockIdx.x * blockIdx.x (block index squared),
    // which produced wrong/duplicate n values; must scale by blockDim.x.
    const int n = blockIdx.x * blockDim.x + threadIdx.x;
    const int f = blockIdx.y * blockDim.y + threadIdx.y;

    // The grid is rounded up to a multiple of the block size, so guard
    // against threads that fall outside the tensor extents.
    if (n>=feats.size(0) || f>=feats.size(2)) return;

    // Remap point coordinates from [-1, 1] to normalized weights in [0, 1].
    const scalar_t u = (points[n][0]+1)/2;
    const scalar_t v = (points[n][1]+1)/2;
    const scalar_t w = (points[n][2]+1)/2;

    // Bilinear weights over the (v, w) face; a+b+c+d == 1 by construction.
    const scalar_t a = (1-v)*(1-w);
    const scalar_t b = (1-v)*w;
    const scalar_t c = v*(1-w);
    const scalar_t d = 1-a-b-c;
    // Blend the two (v, w)-interpolated faces along u.
    feat_interp[n][f] = (1-u)*(a*feats[n][0][f] +
                               b*feats[n][1][f] +
                               c*feats[n][2][f] +
                               d*feats[n][3][f]) +
                            u*(a*feats[n][4][f] +
                               b*feats[n][5][f] +
                               c*feats[n][6][f] +
                               d*feats[n][7][f]);
}



// Host-side launcher for the trilinear interpolation forward pass.
//
// Args:
//   feats  (N, 8, F) CUDA tensor: per-cube corner features.
//   points (N, 3)    CUDA tensor: query coordinates in [-1, 1].
// Returns:
//   (N, F) CUDA tensor of interpolated features (same dtype/device as feats).
torch::Tensor trilinear_fw_cu(
    torch::Tensor feats,
    torch::Tensor points
){
    const int N = feats.size(0), F = feats.size(2);

    // Allocate the output with the same dtype/device as the input
    // (equivalent to torch.zeros(N, F, dtype=..., device=...) in Python).
    torch::Tensor feat_interp = torch::zeros({N,F}, feats.options());

    // 16x16 = 256 threads per block; grid is the ceiling division so every
    // (n, f) pair is covered (the kernel bounds-checks the overshoot).
    const dim3 threads(16,16);
    const dim3 blocks((N+threads.x-1)/threads.x, (F+threads.y-1)/threads.y);

    // FIX: use scalar_type() — Tensor::type() is deprecated in modern PyTorch
    // and AT_DISPATCH_FLOATING_TYPES expects a ScalarType.
    AT_DISPATCH_FLOATING_TYPES(feats.scalar_type(), "trilinear_fw_cu",
    ([&]{
        trilinear_fw_kernel<scalar_t><<<blocks, threads>>>(
            // Tensors must be converted to packed accessors to be indexed
            // inside the kernel; scalar_t is the dispatched element type,
            // the integer is the tensor rank, and RestrictPtrTraits promises
            // the underlying pointers do not alias.
            feats.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(),
            points.packed_accessor<scalar_t, 2, torch::RestrictPtrTraits, size_t>(),
            feat_interp.packed_accessor<scalar_t, 2, torch::RestrictPtrTraits, size_t>()
        );
    }));

    return feat_interp;
}