/* Furthest point sampling
 * Original author: Haoqiang Fan
 * Modified by Charles R. Qi
 * All Rights Reserved. 2017.
 */
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include <cuda_runtime.h>

using namespace tensorflow;

REGISTER_OP("ProbSample")
    .Input("inp: float32")
    .Input("inpr: float32")
    .Output("out: int32")
    .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* ctx) {
        using ::tensorflow::shape_inference::ShapeHandle;
        // inp: batch_size * ncategory (only its rank is validated here).
        ShapeHandle weights_shape;
        TF_RETURN_IF_ERROR(ctx->WithRank(ctx->input(0), 2, &weights_shape));
        // inpr: batch_size * npoints; the output mirrors this shape.
        ShapeHandle randoms_shape;
        TF_RETURN_IF_ERROR(ctx->WithRank(ctx->input(1), 2, &randoms_shape));
        // out: batch_size * npoints.
        ctx->set_output(0, ctx->MakeShape({ctx->Dim(randoms_shape, 0),
                                           ctx->Dim(randoms_shape, 1)}));
        return Status::OK();
    });
REGISTER_OP("FarthestPointSample")
    .Attr("npoint: int")
    .Input("inp: float32")
    .Output("out: int32")
    .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* ctx) {
        using ::tensorflow::shape_inference::ShapeHandle;
        // inp: batch_size * ndataset * 3.
        ShapeHandle cloud_shape;
        TF_RETURN_IF_ERROR(ctx->WithRank(ctx->input(0), 3, &cloud_shape));
        int npoint;
        TF_RETURN_IF_ERROR(ctx->GetAttr("npoint", &npoint));
        // out: batch_size * npoint sampled indices.
        ctx->set_output(0, ctx->MakeShape({ctx->Dim(cloud_shape, 0), npoint}));
        return Status::OK();
    });
REGISTER_OP("GatherPoint")
    .Input("inp: float32")
    .Input("idx: int32")
    .Output("out: float32")
    .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* ctx) {
        using ::tensorflow::shape_inference::ShapeHandle;
        // inp: batch_size * ndataset * 3.
        ShapeHandle cloud_shape;
        TF_RETURN_IF_ERROR(ctx->WithRank(ctx->input(0), 3, &cloud_shape));
        // idx: batch_size * npoints.
        ShapeHandle index_shape;
        TF_RETURN_IF_ERROR(ctx->WithRank(ctx->input(1), 2, &index_shape));
        // out: batch_size * npoints * 3 (last dim taken from inp).
        ctx->set_output(0, ctx->MakeShape({ctx->Dim(cloud_shape, 0),
                                           ctx->Dim(index_shape, 1),
                                           ctx->Dim(cloud_shape, 2)}));
        return Status::OK();
    });
REGISTER_OP("GatherPointGrad")
    .Input("inp: float32")
    .Input("idx: int32")
    .Input("out_g: float32")
    .Output("inp_g: float32")
    .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* ctx) {
        // The gradient w.r.t. inp has exactly the shape of inp itself.
        ctx->set_output(0, ctx->input(0));
        return Status::OK();
    });

// Implemented in the accompanying .cu file.
void probsampleLauncher(int b, int n, int m, const float* inp_p,
                        const float* inp_r, float* temp, int* out);
// GPU kernel wrapper for ProbSample: per batch element, draws m category
// indices from the n-way weight rows in inp using the random numbers in inpr.
class ProbSampleGpuOp : public OpKernel {
   public:
    explicit ProbSampleGpuOp(OpKernelConstruction* context)
        : OpKernel(context) {}
    void Compute(OpKernelContext* context) override {
        const Tensor& inp_tensor = context->input(0);
        const Tensor& inpr_tensor = context->input(1);
        // Validate shapes BEFORE touching the data: dereferencing flat(0) on a
        // tensor of the wrong rank (or an empty one) is undefined behavior,
        // so the checks must come first to report a clean InvalidArgument.
        OP_REQUIRES(
            context, inp_tensor.dims() == 2,
            errors::InvalidArgument(
                "ProbSample expects (batch_size,num_choices) inp shape"));
        int b = inp_tensor.shape().dim_size(0);
        int n = inp_tensor.shape().dim_size(1);
        OP_REQUIRES(
            context,
            inpr_tensor.dims() == 2 && inpr_tensor.shape().dim_size(0) == b,
            errors::InvalidArgument(
                "ProbSample expects (batch_size,num_points) inpr shape"));
        int m = inpr_tensor.shape().dim_size(1);
        auto inp_flat = inp_tensor.flat<float>();
        auto inpr_flat = inpr_tensor.flat<float>();
        const float* inp = &(inp_flat(0));
        const float* inpr = &(inpr_flat(0));
        Tensor* out_tensor = NULL;
        OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape{b, m},
                                                         &out_tensor));
        auto out_flat = out_tensor->flat<int>();
        int* out = &(out_flat(0));
        // Scratch buffer of one float per category per batch; presumably used
        // by the kernel for cumulative weights -- confirm against the .cu file.
        Tensor temp_tensor;
        OP_REQUIRES_OK(context,
                       context->allocate_temp(DataTypeToEnum<float>::value,
                                              TensorShape{b, n}, &temp_tensor));
        auto temp_flat = temp_tensor.flat<float>();
        float* temp = &(temp_flat(0));
        probsampleLauncher(b, n, m, inp, inpr, temp, out);
    }
};
REGISTER_KERNEL_BUILDER(Name("ProbSample").Device(DEVICE_GPU), ProbSampleGpuOp);

void farthestpointsamplingLauncher(int b, int n, int m, const float* inp,
                                   float* temp, int* out);
class FarthestPointSampleGpuOp : public OpKernel {
   public:
    explicit FarthestPointSampleGpuOp(OpKernelConstruction* context)
        : OpKernel(context) {
        OP_REQUIRES_OK(context, context->GetAttr("npoint", &npoint_));
        OP_REQUIRES(context, npoint_ > 0,
                    errors::InvalidArgument(
                        "FarthestPointSample expects positive npoint"));
    }
    void Compute(OpKernelContext* context) override {
        int m = npoint_;

        const Tensor& inp_tensor = context->input(0);
        OP_REQUIRES(
            context,
            inp_tensor.dims() == 3 && inp_tensor.shape().dim_size(2) == 3,
            errors::InvalidArgument("FarthestPointSample expects "
                                    "(batch_size,num_points,3) inp shape"));
        int b = inp_tensor.shape().dim_size(0);
        int n = inp_tensor.shape().dim_size(1);
        auto inp_flat = inp_tensor.flat<float>();
        const float* inp = &(inp_flat(0));
        Tensor* out_tensor;
        OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape{b, m},
                                                         &out_tensor));
        auto out_flat = out_tensor->flat<int>();
        int* out = &(out_flat(0));
        Tensor temp_tensor;
        OP_REQUIRES_OK(
            context, context->allocate_temp(DataTypeToEnum<float>::value,
                                            TensorShape{32, n}, &temp_tensor));
        auto temp_flat = temp_tensor.flat<float>();
        float* temp = &(temp_flat(0));
        farthestpointsamplingLauncher(b, n, m, inp, temp, out);
    }

   private:
    int npoint_;
};
REGISTER_KERNEL_BUILDER(Name("FarthestPointSample").Device(DEVICE_GPU),
                        FarthestPointSampleGpuOp);

void gatherpointLauncher(int b, int n, int m, const float* inp, const int* idx,
                         float* out);
class GatherPointGpuOp : public OpKernel {
   public:
    explicit GatherPointGpuOp(OpKernelConstruction* context)
        : OpKernel(context) {}
    void Compute(OpKernelContext* context) override {
        const Tensor& inp_tensor = context->input(0);
        OP_REQUIRES(
            context,
            inp_tensor.dims() == 3 && inp_tensor.shape().dim_size(2) == 3,
            errors::InvalidArgument(
                "GatherPoint expects (batch_size,num_points,3) inp shape"));
        int b = inp_tensor.shape().dim_size(0);
        int n = inp_tensor.shape().dim_size(1);
        const Tensor& idx_tensor = context->input(1);
        OP_REQUIRES(
            context,
            idx_tensor.dims() == 2 && idx_tensor.shape().dim_size(0) == b,
            errors::InvalidArgument(
                "GatherPoint expects (batch_size,num_result) idx shape"));
        int m = idx_tensor.shape().dim_size(1);
        auto inp_flat = inp_tensor.flat<float>();
        const float* inp = &(inp_flat(0));
        auto idx_flat = idx_tensor.flat<int>();
        const int* idx = &(idx_flat(0));
        Tensor* out_tensor = NULL;
        OP_REQUIRES_OK(context, context->allocate_output(
                                    0, TensorShape{b, m, 3}, &out_tensor));
        auto out_flat = out_tensor->flat<float>();
        float* out = &(out_flat(0));
        gatherpointLauncher(b, n, m, inp, idx, out);
    }
};
REGISTER_KERNEL_BUILDER(Name("GatherPoint").Device(DEVICE_GPU),
                        GatherPointGpuOp);

void scatteraddpointLauncher(int b, int n, int m, const float* out_g,
                             const int* idx, float* inp_g);
class GatherPointGradGpuOp : public OpKernel {
   public:
    explicit GatherPointGradGpuOp(OpKernelConstruction* context)
        : OpKernel(context) {}
    void Compute(OpKernelContext* context) override {
        const Tensor& inp_tensor = context->input(0);
        OP_REQUIRES(
            context,
            inp_tensor.dims() == 3 && inp_tensor.shape().dim_size(2) == 3,
            errors::InvalidArgument(
                "GatherPointGradGpuOp expects (batch_size,num_points,3) inp"));
        int b = inp_tensor.shape().dim_size(0);
        int n = inp_tensor.shape().dim_size(1);
        const Tensor& idx_tensor = context->input(1);
        OP_REQUIRES(
            context,
            idx_tensor.dims() == 2 && idx_tensor.shape().dim_size(0) == b,
            errors::InvalidArgument("GatherPointGradGpuOp expects "
                                    "(batch_size,num_result) idx shape"));
        int m = idx_tensor.shape().dim_size(1);
        auto inp_flat = inp_tensor.flat<float>();
        const float* inp = &(inp_flat(0));
        auto idx_flat = idx_tensor.flat<int>();
        const int* idx = &(idx_flat(0));
        const Tensor& out_g_tensor = context->input(2);
        OP_REQUIRES(
            context,
            out_g_tensor.dims() == 3 && out_g_tensor.shape().dim_size(0) == b &&
                out_g_tensor.shape().dim_size(1) == m &&
                out_g_tensor.shape().dim_size(2) == 3,
            errors::InvalidArgument("GatherPointGradGpuOp expects "
                                    "(batch_size,num_result,3) out_g shape"));
        auto out_g_flat = out_g_tensor.flat<float>();
        const float* out_g = &(out_g_flat(0));
        Tensor* inp_g_tensor = NULL;
        OP_REQUIRES_OK(context, context->allocate_output(
                                    0, TensorShape{b, n, 3}, &inp_g_tensor));
        auto inp_g_flat = inp_g_tensor->flat<float>();
        float* inp_g = &(inp_g_flat(0));
        cudaMemset(inp_g, 0, b * n * 3 * 4);
        scatteraddpointLauncher(b, n, m, out_g, idx, inp_g);
    }
};
REGISTER_KERNEL_BUILDER(Name("GatherPointGrad").Device(DEVICE_GPU),
                        GatherPointGradGpuOp);
