#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
// Weighted sampling: given per-batch unnormalized weights `inp` (b, n) and
// uniform random values `inpr` (b, m), emits m sampled indices per batch.
// NOTE(review): none of these ops set a shape-inference function
// (SetShapeFn), so output shapes are unknown at graph-construction time.
REGISTER_OP("ProbSample")
	.Input("inp: float32")
	.Input("inpr: float32")
	.Output("out: int32");
// Farthest point sampling: `npoint` is a scalar sample count, `inp` is a
// (b, n, 3) point cloud; `out` holds (b, npoint) selected point indices.
REGISTER_OP("FarthestPointSample")
	.Input("npoint: int32")
	.Input("inp: float32")
	.Output("out: int32");
// Gathers rows of a (b, n, 3) point cloud by (b, m) indices -> (b, m, 3).
REGISTER_OP("GatherPoint")
	.Input("inp: float32")
	.Input("idx: int32")
	.Output("out: float32");
// Gradient of GatherPoint: scatter-adds `out_g` (b, m, 3) back into the
// shape of `inp` at the positions given by `idx`.
REGISTER_OP("GatherPointGrad")
	.Input("inp: float32")
	.Input("idx: int32")
	.Input("out_g: float32")
	.Output("inp_g: float32");
#include <cuda_runtime.h>
using namespace tensorflow;
void probsampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out);
class ProbSampleGpuOp: public OpKernel{
	public:
		explicit ProbSampleGpuOp(OpKernelConstruction* context):OpKernel(context){}
		void Compute(OpKernelContext * context)override{
			const Tensor& inp_tensor=context->input(0);
			const Tensor& inpr_tensor=context->input(1);
			auto inp_flat=inp_tensor.flat<float>();
			auto inpr_flat=inpr_tensor.flat<float>();
			const float * inp=&(inp_flat(0));
			const float * inpr=&(inpr_flat(0));
			OP_REQUIRES(context,inp_tensor.dims()==2,errors::InvalidArgument("ProbSample expects (batch_size,num_choices) inp shape"));
			int b=inp_tensor.shape().dim_size(0);
			int n=inp_tensor.shape().dim_size(1);
			OP_REQUIRES(context,inpr_tensor.dims()==2 && inpr_tensor.shape().dim_size(0)==b,errors::InvalidArgument("ProbSample expects (batch_size,num_points) inpr shape"));
			int m=inpr_tensor.shape().dim_size(1);
			//printf("b=%d n=%d m=%d\n",b,n,m);
			Tensor * out_tensor=NULL;
			OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,m},&out_tensor));
			auto out_flat=out_tensor->flat<int>();
			int * out=&(out_flat(0));
			Tensor temp_tensor;
			OP_REQUIRES_OK(context,context->allocate_temp(DataTypeToEnum<float>::value,TensorShape{b,n},&temp_tensor));
			auto temp_flat=temp_tensor.flat<float>();
			float * temp=&(temp_flat(0));
			probsampleLauncher(b,n,m,inp,inpr,temp,out);
		}
};
REGISTER_KERNEL_BUILDER(Name("ProbSample").Device(DEVICE_GPU), ProbSampleGpuOp);

void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out);
class FarthestPointSampleGpuOp: public OpKernel{
	public:
		explicit FarthestPointSampleGpuOp(OpKernelConstruction* context):OpKernel(context){}
		void Compute(OpKernelContext * context)override{
			const Tensor& npoint_tensor=context->input(0);
			OP_REQUIRES(context,IsLegacyScalar(npoint_tensor.shape()),errors::InvalidArgument("FarthestPointSample expects scalar npoint"));
			int m;
			cudaMemcpy(&m,&npoint_tensor.scalar<int>()(),4,cudaMemcpyDeviceToHost);

			const Tensor& inp_tensor=context->input(1);
			OP_REQUIRES(context,inp_tensor.dims()==3 && inp_tensor.shape().dim_size(2)==3,errors::InvalidArgument("FarthestPointSample expects (batch_size,num_points,3) inp shape"));
			int b=inp_tensor.shape().dim_size(0);
			int n=inp_tensor.shape().dim_size(1);
			auto inp_flat=inp_tensor.flat<float>();
			const float * inp=&(inp_flat(0));
			Tensor * out_tensor;
			OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,m},&out_tensor));
			auto out_flat=out_tensor->flat<int>();
			int * out=&(out_flat(0));
			Tensor temp_tensor;
			OP_REQUIRES_OK(context,context->allocate_temp(DataTypeToEnum<float>::value,TensorShape{32,n},&temp_tensor));
			auto temp_flat=temp_tensor.flat<float>();
			float * temp=&(temp_flat(0));
			farthestpointsamplingLauncher(b,n,m,inp,temp,out);
		}
};
REGISTER_KERNEL_BUILDER(Name("FarthestPointSample").Device(DEVICE_GPU),FarthestPointSampleGpuOp);

void gatherpointLauncher(int b,int n,int m,const float * inp,const int * idx,float * out);
class GatherPointGpuOp: public OpKernel{
	public:
		explicit GatherPointGpuOp(OpKernelConstruction * context):OpKernel(context){}
		void Compute(OpKernelContext * context)override{
			const Tensor& inp_tensor=context->input(0);
			OP_REQUIRES(context,inp_tensor.dims()==3 && inp_tensor.shape().dim_size(2)==3,errors::InvalidArgument("GatherPoint expects (batch_size,num_points,3) inp shape"));
			int b=inp_tensor.shape().dim_size(0);
			int n=inp_tensor.shape().dim_size(1);
			const Tensor& idx_tensor=context->input(1);
			OP_REQUIRES(context,idx_tensor.dims()==2 && idx_tensor.shape().dim_size(0)==b,errors::InvalidArgument("GatherPoint expects (batch_size,num_result) idx shape"));
			int m=idx_tensor.shape().dim_size(1);
			auto inp_flat=inp_tensor.flat<float>();
			const float * inp=&(inp_flat(0));
			auto idx_flat=idx_tensor.flat<int>();
			const int * idx=&(idx_flat(0));
			Tensor * out_tensor=NULL;
			OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,m,3},&out_tensor));
			auto out_flat=out_tensor->flat<float>();
			float * out=&(out_flat(0));
			gatherpointLauncher(b,n,m,inp,idx,out);
		}
};
REGISTER_KERNEL_BUILDER(Name("GatherPoint").Device(DEVICE_GPU),GatherPointGpuOp);

void scatteraddpointLauncher(int b,int n,int m,const float * out_g,const int * idx,float * inp_g);
class GatherPointGradGpuOp: public OpKernel{
	public:
		explicit GatherPointGradGpuOp(OpKernelConstruction * context):OpKernel(context){}
		void Compute(OpKernelContext * context)override{
			const Tensor& inp_tensor=context->input(0);
			OP_REQUIRES(context,inp_tensor.dims()==3 && inp_tensor.shape().dim_size(2)==3,errors::InvalidArgument("GatherPointGradGpuOp expects (batch_size,num_points,3) inp"));
			int b=inp_tensor.shape().dim_size(0);
			int n=inp_tensor.shape().dim_size(1);
			const Tensor& idx_tensor=context->input(1);
			OP_REQUIRES(context,idx_tensor.dims()==2 && idx_tensor.shape().dim_size(0)==b,errors::InvalidArgument("GatherPointGradGpuOp expects (batch_size,num_result) idx shape"));
			int m=idx_tensor.shape().dim_size(1);
			auto inp_flat=inp_tensor.flat<float>();
			const float * inp=&(inp_flat(0));
			auto idx_flat=idx_tensor.flat<int>();
			const int * idx=&(idx_flat(0));
			const Tensor& out_g_tensor=context->input(2);
			OP_REQUIRES(context,out_g_tensor.dims()==3 && out_g_tensor.shape().dim_size(0)==b && out_g_tensor.shape().dim_size(1)==m && out_g_tensor.shape().dim_size(2)==3,errors::InvalidArgument("GatherPointGradGpuOp expects (batch_size,num_result,3) out_g shape"));
			auto out_g_flat=out_g_tensor.flat<float>();
			const float * out_g=&(out_g_flat(0));
			Tensor * inp_g_tensor=NULL;
			OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,n,3},&inp_g_tensor));
			auto inp_g_flat=inp_g_tensor->flat<float>();
			float * inp_g=&(inp_g_flat(0));
			cudaMemset(inp_g,0,b*n*3*4);
			scatteraddpointLauncher(b,n,m,out_g,idx,inp_g);
		}
};
REGISTER_KERNEL_BUILDER(Name("GatherPointGrad").Device(DEVICE_GPU),GatherPointGradGpuOp);

