// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/grid_sample_kernel.h"

#include "glog/logging.h"

#include "paddle/phi/backends/dynload/cudnn.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/gpu/grid_sample_utils.h"

namespace phi {

template <typename T, typename IndexT>
static __forceinline__ __device__ T Unnormalize(T coord,
                                                IndexT size,
                                                bool align_corners) {
  // Map a normalized grid coordinate in [-1, 1] onto the pixel axis of
  // length `size`.
  if (align_corners) {
    // -1 and +1 land exactly on the centers of the first/last pixels.
    return ((coord + 1.f) / 2) * (size - 1);
  }
  // -1 and +1 land on the outer edges of the first/last pixels.
  return ((coord + 1.f) * size - 1) / 2;
}

template <typename T, typename IndexT>
static __forceinline__ __device__ T ClipIndexes(T in, IndexT max_value) {
  // Clamp `in` into the valid index range [0, max_value - 1].
  const T upper = static_cast<T>(max_value - 1);
  const T lower = static_cast<T>(0);
  return min(upper, max(in, lower));
}

template <typename T, typename IndexT>
static __forceinline__ __device__ T ReflectIndexes(T in,
                                                   IndexT twice_low,
                                                   IndexT twice_high) {
  // Fold `in` back into the interval [twice_low / 2, twice_high / 2] by
  // reflecting it across the interval boundaries ("reflect" padding).
  // The bounds are passed doubled so callers can express half-integer
  // boundaries (the align_corners == false case) with integer arguments.
  if (twice_low == twice_high) {
    // Degenerate (zero-width) interval: everything collapses to 0.
    return static_cast<T>(0);
  }
  const T low = static_cast<T>(twice_low) / 2;
  const T span = static_cast<T>(twice_high - twice_low) / 2;
  in = fabs(in - low);
  const T extra = fmod(in, span);
  const IndexT flips = floor(in / span);
  // An even number of flips keeps the offset direction; odd reverses it.
  if (flips & 1) {
    return span - extra + low;
  }
  return extra + low;
}

// Converts a normalized grid coordinate in [-1, 1] to an absolute input
// index along an axis of length `size`, applying the requested padding
// policy for out-of-range values.
template <typename T, typename IndexT>
static __forceinline__ __device__ T ComputePositions(T coord,
                                                     IndexT size,
                                                     PaddingMode padding_mode,
                                                     bool align_corners) {
  coord = Unnormalize(coord, size, align_corners);
  if (padding_mode == PaddingMode::border) {
    // Border padding: clamp to the nearest valid index.
    coord = ClipIndexes(coord, size);
  } else if (padding_mode == PaddingMode::reflect) {
    // Reflect padding: fold the coordinate back into range. The reflection
    // boundaries differ with align_corners (pixel centers vs. pixel edges),
    // hence the doubled-bound arguments.
    coord = align_corners ? ReflectIndexes<T, IndexT>(coord, 0, 2 * (size - 1))
                          : ReflectIndexes<T, IndexT>(coord, -1, 2 * size - 1);
    // Clip afterwards to absorb floating-point drift just outside the range.
    coord = ClipIndexes(coord, size);
  }
  // PaddingMode::zeros deliberately leaves the coordinate untouched;
  // out-of-range samples are zeroed by the caller's InBounds checks.
  return SafeDownGradeToIntRange(coord);
}

// 2-D grid sample over an NCHW input with an N(H*W)2 sampling grid.
// Each thread handles one output spatial location (one n, one h*w slot)
// across all channels. Launched as a 1-D grid; CUDA_KERNEL_LOOP_TYPE
// provides a grid-stride loop over the n * out_hw work items, so any
// launch configuration is correct.
template <typename T, typename IndexT>
__global__ void GridSampleCudaKernel(IndexT n,
                                     IndexT out_c,
                                     IndexT out_hw,
                                     IndexT in_h,
                                     IndexT in_w,
                                     const T* __restrict__ input,
                                     const T* __restrict__ grid,
                                     T* __restrict__ output,
                                     const Mode mode,
                                     const PaddingMode padding_mode,
                                     bool align_corners) {
  IndexT nthreads = n * out_hw;
  // Strides for contiguous NCHW input: batch, channel, row, column.
  IndexT inp_sN = out_c * (in_h * in_w);
  IndexT inp_sC = in_h * in_w;
  IndexT inp_sH = in_w;
  IndexT inp_sW = 1;
  // Grid is laid out as [N, H*W, 2] with (x, y) pairs innermost.
  IndexT grid_sNHW = 2;
  IndexT grid_sCoor = 1;
  IndexT out_sN = out_c * out_hw;
  IndexT out_sC = out_hw;
  IndexT out_sHW = 1;
  CUDA_KERNEL_LOOP_TYPE(index, nthreads, IndexT) {
    const IndexT hw = index % out_hw;
    // NOTE: this loop-local `n` (batch index) shadows the kernel
    // parameter `n` (batch count), which is only used above for nthreads.
    const IndexT n = index / out_hw;
    const IndexT grid_offset = index * grid_sNHW;

    // Fetch the normalized (x, y) sampling location for this output pixel.
    T ix = grid[grid_offset];
    T iy = grid[grid_offset + grid_sCoor];

    // Map to absolute input coordinates with padding policy applied.
    ix = ComputePositions(ix, in_w, padding_mode, align_corners);
    iy = ComputePositions(iy, in_h, padding_mode, align_corners);
    if (mode == Mode::bilinear) {
      // Integer corners surrounding (ix, iy): north-west, north-east,
      // south-west, south-east.
      IndexT ix_nw = floor(ix);
      IndexT iy_nw = floor(iy);
      IndexT ix_ne = ix_nw + 1;
      IndexT iy_ne = iy_nw;
      IndexT ix_sw = ix_nw;
      IndexT iy_sw = iy_nw + 1;
      IndexT ix_se = ix_nw + 1;
      IndexT iy_se = iy_nw + 1;

      // Bilinear weights: each corner is weighted by the area of the
      // rectangle between (ix, iy) and the OPPOSITE corner.
      T nw = (ix_se - ix) * (iy_se - iy);
      T ne = (ix - ix_sw) * (iy_sw - iy);
      T sw = (ix_ne - ix) * (iy - iy_ne);
      T se = (ix - ix_nw) * (iy - iy_nw);

      IndexT inp_offset_NC = n * inp_sN;
      T* out_ptr_NCHW = output + (n * out_sN + hw * out_sHW);

      // Accumulate the weighted sum per channel; corners that fall outside
      // the input contribute zero (PaddingMode::zeros semantics).
      for (IndexT c = 0; c < out_c;
           ++c, inp_offset_NC += inp_sC, out_ptr_NCHW += out_sC) {
        T value{0};
        if (InBounds(iy_nw, ix_nw, in_h, in_w)) {
          value += input[inp_offset_NC + iy_nw * inp_sH + ix_nw * inp_sW] * nw;
        }
        if (InBounds(iy_ne, ix_ne, in_h, in_w)) {
          value += input[inp_offset_NC + iy_ne * inp_sH + ix_ne * inp_sW] * ne;
        }
        if (InBounds(iy_sw, ix_sw, in_h, in_w)) {
          value += input[inp_offset_NC + iy_sw * inp_sH + ix_sw * inp_sW] * sw;
        }
        if (InBounds(iy_se, ix_se, in_h, in_w)) {
          value += input[inp_offset_NC + iy_se * inp_sH + ix_se * inp_sW] * se;
        }
        *out_ptr_NCHW = value;
      }
    } else if (mode == Mode::nearest) {
      // Nearest mode: round to the closest input pixel; out-of-range
      // locations produce zero.
      IndexT ix_nearest = std::nearbyint(ix);
      IndexT iy_nearest = std::nearbyint(iy);
      IndexT inp_offset_NC = n * inp_sN;
      T* out_ptr_NCHW = output + (n * out_sN + hw * out_sHW);
      for (IndexT c = 0; c < out_c;
           ++c, inp_offset_NC += inp_sC, out_ptr_NCHW += out_sC) {
        if (InBounds(iy_nearest, ix_nearest, in_h, in_w)) {
          *out_ptr_NCHW =
              input[inp_offset_NC + iy_nearest * inp_sH + ix_nearest * inp_sW];
        } else {
          *out_ptr_NCHW = static_cast<T>(0);
        }
      }
    }
  }
}

// 3-D grid sample over an NCDHW input with an NDHW3 sampling grid.
// Each thread handles one output voxel (n, d, h, w) across all channels.
// Launched as a 1-D grid; CUDA_KERNEL_LOOP_TYPE supplies a grid-stride
// loop over `nthreads` = n * out_d * out_h * out_w work items.
template <typename T, typename IndexT>
__global__ void GridSample3DCudaKernel(const IndexT nthreads,
                                       IndexT out_c,
                                       IndexT out_d,
                                       IndexT out_h,
                                       IndexT out_w,
                                       IndexT in_d,
                                       IndexT in_h,
                                       IndexT in_w,
                                       const T* input,
                                       const T* grid,
                                       T* output,
                                       const Mode interpolation_mode,
                                       const PaddingMode padding_mode,
                                       bool align_corners) {
  // Strides for contiguous NCDHW input.
  IndexT inp_sW = 1;
  IndexT inp_sH = in_w;
  IndexT inp_sD = in_h * in_w;
  IndexT inp_sC = in_d * inp_sD;
  IndexT inp_sN = out_c * inp_sC;

  // Grid is laid out as [N, D, H, W, 3] with (x, y, z) triplets innermost.
  IndexT grid_sCoor = 1;
  IndexT grid_sW = 3;
  IndexT grid_sH = out_w * grid_sW;
  IndexT grid_sD = out_h * grid_sH;
  IndexT grid_sN = out_d * grid_sD;

  // Strides for contiguous NCDHW output.
  IndexT out_sW = 1;
  IndexT out_sH = out_w;
  IndexT out_sD = out_h * out_w;
  IndexT out_sC = out_d * out_sD;
  IndexT out_sN = out_c * out_sC;

  CUDA_KERNEL_LOOP_TYPE(index, nthreads, IndexT) {
    // Decompose the flat work-item index into (n, d, h, w).
    const IndexT w = index % out_w;
    const IndexT h = (index / out_w) % out_h;
    const IndexT d = (index / (out_h * out_w)) % out_d;
    const IndexT n = index / (out_d * out_h * out_w);
    const IndexT grid_offset =
        n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
    // get the corresponding input x, y, z coordinates from grid
    T ix = grid[grid_offset];
    T iy = grid[grid_offset + grid_sCoor];
    T iz = grid[grid_offset + 2 * grid_sCoor];
    // Map normalized coordinates to absolute input indices with padding
    // policy applied.
    ix = ComputePositions(ix, in_w, padding_mode, align_corners);
    iy = ComputePositions(iy, in_h, padding_mode, align_corners);
    iz = ComputePositions(iz, in_d, padding_mode, align_corners);
    if (interpolation_mode == Mode::bilinear) {
      // get corner pixel values from (x, y, z)
      // for 4d, we used north-east-south-west
      // for 5d, we add top-bottom
      IndexT ix_tnw = static_cast<IndexT>(std::floor(ix));
      IndexT iy_tnw = static_cast<IndexT>(std::floor(iy));
      IndexT iz_tnw = static_cast<IndexT>(std::floor(iz));

      IndexT ix_tne = ix_tnw + 1;
      IndexT iy_tne = iy_tnw;
      IndexT iz_tne = iz_tnw;

      IndexT ix_tsw = ix_tnw;
      IndexT iy_tsw = iy_tnw + 1;
      IndexT iz_tsw = iz_tnw;

      IndexT ix_tse = ix_tnw + 1;
      IndexT iy_tse = iy_tnw + 1;
      IndexT iz_tse = iz_tnw;

      IndexT ix_bnw = ix_tnw;
      IndexT iy_bnw = iy_tnw;
      IndexT iz_bnw = iz_tnw + 1;

      IndexT ix_bne = ix_tnw + 1;
      IndexT iy_bne = iy_tnw;
      IndexT iz_bne = iz_tnw + 1;

      IndexT ix_bsw = ix_tnw;
      IndexT iy_bsw = iy_tnw + 1;
      IndexT iz_bsw = iz_tnw + 1;

      IndexT ix_bse = ix_tnw + 1;
      IndexT iy_bse = iy_tnw + 1;
      IndexT iz_bse = iz_tnw + 1;

      // get surfaces to each neighbor:
      // Trilinear weights: each corner is weighted by the volume of the
      // box spanned by (ix, iy, iz) and the OPPOSITE corner.
      T tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz);
      T tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz);
      T tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz);
      T tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz);
      T bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse);
      T bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw);
      T bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne);
      T bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);

      const T* inp_ptr_NC = input + n * inp_sN;
      T* out_ptr_NCDHW =
          output + (n * out_sN + d * out_sD + h * out_sH + w * out_sW);
      // Accumulate the weighted 8-corner sum per channel; corners outside
      // the input contribute zero (PaddingMode::zeros semantics).
      for (IndexT c = 0; c < out_c;
           ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
        *out_ptr_NCDHW = static_cast<T>(0);
        if (InBounds3D(iz_tnw, iy_tnw, ix_tnw, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] *
              tnw;
        }
        if (InBounds3D(iz_tne, iy_tne, ix_tne, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] *
              tne;
        }
        if (InBounds3D(iz_tsw, iy_tsw, ix_tsw, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] *
              tsw;
        }
        if (InBounds3D(iz_tse, iy_tse, ix_tse, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] *
              tse;
        }
        if (InBounds3D(iz_bnw, iy_bnw, ix_bnw, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] *
              bnw;
        }
        if (InBounds3D(iz_bne, iy_bne, ix_bne, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] *
              bne;
        }
        if (InBounds3D(iz_bsw, iy_bsw, ix_bsw, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] *
              bsw;
        }
        if (InBounds3D(iz_bse, iy_bse, ix_bse, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] *
              bse;
        }
      }
    } else if (interpolation_mode == Mode::nearest) {
      IndexT ix_nearest = static_cast<IndexT>(std::nearbyint(ix));
      IndexT iy_nearest = static_cast<IndexT>(std::nearbyint(iy));
      IndexT iz_nearest = static_cast<IndexT>(std::nearbyint(iz));

      // assign nearest neighbor pixel value to output pixel
      const T* inp_ptr_NC = input + n * inp_sN;
      T* out_ptr_NCDHW =
          output + (n * out_sN + d * out_sD + h * out_sH + w * out_sW);
      for (IndexT c = 0; c < out_c;
           ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
        if (InBounds3D(iz_nearest, iy_nearest, ix_nearest, in_d, in_h, in_w)) {
          *out_ptr_NCDHW =
              inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH +
                         ix_nearest * inp_sW];
        } else {
          *out_ptr_NCDHW = static_cast<T>(0);
        }
      }
    }
  }
}

// Host entry point for grid_sample on GPU. Parses string mode/padding
// arguments, optionally dispatches to the cuDNN spatial-transformer fast
// path, and otherwise launches the hand-written 2-D or 3-D kernel with
// 32-bit or 64-bit indexing depending on tensor sizes.
template <typename T, typename Context>
void GridSampleKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const DenseTensor& grid,
                      const std::string& mode,
                      const std::string& padding_mode,
                      bool align_corners,
                      DenseTensor* out) {
  // Zero-sized output: allocate and return without launching anything.
  if (out && out->numel() == 0) {
    dev_ctx.template Alloc<T>(out);
    return;
  }
  PaddingMode enum_padding_mode;
  Mode enum_mode;
  // Unknown padding strings fall back to "zeros".
  if (padding_mode == "border") {
    enum_padding_mode = PaddingMode::border;
  } else if (padding_mode == "reflection") {
    enum_padding_mode = PaddingMode::reflect;
  } else {
    enum_padding_mode = PaddingMode::zeros;
  }

  // Unknown mode strings fall back to "bilinear".
  if (mode == "nearest") {
    enum_mode = Mode::nearest;
  } else {
    enum_mode = Mode::bilinear;
  }

#ifndef PADDLE_WITH_HIP
  // cuDNN fast path: only valid for the exact combination the spatial
  // transformer sampler implements — 4-D input, zeros padding, bilinear
  // interpolation, align_corners == true (eligibility beyond that is
  // decided by condCudnnGridSampler).
  if (condCudnnGridSampler<T>(x, grid) &&
      enum_padding_mode == PaddingMode::zeros && enum_mode == Mode::bilinear &&
      align_corners) {
    const int64_t N = x.dims()[0];
    const int64_t C = x.dims()[1];
    const int64_t H_in = x.dims()[2];
    const int64_t W_in = x.dims()[3];
    const int64_t H_out = grid.dims()[1];
    const int64_t W_out = grid.dims()[2];

    out->Resize({N, C, H_out, W_out});
    auto* out_data = dev_ctx.template Alloc<T>(out);

    cudnnHandle_t handle = dev_ctx.cudnn_handle();

    // Create and set Tensor descriptors (NCHW) for x and out
    cudnnTensorDescriptor_t x_desc, y_desc;
    PADDLE_ENFORCE_GPU_SUCCESS(
        phi::dynload::cudnnCreateTensorDescriptor(&x_desc));
    PADDLE_ENFORCE_GPU_SUCCESS(
        phi::dynload::cudnnCreateTensorDescriptor(&y_desc));

    // Only float and double reach this path (see kernel registration).
    const cudnnDataType_t cudnn_dtype =
        std::is_same<T, float>::value ? CUDNN_DATA_FLOAT : CUDNN_DATA_DOUBLE;

    PADDLE_ENFORCE_GPU_SUCCESS(
        phi::dynload::cudnnSetTensor4dDescriptor(x_desc,
                                                 CUDNN_TENSOR_NCHW,
                                                 cudnn_dtype,
                                                 static_cast<int>(N),
                                                 static_cast<int>(C),
                                                 static_cast<int>(H_in),
                                                 static_cast<int>(W_in)));

    PADDLE_ENFORCE_GPU_SUCCESS(
        phi::dynload::cudnnSetTensor4dDescriptor(y_desc,
                                                 CUDNN_TENSOR_NCHW,
                                                 cudnn_dtype,
                                                 static_cast<int>(N),
                                                 static_cast<int>(C),
                                                 static_cast<int>(H_out),
                                                 static_cast<int>(W_out)));

    // Spatial Transformer descriptor: specifies sampler type and output
    // dimension (N, C, H_out, W_out)
    cudnnSpatialTransformerDescriptor_t st_desc;
    PADDLE_ENFORCE_GPU_SUCCESS(
        phi::dynload::cudnnCreateSpatialTransformerDescriptor(&st_desc));
    int st_dims[4] = {static_cast<int>(N),
                      static_cast<int>(C),
                      static_cast<int>(H_out),
                      static_cast<int>(W_out)};
    PADDLE_ENFORCE_GPU_SUCCESS(
        phi::dynload::cudnnSetSpatialTransformerNdDescriptor(
            st_desc, CUDNN_SAMPLER_BILINEAR, cudnn_dtype, 4, st_dims));

    const T* x_data = x.data<T>();
    const T* grid_data = grid.data<T>();
    // cuDNN expects float alpha/beta for float data and double for double.
    using AlphaBetaT = typename std::
        conditional<std::is_same<T, float>::value, float, double>::type;
    const AlphaBetaT alpha = static_cast<AlphaBetaT>(1.0);
    const AlphaBetaT beta = static_cast<AlphaBetaT>(0.0);

    PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnSpatialTfSamplerForward(
        handle,
        st_desc,
        static_cast<const void*>(&alpha),
        x_desc,
        static_cast<const void*>(x_data),
        static_cast<const void*>(grid_data),
        static_cast<const void*>(&beta),
        y_desc,
        static_cast<void*>(out_data)));

    // resource release
    // NOTE(review): if an ENFORCE above throws, these descriptors leak;
    // consider an RAII guard if that path is reachable in practice.
    PADDLE_ENFORCE_GPU_SUCCESS(
        phi::dynload::cudnnDestroySpatialTransformerDescriptor(st_desc));
    PADDLE_ENFORCE_GPU_SUCCESS(
        phi::dynload::cudnnDestroyTensorDescriptor(x_desc));
    PADDLE_ENFORCE_GPU_SUCCESS(
        phi::dynload::cudnnDestroyTensorDescriptor(y_desc));
    return;
  }
#endif

  // Prefer 32-bit indexing when every tensor fits; narrower index math is
  // cheaper on device.
  bool use_int32_index = x.numel() <= std::numeric_limits<int>::max() &&
                         grid.numel() <= std::numeric_limits<int>::max() &&
                         out->numel() <= std::numeric_limits<int>::max();

  if (x.dims().size() == 4) {
    // 2-D case: x is NCHW, grid is [N, H_out, W_out, 2].
    const int64_t n = grid.dims()[0];
    const int64_t out_h = grid.dims()[1];
    const int64_t out_w = grid.dims()[2];
    const int64_t c = x.dims()[1];
    const int64_t in_h = x.dims()[2];
    const int64_t in_w = x.dims()[3];
    VLOG(3) << "n: " << n << "; c: " << c << "; out_h: " << out_h
            << "; out_w: " << out_w;

    auto* output_data = dev_ctx.template Alloc<T>(out);
    VLOG(3) << "out dims: " << out->dims()[0] << "; " << out->dims()[1] << "; "
            << out->dims()[2] << "; " << out->dims()[3];

    // One work item per output spatial location (channels handled inside
    // the kernel).
    int64_t count = n * out_h * out_w;
    auto cu_stream = dev_ctx.stream();
    backends::gpu::GpuLaunchConfig config =
        backends::gpu::GetGpuLaunchConfig1D(dev_ctx, count);

#define LAUNCH_KERNEL(INDEX_TYPE)                                         \
  GridSampleCudaKernel<T, INDEX_TYPE>                                     \
      <<<config.block_per_grid, config.thread_per_block, 0, cu_stream>>>( \
          n,                                                              \
          c,                                                              \
          out_h * out_w,                                                  \
          in_h,                                                           \
          in_w,                                                           \
          x.data<T>(),                                                    \
          grid.data<T>(),                                                 \
          output_data,                                                    \
          enum_mode,                                                      \
          enum_padding_mode,                                              \
          align_corners)
    if (use_int32_index) {
      LAUNCH_KERNEL(int);
    } else {
      LAUNCH_KERNEL(int64_t);
    }
#undef LAUNCH_KERNEL
  } else {
    // 3-D case: x is NCDHW, grid is [N, D_out, H_out, W_out, 3].
    const int64_t n = grid.dims()[0];
    const int64_t out_d = grid.dims()[1];
    const int64_t out_h = grid.dims()[2];
    const int64_t out_w = grid.dims()[3];
    const int64_t c = x.dims()[1];
    const int64_t in_d = x.dims()[2];
    const int64_t in_h = x.dims()[3];
    const int64_t in_w = x.dims()[4];

    VLOG(3) << "n: " << n << "; c: " << c << "; out_d: " << out_d
            << "; out_h: " << out_h << "; out_w: " << out_w;

    auto* output_data = dev_ctx.template Alloc<T>(out);
    VLOG(3) << "out dims: " << out->dims()[0] << "; " << out->dims()[1] << "; "
            << out->dims()[2] << "; " << out->dims()[3] << "; "
            << out->dims()[4];

    // One work item per output voxel.
    int64_t count = n * out_d * out_h * out_w;
    auto cu_stream = dev_ctx.stream();
    backends::gpu::GpuLaunchConfig config =
        backends::gpu::GetGpuLaunchConfig1D(dev_ctx, count);

#define LAUNCH_KERNEL(INDEX_TYPE)                                         \
  GridSample3DCudaKernel<T, INDEX_TYPE>                                   \
      <<<config.block_per_grid, config.thread_per_block, 0, cu_stream>>>( \
          count,                                                          \
          c,                                                              \
          out_d,                                                          \
          out_h,                                                          \
          out_w,                                                          \
          in_d,                                                           \
          in_h,                                                           \
          in_w,                                                           \
          x.data<T>(),                                                    \
          grid.data<T>(),                                                 \
          output_data,                                                    \
          enum_mode,                                                      \
          enum_padding_mode,                                              \
          align_corners)
    if (use_int32_index) {
      LAUNCH_KERNEL(int);
    } else {
      LAUNCH_KERNEL(int64_t);
    }
#undef LAUNCH_KERNEL
  }
}

}  // namespace phi

// Register the GPU grid_sample kernel for float and double inputs.
PD_REGISTER_KERNEL(
    grid_sample, GPU, ALL_LAYOUT, phi::GridSampleKernel, float, double) {}
