#include <cstdio>

#include "container.h"
#include "cuda_fp16.h"
#include "macro.h"
#include "math_util.h"
#include "msda.h"
#include "param.h"

// Bilinear interpolation of one int8 sampling point.
//
// `loc` holds an int8-quantized sampling offset which is dequantized with
// params.qscale_offset[idx_scale] and shifted by -0.5 (pixel-center
// alignment). The four bilinear corner weights are re-quantized to int8
// (scaled by 127, round-to-nearest, saturating via cvt.rni.sat.s8.f32)
// and packed into one 32-bit word so each channel can be interpolated
// with a single __dp4a. The saturated int8 result for this point is
// inserted into byte lane `iof4` of every 32-bit word of `output`, so
// four calls with iof4 = 0..3 pack four consecutive points per word.
// Out-of-range samples return early and leave `output` untouched.
template <typename Params>
DEVICE_INLINE void bilinear_int8(
    const Params &params,
    utils::AlignedArray<int, 4 * Params::kAccessSize4B> &output,
    const int8_t *i_data, const int idx_scale, const char2 loc, int iof4) {
  // __byte_perm selector that replaces byte `iof4` of the destination word
  // with byte 0 of the freshly computed value, keeping the other bytes.
  const int selectors[4] = {0x3214, 0x3240, 0x3410, 0x4210};
  const int selector = selectors[iof4];

  // Dequantize the offset into continuous (h, w) feature-map coordinates.
  const float h = fmaf(loc.y, params.qscale_offset[idx_scale].y, -0.5f);
  const float w = fmaf(loc.x, params.qscale_offset[idx_scale].x, -0.5f);
  const int height = params.spatial_shapes[idx_scale].y;
  const int width = params.spatial_shapes[idx_scale].x;
  // Skip samples that fall completely outside the feature map.
  if (!(h > -1 && w > -1 && h < height && w < width)) {
    return;
  }

  // Integer corner coordinates and fractional bilinear weights.
  const int h_low = floorf(h);
  const int w_low = floorf(w);
  const int h_high = h_low + 1;
  const int w_high = w_low + 1;

  const float lh = h - h_low, lw = w - w_low;
  const float hh = 1 - lh, hw = 1 - lw;

  const int h_low_ptr_offset = h_low * params.stride_height(idx_scale);
  const int h_high_ptr_offset =
      h_low_ptr_offset + params.stride_height(idx_scale);
  const int w_low_ptr_offset = w_low * params.stride_width(idx_scale);
  const int w_high_ptr_offset =
      w_low_ptr_offset + params.stride_width(idx_scale);

  // Quantize the four corner weights to int8 (x127) and pack them into a
  // single word, one byte per corner, for __dp4a below.
  float w0f = hh * hw * 127.f;
  float w1f = hh * lw * 127.f;
  float w2f = lh * hw * 127.f;
  float w3f = lh * lw * 127.f;
  int w0, w1, w2, w3;
  asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(w0) : "f"(w0f));
  asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(w1) : "f"(w1f));
  asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(w2) : "f"(w2f));
  asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(w3) : "f"(w3f));
  int weight = __byte_perm(__byte_perm(w0, w1, 0x0040),
                           __byte_perm(w2, w3, 0x0040), 0x5410);

  // Vectorized loads of the four corner values (kAccessSize4B words of 4
  // int8 channels each); out-of-bounds corners contribute zero.
  utils::AlignedArray<int, Params::kAccessSize4B> a0, a1, a2, a3;

  if (h_low >= 0 && w_low >= 0) {
    const int ptr1 = (h_low_ptr_offset + w_low_ptr_offset);
    a0 = reinterpret_cast<
        const utils::AlignedArray<int, Params::kAccessSize4B> &>(i_data[ptr1]);
  } else {
    a0.fill(0);
  }

  if (h_low >= 0 && w_high <= width - 1) {
    const int ptr2 = (h_low_ptr_offset + w_high_ptr_offset);
    a1 = reinterpret_cast<
        const utils::AlignedArray<int, Params::kAccessSize4B> &>(i_data[ptr2]);
  } else {
    a1.fill(0);
  }

  if (h_high <= height - 1 && w_low >= 0) {
    const int ptr3 = (h_high_ptr_offset + w_low_ptr_offset);
    a2 = reinterpret_cast<
        const utils::AlignedArray<int, Params::kAccessSize4B> &>(i_data[ptr3]);
  } else {
    a2.fill(0);
  }

  if (h_high <= height - 1 && w_high <= width - 1) {
    const int ptr4 = (h_high_ptr_offset + w_high_ptr_offset);
    a3 = reinterpret_cast<
        const utils::AlignedArray<int, Params::kAccessSize4B> &>(i_data[ptr4]);
  } else {
    a3.fill(0);
  }

  // Undo the x127 weight quantization after each dot product.
  const float scale_area = 1.0 / 127.0;
#pragma unroll
  for (int ii = 0; ii < Params::kAccessSize4B; ii++) {
    int b0, b1, b2, b3;
    float s0, s1, s2, s3;

    // Gather byte lane 0 (channel 4*ii) of the four corner words, take the
    // int8 dot product with the packed bilinear weights, rescale, then
    // requantize and insert into lane `iof4` of the destination word.
    b0 = __byte_perm(__byte_perm(a0[ii], a1[ii], 0x0040),
                     __byte_perm(a2[ii], a3[ii], 0x0040), 0x5410);
    s0 = __dp4a(b0, weight, 0) * scale_area;
    asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(b0) : "f"(s0));
    output[4 * ii + 0] = __byte_perm(output[4 * ii + 0], b0, selector);

    // Same for byte lane 1 (channel 4*ii + 1).
    b1 = __byte_perm(__byte_perm(a0[ii], a1[ii], 0x0051),
                     __byte_perm(a2[ii], a3[ii], 0x0051), 0x5410);
    s1 = __dp4a(b1, weight, 0) * scale_area;
    asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(b1) : "f"(s1));
    output[4 * ii + 1] = __byte_perm(output[4 * ii + 1], b1, selector);

    // Byte lane 2.
    b2 = __byte_perm(__byte_perm(a0[ii], a1[ii], 0x0062),
                     __byte_perm(a2[ii], a3[ii], 0x0062), 0x5410);
    s2 = __dp4a(b2, weight, 0) * scale_area;
    asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(b2) : "f"(s2));
    output[4 * ii + 2] = __byte_perm(output[4 * ii + 2], b2, selector);

    // Byte lane 3.
    b3 = __byte_perm(__byte_perm(a0[ii], a1[ii], 0x0073),
                     __byte_perm(a2[ii], a3[ii], 0x0073), 0x5410);
    s3 = __dp4a(b3, weight, 0) * scale_area;
    asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(b3) : "f"(s3));
    output[4 * ii + 3] = __byte_perm(output[4 * ii + 3], b3, selector);
  }
}

// Multi-scale deformable attention kernel: int8 elements, NCHW32 source
// layout (selected via enable_if on Params).
//
// Grid mapping as used below: blockIdx.z = batch, blockIdx.y = packed
// group of head/channel lanes, blockIdx.x * blockDim.y + threadIdx.y =
// query. Each thread produces Params::kAccessSize int8 output channels of
// one (batch, query, head) element.
template <typename Params>
__global__
    std::enable_if_t<std::is_same<typename Params::Element, int8_t>::value &&
                     Params::kSourceLayout == Layout::NCHW32>
    msda_kernel(const Params params) {
  const int idx_batch = blockIdx.z;
  const int idx_packed = blockIdx.y;
  // First flat head*channel lane owned by this thread.
  const int idx_heads_channels =
      (idx_packed * Params::kNumThreadsContiguousCTA + int(threadIdx.x)) *
      Params::kAccessSize;

  if (idx_heads_channels >= params.NumChannels) return;

  // Split the flat lane index into (head, channel-within-head).
  int idx_heads, idx_channels;
  params.div_mod_channels_per_head(idx_heads, idx_channels, idx_heads_channels);

  const int idx_queries = blockIdx.x * blockDim.y + threadIdx.y;

  if (idx_queries >= params.NumQueries) return;

  const int idx_in_packed = threadIdx.x * Params::kAccessSize;

  // Flat (batch, query, head) index; addresses both the output element and
  // this element's run of sampling points/weights.
  int idx_NQH =
      (idx_batch * params.NumQueries + idx_queries) * params.NumHeads +
      idx_heads;

  int off_sampling = idx_NQH * params.NumPoints;

  int8_t *data_output = reinterpret_cast<int8_t *>(params.data_output) +
                        idx_NQH * params.NumChannelsPerHead + idx_channels;

  const int8_t *data_attn_weight =
      reinterpret_cast<const int8_t *>(params.data_attn_weight) + off_sampling;

  const char2 *data_sampling_offsets =
      reinterpret_cast<const char2 *>(params.data_sampling_offsets) +
      off_sampling;

  // Per-channel accumulators: int32 per scale, folded into float using the
  // per-scale input-to-output quantization factor qscale_i2o.
  utils::AlignedArray<int, Params::kAccessSize> output_i32;
  utils::AlignedArray<float, Params::kAccessSize> output_f32;
  output_f32.fill(0);
  for (int idx_scale = 0; idx_scale < params.NumScales; ++idx_scale) {
    output_i32.fill(0);
    float qscale_i2o = params.qscale_i2o[idx_scale];

    const int8_t *data_value =
        reinterpret_cast<const int8_t *>(params.data_value[idx_scale]) +
        idx_batch * params.stride_batch(idx_scale) +
        idx_packed * params.stride_packed(idx_scale) + idx_in_packed;

    // Points are consumed four at a time: one 32-bit load covers four int8
    // attention weights and one vector load covers four char2 offsets.
    // NOTE(review): this assumes NumPointsPerScale is a multiple of 4 --
    // confirm with callers.
    for (int idx_point = 0; idx_point < params.NumPointsPerScale;
         idx_point += 4) {
      utils::AlignedArray<int, Params::kAccessSize> interp;
      interp.fill(0);
      int weight = *reinterpret_cast<const int *>(data_attn_weight);
      data_attn_weight += 4;
      utils::AlignedArray<char2, 4> locs =
          *reinterpret_cast<const utils::AlignedArray<char2, 4> *>(
              data_sampling_offsets);
      data_sampling_offsets += 4;
      // Each call fills byte lane iof4 of every word of `interp`, so word k
      // ends up holding channel k's interpolated value for all four points.
      bilinear_int8<Params>(params, interp, data_value, idx_scale, locs[0], 0);
      bilinear_int8<Params>(params, interp, data_value, idx_scale, locs[1], 1);
      bilinear_int8<Params>(params, interp, data_value, idx_scale, locs[2], 2);
      bilinear_int8<Params>(params, interp, data_value, idx_scale, locs[3], 3);
#pragma unroll
      for (int ii = 0; ii < 4 * Params::kAccessSize4B; ii++) {
        // Attention-weighted sum over the four points of this group.
        output_i32[ii] = __dp4a(interp[ii], weight, output_i32[ii]);
        // On the final point group of the scale, fold the int32 partial sum
        // into the float accumulator with this scale's dequant factor.
        if (idx_point + 4 >= params.NumPointsPerScale) {
          output_f32[ii] = fmaf(qscale_i2o, output_i32[ii], output_f32[ii]);
        }
      }
    }
  }
  // Requantize to int8 (round-to-nearest, saturating) and pack four
  // channels per 32-bit word for one vectorized store.
  utils::AlignedArray<int, Params::kAccessSize4B> res;
#pragma unroll
  for (int ii = 0; ii < Params::kAccessSize4B; ii++) {
    int b0, b1, b2, b3;
    asm volatile("cvt.rni.sat.s8.f32 %0, %1;"
                 : "=r"(b0)
                 : "f"(output_f32[ii * 4]));
    asm volatile("cvt.rni.sat.s8.f32 %0, %1;"
                 : "=r"(b1)
                 : "f"(output_f32[ii * 4 + 1]));
    asm volatile("cvt.rni.sat.s8.f32 %0, %1;"
                 : "=r"(b2)
                 : "f"(output_f32[ii * 4 + 2]));
    asm volatile("cvt.rni.sat.s8.f32 %0, %1;"
                 : "=r"(b3)
                 : "f"(output_f32[ii * 4 + 3]));
    res[ii] = __byte_perm(__byte_perm(b0, b1, 0x0040),
                          __byte_perm(b2, b3, 0x0040), 0x5410);
  }
  *reinterpret_cast<utils::AlignedArray<int, Params::kAccessSize4B> *>(
      data_output) = res;
}

// Load phase of int8 bilinear interpolation for one sampling location.
//
// Dequantizes `loc`, gathers the four corner vectors into `interp_src`
// (zero-filled for out-of-bounds corners), and packs the four 127-scaled
// int8 bilinear corner weights into `interp_weight` (one byte each) for a
// later __dp4a in bilinear_int8_math(). Returns false without touching
// the outputs when the sample lies outside the feature map.
template <typename Params>
DEVICE_INLINE bool bilinear_int8_load(
    const Params &params,
    utils::Array<utils::AlignedArray<int, Params::kAccessSize4B>, 4>
        &interp_src,
    int &interp_weight, const int8_t *i_data, const int idx_scale,
    const char2 loc) {
  // Dequantize the int8 offset into continuous (h, w) coordinates.
  const float h = fmaf(loc.y, params.qscale_offset[idx_scale].y, -0.5f);
  const float w = fmaf(loc.x, params.qscale_offset[idx_scale].x, -0.5f);
  const int height = params.spatial_shapes[idx_scale].y;
  const int width = params.spatial_shapes[idx_scale].x;
  if (!(h > -1 && w > -1 && h < height && w < width)) {
    return false;
  }

  const int h_low = floorf(h);
  const int w_low = floorf(w);
  const int h_high = h_low + 1;
  const int w_high = w_low + 1;

  const float lh = h - h_low;
  const float lw = w - w_low;
  const float hh = 1 - lh;
  const float hw = 1 - lw;

  // Quantize the corner weights to int8 (x127, round-to-nearest,
  // saturating) and pack them one byte per corner.
  const float wf[4] = {hh * hw * 127.f, hh * lw * 127.f, lh * hw * 127.f,
                       lh * lw * 127.f};
  int wq[4];
#pragma unroll
  for (int k = 0; k < 4; ++k) {
    asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(wq[k]) : "f"(wf[k]));
  }
  interp_weight = __byte_perm(__byte_perm(wq[0], wq[1], 0x0040),
                              __byte_perm(wq[2], wq[3], 0x0040), 0x5410);

  // Per-corner offsets and validity; corner index c maps to
  // (row = c >> 1, col = c & 1) with 0 = low, 1 = high.
  const int stride_h = params.stride_height(idx_scale);
  const int stride_w = params.stride_width(idx_scale);
  const int row_off[2] = {h_low * stride_h, h_low * stride_h + stride_h};
  const int col_off[2] = {w_low * stride_w, w_low * stride_w + stride_w};
  const bool row_ok[2] = {h_low >= 0, h_high <= height - 1};
  const bool col_ok[2] = {w_low >= 0, w_high <= width - 1};

#pragma unroll
  for (int c = 0; c < 4; ++c) {
    const int r = c >> 1;
    const int q = c & 1;
    if (row_ok[r] && col_ok[q]) {
      interp_src[c] = reinterpret_cast<
          const utils::AlignedArray<int, Params::kAccessSize4B> &>(
          i_data[row_off[r] + col_off[q]]);
    } else {
      interp_src[c].fill(0);
    }
  }
  return true;
}

// Math phase of int8 bilinear interpolation for one sampling point.
//
// Consumes corner data/weights produced by bilinear_int8_load(): for every
// byte lane j of every corner word, gathers the four corner bytes into one
// word, takes the int8 dot product with the packed bilinear weights,
// rescales by 1/127, and writes the saturated int8 result into byte lane
// `iof4` of output[4*ii + j].
template <int AccessSize4B>
DEVICE_INLINE void bilinear_int8_math(
    utils::Array<int, 4 * AccessSize4B> &output,
    utils::Array<utils::AlignedArray<int, AccessSize4B>, 4> &interp_src,
    int &interp_weight, const int iof4) {
  // Routes the freshly computed byte into lane `iof4` of the destination
  // word while preserving the other three lanes.
  const int selectors[4] = {0x3214, 0x3240, 0x3410, 0x4210};
  const int selector = selectors[iof4];
  // Lane extractors: lane_sel[j] pulls byte j of each corner word.
  const int lane_sel[4] = {0x0040, 0x0051, 0x0062, 0x0073};
  // Undo the x127 weight quantization after each dot product.
  const float scale_area = 1.0 / 127.0;
#pragma unroll
  for (int ii = 0; ii < AccessSize4B; ii++) {
#pragma unroll
    for (int j = 0; j < 4; ++j) {
      // One word holding byte j (channel 4*ii + j) of all four corners.
      int b = __byte_perm(
          __byte_perm(interp_src[0][ii], interp_src[1][ii], lane_sel[j]),
          __byte_perm(interp_src[2][ii], interp_src[3][ii], lane_sel[j]),
          0x5410);
      // int8 dot product against the packed bilinear weights, rescaled.
      const float s = __dp4a(b, interp_weight, 0) * scale_area;
      // Requantize (round-to-nearest, saturating) back to int8.
      asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(b) : "f"(s));
      output[4 * ii + j] = __byte_perm(output[4 * ii + j], b, selector);
    }
  }
}

// Multi-scale deformable attention kernel: int8 elements, NHWC source
// layout, with a `Stages`-deep software pipeline.
//
// Pipeline distances (Stages == 3): sampling-offset fetches run
// 2*(Stages-1) = 4 points ahead of the math, and interpolation loads
// (bilinear_int8_load) run Stages-1 = 2 points ahead; the math
// (bilinear_int8_math) works on the current point. `idx_point`/`idx_scale`
// track the *load* side, so scale bookkeeping below is offset by the
// pipeline depth relative to the math.
template <typename Params>
__global__
    std::enable_if_t<std::is_same<typename Params::Element, int8_t>::value &&
                     Params::kSourceLayout == Layout::NHWC>
    msda_kernel(const Params params) {
  const int idx_batch = blockIdx.z;
  const int idx_packed = blockIdx.y;
  // First flat head*channel lane owned by this thread.
  const int idx_heads_channels =
      (idx_packed * Params::kNumThreadsContiguousCTA + int(threadIdx.x)) *
      Params::kAccessSize;

  if (idx_heads_channels >= params.NumChannels) return;

  int idx_heads, idx_channels;
  params.div_mod_channels_per_head(idx_heads, idx_channels, idx_heads_channels);

  const int idx_queries = blockIdx.x * blockDim.y + threadIdx.y;

  if (idx_queries >= params.NumQueries) return;
  const int idx_in_packed = threadIdx.x * Params::kAccessSize;

  // Flat (batch, query, head) index for output and sampling data.
  int idx_NQH =
      (idx_batch * params.NumQueries + idx_queries) * params.NumHeads +
      idx_heads;

  int off_sampling = idx_NQH * params.NumPoints;

  int8_t *data_output = reinterpret_cast<int8_t *>(params.data_output) +
                        idx_NQH * params.NumChannelsPerHead + idx_channels;

  const int8_t *data_attn_weight =
      reinterpret_cast<const int8_t *>(params.data_attn_weight) + off_sampling;

  const char2 *data_sampling_offsets =
      reinterpret_cast<const char2 *>(params.data_sampling_offsets) +
      off_sampling;

  // int32 accumulator per channel, folded into float once per scale.
  utils::AlignedArray<int, Params::kAccessSize> output_i32;
  utils::AlignedArray<float, Params::kAccessSize> output_f32;
  output_i32.fill(0);
  output_f32.fill(0);

  static constexpr int Stages = 3;

  // Rotating pipeline buffers, indexed modulo Stages.
  char2 locs[Stages];
  bool locs_v[Stages];  // per-stage validity (false = out-of-bounds sample)
  int interp_weight[Stages];
  utils::Array<utils::AlignedArray<int, Params::kAccessSize4B>, 4>
      interp_source[Stages];
  // int weights[Stages];
  const int8_t *data_value =
      reinterpret_cast<const int8_t *>(params.data_value[0]) +
      idx_batch * params.stride_batch(0) +
      idx_packed * params.stride_packed(0) + idx_in_packed;

  ////////////////// intro ///////////////////

  // Prefetch the first Stages-1 sampling offsets.
#pragma unroll
  for (int stg = 0; stg < Stages - 1; stg++) {
    locs[stg] = *data_sampling_offsets++;
  }

  int idx_point = 0;
  int idx_scale = 0;

  // Warm up: issue the first Stages-1 interpolation loads while fetching
  // the offsets two points ahead.
#pragma unroll
  for (int stg = 0; stg < Stages - 1; stg++) {
    int stg_loc = (stg + Stages - 1) % Stages;
    locs[stg_loc] = *data_sampling_offsets++;

    locs_v[stg] =
        bilinear_int8_load(params, interp_source[stg], interp_weight[stg],
                           data_value, idx_scale, locs[stg]);
    ++idx_point;
    // Load side crossed into the next scale: rebase the value pointer.
    if (idx_point == params.NumPointsPerScale) {
      idx_point = 0;
      ++idx_scale;
      data_value =
          reinterpret_cast<const int8_t *>(params.data_value[idx_scale]) +
          idx_batch * params.stride_batch(idx_scale) +
          idx_packed * params.stride_packed(idx_scale) + idx_in_packed;
    }
  }

  ///////////////// main loop ///////////////

  // idx_loop counts math-side points remaining; each stg iteration handles
  // four points (one 32-bit load = four int8 attention weights).
  // NOTE(review): assumes NumPointsPerScale is a multiple of 4 -- confirm.
  int idx_loop = params.NumPoints;
  while (true) {
#pragma unroll
    for (int stg = 0; stg < Stages; stg++) {
      int weight = *reinterpret_cast<const int *>(data_attn_weight);
      data_attn_weight += 4;

      utils::AlignedArray<int, Params::kAccessSize> output;
      output.fill(0);
#pragma unroll
      for (int iof4 = 0; iof4 < 4; iof4++) {
        // 4*stg + iof4 is the absolute point phase within this outer
        // iteration; modulo Stages it selects the rotating buffer slot.
        // Offset fetch: 2*(Stages-1) points ahead of the math.
        if (idx_loop > (2 * Stages - 2)) {
          int stg_loc = (4 * stg + iof4 + 2 * Stages - 2) % Stages;
          locs[stg_loc] = *data_sampling_offsets++;
        }
        // Interpolation load: Stages-1 points ahead of the math.
        if (idx_loop > (Stages - 1)) {
          int stg_interp = (4 * stg + iof4 + Stages - 1) % Stages;
          locs_v[stg_interp] = bilinear_int8_load(
              params, interp_source[stg_interp], interp_weight[stg_interp],
              data_value, idx_scale, locs[stg_interp]);
        }
        ++idx_point;
        // Load side crossed a scale boundary.
        // NOTE(review): on the final scale this increments idx_scale to
        // NumScales and reads params.data_value[NumScales] even though
        // the pointer is never dereferenced -- confirm the array has a
        // slack entry or that this read is benign.
        if (idx_point == params.NumPointsPerScale) {
          idx_point = 0;
          ++idx_scale;
          data_value =
              reinterpret_cast<const int8_t *>(params.data_value[idx_scale]) +
              idx_batch * params.stride_batch(idx_scale) +
              idx_packed * params.stride_packed(idx_scale) + idx_in_packed;
        }
        // Math on the current point; invalid (out-of-bounds) samples
        // contribute nothing.
        int stg_math = (4 * stg + iof4) % Stages;
        if (locs_v[stg_math])
          bilinear_int8_math(output, interp_source[stg_math],
                             interp_weight[stg_math], iof4);
        --idx_loop;
      }
      // Attention-weighted sum of the four points just interpolated.
#pragma unroll
      for (int ii = 0; ii < Params::kAccessSize; ii++) {
        output_i32[ii] = __dp4a(output[ii], weight, output_i32[ii]);
      }
      // idx_point tracks the load side, which runs Stages-1 points ahead;
      // when it equals Stages-1 the math side has just completed a scale,
      // so fold the int32 accumulator into float. qscale_i2o is indexed
      // with idx_scale - 1 because the load side already advanced.
      if (idx_point == Stages - 1) {
        float qscale_i2o = params.qscale_i2o[idx_scale - 1];
#pragma unroll
        for (int ii = 0; ii < Params::kAccessSize; ii++) {
          output_f32[ii] = fmaf(qscale_i2o, output_i32[ii], output_f32[ii]);
          output_i32[ii] = 0;
        }
      }
      if (idx_loop == 0) break;
    }
    if (idx_loop == 0) break;
  }

  // Requantize to int8 and pack four channels per word for one store.
  utils::AlignedArray<int, Params::kAccessSize4B> res;
#pragma unroll
  for (int ii = 0; ii < Params::kAccessSize4B; ii++) {
    int b0, b1, b2, b3;
    asm volatile("cvt.rni.sat.s8.f32 %0, %1;"
                 : "=r"(b0)
                 : "f"(output_f32[ii * 4]));
    asm volatile("cvt.rni.sat.s8.f32 %0, %1;"
                 : "=r"(b1)
                 : "f"(output_f32[ii * 4 + 1]));
    asm volatile("cvt.rni.sat.s8.f32 %0, %1;"
                 : "=r"(b2)
                 : "f"(output_f32[ii * 4 + 2]));
    asm volatile("cvt.rni.sat.s8.f32 %0, %1;"
                 : "=r"(b3)
                 : "f"(output_f32[ii * 4 + 3]));
    res[ii] = __byte_perm(__byte_perm(b0, b1, 0x0040),
                          __byte_perm(b2, b3, 0x0040), 0x5410);
  }
  *reinterpret_cast<utils::AlignedArray<int, Params::kAccessSize4B> *>(
      data_output) = res;
}

// Load phase of half-precision bilinear interpolation for one sampling
// location.
//
// `loc` is a half2 coordinate multiplied by the feature-map extent
// (presumably normalized to [0, 1] -- confirm with the offset producer),
// then shifted by -0.5 for pixel-center alignment. The four corner
// vectors go to `interp_src` (zero for out-of-bounds corners) and the
// four bilinear weights go to `interp_weight`, all computed in half
// precision. Returns false when the sample is out of range.
template <typename Params>
DEVICE_INLINE bool bilinear_half2_load(
    const Params &params,
    utils::Array<utils::AlignedArray<int, Params::kAccessSize4B>, 4>
        &interp_src,
    utils::Array<half, 4> &interp_weight, const half *i_data,
    const int idx_scale, const half2 loc) {
  // wh = {w, h} in continuous pixel coordinates.
  const half2 wh =
      __hfma2(loc, params.spatial_shapes_half2[idx_scale], half2{-0.5f, -0.5f});
  // Half-precision bounds test: both lanes must satisfy -1 < v < extent.
  if (!(__hbgt2(wh, half2{-1, -1}) &&
        __hblt2(wh, params.spatial_shapes_half2[idx_scale]))) {
    return false;
  }

  int height = params.spatial_shapes[idx_scale].y;
  int width = params.spatial_shapes[idx_scale].x;

  half2 wh_low = h2floor(wh);
  int w_low = wh_low.x;
  int w_high = w_low + 1;
  int h_low = wh_low.y;
  int h_high = h_low + 1;

  // Fractional parts: lw_lh_ = {lw, lh}, hw_hh_ = {hw, hh}.
  half2 lw_lh_ = wh - wh_low;
  half2 hw_hh_ = half2{1.0f, 1.0f} - lw_lh_;
  int lw_lh = reinterpret_cast<int &>(lw_lh_);
  int hw_hh = reinterpret_cast<int &>(hw_hh_);
  // Byte-shuffle the packed halves so two half2 multiplies produce all
  // four corner weights: hh_lw = {hh, lw}, lh_hw = {lh, hw}.
  int hh_lw = __byte_perm(lw_lh, hw_hh, 0x1076);
  int lh_hw = __byte_perm(lw_lh, hw_hh, 0x5432);
  // {hw, hh} * {hh, lw} = {hw*hh, hh*lw}.
  half2 hhhw_hhlw =
      reinterpret_cast<half2 &>(hw_hh) * reinterpret_cast<half2 &>(hh_lw);
  // {lw, lh} * {lh, hw} = {lw*lh, lh*hw}.
  half2 lhlw_lhhw =
      reinterpret_cast<half2 &>(lw_lh) * reinterpret_cast<half2 &>(lh_hw);

  interp_weight[0] = hhhw_hhlw.x;  // hh * hw;
  interp_weight[1] = hhhw_hhlw.y;  // hh * lw;
  interp_weight[2] = lhlw_lhhw.y;  // lh * hw;
  interp_weight[3] = lhlw_lhhw.x;  // lh * lw;

  const int h_low_ptr_offset = h_low * params.stride_height(idx_scale);
  const int h_high_ptr_offset =
      h_low_ptr_offset + params.stride_height(idx_scale);
  const int w_low_ptr_offset = w_low * params.stride_width(idx_scale);
  const int w_high_ptr_offset =
      w_low_ptr_offset + params.stride_width(idx_scale);

  // Vectorized corner loads; out-of-bounds corners contribute zero.
  if (h_low >= 0 && w_low >= 0) {
    const int ptr1 = (h_low_ptr_offset + w_low_ptr_offset);
    interp_src[0] = reinterpret_cast<
        const utils::AlignedArray<int, Params::kAccessSize4B> &>(i_data[ptr1]);
  } else {
    interp_src[0].fill(0);
  }

  if (h_low >= 0 && w_high <= width - 1) {
    const int ptr2 = (h_low_ptr_offset + w_high_ptr_offset);
    interp_src[1] = reinterpret_cast<
        const utils::AlignedArray<int, Params::kAccessSize4B> &>(i_data[ptr2]);
  } else {
    interp_src[1].fill(0);
  }

  if (h_high <= height - 1 && w_low >= 0) {
    const int ptr3 = (h_high_ptr_offset + w_low_ptr_offset);
    interp_src[2] = reinterpret_cast<
        const utils::AlignedArray<int, Params::kAccessSize4B> &>(i_data[ptr3]);
  } else {
    interp_src[2].fill(0);
  }

  if (h_high <= height - 1 && w_high <= width - 1) {
    const int ptr4 = (h_high_ptr_offset + w_high_ptr_offset);
    interp_src[3] = reinterpret_cast<
        const utils::AlignedArray<int, Params::kAccessSize4B> &>(i_data[ptr4]);
  } else {
    interp_src[3].fill(0);
  }
  return true;
}

// Math phase of half-precision bilinear interpolation: combines the four
// corner vectors with their bilinear weights, scales by the attention
// weight, and accumulates into `output` (half2 pairs stored in ints).
template <int AccessSize4B>
DEVICE_INLINE void bilinear_half2_math(
    utils::AlignedArray<int, AccessSize4B> &output,
    utils::Array<utils::AlignedArray<int, AccessSize4B>, 4> &interp_src,
    utils::Array<half, 4> &interp_weight, half attn_weight) {
#pragma unroll
  for (int ii = 0; ii < AccessSize4B; ii++) {
    // acc = sum over the four corners of src[k] * weight[k].
    half2 acc = __hfma2(reinterpret_cast<half2 &>(interp_src[0][ii]),
                        __half2half2(interp_weight[0]), half2{0, 0});
#pragma unroll
    for (int k = 1; k < 4; ++k) {
      acc = __hfma2(reinterpret_cast<half2 &>(interp_src[k][ii]),
                    __half2half2(interp_weight[k]), acc);
    }
    // Fold the attention-weighted sample into the running output.
    half2 &dst = reinterpret_cast<half2 &>(output[ii]);
    dst = __hfma2(acc, __half2half2(attn_weight), dst);
  }
}

// Multi-scale deformable attention kernel: half elements (any source
// layout -- the enable_if dispatches on element type only), with a
// `Stages`-deep software pipeline.
//
// Pipeline distances (Stages == 3): sampling-offset fetches run
// 2*(Stages-1) = 2 points ahead of the math per slot arithmetic below,
// interpolation loads run Stages-1 = 1 point ahead, and the math consumes
// the current slot. Unlike the int8 paths there is no per-scale
// requantization: accumulation happens directly in half2.
template <typename Params>
__global__ std::enable_if_t<std::is_same<typename Params::Element, half>::value>
msda_kernel(const Params params) {
  const int idx_batch = blockIdx.z;
  const int idx_packed = blockIdx.y;
  // First flat head*channel lane owned by this thread.
  const int idx_heads_channels =
      (idx_packed * Params::kNumThreadsContiguousCTA + int(threadIdx.x)) *
      Params::kAccessSize;

  if (idx_heads_channels >= params.NumChannels) return;

  int idx_heads, idx_channels;
  params.div_mod_channels_per_head(idx_heads, idx_channels, idx_heads_channels);

  const int idx_queries = blockIdx.x * blockDim.y + threadIdx.y;

  if (idx_queries >= params.NumQueries) return;

  const int idx_in_packed = threadIdx.x * Params::kAccessSize;

  // Flat (batch, query, head) index for output and sampling data.
  int idx_NQH =
      (idx_batch * params.NumQueries + idx_queries) * params.NumHeads +
      idx_heads;

  int off_sampling = idx_NQH * params.NumPoints;

  half *data_output = reinterpret_cast<half *>(params.data_output) +
                      idx_NQH * params.NumChannelsPerHead + idx_channels;

  const half *data_attn_weight =
      reinterpret_cast<const half *>(params.data_attn_weight) + off_sampling;

  const half2 *data_sampling_offsets =
      reinterpret_cast<const half2 *>(params.data_sampling_offsets) +
      off_sampling;

  // Output accumulator: kAccessSize4B half2 pairs stored as ints.
  utils::AlignedArray<int, Params::kAccessSize4B> output;
  output.fill(0);

  static constexpr int Stages = 3;

  // Rotating pipeline buffers, indexed modulo Stages.
  half2 locs[Stages];
  bool locs_v[Stages];  // per-stage validity (false = out-of-bounds sample)
  utils::Array<half, 4> interp_weight[Stages];
  utils::Array<utils::AlignedArray<int, Params::kAccessSize4B>, 4>
      interp_source[Stages];
  half weights[Stages];

  const half *data_value =
      reinterpret_cast<const half *>(params.data_value[0]) +
      idx_batch * params.stride_batch(0) +
      idx_packed * params.stride_packed(0) + idx_in_packed;

////////////////// intro ///////////////////
// Prefetch the first Stages-1 sampling offsets.
#pragma unroll
  for (int stg = 0; stg < Stages - 1; stg++) {
    locs[stg] = *data_sampling_offsets++;
  }

  int idx_point = 0;
  int idx_scale = 0;

  // Warm up: issue the first Stages-1 interpolation loads while fetching
  // offsets further ahead.
#pragma unroll
  for (int stg = 0; stg < Stages - 1; stg++) {
    int stg_loc = (stg + Stages - 1) % Stages;
    locs[stg_loc] = *data_sampling_offsets++;

    weights[stg] = *data_attn_weight++;
    locs_v[stg] =
        bilinear_half2_load(params, interp_source[stg], interp_weight[stg],
                            data_value, idx_scale, locs[stg]);
    ++idx_point;
    // Load side crossed into the next scale: rebase the value pointer.
    if (idx_point == params.NumPointsPerScale) {
      idx_point = 0;
      ++idx_scale;
      data_value =
          reinterpret_cast<const half *>(params.data_value[idx_scale]) +
          idx_batch * params.stride_batch(idx_scale) +
          idx_packed * params.stride_packed(idx_scale) + idx_in_packed;
    }
  }

  ///////////////// main loop ////////////////

  // idx_loop counts math-side points remaining; one point per stg.
  int idx_loop = params.NumPoints;
  while (true) {
#pragma unroll
    for (int stg = 0; stg < Stages; stg++) {
      // Offset fetch for the point two stages ahead.
      if (idx_loop > (2 * Stages - 2)) {
        int stg_loc = (stg + 2 * Stages - 2) % Stages;
        locs[stg_loc] = *data_sampling_offsets++;
      }
      // Interpolation load (and attention weight) one stage ahead.
      if (idx_loop > (Stages - 1)) {
        int stg_interp = (stg + Stages - 1) % Stages;
        weights[stg_interp] = *data_attn_weight++;
        locs_v[stg_interp] = bilinear_half2_load(
            params, interp_source[stg_interp], interp_weight[stg_interp],
            data_value, idx_scale, locs[stg_interp]);
      }
      ++idx_point;
      // Load side crossed a scale boundary.
      // NOTE(review): on the final scale this increments idx_scale to
      // NumScales and reads params.data_value[NumScales] even though the
      // pointer is never dereferenced -- confirm this read is benign.
      if (idx_point == params.NumPointsPerScale) {
        idx_point = 0;
        ++idx_scale;
        data_value =
            reinterpret_cast<const half *>(params.data_value[idx_scale]) +
            idx_batch * params.stride_batch(idx_scale) +
            idx_packed * params.stride_packed(idx_scale) + idx_in_packed;
      }
      // Math on the current stage; invalid samples contribute nothing.
      if (locs_v[stg])
        bilinear_half2_math(output, interp_source[stg], interp_weight[stg],
                            weights[stg]);
      --idx_loop;
      if (idx_loop == 0) break;
    }
    if (idx_loop == 0) break;
  }
  // Single vectorized store of the packed half2 result.
  *reinterpret_cast<utils::AlignedArray<int, Params::kAccessSize4B> *>(
      data_output) = output;
}

// Host-side launcher: builds the kernel Params and launches the
// msda_kernel overload selected by (Element, SourceLayout).
//
// Returns 0 on success; on a launch-configuration error it prints the
// CUDA error string to stderr and returns the cudaError_t value as an
// int (previously the error was printed to stdout but the function still
// reported success, silently swallowing the failure). Note this only
// catches launch errors via cudaGetLastError(); asynchronous execution
// errors surface at the caller's next synchronizing CUDA call.
template <typename Element, Layout SourceLayout, int AccessSize>
int msda(cudaStream_t stream, void *data_output,
         const void **data_multiscale_value, const void *data_sampling_offsets,
         const void *data_attn_weight, const int2 *multiscale_spatial_shapes,
         const int BatchSize, const int NumQueries, const int NumHeads,
         const int NumScales, const int NumPointsPerScale,
         const int NumChannelsPerHead, const float *multiscale_qscale_value,
         const float qscale_offset, const float qscale_weight,
         const float qscale_out) {
  Params<Element, SourceLayout, AccessSize> params(
      data_output, data_multiscale_value, data_sampling_offsets,
      data_attn_weight, multiscale_spatial_shapes, BatchSize, NumQueries,
      NumHeads, NumScales, NumPointsPerScale, NumChannelsPerHead,
      multiscale_qscale_value, qscale_offset, qscale_weight, qscale_out);
  msda_kernel<<<params.grid_size(), params.block_size(), 0, stream>>>(params);
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "msda: %s\n", cudaGetErrorString(err));
    return static_cast<int>(err);  // non-zero on failure
  }
  return 0;
}

// Explicit instantiation of the host launcher for each supported
// (element type, source layout, per-thread access size) combination.
#define SPECIALIZATION(DTYPE, LAYOUT, CA)                                    \
  template int msda<DTYPE, LAYOUT, CA>(                                      \
      cudaStream_t stream, void *data_output,                                \
      const void **data_multiscale_value, const void *data_sampling_offsets, \
      const void *data_attn_weight, const int2 *multiscale_spatial_shapes,   \
      const int BatchSize, const int NumQueries, const int NumHeads,         \
      const int NumScales, const int NumPointsPerScale,                      \
      const int NumChannelsPerHead, const float *multiscale_qscale_value,    \
      const float qscale_offset, const float qscale_weight,                  \
      const float qscale_out);

// int8 paths: NCHW32 and NHWC kernels, 16/8/4 channels per thread.
SPECIALIZATION(int8_t, Layout::NCHW32, 16)
SPECIALIZATION(int8_t, Layout::NCHW32, 8)
SPECIALIZATION(int8_t, Layout::NCHW32, 4)
SPECIALIZATION(int8_t, Layout::NHWC, 16)
SPECIALIZATION(int8_t, Layout::NHWC, 8)
SPECIALIZATION(int8_t, Layout::NHWC, 4)

// half paths: the half msda_kernel dispatches on element type only, so
// all four layouts share the same kernel body.
SPECIALIZATION(half, Layout::NCHW32, 8)
SPECIALIZATION(half, Layout::NCHW32, 4)
SPECIALIZATION(half, Layout::NHWC, 8)
SPECIALIZATION(half, Layout::NHWC, 4)
SPECIALIZATION(half, Layout::CONCAT_NCHW32, 8)
SPECIALIZATION(half, Layout::CONCAT_NCHW32, 4)
SPECIALIZATION(half, Layout::CONCAT_NHWC, 8)
SPECIALIZATION(half, Layout::CONCAT_NHWC, 4)
