#include <c10/cuda/CUDAStream.h>
#include <torch/extension.h>

#include "cuda_fp16.h"
#include "msda.h"

// Validates that `tensor` lives on a CUDA device and is contiguous in
// memory; throws std::runtime_error naming the offending tensor otherwise.
inline void check_cuda_contiguous(const torch::Tensor& tensor,
                                  const std::string& name) {
  if (!tensor.is_cuda())
    throw std::runtime_error(name + " needs to be on cuda!");
  if (!tensor.is_contiguous())
    throw std::runtime_error(name + " needs to be contiguous!");
}

// Checks a tensor and reports it by its source-level expression text.
// Expands to a plain expression (no trailing semicolon) so `MSDA_CHECK(t);`
// is exactly one statement — the previous trailing `;` in the expansion
// produced a double semicolon at every call site and would break an
// unbraced `if (...) MSDA_CHECK(t); else ...`.
#define MSDA_CHECK(T) check_cuda_contiguous((T), (#T))

// Multi-scale deformable attention (MSDA) dispatcher.
//
// Validates the inputs, reformats the per-scale value tensors into the
// memory layout expected by the templated `msda` kernel, selects a kernel
// instantiation by scalar type / layout / channel alignment, and launches
// it on the current CUDA stream.
//
// Arguments:
//   multiscale_value         per-scale value tensors. For CONCAT_* layouts
//                            only element 0 is read (presumably all scales
//                            are concatenated into it — spatial extents then
//                            come from `multiscale_shapes`; verify with the
//                            kernel's contract).
//   sampling_offsets         dims 0..4 are read as
//                            [batch, query, head, scale, points_per_scale].
//   attn_weight              attention weights, forwarded to the kernel.
//   multiscale_shapes        (H, W) per scale; only used by CONCAT_* layouts.
//   multiscale_qscale_value  per-scale quantization scales for the values.
//   qscale_offset / qscale_weight / qscale_out
//                            quantization scales forwarded to the kernel.
//   value_layout             memory layout of `multiscale_value`.
//
// Returns a [batch, query, head, channels_per_head] tensor with the same
// options (dtype/device) as multiscale_value[0].
//
// Throws std::runtime_error on unsupported dtypes or invalid shapes/sizes.
torch::Tensor msda_ext(
    const std::vector<torch::Tensor>& multiscale_value,
    const torch::Tensor& sampling_offsets, const torch::Tensor& attn_weight,
    const std::vector<std::tuple<int, int>>& multiscale_shapes,
    const std::vector<float>& multiscale_qscale_value,
    const float qscale_offset, const float qscale_weight,
    const float qscale_out, const Layout value_layout) {
  // Capacity of the fixed-size per-scale staging arrays below. Exceeding it
  // previously overflowed the stack silently, so it is now checked.
  constexpr int kMaxScales = 32;

  auto stream = c10::cuda::getCurrentCUDAStream();

  MSDA_CHECK(sampling_offsets);
  MSDA_CHECK(attn_weight);

  if (multiscale_value.empty())
    throw std::runtime_error("multiscale_value must not be empty");

  at::ScalarType _st = ::detail::scalar_type(multiscale_value[0].scalar_type());

  const int batch_size = sampling_offsets.size(0);
  const int num_queries = sampling_offsets.size(1);
  const int num_heads = sampling_offsets.size(2);
  const int num_scales = sampling_offsets.size(3);
  const int num_points_per_scale = sampling_offsets.size(4);
  int num_all_channels;

  if (num_scales > kMaxScales)
    throw std::runtime_error(
        "num_scales=" + std::to_string(num_scales) +
        " exceeds the supported maximum of " + std::to_string(kMaxScales));
  if (static_cast<int>(multiscale_qscale_value.size()) < num_scales)
    throw std::runtime_error(
        "multiscale_qscale_value has " +
        std::to_string(multiscale_qscale_value.size()) +
        " entries but num_scales=" + std::to_string(num_scales));
  const bool is_concat_layout = value_layout == Layout::CONCAT_NHWC ||
                                value_layout == Layout::CONCAT_NCHW32;
  if (!is_concat_layout &&
      static_cast<int>(multiscale_value.size()) < num_scales)
    throw std::runtime_error(
        "multiscale_value has " + std::to_string(multiscale_value.size()) +
        " tensors but num_scales=" + std::to_string(num_scales));

  // Channel count lives in the last dim for NHWC-style layouts and in dim 1
  // for NCHW-style layouts.
  switch (value_layout) {
    case Layout::NHWC:
    case Layout::CONCAT_NHWC:
      num_all_channels = multiscale_value[0].size(-1);
      break;
    case Layout::NCHW32:
    case Layout::CONCAT_NCHW32:
    default:  // NCHWx
      num_all_channels = multiscale_value[0].size(1);
      break;
  }
  if (num_all_channels % num_heads)
    throw std::runtime_error(
        "input channels=" + std::to_string(num_all_channels) +
        " must be multiples of heads=" + std::to_string(num_heads));

  const int num_channels_per_head = num_all_channels / num_heads;

  torch::Tensor output = multiscale_value[0].new_empty(
      {batch_size, num_queries, num_heads, num_channels_per_head});
  // Per-scale device pointers and (H, W)-style extents handed to the kernel.
  const void* data_multiscale_value[kMaxScales];
  int2 multiscale_spatial_shapes[kMaxScales];
  void* data_output = output.data_ptr();
  const void* data_sampling_offsets = sampling_offsets.data_ptr();
  const void* data_attn_weight = attn_weight.data_ptr();

  // Keeps the reformatted (padded/permuted) tensors alive until the kernel
  // launch has consumed their device pointers.
  std::vector<torch::Tensor> multiscale_value_reformat;

  // NCHW32 packs 32 channels into the innermost dim; NHWC needs no packing.
  int channel_pack;
  switch (value_layout) {
    case Layout::NCHW32:
    case Layout::CONCAT_NCHW32:
      channel_pack = 32;
      break;
    default:  // NHWC
      channel_pack = 1;
      break;
  }

  for (int s = 0; s < num_scales; s++) {
    switch (value_layout) {
      case Layout::CONCAT_NHWC: {
        // All scales share the single concatenated tensor; spatial extents
        // come from the caller-provided `multiscale_shapes`.
        multiscale_spatial_shapes[s] =
            make_int2(std::get<0>(multiscale_shapes[s]),
                      std::get<1>(multiscale_shapes[s]));
        if (s == 0) {
          MSDA_CHECK(multiscale_value[s]);
          data_multiscale_value[s] = multiscale_value[s].data_ptr();
        }
        break;
      }
      case Layout::NHWC: {
        MSDA_CHECK(multiscale_value[s]);
        // NHWC: dims 1 and 2 are the spatial extents.
        multiscale_spatial_shapes[s] =
            make_int2(multiscale_value[s].size(1), multiscale_value[s].size(2));
        data_multiscale_value[s] = multiscale_value[s].data_ptr();
        break;
      }
      case Layout::CONCAT_NCHW32: {
        multiscale_spatial_shapes[s] =
            make_int2(std::get<0>(multiscale_shapes[s]),
                      std::get<1>(multiscale_shapes[s]));
        if (s == 0) {
          MSDA_CHECK(multiscale_value[s]);
          // Zero-pad the channel dim up to a multiple of channel_pack, then
          // move the packed group to the innermost position:
          // [N, C, L] -> [N, C/pack, pack, L] -> [N, C/pack, L, pack].
          int padding =
              (channel_pack - (num_all_channels % channel_pack)) % channel_pack;
          torch::Tensor reformat;
          if (padding > 0) {
            reformat = torch::pad(multiscale_value[s], {0, 0, 0, padding},
                                  "constant", 0);
          } else {
            reformat = multiscale_value[s];
          }
          reformat = reformat.reshape(
              {reformat.size(0), -1, channel_pack, reformat.size(2)});
          reformat = reformat.permute({0, 1, 3, 2}).contiguous();
          multiscale_value_reformat.push_back(reformat);
          data_multiscale_value[s] = reformat.data_ptr();
        }
        break;
      }
      default: {  // NCHWx
        MSDA_CHECK(multiscale_value[s]);
        // NCHW: dims 2 and 3 are the spatial extents.
        multiscale_spatial_shapes[s] =
            make_int2(multiscale_value[s].size(2), multiscale_value[s].size(3));
        // Same channel packing as above, with explicit H and W dims:
        // [N, C, H, W] -> [N, C/pack, pack, H, W] -> [N, C/pack, H, W, pack].
        int padding =
            (channel_pack - (num_all_channels % channel_pack)) % channel_pack;
        torch::Tensor reformat;
        if (padding > 0) {
          reformat = torch::pad(multiscale_value[s], {0, 0, 0, 0, 0, padding},
                                "constant", 0);
        } else {
          reformat = multiscale_value[s];
        }
        reformat = reformat.reshape({reformat.size(0), -1, channel_pack,
                                     reformat.size(2), reformat.size(3)});
        reformat = reformat.permute({0, 1, 3, 4, 2}).contiguous();
        multiscale_value_reformat.push_back(reformat);
        data_multiscale_value[s] = reformat.data_ptr();
        break;
      }
    }
  }

  // Kernel selection: the last template argument is the vector width, chosen
  // from the largest alignment that divides num_channels_per_head.
  switch (_st) {
    case at::ScalarType::Char: {  // int8 (quantized) path
      if (num_points_per_scale % 4)
        throw std::runtime_error("number of points per scale=" +
                                 std::to_string(num_points_per_scale) +
                                 " must be multiples of 4");
      if (num_channels_per_head % 4)
        throw std::runtime_error("number of channels per head=" +
                                 std::to_string(num_channels_per_head) +
                                 " must be multiples of 4");
      decltype(&msda<int8_t, Layout::NCHW32, 16>) func;
      switch (value_layout) {
        case Layout::NHWC: {
          if (num_channels_per_head % 16 == 0) {
            func = &msda<int8_t, Layout::NHWC, 16>;
          } else if (num_channels_per_head % 8 == 0) {
            func = &msda<int8_t, Layout::NHWC, 8>;
          } else {
            func = &msda<int8_t, Layout::NHWC, 4>;
          }
          break;
        }
        default: {  // NCHW32
          if (num_channels_per_head % 16 == 0) {
            func = &msda<int8_t, Layout::NCHW32, 16>;
          } else if (num_channels_per_head % 8 == 0) {
            func = &msda<int8_t, Layout::NCHW32, 8>;
          } else {
            func = &msda<int8_t, Layout::NCHW32, 4>;
          }
          break;
        }
      }
      func(stream, data_output, data_multiscale_value, data_sampling_offsets,
           data_attn_weight, multiscale_spatial_shapes, batch_size, num_queries,
           num_heads, num_scales, num_points_per_scale, num_channels_per_head,
           multiscale_qscale_value.data(), qscale_offset, qscale_weight,
           qscale_out);
      break;
    }
    case at::ScalarType::Half: {  // fp16 path
      if (num_channels_per_head % 4)
        throw std::runtime_error("number of channels per head=" +
                                 std::to_string(num_channels_per_head) +
                                 " must be multiples of 4");
      decltype(&msda<half, Layout::NCHW32, 8>) func =
          &msda<half, Layout::NCHW32, 8>;
      switch (value_layout) {
        case Layout::CONCAT_NHWC: {
          if (num_channels_per_head % 8 == 0) {
            func = &msda<half, Layout::CONCAT_NHWC, 8>;
          } else {
            func = &msda<half, Layout::CONCAT_NHWC, 4>;
          }
          break;
        }
        case Layout::CONCAT_NCHW32: {
          if (num_channels_per_head % 8 == 0) {
            func = &msda<half, Layout::CONCAT_NCHW32, 8>;
          } else {
            func = &msda<half, Layout::CONCAT_NCHW32, 4>;
          }
          break;
        }
        case Layout::NHWC: {
          if (num_channels_per_head % 8 == 0) {
            func = &msda<half, Layout::NHWC, 8>;
          } else {
            func = &msda<half, Layout::NHWC, 4>;
          }
          break;
        }
        default: {  // NCHW32
          if (num_channels_per_head % 8 == 0) {
            func = &msda<half, Layout::NCHW32, 8>;
          } else {
            func = &msda<half, Layout::NCHW32, 4>;
          }
          break;
        }
      }
      func(stream, data_output, data_multiscale_value, data_sampling_offsets,
           data_attn_weight, multiscale_spatial_shapes, batch_size, num_queries,
           num_heads, num_scales, num_points_per_scale, num_channels_per_head,
           multiscale_qscale_value.data(), qscale_offset, qscale_weight,
           qscale_out);
      break;
    }
    default:
      throw std::runtime_error("does not support this input type yet");
      break;
  }
  return output;
}

PYBIND11_MODULE(libmsda_torch, m) {
  // Expose the value-memory layouts selectable from Python. Note the
  // Python-side names drop the channel-pack suffix: "NCHW" maps to
  // Layout::NCHW32, "CONCAT_NCHW" to Layout::CONCAT_NCHW32.
  py::enum_<Layout>(m, "layout")
      .value("NCHW", Layout::NCHW32)
      .value("NHWC", Layout::NHWC)
      .value("CONCAT_NCHW", Layout::CONCAT_NCHW32)
      .value("CONCAT_NHWC", Layout::CONCAT_NHWC)
      .export_values();

  // Bind msda_ext as "msda". Only the first three arguments are required;
  // everything after attn_weight has a default (32-slot shape/scale vectors,
  // unit quantization scales, NCHW32 layout).
  m.def("msda", &msda_ext, py::arg("multiscale_value"),
        py::arg("sampling_offsets"), py::arg("attn_weight"),
        py::arg("multiscale_shapes") =
            std::vector<std::tuple<int, int>>(32, {0, 0}),
        py::arg("multiscale_qscale_value") = std::vector<float>(32, 1.0f),
        py::arg("qscale_offset") = 1.0f, py::arg("qscale_weight") = 1.0f,
        py::arg("qscale_out") = 1.0f,
        py::arg("value_layout") = Layout::NCHW32);
}
