#include <ATen/Context.h>
#include <ATen/native/TensorIterator.h>

#include <core/detail/IndexUtils.h>
#include <runtime/Utils.h>
#include <utils/DPCPP.h>

#include "Loops.h"
#include "comm/ATDispatch.h"
#include "comm/Atomics.h"
#include "comm/Numerics.h"
#include "comm/RegistrationDeclarations.h"

using namespace torch_ipex::xpu::dpcpp::detail;
using namespace torch_ipex::xpu::dpcpp;

namespace at {
namespace AtenIpexTypeXPU {
namespace impl {

template <typename input_t, typename IndexType>
static IndexType getBin(
    input_t bVal,
    input_t minvalue,
    input_t maxvalue,
    int nbins) {
  // Map bVal linearly from [minvalue, maxvalue] onto a bin index in
  // [0, nbins). Caller guarantees minvalue <= bVal <= maxvalue.
  IndexType bin =
      static_cast<int>((bVal - minvalue) * nbins / (maxvalue - minvalue));
  // (only applicable for histc)
  // Every bin is half-open [start, end) except the last, which is closed
  // [start, end] so that bVal == maxvalue still lands in a valid bin. The
  // division above yields exactly nbins in that case, so clamp it down to
  // the final bin.
  return bin == nbins ? bin - 1 : bin;
}

template <
    typename output_t,
    typename input_t,
    typename IndexType,
    int ADims,
    bool has_weight,
    typename Op>
struct kernelHistogram1DKernelFunctor {
  void operator()(sycl::item<1> item_id) const {
    auto out_ptr = out_data;
    auto in_ptr = in_data;
    auto weight_ptr = weight_data;

    auto linearIndex = item_id.get_id(0);
    // Convert `linearIndex` into an offset of `b`
    const IndexType bOffset =
        IndexToOffset<input_t, IndexType>::get(linearIndex, b);
    const auto bVal = in_ptr[bOffset];
    if (bVal >= minvalue && bVal <= maxvalue) {
      // Use value at `b` as an offset of `a`
      const IndexType bin =
          getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins);
      const IndexType aOffset = IndexToOffset<output_t, IndexType>::get(bin, a);
      atomicAdd(
          (dpcpp_global_ptr_pt<output_t>)&out_ptr[aOffset],
          getOp(weight_ptr, linearIndex));
    }
  }
  kernelHistogram1DKernelFunctor(
      TensorInfo<output_t, IndexType> a_,
      TensorInfo<input_t, IndexType> b_,
      TensorInfo<output_t, IndexType> c_,
      int nbins_,
      input_t minvalue_,
      input_t maxvalue_,
      IndexType totalElements_,
      Op getOp_,
      output_t* out_data_,
      input_t* in_data_,
      output_t* weight_data_)
      : a(a_),
        b(b_),
        c(c_),
        nbins(nbins_),
        minvalue(minvalue_),
        maxvalue(maxvalue_),
        totalElements(totalElements_),
        getOp(getOp_),
        out_data(out_data_),
        in_data(in_data_),
        weight_data(weight_data_) {}

 private:
  TensorInfo<output_t, IndexType> a;
  TensorInfo<input_t, IndexType> b;
  TensorInfo<output_t, IndexType> c;
  int nbins;
  input_t minvalue;
  input_t maxvalue;
  IndexType totalElements;
  Op getOp;
  output_t* out_data;
  input_t* in_data;
  output_t* weight_data;
};

/*
  Kernel for computing the histogram of the input.
  Launches one work-item per input element; each in-range element atomically
  increments its bin in `a` by getOp's contribution (weight or constant 1).
 */
template <
    typename output_t,
    typename input_t,
    typename IndexType,
    int ADims,
    bool has_weight,
    typename Op>
void kernelHistogram1D(
    TensorInfo<output_t, IndexType> a, /* output */
    TensorInfo<input_t, IndexType> b, /* input */
    TensorInfo<output_t, IndexType> c, /* weight */
    int nbins,
    input_t minvalue,
    input_t maxvalue,
    IndexType totalElements,
    Op getOp) {
  auto& dpcpp_queue = dpcppGetCurrentQueue();

  // Command group: capture raw data pointers, build the histogram functor,
  // and launch it over a flat 1-D range covering every input element.
  auto cgf = DPCPP_Q_CGF(__cgh) {
    auto out_data = a.data;
    auto in_data = b.data;
    auto weight_data = c.data;

    kernelHistogram1DKernelFunctor<
        output_t,
        input_t,
        IndexType,
        ADims,
        has_weight,
        Op>
        kfn(a,
            b,
            c,
            nbins,
            minvalue,
            maxvalue,
            totalElements,
            getOp,
            out_data,
            in_data,
            weight_data);

    // Bin updates inside the functor are atomic, so no work-group-level
    // synchronization is needed for this launch.
    __cgh.parallel_for<decltype(kfn)>(
        sycl::range</*dim=*/1>(totalElements), kfn);
  };
  DPCPP_Q_SUBMIT(dpcpp_queue, cgf);
}

// Instantiates kernelHistogram1D with ADims = 1 and the given weight policy.
// Relies on aInfo / bInfo / cInfo / nbins / minvalue / maxvalue /
// totalElements being in scope at the expansion site.
#define HANDLE_CASE(WEIGHTS_OP, WITH_WEIGHT)                       \
  kernelHistogram1D<output_t, input_t, IndexType, 1, WITH_WEIGHT>( \
      aInfo,                                                       \
      bInfo,                                                       \
      cInfo,                                                       \
      nbins,                                                       \
      minvalue,                                                    \
      maxvalue,                                                    \
      totalElements,                                               \
      WEIGHTS_OP);

// Weighted-histogram contribution: returns the weight associated with the
// element at the given logical index, honoring the (possibly strided)
// weight-tensor layout captured at construction time.
template <typename output_t, typename IndexType, typename info_t>
struct dpcpp_tensor_histogram_functor {
  auto operator()(output_t* weights, IndexType idx) const {
    // Translate the logical index into a physical offset before reading.
    const IndexType offset =
        IndexToOffset<output_t, IndexType>::get(idx, info_);
    return weights[offset];
  }

  dpcpp_tensor_histogram_functor(info_t cInfo) : info_(cInfo) {}

 private:
  info_t info_; // layout of the weight tensor
};

// Unweighted-histogram contribution: every element counts as exactly one.
// The pointer/index arguments exist only to match the weighted functor's
// call signature and are ignored.
template <typename output_t, typename IndexType>
struct dpcpp_tensor_histogram_functor_2 {
  auto operator()(output_t* /*unused*/, IndexType /*unused*/) const {
    return output_t{1};
  }
};

template <typename output_t, typename input_t, bool HasWeights>
bool dpcpp_tensor_histogram(
    at::Tensor a, /* output */
    at::Tensor b, /* input */
    at::Tensor c, /* weights(optional) */
    int64_t nbins,
    input_t minvalue,
    input_t maxvalue) {
  // Histogram driver: dispatches the 1-D histogram kernel with or without
  // per-element weights. Returns false iff there was no work to submit.
  checkBackend("dpcpp_tensor_histogram", {a, b}, Backend::XPU);
  if (HasWeights) {
    checkBackend("dpcpp_tensor_histogram", {c}, Backend::XPU);
  }

  auto totalElements = b.numel();
  if (totalElements == 0) {
    // Empty input: the caller's zero-initialized output is already correct.
    return false;
  }

  using IndexType = int64_t;
  auto aInfo = getTensorInfo<output_t, IndexType>(a);
  auto bInfo = getTensorInfo<input_t, IndexType>(b);
  if (HasWeights) {
    // Weighted path: each element contributes its matching weight from `c`.
    auto cInfo = getTensorInfo<output_t, IndexType>(c);
    const dpcpp_tensor_histogram_functor<output_t, IndexType, decltype(cInfo)>
        getWeightsOp(cInfo);
    HANDLE_CASE(getWeightsOp, true);
  } else {
    // Unweighted path: each element contributes 1. HANDLE_CASE still needs
    // a `cInfo` in scope, so build a dummy that aliases the output pointer
    // (the kernel never dereferences it on this path).
    TensorInfo<output_t, IndexType> cInfo;
    // set the dummy cinfo with the ptr to the output
    cInfo.data = aInfo.data;
    static const dpcpp_tensor_histogram_functor_2<output_t, IndexType>
        getDummyOp;
    HANDLE_CASE(getDummyOp, false);
  }

  return true;
}

// Computes an nbins-bin histogram of `self` over [min, max].
// When min == max (and the tensor is non-empty), the range is taken from the
// data's own min/max; a still-degenerate range is widened by 1 on each side.
// Returns a freshly allocated tensor of bin counts.
template <typename scalar_t, typename input_t>
Tensor histc_template(
    const Tensor& self,
    int64_t nbins,
    input_t min,
    input_t max) {
  input_t minvalue = min;
  input_t maxvalue = max;
  if (min == max && self.numel() > 0) {
    // min == max means "use the data's range" (matches torch.histc).
    minvalue = *self.min().cpu().const_data_ptr<input_t>();
    maxvalue = *self.max().cpu().const_data_ptr<input_t>();
  }
  if (minvalue == maxvalue) {
    // All values identical: widen the range so every element lands in a bin.
    minvalue = minvalue - 1;
    maxvalue = maxvalue + 1;
  }
  TORCH_CHECK(
      !(Numerics<input_t>::isinf(minvalue) ||
        Numerics<input_t>::isinf(maxvalue) ||
        Numerics<input_t>::isnan(minvalue) ||
        Numerics<input_t>::isnan(maxvalue)),
      "range of [",
      minvalue,
      ", ",
      maxvalue,
      "] is not finite");
  TORCH_CHECK(minvalue < maxvalue, "max must be larger than min");
  Tensor output = at::zeros({nbins}, self.options());
  // Unweighted histogram: each in-range element contributes 1 to its bin.
  // The boolean result only reports whether any work was submitted (false
  // for empty input), so it is intentionally ignored — the dead `ret` local
  // has been dropped.
  dpcpp_tensor_histogram<scalar_t, input_t, false>(
      output, self, Tensor(), nbins, minvalue, maxvalue);
  return output;
}

} // namespace impl

// torch.histc entry point for XPU: validates arguments, then dispatches to
// the typed implementation for every non-Half dtype.
Tensor histc(
    const Tensor& self,
    int64_t bins,
    const Scalar& min,
    const Scalar& max) {
  TORCH_CHECK(bins > 0, "bins should be > 0, but is ", bins, " instead");
  // Use TORCH_CHECK for the dtype guard as well, consistent with the rest of
  // this file, instead of the deprecated AT_ERROR macro. Message unchanged.
  TORCH_CHECK(
      self.scalar_type() != ScalarType::Half, "HalfTensor is not supported");
  return IPEX_DISPATCH_ALL_TYPES(self.scalar_type(), "histc", [&] {
    return impl::histc_template<scalar_t>(
        self, bins, min.to<scalar_t>(), max.to<scalar_t>());
  });
}

// Out-variant of histc: computes into a temporary, then reshapes `out` to
// match and copies the result over (preserves `out`'s identity for callers).
Tensor& histc_out(
    const Tensor& self,
    int64_t bins,
    const Scalar& min,
    const Scalar& max,
    Tensor& out) {
  auto result = at::AtenIpexTypeXPU::histc(self, bins, min, max);
  out.resize_as_(result);
  out.copy_(result);
  return out;
}

} // namespace AtenIpexTypeXPU
} // namespace at
