#include "agrad.hxx"
#include "utils.hxx"
#include "cuda/ops/gat_ops_impl.cuh"

/// Selects which SDDMM variant SDDMM_DISPATCH instantiates.
enum SDDMMType {
  sddmm_std,  // dot-product SDDMM (binary multiply + reduction)
  sddmm_add,  // element-wise addition of the two features
  sddmm_sub,  // element-wise subtraction
  sddmm_mul,  // element-wise multiplication
  sddmm_div   // element-wise division
};

/**
 * @brief Variations of SpMM & SDDMM used here.
 * Rules:
 *  - for a binary op: a = lhs data, b = rhs data,
 *    which means `reduce` is used for dense output
 *    and `binary` is used for sparse output.
 * @tparam scalar_t element type of the values being combined
 */

/// this functor represents dot product sddmm, under this senario both binary & reduce 
/// represents dense semantics.
/// @note sparse operand is also computed by `binary` currently, which could cause problem
/// Dot-product SDDMM functor; under this scenario both `binary` and `reduce`
/// carry dense semantics.
/// @note the sparse operand is currently also routed through `binary`, which
///       could cause problems.
template <typename scalar_t>
struct sddmmStd : Func<scalar_t>
{
  /// product of one lhs element and one rhs element
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b)
  {
    return b * a;
  }
  /// non-atomic accumulation into *addr; returns the updated sum
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n)
  {
    scalar_t acc = *addr + n;
    *addr = acc;
    return acc;
  }
  /// atomic accumulation; atomicAdd returns the value previously at addr
  static __device__ __forceinline__ scalar_t reduceAtomic(scalar_t* addr, scalar_t n)
  {
    return atomicAdd(addr, n);
  }
};

/// this case computes an trivial addition of both feature 
/// Element-wise addition of both features; the "reduction" degenerates
/// to a plain store.
template <typename scalar_t>
struct sddmmAdd : Func<scalar_t>
{
  /// sum of the two operands
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b)
  {
    return a + b;
  }
  /// overwrite *addr with n; returns the stored value
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n)
  {
    *addr = n;
    return n;
  }
};

/// the following case only evolves one-side scalar feature. no reduction is required
/// Element-wise subtraction. Per the original notes this case involves only a
/// one-side scalar feature, so no real reduction is required — `reduce` is a
/// plain store.
template <typename scalar_t>
struct sddmmSub : Func<scalar_t>
{
  /// lhs minus rhs (order matters)
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b)
  {
    return a - b;
  }
  /// overwrite *addr with n; returns the stored value
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n)
  {
    *addr = n;
    return n;
  }
};

/// Element-wise multiplication; `reduce` is a plain store (no accumulation).
template <typename scalar_t>
struct sddmmMul : Func<scalar_t>
{
  /// product of the two operands
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b)
  {
    return b * a;
  }
  /// overwrite *addr with n; returns the stored value
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n)
  {
    *addr = n;
    return n;
  }
};

/// Element-wise division; `reduce` is a plain store (no accumulation).
/// @note no guard against b == 0 — mirrors IEEE semantics of the raw divide.
template <typename scalar_t>
struct sddmmDiv : Func<scalar_t>
{
  /// lhs divided by rhs (order matters)
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b)
  {
    return a / b;
  }
  /// overwrite *addr with n; returns the stored value
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n)
  {
    *addr = n;
    return n;
  }
};


/// Dispatches a runtime SDDMMType `code` to a compile-time functor: inside
/// each branch it binds `F` to the matching sddmm* functor over `val_t` and
/// `dnc` to a DnCompute tag, then executes the statement(s) passed as
/// __VA_ARGS__ with both names in scope.
///  - sddmm_std / sddmm_add select dnc = u_v;
///  - sddmm_sub / sddmm_mul / sddmm_div select dnc = e_v.
/// NOTE(review): u_v / e_v and DnCompute are declared elsewhere (not visible
/// here) — confirm their semantics against that definition.
/// An unrecognized `code` silently executes nothing.
#define SDDMM_DISPATCH(val_t, code, ...)\
do {\
  if (code == SDDMMType::sddmm_std) {\
    typedef sddmmStd<val_t> F;\
    constexpr DnCompute dnc = u_v;\
    __VA_ARGS__;\
  } else if (code == SDDMMType::sddmm_add) {\
    typedef sddmmAdd<val_t> F;\
    constexpr DnCompute dnc = u_v;\
    __VA_ARGS__;\
  } else if (code == SDDMMType::sddmm_sub){\
   typedef sddmmSub<val_t> F;\
    constexpr DnCompute dnc = e_v;\
    __VA_ARGS__;\
  } else if (code == SDDMMType::sddmm_mul) {\
    typedef sddmmMul<val_t> F;\
    constexpr DnCompute dnc = e_v;\
    __VA_ARGS__;\
  } else if (code == SDDMMType::sddmm_div) {\
    typedef sddmmDiv<val_t> F;\
    constexpr DnCompute dnc = e_v;\
    __VA_ARGS__;\
  }\
} while (0)


/// Computes one new float value per edge of a CSR graph by applying the SDDMM
/// variant selected by `opcode` (an SDDMMType) to the optional edge weights
/// and node features, via the `_sddmm_alt1` kernel.
///
/// @param opcode       SDDMM variant selector (see SDDMMType)
/// @param row_offset   int32 CSR row pointer on CUDA, size = #rows + 1
/// @param col_indx     int32 CSR column indices on CUDA, one entry per edge
/// @param row_indx     optional int32 per-edge row indices on CUDA (passed to
///                     the kernel when present; presumably a sorted/COO form —
///                     confirm against `_sddmm_alt1`)
/// @param edge_weight  optional float32 input edge values on CUDA
/// @param feat_l       optional float32 lhs features, row-major (stride(1)==1)
/// @param feat_r       optional float32 rhs features, row-major (stride(1)==1)
/// @return float32 tensor shaped like `col_indx` (one value per edge), on the
///         same device as the inputs
torch::Tensor customgat::apply_edge(int64_t opcode,
                                    torch::Tensor row_offset,
                                    torch::Tensor col_indx,
                                    torch::optional<torch::Tensor> row_indx,
                                    torch::optional<torch::Tensor> edge_weight,
                                    torch::optional<torch::Tensor> feat_l,
                                    torch::optional<torch::Tensor> feat_r)
{
  bool use_e = edge_weight.has_value();
  bool sort = row_indx.has_value();
  bool use_lhs = feat_l.has_value();
  bool use_rhs = feat_r.has_value();

  uint nv = row_offset.size(0)-1; // sparse size (#rows of the CSR matrix)
  uint vlen = 1;                  // dense columns (widest feature provided)
  uint ld_src = 0;                // leading dimension of the feature matrices
  float *feat_l_ptr = nullptr, *feat_r_ptr = nullptr;

  CHECK_CUDA(row_offset);
  CHECK_CUDA(col_indx);

  CHECK_INPUT(row_offset.dtype() == torch::kI32);
  CHECK_INPUT(col_indx.dtype() == torch::kI32);
  if (use_e) { CHECK_CUDA(edge_weight.value()); CHECK_INPUT(edge_weight.value().dtype() == torch::kF32); }
  if (sort) { CHECK_CUDA(row_indx.value()); CHECK_INPUT(row_indx.value().dtype() == torch::kI32);}
  if (use_lhs) {
    CHECK_CUDA(feat_l.value());
    CHECK_INPUT(feat_l.value().dtype() == torch::kF32);
    CHECK_INPUT(feat_l.value().stride(1) == 1); // row major is required
    feat_l_ptr = feat_l.value().data_ptr<float>();
    vlen = std::max(vlen, (uint)feat_l.value().size(1));
    ld_src = std::max(ld_src, (uint)feat_l.value().stride(0));
  }
  if (use_rhs) {
    CHECK_CUDA(feat_r.value());
    CHECK_INPUT(feat_r.value().dtype() == torch::kF32);
    CHECK_INPUT(feat_r.value().stride(1) == 1); // row major is required
    feat_r_ptr = feat_r.value().data_ptr<float>();
    vlen = std::max(vlen, (uint)feat_r.value().size(1));
    ld_src = std::max(ld_src, (uint)feat_r.value().stride(0));
  }

  // BUGFIX: allocate the output on the same device as the inputs rather than
  // the current/default CUDA device. With inputs on e.g. cuda:1 the previous
  // TensorOptions().device(torch::DeviceType::CUDA) could place `new_weight`
  // on a different GPU than the pointers handed to the kernel.
  auto outOpt = torch::TensorOptions().dtype(torch::kF32).device(row_offset.device());
  auto new_weight = torch::zeros(col_indx.sizes(), outOpt);

  uint  *A_row = reinterpret_cast<uint32_t*>(row_offset.data_ptr<int>()); // torch does not support uint32
  uint  *A_col = reinterpret_cast<uint32_t*>(col_indx.data_ptr<int>());
  float *A_val = new_weight.data_ptr<float>();

  uint* A_rptr = sort? reinterpret_cast<uint32_t*>(row_indx.value().data_ptr<int>()) : nullptr;
  float* A_val_in = use_e? edge_weight.value().data_ptr<float>() : nullptr;

  // Instantiates F/dnc from `opcode` and launches the templated kernel.
  SDDMM_DISPATCH(float, opcode,\
      _sddmm_alt1<F, uint, float, push_seq, none, dnc, shfl_red, 128, 1, 1, 1, 1>\
      (A_rptr, A_row, A_col, A_val_in, feat_l_ptr, feat_r_ptr, ld_src, A_val, nv, (uint)col_indx.size(0), vlen));

  return new_weight;
}

#undef SDDMM_DISPATCH

