#include "agrad.hxx"
#include "utils.hxx"
#include "cuda/ops/gcn_ops_impl.cuh"

// Opcode selecting which SpMM variant propagate() dispatches to.
// Values are explicit so the Python-side binding can pass stable int codes.
enum SpMMType {
  spmm_std      = 0,  // edge value * node feature, sum-reduced (see spmmStd)
  spmm_max_edge = 1,  // max over edge values only (see spmmMaxEdge)
  spmm_sum_edge = 2   // sum over edge values only (see spmmSumEdge)
};

/**
 * @brief Functor variants used by the SpMM & SDDMM kernels.
 * Conventions:
 *  - for binary op: a = lhs data, b = rhs data;
 *    reduce() is used when the output is dense,
 *    binary() when the output is sparse.
 * @tparam scalar_t element type being aggregated
 */
template <typename scalar_t>
struct spmmStd : Func<scalar_t>
{
  // Combine edge (lhs) and node (rhs) data by elementwise product.
  static __device__ __forceinline__ scalar_t binary(scalar_t lhs, scalar_t rhs)
  {
    return lhs * rhs;
  }
  // Non-atomic sum-accumulate into *addr; returns the updated value.
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t val)
  {
    scalar_t acc = *addr + val;
    *addr = acc;
    return acc;
  }
  // Atomic sum-accumulate; returns the value previously stored at addr.
  static __device__ __forceinline__ scalar_t reduceAtomic(scalar_t* addr, scalar_t val)
  {
    return atomicAdd(addr, val);
  }
};

/**
 * @brief Max-over-edges functor: binary() forwards the lhs (edge) value and
 * the reduction keeps a running maximum.
 * NOTE(review): reduceAtomic and init_val are float-specific (int
 * reinterpretation, FLT_MAX) — instantiating with any other scalar_t would
 * be incorrect; the dispatch below only uses float.
 */
template <typename scalar_t>
struct spmmMaxEdge : Func<scalar_t>
{
  // Only the lhs (edge) value participates in the reduction; b is ignored.
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a; }
  // Non-atomic max into *addr; MAX is presumably a macro from utils.hxx — TODO confirm.
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr = MAX(*addr, n); return *addr;}
  static __device__ __forceinline__ scalar_t reduceAtomic(scalar_t* addr, scalar_t n) 
  { // float atomicMax: CUDA has no native float atomicMax, so emulate via a CAS loop.
    int* address_as_i = (int*) addr;
    int old = *address_as_i, assumed;
    do {
      assumed = old;
      // Try to swap in max(current, n); if another thread changed the slot
      // since we read `assumed`, atomicCAS returns the new bits and we retry.
      old = ::atomicCAS(address_as_i, assumed,
        __float_as_int(::fmaxf(n, __int_as_float(assumed))));
    } while (assumed != old);
    // Value observed just before the successful exchange.
    return __int_as_float(old);
  }
  // Identity element for max; presumably the kernel seeds output slots with
  // this before reducing — confirm in _spmm_manscript.
  static constexpr scalar_t init_val = (scalar_t) -FLT_MAX;
};

template <typename scalar_t>
struct spmmSumEdge : Func<scalar_t>
{
  // Only the lhs (edge) value is aggregated; the rhs operand is unused.
  static __device__ __forceinline__ scalar_t binary(scalar_t lhs, scalar_t /*rhs*/)
  {
    return lhs;
  }
  // Non-atomic sum-accumulate into *addr; returns the updated value.
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t val)
  {
    scalar_t acc = *addr + val;
    *addr = acc;
    return acc;
  }
  // Atomic sum-accumulate; returns the value previously stored at addr.
  static __device__ __forceinline__ scalar_t reduceAtomic(scalar_t* addr, scalar_t val)
  {
    return atomicAdd(addr, val);
  }
};

/**
 * SPMM_DISPATCH(val_t, code, ...) — expands __VA_ARGS__ once for the matched
 * opcode, with two local bindings in scope for the expanded code:
 *   F   : the functor type (spmmStd / spmmMaxEdge / spmmSumEdge over val_t)
 *   dnc : the DnCompute mode (u_e_v for spmm_std, e_v for the edge-only ops)
 * NOTE(review): an unrecognized `code` matches no branch, so the caller's
 * statement is silently skipped — callers must validate `code` themselves.
 * (No comments inside the macro body: a // comment would swallow the trailing
 * line-continuation backslash.)
 */
#define SPMM_DISPATCH(val_t, code, ...)\
do {\
  if (code == SpMMType::spmm_std) {\
    typedef spmmStd<val_t> F;\
    constexpr DnCompute dnc = u_e_v;\
    __VA_ARGS__;\
  } else if (code == SpMMType::spmm_max_edge) {\
    typedef spmmMaxEdge<val_t> F;\
    constexpr DnCompute dnc = e_v;\
    __VA_ARGS__;\
  } else if (code == SpMMType::spmm_sum_edge) {\
    typedef spmmSumEdge<val_t> F;\
    constexpr DnCompute dnc = e_v;\
    __VA_ARGS__;\
  }\
} while (0)

/**
 * @brief Graph propagation (SpMM) entry point: aggregates edge/neighbor data
 * per node according to `opcode` and returns a dense [nodes, nfeats] result.
 *
 * @param opcode     SpMMType selector (spmm_std / spmm_max_edge / spmm_sum_edge)
 * @param row_offset CSR row pointers (int32, CUDA), length nodes + 1
 * @param adj_list   CSR neighbor indices (int32, CUDA)
 * @param row_index  optional row permutation (int32, CUDA) — presumably a
 *                   load-balancing/sorted schedule; confirm against the caller
 * @param edge_data  optional per-edge values (float32, CUDA)
 * @param in_feat    optional node feature matrix (float32, CUDA, row-major)
 * @return           float32 CUDA tensor of shape [nodes, nfeats]
 */
torch::Tensor customgcn::propagate(
  int64_t opcode,
  torch::Tensor row_offset, 
  torch::Tensor adj_list, 
  torch::optional<torch::Tensor> row_index,
  torch::optional<torch::Tensor> edge_data,
  torch::optional<torch::Tensor> in_feat) 
{
  bool use_x = in_feat.has_value();
  bool use_edata = edge_data.has_value();
  bool use_sort = row_index.has_value();

  // SPMM_DISPATCH has no fallback branch: an unknown opcode would expand to
  // nothing and silently return the zero-initialized output. Reject it early.
  CHECK_INPUT(opcode == SpMMType::spmm_std ||
              opcode == SpMMType::spmm_max_edge ||
              opcode == SpMMType::spmm_sum_edge);

  auto resOpt = torch::TensorOptions().dtype(torch::kF32).device(torch::DeviceType::CUDA);

  // Kernel indexing is 32-bit: graphs/features beyond INT_MAX are unsupported.
  int nodes, nfeats, ld_in;
  uint *row_idx_ptr = nullptr;
  float *edata_ptr = nullptr, *ndata_ptr = nullptr;

  CHECK_CUDA(row_offset);
  CHECK_CUDA(adj_list);
  if(use_sort) { 
    CHECK_CUDA(row_index.value()); 
    row_idx_ptr = reinterpret_cast<uint32_t*>(row_index.value().data_ptr<int>());
  }
  if(use_edata) { 
    CHECK_CUDA(edge_data.value()); 
    CHECK_INPUT(edge_data.value().dtype() == torch::kF32); 
    edata_ptr = edge_data.value().data_ptr<float>();
  }
  if(use_x) { 
    CHECK_CUDA(in_feat.value()); 
    CHECK_INPUT(in_feat.value().dtype() == torch::kF32); 
    CHECK_INPUT(in_feat.value().stride(1) == 1); // kernel requires row-major features
    ndata_ptr = in_feat.value().data_ptr<float>();
    nodes = in_feat.value().size(0);
    nfeats = in_feat.value().size(1);
    ld_in = in_feat.value().stride(0); // leading dim; may exceed nfeats for sliced inputs
  } else {
    // No node features: reduce edge values only, producing one scalar per node.
    nodes = row_offset.size(0)-1;
    nfeats = 1;
    ld_in = 0;
  }

  auto row_offset_ptr = reinterpret_cast<uint32_t*>(row_offset.data_ptr<int>());
  auto adj_list_ptr = reinterpret_cast<uint32_t*>(adj_list.data_ptr<int>());

  // NOTE(review): output is zero-initialized; for spmm_max_edge with negative
  // edge values this is only correct if the kernel seeds slots with
  // F::init_val — confirm in _spmm_manscript.
  auto out_feat = torch::zeros({(int64_t)nodes, (int64_t)nfeats}, resOpt);
  auto dst_ptr = out_feat.data_ptr<float>();

  SPMM_DISPATCH(float, opcode,
      _spmm_manscript<F, uint, float, pull_dyn, shared, dnc, atomic, 32, 32, 1, 1, 1>
      (row_idx_ptr, row_offset_ptr, adj_list_ptr, edata_ptr,
       ndata_ptr, ld_in, dst_ptr, nfeats,
       nodes, adj_list.size(0), nfeats, nullptr));

  return out_feat;
}

#undef SPMM_DISPATCH