#include <iostream>
#include <math.h>
#include <vector>
#include <torch/script.h>
#include <ATen/cuda/CUDAContext.h>

#include "agrad.hxx"
#include "utils.hxx"
// #include "cuda/ops/gcn_ops_impl.cuh"
// #include "cuda/ops/gat_ops_impl.cuh"
// #include "cuda/ops/hybtile_kernels.cuh"
// #include "cuda/ops/gcoo_kernels.cuh"


/*

enum SpMMType {
  spmm_std, spmm_max_edge, spmm_sum_edge
};

enum SDDMMType {
  sddmm_std, sddmm_add, sddmm_sub, sddmm_mul, sddmm_div
};

*/

/**
 * @brief used variations of spmm & sddmm
 * rules:
 *  - for binary op: a=lhs data, b=rhs data, 
 *      which means reduce works for dense output 
 *      and binary works for sparse output
 * @tparam scalar_t 
 */

/*
template <typename scalar_t>
struct spmmStd : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a*b; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr += n; return *addr;}
  static __device__ __forceinline__ scalar_t reduceAtomic(scalar_t* addr, scalar_t n) { return atomicAdd(addr, n); }
};

template <typename scalar_t>
struct spmmMaxEdge : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr = MAX(*addr, n); return *addr;}
  static __device__ __forceinline__ scalar_t reduceAtomic(scalar_t* addr, scalar_t n) 
  { // float atomicMax
    int* address_as_i = (int*) addr;
    int old = *address_as_i, assumed;
    do {
      assumed = old;
      old = ::atomicCAS(address_as_i, assumed,
        __float_as_int(::fmaxf(n, __int_as_float(assumed))));
    } while (assumed != old);
    return __int_as_float(old);
  }
  static constexpr scalar_t init_val = (scalar_t) -FLT_MAX;
};

template <typename scalar_t>
struct spmmSumEdge : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr += n; return *addr;}
  static __device__ __forceinline__ scalar_t reduceAtomic(scalar_t* addr, scalar_t n) { return atomicAdd(addr, n); }
};


/// this functor represents dot-product sddmm; under this scenario both binary & reduce
/// represent dense semantics.
/// @note the sparse operand is also computed by `binary` currently, which could cause problems
template <typename scalar_t>
struct sddmmStd : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a*b; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr += n; return *addr;}
  static __device__ __forceinline__ scalar_t reduceAtomic(scalar_t* addr, scalar_t n) { return atomicAdd(addr, n); }
};

/// this case computes a trivial addition of both features
template <typename scalar_t>
struct sddmmAdd : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a+b; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr = n; return *addr;}
};

/// the following cases only involve a one-sided scalar feature; no reduction is required
template <typename scalar_t>
struct sddmmSub : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a-b; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr = n; return *addr;}
};

template <typename scalar_t>
struct sddmmMul : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a*b; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr = n; return *addr;}
};

template <typename scalar_t>
struct sddmmDiv : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a/b; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr = n; return *addr;}
};

#define SPMM_DISPATCH(val_t, code, ...)\
do {\
  if (code == SpMMType::spmm_std) {\
    typedef spmmStd<val_t> F;\
    constexpr DnCompute dnc = u_e_v;\
    __VA_ARGS__;\
  } else if (code == SpMMType::spmm_max_edge) {\
    typedef spmmMaxEdge<val_t> F;\
    constexpr DnCompute dnc = e_v;\
    __VA_ARGS__;\
  } else if (code == SpMMType::spmm_sum_edge) {\
    typedef spmmSumEdge<val_t> F;\
    constexpr DnCompute dnc = e_v;\
    __VA_ARGS__;\
  }\
} while (0)

#define SDDMM_DISPATCH(val_t, code, ...)\
do {\
  if (code == SDDMMType::sddmm_std) {\
    typedef sddmmStd<val_t> F;\
    constexpr DnCompute dnc = u_v;\
    __VA_ARGS__;\
  } else if (code == SDDMMType::sddmm_add) {\
    typedef sddmmAdd<val_t> F;\
    constexpr DnCompute dnc = u_v;\
    __VA_ARGS__;\
  } else if (code == SDDMMType::sddmm_sub){\
   typedef sddmmSub<val_t> F;\
    constexpr DnCompute dnc = e_v;\
    __VA_ARGS__;\
  } else if (code == SDDMMType::sddmm_mul) {\
    typedef sddmmMul<val_t> F;\
    constexpr DnCompute dnc = e_v;\
    __VA_ARGS__;\
  } else if (code == SDDMMType::sddmm_div) {\
    typedef sddmmDiv<val_t> F;\
    constexpr DnCompute dnc = e_v;\
    __VA_ARGS__;\
  }\
} while (0)

torch::Tensor customgcn::propagate(
  int64_t opcode,
  torch::Tensor row_offset, 
  torch::Tensor adj_list, 
  torch::optional<torch::Tensor> row_index,
  torch::optional<torch::Tensor> edge_data,
  torch::optional<torch::Tensor> in_feat) 
{
  // using namespace std;
  // static int file_no = 0;
  // ofstream fs;
  bool use_x = in_feat.has_value();
  bool use_edata = edge_data.has_value();
  bool use_sort = row_index.has_value();

  auto resOpt = torch::TensorOptions().dtype(torch::kF32).device(torch::DeviceType::CUDA);
    
  int nodes, nfeats, ld_in;
  uint *row_idx_ptr = nullptr;
  float *edata_ptr = nullptr, *ndata_ptr = nullptr;

  CHECK_CUDA(row_offset);
  CHECK_CUDA(adj_list);
  if(use_sort) { 
    CHECK_CUDA(row_index.value()); 
    row_idx_ptr = reinterpret_cast<uint32_t*>(row_index.value().data_ptr<int>());
  }
  if(use_edata) { 
    CHECK_CUDA(edge_data.value()); 
    CHECK_INPUT(edge_data.value().dtype() == torch::kF32); 
    edata_ptr = edge_data.value().data_ptr<float>();
  }
  if(use_x) { 
    CHECK_CUDA(in_feat.value()); 
    CHECK_INPUT(in_feat.value().dtype() == torch::kF32); 
    CHECK_INPUT(in_feat.value().stride(1) == 1); //row major is required 
    ndata_ptr = in_feat.value().data_ptr<float>();
    nodes = in_feat.value().size(0);
    nfeats = in_feat.value().size(1);
    ld_in = in_feat.value().stride(0);
  } else {
    nodes = row_offset.size(0)-1;
    nfeats = 1;
    ld_in = 0;
  }

  auto row_offset_ptr = reinterpret_cast<uint32_t*>(row_offset.data_ptr<int>());
  auto adj_list_ptr = reinterpret_cast<uint32_t*>(adj_list.data_ptr<int>());
  
  auto out_feat = torch::zeros({(int64_t)nodes, (int64_t)nfeats}, resOpt);
  auto dst_ptr = out_feat.data_ptr<float>();

  SPMM_DISPATCH(float, opcode,\
      _spmm_manscript<F, uint, float, pull_dyn, shared, dnc, atomic, 32, 32, 1, 1, 1>\
      (row_idx_ptr, row_offset_ptr, adj_list_ptr, edata_ptr,\
       ndata_ptr, ld_in, dst_ptr, nfeats,\
       nodes, adj_list.size(0), nfeats, nullptr));
       
  return out_feat;
}

torch::Tensor customgat::apply_edge(int64_t opcode,
                                    torch::Tensor row_offset,
                                    torch::Tensor col_indx,
                                    torch::optional<torch::Tensor> row_indx,
                                    torch::optional<torch::Tensor> edge_weight,
                                    torch::optional<torch::Tensor> feat_l,
                                    torch::optional<torch::Tensor> feat_r)
{
  bool use_e = edge_weight.has_value();
  bool sort = row_indx.has_value();
  bool use_lhs = feat_l.has_value();
  bool use_rhs = feat_r.has_value();
  
  uint nv = row_offset.size(0)-1; // sparse size
  uint vlen = 1; // dense columns
  uint ld_src = 0;
  float *feat_l_ptr = nullptr, *feat_r_ptr = nullptr;

  CHECK_CUDA(row_offset);
  CHECK_CUDA(col_indx);

  CHECK_INPUT(row_offset.dtype() == torch::kI32);
  CHECK_INPUT(col_indx.dtype() == torch::kI32);
  if (use_e) { CHECK_CUDA(edge_weight.value()); CHECK_INPUT(edge_weight.value().dtype() == torch::kF32); }
  if (sort) { CHECK_CUDA(row_indx.value()); CHECK_INPUT(row_indx.value().dtype() == torch::kI32);}
  if (use_lhs) { 
    CHECK_CUDA(feat_l.value()); 
    CHECK_INPUT(feat_l.value().dtype() == torch::kF32); 
    CHECK_INPUT(feat_l.value().stride(1) == 1); //row major is required 
    feat_l_ptr = feat_l.value().data_ptr<float>();
    vlen = std::max(vlen, (uint)feat_l.value().size(1)); 
    ld_src = std::max(ld_src, (uint)feat_l.value().stride(0));
  }
  if (use_rhs) { 
    CHECK_CUDA(feat_r.value()); 
    CHECK_INPUT(feat_r.value().dtype() == torch::kF32); 
    CHECK_INPUT(feat_r.value().stride(1) == 1); //row major is required 
    feat_r_ptr = feat_r.value().data_ptr<float>();
    vlen = std::max(vlen, (uint)feat_r.value().size(1)); 
    ld_src = std::max(ld_src, (uint)feat_r.value().stride(0));
  }

  auto outOpt = torch::TensorOptions().dtype(torch::kF32).device(torch::DeviceType::CUDA);
  auto new_weight = torch::zeros(col_indx.sizes(), outOpt);

  uint  *A_row = reinterpret_cast<uint32_t*>(row_offset.data_ptr<int>()); // torch does not support uint32
  uint  *A_col = reinterpret_cast<uint32_t*>(col_indx.data_ptr<int>()); 
  float *A_val = new_weight.data_ptr<float>();
  
  uint* A_rptr = sort? reinterpret_cast<uint32_t*>(row_indx.value().data_ptr<int>()) : nullptr;
  float* A_val_in = use_e? edge_weight.value().data_ptr<float>() : nullptr;

  SDDMM_DISPATCH(float, opcode,\
      _sddmm_alt1<F, uint, float, push_seq, none, dnc, shfl_red, 128, 1, 1, 1, 1>\
      (A_rptr, A_row, A_col, A_val_in, feat_l_ptr, feat_r_ptr, ld_src, A_val, nv, (uint)col_indx.size(0), vlen));

  return new_weight;
}
*/

// Global launch/scheduling tuning knobs.
// NOTE(review): their exact roles are not visible in this file — the names
// suggest _thd = threads per block and _bsz = batch/bucket size; confirm at
// the kernel launch sites that read them.
uint _thd = 256;
uint _bsz = 256*6;
// Cutoff thresholds, presumably for choosing between execution paths
// (a "hybrid" cutoff and a dense cutoff, judging by the names) — TODO confirm
// against the code that consumes them.
uint _h_thresh = 256*18;
uint _d_thresh = 1<<30;

// Process-wide runtime helper with 4 slots (see runtime::helper_t<4> in the
// project headers); its build() is triggered by init_stream() below.
runtime::helper_t<4> global_helper;

/**
 * @brief One-time setup entry point: builds the process-wide runtime helper.
 *
 * Simply forwards to global_helper.build(). The function name suggests this
 * sets up CUDA streams — NOTE(review): confirm what helper_t::build() does
 * in the project headers.
 */
void init_stream() { global_helper.build(); }