#include <iostream>
#include <math.h>
#include <cfloat>   // FLT_MAX (used by spmmMaxEdge::init_val)
#include <vector>
#include <torch/script.h>
#include <ATen/cuda/CUDAContext.h>

#include "../agrad.hxx"
#include "../utils.hxx"
#include "spmm_kernels.cuh"
#include "sddmm_kernels.cuh"
#include "hybtile_kernels.cuh"
#include "gcoo_kernels.cuh"

// debug headers
// #include <fstream>

// #if __CUDA_ARCH__ < 350
// __device__ double atomicAdd(double* address, double val)
// {
//     unsigned long long int* address_as_ull =
//                               (unsigned long long int*)address;
//     unsigned long long int old = *address_as_ull, assumed;

//     do {
//         assumed = old;
//         old = atomicCAS(address_as_ull, assumed,
//                         __double_as_longlong(val + 
//                                              __longlong_as_double(assumed)));

//     // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
//     } while (assumed != old);

//     return __longlong_as_double(old);
// }
// #endif

// From a correctness standpoint, float is an appropriate data type here.

/* Historical junk heap (dead code kept for reference)
template<typename scalar_t, int fdim=10>
struct feat_t {
  scalar_t data[fdim];

  __host__ __device__ feat_t() {
    for (int i=0; i<fdim; ++i) data[i] = 0;
  }
  __host__ __device__ feat_t(const feat_t<scalar_t, fdim>& other)
  {
    for (int i=0; i<fdim; ++i) data[i] = other.data[i];
  }
  __host__ __device__ feat_t<scalar_t, fdim>& operator=(const feat_t<scalar_t, fdim>& other)
  {
    for (int i=0; i<fdim; ++i) data[i] = other.data[i];
    return *this;
  }
  __device__ void flatten(scalar_t *ptr, const int64_t index[2], const int64_t strides[2]) const
  {
    ptr[strides[0]*index[0]+strides[1]*index[1]] = data[index[1]];
  }
};

template<typename F, typename scalar_t>
void set_vertex_data(F& f, scalar_t* dataptr, int64_t strides[2])
{
  f.data.init_wa([](int i){ return feat_t<scalar_t, F::_fet_dim>(); });
  f.data.init_ra([dataptr, strides](int i){
    feat_t<scalar_t, F::_fet_dim> ret;
    for (int j=0; j<F::_fet_dim; ++j)
    {
      ret.data[j] = dataptr[strides[0]*i+strides[1]*j];
    }
    return ret;
  });
  f.data.set_zero(feat_t<scalar_t, F::_fet_dim>());
}

template<typename F, typename scalar_t>
__global__ void _extract_vertex_kernel(F& f, scalar_t* dout, int64_t strides[2])
{
  const int gtid = threadIdx.x + blockIdx.x * blockDim.x;
  const int STRIDE = gridDim.x * blockDim.x;
  for(int i = gtid; gtid < f.data.size * F::_fet_dim; i += STRIDE){
    int64_t vid = gtid / f.data.size;
    int64_t cid = gtid % f.data.size;
    int64_t pair[] = {vid, cid};
    f.wa_of(vid)->flatten(dout, pair, strides);
  }
}

#define pack_t feat_t<scalar_t, fdim>
template<typename scalar_t, int fdim=10> // 除非重构gswitch，否则这个不好解决，把linear和这个fuse起来也是虚妄
struct GCN:Functor<VC, pack_t, pack_t, Empty>
{
  const static int _fet_dim = fdim;

  __device__ Status filter(int vid, dG g)
  {
    *Functor<VC, pack_t, pack_t, Empty>::ra_of(vid) = *Functor<VC, pack_t, pack_t, Empty>::wa_of(vid);
    return Active;
  }

  __device__ pack_t emit(int vid, Empty *ph, dG g)
  {
    return *Functor<VC, pack_t, pack_t, Empty>::ra_of(vid);
  }

  __device__ bool comp(pack_t* feat_v, pack_t feat_u, dG g)
  {
    for (int i=0; i<fdim; ++i)
    {
      feat_v->data[i] += feat_u.data[i];
    }
    return true;
  }

  __device__ bool compAtomic(pack_t* feat_v, pack_t feat_u, dG g)
  {
    for(int i=0; i<fdim; ++i)
    {
      atomicAdd(&feat_v->data[i], feat_u.data[i]);
    }
    return true;
  }
};
#undef pack_t

template<typename scalar_t>
void run_gswitch_vc(scalar_t* input, scalar_t* output, int64_t strides[2], int64_t nlayers)
{
  GCN<scalar_t, FEAT> f;
  set_vertex_data<GCN<scalar_t,FEAT>, scalar_t>(f, input, strides);
  init_conf(stats, fets, conf, graph, f);

  active_set_t as = build_active_set(graph.dg.nvertexs, conf);
  as.init(ALL_ACTIVE);

  for(int level=0; level<nlayers; ++level)
  {
    inspector.inspect(as, graph.dg, f, stats, fets, conf);
    selector.select(stats, fets, conf);
    executor.filter(as, graph.dg, f, stats, fets, conf);
    graph.dg.update_level();
    executor.expand(as, graph.dg, f, stats, fets, conf);
  }
  _extract_vertex_kernel<<<64,512>>>(f, output, strides);
  CUBARRIER();
}
*/

helper_t global_helper;

enum SpMMType {
  spmm_std, spmm_max_edge, spmm_sum_edge
};

enum SDDMMType {
  sddmm_std, sddmm_add, sddmm_sub, sddmm_mul, sddmm_div
};

/**
 * @brief used variations of spmm & sddmm
 * rules:
 *  - for binary op: a=lhs data, b=rhs data, 
 *      which means reduce works for dense output 
 *      and binary works for sparse output
 * @tparam scalar_t 
 */
template <typename scalar_t>
struct spmmStd : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a*b; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr += n; return *addr;}
  static __device__ __forceinline__ scalar_t reduceAtomic(scalar_t* addr, scalar_t n) { return atomicAdd(addr, n); }
};

template <typename scalar_t>
struct spmmMaxEdge : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr = MAX(*addr, n); return *addr;}
  static __device__ __forceinline__ scalar_t reduceAtomic(scalar_t* addr, scalar_t n) 
  { // float atomicMax
    int* address_as_i = (int*) addr;
    int old = *address_as_i, assumed;
    do {
      assumed = old;
      old = ::atomicCAS(address_as_i, assumed,
        __float_as_int(::fmaxf(n, __int_as_float(assumed))));
    } while (assumed != old);
    return __int_as_float(old);
  }
  static constexpr scalar_t init_val = (scalar_t) -FLT_MAX;
};

template <typename scalar_t>
struct spmmSumEdge : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr += n; return *addr;}
  static __device__ __forceinline__ scalar_t reduceAtomic(scalar_t* addr, scalar_t n) { return atomicAdd(addr, n); }
};


/// this functor represents dot product sddmm, under this senario both binary & reduce 
/// represents dense semantics.
/// @note sparse operand is also computed by `binary` currently, which could cause problem
template <typename scalar_t>
struct sddmmStd : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a*b; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr += n; return *addr;}
  static __device__ __forceinline__ scalar_t reduceAtomic(scalar_t* addr, scalar_t n) { return atomicAdd(addr, n); }
};

/// this case computes an trivial addition of both feature 
template <typename scalar_t>
struct sddmmAdd : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a+b; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr = n; return *addr;}
};

/// the following case only evolves one-side scalar feature. no reduction is required
template <typename scalar_t>
struct sddmmSub : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a-b; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr = n; return *addr;}
};

template <typename scalar_t>
struct sddmmMul : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a*b; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr = n; return *addr;}
};

template <typename scalar_t>
struct sddmmDiv : Func<scalar_t>
{
  static __device__ __forceinline__ scalar_t binary(scalar_t a, scalar_t b) { return a/b; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t* addr, scalar_t n) { *addr = n; return *addr;}
};


#define SPMM_DISPATCH(val_t, code, ...)\
do {\
  if (code == SpMMType::spmm_std) {\
    typedef spmmStd<val_t> F;\
    constexpr DnCompute dnc = u_e_v;\
    __VA_ARGS__;\
  } else if (code == SpMMType::spmm_max_edge) {\
    typedef spmmMaxEdge<val_t> F;\
    constexpr DnCompute dnc = e_v;\
    __VA_ARGS__;\
  } else if (code == SpMMType::spmm_sum_edge) {\
    typedef spmmSumEdge<val_t> F;\
    constexpr DnCompute dnc = e_v;\
    __VA_ARGS__;\
  }\
} while (0)

#define SDDMM_DISPATCH(val_t, code, ...)\
do {\
  if (code == SDDMMType::sddmm_std) {\
    typedef sddmmStd<val_t> F;\
    constexpr DnCompute dnc = u_v;\
    __VA_ARGS__;\
  } else if (code == SDDMMType::sddmm_add) {\
    typedef sddmmAdd<val_t> F;\
    constexpr DnCompute dnc = u_v;\
    __VA_ARGS__;\
  } else if (code == SDDMMType::sddmm_sub){\
   typedef sddmmSub<val_t> F;\
    constexpr DnCompute dnc = e_v;\
    __VA_ARGS__;\
  } else if (code == SDDMMType::sddmm_mul) {\
    typedef sddmmMul<val_t> F;\
    constexpr DnCompute dnc = e_v;\
    __VA_ARGS__;\
  } else if (code == SDDMMType::sddmm_div) {\
    typedef sddmmDiv<val_t> F;\
    constexpr DnCompute dnc = e_v;\
    __VA_ARGS__;\
  }\
} while (0)

/*
void customgcn::init_layer(const std::string path)
{
fets.centric = VC;
  fets.pattern = ASSO;
  fets.fromall = true;
  fets.toall = true;
  
  cmd_opt.verbose = true;
  
  // 先这样吧
  edgelist_t<float> el;
  el.read_mtx(path, false, false, true);
  el.with_weight = true;
  for (int i=0; i<el.nedges; ++i)
  {
    el.evector.push_back(1.0);
  }
  G graph = build_graph<VC>(el, fets);
  // for ( int i=0; i<fets.nvertexs; i++)
  //     std::cout << graph.hg.start_pos[i] << " ";
  // std::cout << std::endl;
}


std::tuple<torch::Tensor, torch::Tensor, torch::optional<torch::Tensor>,
           torch::Tensor, torch::Tensor, torch::optional<torch::Tensor>>
customgcn::init_graph_cpp(
  const int nvertex, const int nedge, const bool directed, 
  torch::Tensor edge_tensor, torch::optional<torch::Tensor> weight_tensor)
{
  cmd_opt.verbose = true;
  CHECK_INPUT(edge_tensor.dtype() == at::ScalarType::Int || edge_tensor.dtype() == at::ScalarType::Long);
  // non-weighted_graph
  torch::Tensor *edgelist = nullptr;
  auto ro = torch::zeros(nvertex);
  auto al = torch::zeros(nedge);
  auto r_ro = torch::zeros(nvertex);
  auto r_al = torch::zeros(nedge);

  if(edge_tensor.size(1) != 2) 
    edgelist = new torch::Tensor(edge_tensor.t().contiguous());
  else
    edgelist = &edge_tensor;

  // printf("edgelist with data @ %lx\n", edge_tensor.data_ptr<int64_t>());
  if(!weight_tensor.has_value())
  {
    edgelist_t<float> el;
    AT_DISPATCH_INTEGRAL_TYPES(
      edgelist->scalar_type(), "init_list", [&] {
        host_graph_t<CSR, scalar_t> graph;
        int vmin=INT_MAX, vmax=0;
        std::vector<int>& vvector = el.vvector;
        std::vector<float>& evector = el.evector;
        auto src_ptr = edgelist->data_ptr<scalar_t>();
        // printf("preparing ok on array %lx on cuda %ld\n", src_ptr, src_ptr[0]);
        for (int i=0; i<edgelist->size(0); ++i)
        {
          // printf("starting for edge[%d]\n",i);
          int v1 = static_cast<int>(src_ptr[2*i]);
          // printf("half way for edge[%d]\n",i);
          int v0 = static_cast<int>(src_ptr[2*i+1]);
          // printf("ok for edge[%d]\n",i);
          vmin = std::min(v0,vmin);
          vmin = std::min(v1,vmin);
          vmax = std::max(v0,vmax);
          vmax = std::max(v1,vmax);

          if(v0 == v1) continue;
          vvector.push_back(v0);
          vvector.push_back(v1);
          evector.push_back(1.0);
        }
        
        el.vmin = vmin;
        el.vmax = vmax;
        el.nvertexs = (vmax-vmin+1);
        el.nedges = vvector.size()>>1;
        el.directed = directed;
        el.with_weight = true;
    });
    graph.build_reentrant(el, true);
  } 
  else 
  {
    edgelist_t<float> el;
    torch::Tensor edge_data = weight_tensor.value();
    CHECK_INPUT(edge_tensor.dtype() == at::ScalarType::Float); // only float is supported
    AT_DISPATCH_INTEGRAL_TYPES(
      edgelist->scalar_type(), "init_list", [&] {
        int vmin=INT_MAX, vmax=0;
        std::vector<int>& vvector = el.vvector;
        std::vector<float>& evector = el.evector;
        auto src_ptr = edgelist->data_ptr<scalar_t>();
        auto weight_ptr = edge_data.data_ptr<float>(); // this is why only float is supported
        for (int i=0; i<edgelist->size(0); ++i)
        {
          int v1 = static_cast<int>(src_ptr[2*i]);
          int v0 = static_cast<int>(src_ptr[2*i+1]);

          vmin = std::min(v0,vmin);
          vmin = std::min(v1,vmin);
          vmax = std::max(v0,vmax);
          vmax = std::max(v1,vmax);

          if(v0 == v1) continue;
          vvector.push_back(v0);
          vvector.push_back(v1);
          evector.push_back(weight_ptr[i]);
        }
        
        el.vmin = vmin;
        el.vmax = vmax;
        el.nvertexs = (vmax-vmin+1);
        el.nedges = vvector.size()>>1;
        el.directed = directed;
        el.with_weight = true;
    });
    graph.build_reentrant(el,)
  }

  if (edgelist != &edge_tensor) delete edgelist;
}
*/

// int64_t customgcn::get_num_vertex() {return graph.hg.nvertexs;}



// template <typename T> void __global__
// _gcn_nomalize_chunked_heavy_kernel(
//   const int* __restrict__ block_offset_edat,
//   const int* __restrict__ tiled_edat,
//   const T * __restrict__ tiled_wdat,
//   const int* __restrict__ odegree,
//   T* __restrict__ out_wdat,
//   const int hcnt, const int hszie)
// {
//   const int Tx     = threadIdx.x;
//   const int Bx     = blockIdx.x;
//   const int Stride = blockDim.x;

//   const int src_base = Bx*BSZ；
//   const int 
//   int src_deg = 0;
//   for (int i=block_offset_edat[Bx]+Tx; i<block_offset_edat[Bx+1]; i+=Stride)
//   {
//     int vtx = tiled_edat[i];
//     if (vtx == -1) continue;
//     if (vtx < 0)
//     {
//       int cur_dst = HASH(vtx);
//       src_deg = odegree[cur_dst];
//     } else {
//       int dst_deg = odegree[vtx];
//       out_wdat[vtx] = tiled_wdat[vtx] / sqrtf(src_deg*dst_deg);
//     }
//   }
// }

// template <typename T> void __global__
// _gcn_normalize_chunked_lite_kernel(
//   const int*  __restrict__ block_offset_elst,
//   const short* 
// )

uint _thd = 256;
uint _bsz = 256*6;
uint _h_thresh = 256*18;
uint _d_thresh = 1<<30;

torch::Tensor customgcn::propagate(
  int64_t opcode,
  torch::Tensor row_offset, 
  torch::Tensor adj_list, 
  torch::optional<torch::Tensor> row_index,
  torch::optional<torch::Tensor> edge_data,
  torch::optional<torch::Tensor> in_feat) 
{
  // using namespace std;
  // static int file_no = 0;
  // ofstream fs;
  bool use_x = in_feat.has_value();
  bool use_edata = edge_data.has_value();
  bool use_sort = row_index.has_value();

  auto resOpt = torch::TensorOptions().dtype(torch::kF32).device(torch::DeviceType::CUDA);
    
  int nodes, nfeats, ld_in;
  uint *row_idx_ptr = nullptr;
  float *edata_ptr = nullptr, *ndata_ptr = nullptr;

  CHECK_CUDA(row_offset);
  CHECK_CUDA(adj_list);
  if(use_sort) { 
    CHECK_CUDA(row_index.value()); 
    row_idx_ptr = reinterpret_cast<uint32_t*>(row_index.value().data_ptr<int>());
  }
  if(use_edata) { 
    CHECK_CUDA(edge_data.value()); 
    CHECK_INPUT(edge_data.value().dtype() == torch::kF32); 
    edata_ptr = edge_data.value().data_ptr<float>();
  }
  if(use_x) { 
    CHECK_CUDA(in_feat.value()); 
    CHECK_INPUT(in_feat.value().dtype() == torch::kF32); 
    CHECK_INPUT(in_feat.value().stride(1) == 1); //row major is required 
    ndata_ptr = in_feat.value().data_ptr<float>();
    nodes = in_feat.value().size(0);
    nfeats = in_feat.value().size(1);
    ld_in = in_feat.value().stride(0);
  } else {
    nodes = row_offset.size(0)-1;
    nfeats = 1;
    ld_in = 0;
  }

  auto row_offset_ptr = reinterpret_cast<uint32_t*>(row_offset.data_ptr<int>());
  auto adj_list_ptr = reinterpret_cast<uint32_t*>(adj_list.data_ptr<int>());
  
  auto out_feat = torch::zeros({(int64_t)nodes, (int64_t)nfeats}, resOpt);
  auto dst_ptr = out_feat.data_ptr<float>();

  SPMM_DISPATCH(float, opcode,\
      _spmm_manscript<F, uint, float, pull_dyn, shared, dnc, atomic, 32, 32, 1, 1, 1>\
      (row_idx_ptr, row_offset_ptr, adj_list_ptr, edata_ptr,\
       ndata_ptr, ld_in, dst_ptr, nfeats,\
       nodes, adj_list.size(0), nfeats, nullptr));

  // AT_DISPATCH_FLOATING_TYPES(
  //   input.scalar_type(), "custom gcn", [&] {
  //     auto input_ptr = input.data_ptr<scalar_t>();
  //     scalar_t *ew_ptr = nullptr;
  //     if (edge_data.has_value()) ew_ptr = edge_data.value().data_ptr<scalar_t>();
        
  //     // run_gswitch_vc<scalar_t>(input_ptr, output_ptr, strides, nlayers);

  //     // _spmm<spmmStd<scalar_t>, scalar_t>(row_idx_ptr, row_offset_ptr, adj_list_ptr, ew_ptr,
  //     //      acd::getTensorInfo<scalar_t, int>(input), acd::getTensorInfo<scalar_t, int>(output),
  //     //      adj_list.size(0));
  //     // if (feat_dim <= 16)
  //     SPMM_DISPATCH(scalar_t, opcode,
  //     _spmm_llvlib<F, uint, scalar_t, 128, 128, Alternative5::BufferType::none, Alternative5::BufferType::local, 1, 1, 1, 1>
  //               (row_idx_ptr, row_offset_ptr, adj_list_ptr, ew_ptr,
  //               acd::getTensorInfo<scalar_t, uint>(input), 
  //               acd::getTensorInfo<scalar_t, uint>(output), 
  //               adj_list.size(0)));
  //   });
  return out_feat;
}


torch::Tensor customgcn::propagate_backward(
  torch::Tensor input, 
  torch::Tensor row_index,
  torch::Tensor row_offset, 
  torch::Tensor adj_list, 
  torch::optional<torch::Tensor> edge_data) 
{
  auto output = torch::zeros_like(input);
  CHECK_CUDA(input);
  CHECK_CUDA(output);
  CHECK_CUDA(row_offset);
  CHECK_CUDA(adj_list);
  if(edge_data.has_value()) CHECK_CUDA(edge_data.value());

  auto row_offset_ptr = reinterpret_cast<uint32_t*>(row_offset.data_ptr<int>());
  auto adj_list_ptr = reinterpret_cast<uint32_t*>(adj_list.data_ptr<int>());
  AT_DISPATCH_FLOATING_TYPES(
    input.scalar_type(), "custom gcn", [&] {
      auto input_ptr = input.data_ptr<scalar_t>();
      auto output_ptr = output.data_ptr<scalar_t>();
      scalar_t *ew_ptr = nullptr;
      if (edge_data.has_value()) ew_ptr = edge_data.value().data_ptr<scalar_t>();
      
      // run_gswitch_vc<scalar_t>(input_ptr, output_ptr, strides, nlayers);
      _spmm_backward<spmmStd<scalar_t>, scalar_t>(row_offset_ptr, adj_list_ptr, ew_ptr,
           acd::getTensorInfo<scalar_t, uint>(input), acd::getTensorInfo<scalar_t, uint>(output),
           adj_list.size(0));
    });
  return output;
}


std::tuple<torch::Tensor, torch::Tensor>
customgcn::fused_linear_propagate(
  torch::Tensor input,
  torch::Tensor weight,
  torch::optional<torch::Tensor> bias,
  torch::Tensor row_offset,
  torch::Tensor adj_list,
  torch::optional<torch::Tensor> edge_data)
{
  auto inter = torch::zeros({input.size(0), weight.size(0)}).cuda();
  auto output = torch::zeros({input.size(0), weight.size(0)}).cuda();

  CHECK_CUDA(input);
  CHECK_CUDA(weight);
  CHECK_CUDA(row_offset);
  CHECK_CUDA(adj_list);
  CHECK_INPUT(input.size(1) == weight.size(1));
  if (bias.has_value())
  {
    CHECK_CUDA(bias.value());
    CHECK_INPUT(bias.value().size(0) == weight.size(0));
  }
  if (edge_data.has_value()) CHECK_CUDA(edge_data.value());

  AT_DISPATCH_FLOATING_TYPES(
    input.scalar_type(), "fused spmm linear", [&]{
      auto row_offset_data = reinterpret_cast<uint32_t*>(row_offset.data_ptr<int>());
      auto adj_list_data = reinterpret_cast<uint32_t*>(adj_list.data_ptr<int>());
      scalar_t* ew_data = nullptr;
      scalar_t* bias_data = nullptr;
      if (edge_data.has_value()) ew_data = edge_data.value().data_ptr<scalar_t>();
      if (bias.has_value()) bias_data = bias.value().data_ptr<scalar_t>();
      _spmm_linear_fused<spmmStd<scalar_t>, scalar_t>(row_offset_data, adj_list_data, ew_data,
                         acd::getTensorInfo<scalar_t, uint>(input), 
                         acd::getTensorInfo<scalar_t, uint>(weight), 
                         bias_data,
                         acd::getTensorInfo<scalar_t, uint>(inter), 
                         acd::getTensorInfo<scalar_t, uint>(output), 
                         adj_list.size(0));
    });

  return {output, inter};
}


std::tuple<torch::Tensor, torch::Tensor>
customgcn::fused_linear_propagate_reversed(
  torch::Tensor input,
  torch::Tensor weight,
  torch::Tensor row_offset,
  torch::Tensor adj_list,
  torch::optional<torch::Tensor> edge_data)
{
  auto inter = torch::zeros_like(input).cuda();
  auto output = torch::zeros({input.size(0), weight.size(0)}).cuda();

  CHECK_CUDA(input);
  CHECK_CUDA(weight);
  CHECK_CUDA(row_offset);
  CHECK_CUDA(adj_list);
  CHECK_INPUT(input.size(1) == weight.size(1));

  if (edge_data.has_value()) CHECK_CUDA(edge_data.value());

  AT_DISPATCH_FLOATING_TYPES(
    input.scalar_type(), "fused spmm linear reversed", [&]{
      auto row_offset_data = reinterpret_cast<uint32_t*>(row_offset.data_ptr<int>());
      auto adj_list_data = reinterpret_cast<uint32_t*>(adj_list.data_ptr<int>());
      scalar_t* ew_data = nullptr;
      if (edge_data.has_value()) ew_data = edge_data.value().data_ptr<scalar_t>();
      _spmm_linear_fused_reverse<spmmStd<scalar_t>, scalar_t>(row_offset_data, adj_list_data, ew_data,
                         acd::getTensorInfo<scalar_t, uint>(input), 
                         acd::getTensorInfo<scalar_t, uint>(weight), 
                         acd::getTensorInfo<scalar_t, uint>(inter), 
                         acd::getTensorInfo<scalar_t, uint>(output), 
                         adj_list.size(0));
    });

  return {output, inter};
}

torch::Tensor customgcn::propagate_chunked(
  torch::optional<torch::Tensor> bos, torch::optional<torch::Tensor> boe, torch::optional<torch::Tensor> bosb, torch::optional<torch::Tensor> hte, torch::optional<torch::Tensor> htw, 
  torch::optional<torch::Tensor> bosbd, torch::optional<torch::Tensor> dmd, 
  torch::optional<torch::Tensor> boel, torch::optional<torch::Tensor> lsl, torch::optional<torch::Tensor> ldps, torch::optional<torch::Tensor> lwl, 
  torch::Tensor bodl, torch::Tensor ad, 
  int64_t dcnt, int64_t hcnt, int64_t lcnt, int64_t sbsize,
  torch::Tensor src_tensor)
{
  CHECK_CUDA(src_tensor);
  if (bos.has_value()) CHECK_CUDA(bos.value());
  if (boel.has_value()) CHECK_CUDA(boel.value());
  // using a transposed output for better locality -- only if this strategy is faster
  auto src_relay  = (src_tensor.stride(1) == src_tensor.size(0)) ? src_tensor : src_tensor.t().contiguous().t();
  auto dst_tensor = torch::zeros_like(src_relay); 

  int *blockOffsetSdat, *blockOffsetEdat, *blockOffsetSb, *blockOffsetSbDn, *hTiledEdge, *lDstPosSb, *blockOffsetElst, *blockOffsetDlst;
  short *lSrcLst, *aggDst;

  if (dcnt > 0)
  {
    blockOffsetSbDn = bosbd.value().data_ptr<int>();
  } else blockOffsetSbDn = nullptr;

  if (hcnt > 0)
  {
    blockOffsetSdat = bos.value().data_ptr<int>();
    blockOffsetEdat = boe.value().data_ptr<int>();
    blockOffsetSb   = bosb.value().data_ptr<int>();
    hTiledEdge      = hte.value().data_ptr<int>();
  }
  else {
    blockOffsetSdat = blockOffsetEdat = blockOffsetSb = hTiledEdge = nullptr;
  }

  if (lcnt > 0)
  {
    blockOffsetElst = boel.value().data_ptr<int>();
    lSrcLst         = lsl.value().data_ptr<short>();
    lDstPosSb       = ldps.value().data_ptr<int>();
  } 
  else {
    blockOffsetElst = lDstPosSb = nullptr;
    lSrcLst = nullptr;
  }
  
  blockOffsetDlst = bodl.data_ptr<int>();
  aggDst          = ad.data_ptr<short>();

  AT_DISPATCH_FLOATING_TYPES(
    src_tensor.scalar_type(), "propagate chunk entrance", [&]{
      scalar_t *hTiledWeight=nullptr, *lWeightLst=nullptr, *dMatDat=nullptr;
      if (htw.has_value()) hTiledWeight = htw.value().data_ptr<scalar_t>();
      if (lwl.has_value()) lWeightLst   = lwl.value().data_ptr<scalar_t>();
      if (dcnt > 0)        dMatDat      = dmd.value().data_ptr<scalar_t>();

      // printf("i dont believe it! using data_ptr=%lx\n", src_tensor.data_ptr());
      // auto cpu_src = src_tensor.cpu();
      // auto cpu_ptr = cpu_src.data_ptr<scalar_t>();
      // for (int i = 0; i<cpu_src.size(0); ++i)
      // {
      //   for (int j=0; j<cpu_src.size(1); j++)
      //   {
      //     printf("%.6f ", cpu_ptr[i*cpu_src.stride(0)+j*cpu_src.stride(1)]);
      //   }
      //   printf("] \n [ ");
      // }

      _spmm_partitioned<spmmStd<scalar_t>, scalar_t>(
        blockOffsetSdat, blockOffsetEdat, blockOffsetSb, hTiledEdge, hTiledWeight,
        blockOffsetSbDn, dMatDat,
        blockOffsetElst, lSrcLst, lWeightLst, lDstPosSb,
        blockOffsetDlst, aggDst,
        acd::getTensorInfo<scalar_t, int>(src_relay),
        acd::getTensorInfo<scalar_t, int>(dst_tensor),
        (int)dcnt, (int)hcnt, (int)lcnt, (int)sbsize);
    }
  );
  return dst_tensor;
}


torch::Tensor
customgcn::propagate_gcoo(
  torch::Tensor input, 
  torch::Tensor el, 
  torch::Tensor go, 
  torch::optional<torch::Tensor> wl)
{
  CHECK_CUDA(input);
  CHECK_CUDA(el);
  CHECK_CUDA(go);

  const int nedge = el.size(0) >> 1;
  const int ngrps = go.size(0)-1;

  int *dst_list = el.data_ptr<int>();
  int *src_list = dst_list + nedge;
  int *grp_off  = go.data_ptr<int>();

  auto output = torch::zeros_like(input);

  AT_DISPATCH_FLOATING_TYPES(
    input.scalar_type(), "propagate gcoo", [&]{
      scalar_t * weight_list = nullptr;
      if (wl.has_value()) weight_list = wl.value().data_ptr<scalar_t>();

      _gcoo_spmm<spmmStd<scalar_t>, scalar_t>(src_list, dst_list, weight_list, grp_off, 
                 acd::getTensorInfo<scalar_t, int>(input), 
                 acd::getTensorInfo<scalar_t, int>(output),
                 nedge, ngrps);
  });
  return output;
}

torch::Tensor customgat::apply_edge(int64_t opcode,
                                    torch::Tensor row_offset,
                                    torch::Tensor col_indx,
                                    torch::optional<torch::Tensor> row_indx,
                                    torch::optional<torch::Tensor> edge_weight,
                                    torch::optional<torch::Tensor> feat_l,
                                    torch::optional<torch::Tensor> feat_r)
{
  bool use_e = edge_weight.has_value();
  bool sort = row_indx.has_value();
  bool use_lhs = feat_l.has_value();
  bool use_rhs = feat_r.has_value();
  
  uint nv = row_offset.size(0)-1; // sparse size
  uint vlen = 1; // dense columns
  uint ld_src = 0;
  float *feat_l_ptr = nullptr, *feat_r_ptr = nullptr;

  CHECK_CUDA(row_offset);
  CHECK_CUDA(col_indx);

  CHECK_INPUT(row_offset.dtype() == torch::kI32);
  CHECK_INPUT(col_indx.dtype() == torch::kI32);
  if (use_e) { CHECK_CUDA(edge_weight.value()); CHECK_INPUT(edge_weight.value().dtype() == torch::kF32); }
  if (sort) { CHECK_CUDA(row_indx.value()); CHECK_INPUT(row_indx.value().dtype() == torch::kI32);}
  if (use_lhs) { 
    CHECK_CUDA(feat_l.value()); 
    CHECK_INPUT(feat_l.value().dtype() == torch::kF32); 
    CHECK_INPUT(feat_l.value().stride(1) == 1); //row major is required 
    feat_l_ptr = feat_l.value().data_ptr<float>();
    vlen = std::max(vlen, (uint)feat_l.value().size(1)); 
    ld_src = std::max(ld_src, (uint)feat_l.value().stride(0));
  }
  if (use_rhs) { 
    CHECK_CUDA(feat_r.value()); 
    CHECK_INPUT(feat_r.value().dtype() == torch::kF32); 
    CHECK_INPUT(feat_r.value().stride(1) == 1); //row major is required 
    feat_r_ptr = feat_r.value().data_ptr<float>();
    vlen = std::max(vlen, (uint)feat_r.value().size(1)); 
    ld_src = std::max(ld_src, (uint)feat_r.value().stride(0));
  }

  auto outOpt = torch::TensorOptions().dtype(torch::kF32).device(torch::DeviceType::CUDA);
  auto new_weight = torch::zeros(col_indx.sizes(), outOpt);

  uint  *A_row = reinterpret_cast<uint32_t*>(row_offset.data_ptr<int>()); // torch does not suppport uint32
  uint  *A_col = reinterpret_cast<uint32_t*>(col_indx.data_ptr<int>()); 
  float *A_val = new_weight.data_ptr<float>();
  
  uint* A_rptr = sort? reinterpret_cast<uint32_t*>(row_indx.value().data_ptr<int>()) : nullptr;
  float* A_val_in = use_e? edge_weight.value().data_ptr<float>() : nullptr;

  SDDMM_DISPATCH(float, opcode,\
      _sddmm_alt1<F, uint, float, push_seq, none, dnc, shfl_red, 128, 1, 1, 1, 1>\
      (A_rptr, A_row, A_col, A_val_in, feat_l_ptr, feat_r_ptr, ld_src, A_val, nv, (uint)col_indx.size(0), vlen));

  return new_weight;
}

void init_stream()
{
  global_helper.build();
}