#include <cassert>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <limits>
#include <string>
#include <tuple>
#include <vector>

#include <omp.h>

#include <cublas_v2.h>
#include <cusparse.h>

#include "../format/CsrGraph.cuh"
#include "../format/GcooGraph.cuh"

#include "spmm_kernels.cuh"
#include "gcoo_kernels.cuh"

// Select the CSR-based code paths in this benchmark driver.
#define CSR

// Error check for CUDA driver API calls (cuLaunchKernel etc.);
// ASSERT comes from the project headers included above.
#define CUDRIVER_CHECK(expr) ASSERT(CUDA_SUCCESS == (expr), "driver api failed")

// Global tuning knobs for the hand-written kernels.
uint _thd = 256;         // threads per block
uint _bsz = 128*4;       // tile / batch size
uint _h_thresh = 128*18; // host-side threshold -- NOTE(review): unit unclear, confirm
uint _d_thresh = 1<<30;  // device-side threshold -- NOTE(review): unit unclear, confirm

// Semiring used by the SpMM tests: multiply an edge value with a dense
// feature element (binary) and accumulate by addition (reduce variants).
struct TestFunc : Func<float>{
  // Combine one sparse entry with one dense element.
  static __device__ __forceinline__ float binary(float a, float b) {return a*b;}
  // Plain read-modify-write accumulation; NOT atomic -- only safe when a
  // single thread owns *addr.
  static __device__ __forceinline__ float reduce(float *addr, float n) {*addr += n; return *addr;}
  // Contended accumulation; note atomicAdd returns the OLD value at *addr,
  // unlike reduce() which returns the updated one.
  static __device__ __forceinline__ float reduceAtomic(float *addr, float n) {return atomicAdd(addr, n);}
};

// Fill `buffer` with `numel` pseudo-random values from the C PRNG:
// each element is (rand() % 100) / 13, i.e. within [0, 99/13].
template <typename scalar_t>
void init_random_mat(scalar_t* buffer, uint numel)
{
  scalar_t* const last = buffer + numel;
  for (scalar_t* cur = buffer; cur != last; ++cur)
  {
    *cur = static_cast<scalar_t>(std::rand() % 100) / 13;
  }
}

// Host-side reference SpMM: for every vertex `row`, accumulate
// edge_val * src[neighbor, :] into dst[row, :] over the CSR adjacency of
// `g`.  Feature matrices are row-major with row length `vlen`.  Rows are
// parallelized with OpenMP; each dst row is written by exactly one thread.
template<typename Graph>
void cpu_check(Graph g, const float *src, float* dst, uint vlen)
{
  #pragma omp parallel for num_threads(OMP_THD)
  for( size_t row=0; row<g.nvertex; ++row )
  {
    const uint nbr_begin = g.row_offset[row];
    const uint nbr_end   = g.row_offset[row+1];
    for( uint feat=0; feat<vlen; ++feat )
    {
      // accumulate in the same order as a naive triple loop would
      float acc = dst[row*vlen+feat];
      for ( uint eid=nbr_begin; eid<nbr_end; ++eid)
      {
        acc += src[g.col_idx[eid]*vlen+feat] * g.edge_val[eid];
      }
      dst[row*vlen+feat] = acc;
    }
  }
}


// Error check for cuBLAS API calls: print the failing line and status code,
// then abort.  (Bug fix: the message previously said "CUSPARSE" even though
// this macro checks cublasStatus_t.)
#define CHECK_CUBLAS(func)                                          \
do {                                                                \
    cublasStatus_t status = (func);                                 \
    if (status != CUBLAS_STATUS_SUCCESS) {                          \
        printf("CUBLAS API failed at line %d with error: (%d)\n",   \
               __LINE__, status);                                   \
        exit(-1);                                                   \
    }                                                               \
} while (0)

#if __CUDACC_VER_MAJOR__ >= 11

// Error check for cuSPARSE calls (CUDA 11+ generic API): print the failing
// line with the human-readable error string, then abort.
#define CHECK_CUSPARSE(func)                                                   \
do {                                                                           \
    cusparseStatus_t status = (func);                                          \
    if (status != CUSPARSE_STATUS_SUCCESS) {                                   \
        printf("CUSPARSE API failed at line %d with error: %s (%d)\n",         \
               __LINE__, cusparseGetErrorString(status), status);              \
        exit(-1);                                                              \
    }                                                                          \
} while (0)

// Benchmark the fused pipeline  dst = A * (src * W)  where the dense GEMM
// (src:[nvertex x vlen_in] times W:[vlen_in x vlen_out]) runs on cuBLAS and
// the sparse multiply runs on the CUDA-11 generic cuSPARSE SpMM.  `g` must
// be a CSR graph with 32-bit indices and float edge values already resident
// on the device; all dense matrices are row-major.  Copies the final result
// into `dst` and returns the average time per iteration in milliseconds.
template<typename Graph>
float cusparse_check_mixed(Graph g, const float *src, float* w, float *dst, uint vlen_in, uint vlen_out, uint iters=1)
{
  cublasHandle_t blas_h;
  cusparseHandle_t sparse_h;
  cusparseSpMatDescr_t matA;
  cusparseDnMatDescr_t matB, matC;
  cudaEvent_t s, e;

  // scratch buffer requested by cusparseSpMM_bufferSize
  size_t ext_buf_size;
  void * ext_buffer;

  float *d_src, *d_dst, *d_w, *d_buffer;

  float alpha = 1.0;
  float beta = 0.0;

  float time;

  H_ERR(cudaEventCreate(&s));
  H_ERR(cudaEventCreate(&e));

  H_ERR(cudaMalloc(&d_src, sizeof(float)*g.nvertex*vlen_in));
  H_ERR(cudaMalloc(&d_buffer, sizeof(float)*g.nvertex*vlen_out));
  H_ERR(cudaMalloc(&d_dst, sizeof(float)*g.nvertex*vlen_out));
  H_ERR(cudaMalloc(&d_w, sizeof(float)*vlen_in*vlen_out));

  // BUG FIX: d_src holds nvertex*vlen_in floats, so copy vlen_in columns.
  // The original copied nvertex*vlen_out elements, over-reading src or
  // over-writing d_src whenever vlen_in != vlen_out.
  H_ERR(TODEV(d_src, src, g.nvertex*vlen_in));
  H_ERR(TODEV(d_w, w, vlen_in*vlen_out));
  H_ERR(CLEAN(d_dst, g.nvertex*vlen_out));

  CHECK_CUBLAS(cublasCreate(&blas_h));

  CHECK_CUSPARSE(cusparseCreate(&sparse_h));
  CHECK_CUSPARSE(cusparseCreateCsr(&matA, g.nvertex, g.nvertex, g.nedge, g.row_offset, g.col_idx, (void*)g.edge_val,
                          CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F));
  // B wraps the GEMM output (d_buffer); C wraps the final result (d_dst).
  CHECK_CUSPARSE(cusparseCreateDnMat(&matB, g.nvertex, vlen_out, vlen_out, (void*)d_buffer, CUDA_R_32F, CUSPARSE_ORDER_ROW));
  CHECK_CUSPARSE(cusparseCreateDnMat(&matC, g.nvertex, vlen_out, vlen_out, (void*)d_dst, CUDA_R_32F, CUSPARSE_ORDER_ROW));

  CHECK_CUSPARSE(cusparseSpMM_bufferSize(
    sparse_h,
    CUSPARSE_OPERATION_NON_TRANSPOSE,
    CUSPARSE_OPERATION_NON_TRANSPOSE,
    &alpha, matA, matB, &beta, matC, CUDA_R_32F,
    CUSPARSE_SPMM_ALG_DEFAULT, &ext_buf_size
  ));
  H_ERR(cudaMalloc(&ext_buffer, ext_buf_size));
  std::cout << "temp buffer 4 cusparse size = " << ext_buf_size << std::endl;

  H_ERR(cudaEventRecord(s));
  for ( uint i=0; i<iters; i++)
  {
    // Row-major GEMM via the cuBLAS column-major trick:
    // d_buffer[nvertex x vlen_out] = d_src[nvertex x vlen_in] * d_w[vlen_in x vlen_out]
    CHECK_CUBLAS(cublasSgemm(blas_h, CUBLAS_OP_N, CUBLAS_OP_N,
                             vlen_out, g.nvertex, vlen_in,
                             &alpha,
                             d_w, vlen_out,
                             d_src, vlen_in,
                             &beta,
                             d_buffer, vlen_out));
    CHECK_CUSPARSE(cusparseSpMM(
      sparse_h,
      CUSPARSE_OPERATION_NON_TRANSPOSE,
      CUSPARSE_OPERATION_NON_TRANSPOSE,
      &alpha, matA, matB, &beta, matC, CUDA_R_32F,
      CUSPARSE_SPMM_ALG_DEFAULT, ext_buffer
    ));
  }
  H_ERR(cudaEventRecord(e));
  H_ERR(cudaDeviceSynchronize());

  H_ERR(cudaEventElapsedTime(&time, s, e));

  H_ERR(TOHOST(d_dst, dst, g.nvertex*vlen_out));

  // Release everything we created (the original leaked d_w, d_buffer and
  // both library handles).
  cudaFree(ext_buffer);
  CHECK_CUSPARSE(cusparseDestroySpMat(matA));
  CHECK_CUSPARSE(cusparseDestroyDnMat(matB));
  CHECK_CUSPARSE(cusparseDestroyDnMat(matC));
  CHECK_CUSPARSE(cusparseDestroy(sparse_h));
  CHECK_CUBLAS(cublasDestroy(blas_h));

  cudaFree(d_src);
  cudaFree(d_dst);
  cudaFree(d_w);
  cudaFree(d_buffer);

  cudaEventDestroy(s);
  cudaEventDestroy(e);

  return time / iters;
}


// Benchmark plain SpMM (dst = A * src) with the CUDA-11 generic cuSPARSE
// API.  `g` must be CSR with 32-bit indices and float edge values; src/dst
// are row-major [nvertex x vlen].  Runs CEIL(iters,10) warmup launches,
// then times `iters` launches with CUDA events (wall-clock time is printed
// for reference).  Copies the result into `dst` and returns the average
// GPU time per iteration in milliseconds.
template<typename Graph>
float cusparse_check(Graph g, const float *src, float *dst, uint vlen, uint iters=1)
{
  cusparseHandle_t sparse_h;
  cusparseSpMatDescr_t matA;
  cusparseDnMatDescr_t matB, matC;
  cudaEvent_t s, e;

  // scratch buffer requested by cusparseSpMM_bufferSize
  size_t ext_buf_size;
  void * ext_buffer;

  float *d_src, *d_dst;

  float alpha = 1.0;
  float beta = 0.0;

  float time;

  H_ERR(cudaEventCreate(&s));
  H_ERR(cudaEventCreate(&e));

  H_ERR(cudaMalloc(&d_src, sizeof(float)*g.nvertex*vlen));
  H_ERR(cudaMalloc(&d_dst, sizeof(float)*g.nvertex*vlen));

  H_ERR(TODEV(d_src, src, g.nvertex*vlen));
  H_ERR(CLEAN(d_dst, g.nvertex*vlen));

  CHECK_CUSPARSE(cusparseCreate(&sparse_h));
  CHECK_CUSPARSE(cusparseCreateCsr(&matA, g.nvertex, g.nvertex, g.nedge, g.row_offset, g.col_idx, (void*)g.edge_val,
                          CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F));
  CHECK_CUSPARSE(cusparseCreateDnMat(&matB, g.nvertex, vlen, vlen, (void*)d_src, CUDA_R_32F, CUSPARSE_ORDER_ROW));
  CHECK_CUSPARSE(cusparseCreateDnMat(&matC, g.nvertex, vlen, vlen, (void*)d_dst, CUDA_R_32F, CUSPARSE_ORDER_ROW));

  CHECK_CUSPARSE(cusparseSpMM_bufferSize(
    sparse_h,
    CUSPARSE_OPERATION_NON_TRANSPOSE,
    CUSPARSE_OPERATION_NON_TRANSPOSE,
    &alpha, matA, matB, &beta, matC, CUDA_R_32F,
    CUSPARSE_SPMM_ALG_DEFAULT, &ext_buf_size
  ));
  H_ERR(cudaMalloc(&ext_buffer, ext_buf_size));
  std::cout << "temp buffer 4 cusparse size = " << ext_buf_size << std::endl;

  // warmup (the SpMM status was previously ignored here -- now checked)
  for ( uint i=0; i<CEIL(iters,10); i++)
  {
    CHECK_CUSPARSE(cusparseSpMM(
      sparse_h,
      CUSPARSE_OPERATION_NON_TRANSPOSE,
      CUSPARSE_OPERATION_NON_TRANSPOSE,
      &alpha, matA, matB, &beta, matC, CUDA_R_32F,
      CUSPARSE_SPMM_ALG_DEFAULT, ext_buffer
    ));
  }

  H_ERR(cudaEventRecord(s, 0));
  double t1 = mwtime();
  for ( uint i=0; i<iters; i++)
  {
    CHECK_CUSPARSE(cusparseSpMM(
      sparse_h,
      CUSPARSE_OPERATION_NON_TRANSPOSE,
      CUSPARSE_OPERATION_NON_TRANSPOSE,
      &alpha, matA, matB, &beta, matC, CUDA_R_32F,
      CUSPARSE_SPMM_ALG_DEFAULT, ext_buffer
    ));
  }
  H_ERR(cudaEventRecord(e, 0));
  H_ERR(cudaEventSynchronize(e));
  double t2 = mwtime();

  H_ERR(cudaEventElapsedTime(&time, s, e));

  H_ERR(TOHOST(d_dst, dst, g.nvertex*vlen));

  cudaFree(ext_buffer);
  CHECK_CUSPARSE(cusparseDestroySpMat(matA));
  CHECK_CUSPARSE(cusparseDestroyDnMat(matB));
  CHECK_CUSPARSE(cusparseDestroyDnMat(matC));
  CHECK_CUSPARSE(cusparseDestroy(sparse_h));  // handle was leaked before

  cudaFree(d_src);
  cudaFree(d_dst);

  cudaEventDestroy(s);
  cudaEventDestroy(e);

  // host wall-clock includes launch overhead, hence the separate report
  printf("CPU time record = %fms\n", (t2-t1)/iters);
  return time / iters;
}

#else
// Pre-CUDA-11 toolchains: pass the call through unchecked (legacy cuSPARSE
// status handling is left to the surrounding code).
#define CHECK_CUSPARSE(func) func

// Benchmark SpMM with the legacy (pre-CUDA-11) cusparseScsrmm2 API.
// NOTE(review): `iters` independent src/dst matrices are allocated so every
// iteration touches distinct data; device memory use scales with `iters` --
// confirm this is intended for large graphs.  Returns average time per
// iteration in milliseconds and copies all results back into `dst`.
template<class Graph>
float cusparse_check(Graph& g, const float *src, float *dst, uint vlen, uint iters=1)
{
  cusparseHandle_t handle;
  cusparseMatDescr_t desc;
  cudaEvent_t s,e;

  const uint num_dense_mat = g.nvertex*vlen;   // elements per dense matrix
  float *d_src, *d_dst;
  float alpha = 1.0;
  float beta = 0.0;

  float time;
  H_ERR(cudaEventCreate(&s));
  H_ERR(cudaEventCreate(&e));

  H_ERR(cudaMalloc(&d_src, sizeof(float)*num_dense_mat*iters));
  H_ERR(cudaMalloc(&d_dst, sizeof(float)*num_dense_mat*iters));

  H_ERR(TODEV(d_src, src, num_dense_mat*iters));
  H_ERR(CLEAN(d_dst, num_dense_mat*iters));

  CHECK_CUSPARSE(cusparseCreate(&handle));
  CHECK_CUSPARSE(cusparseCreateMatDescr(&desc));
  CHECK_CUSPARSE(cusparseSetMatType(desc, CUSPARSE_MATRIX_TYPE_GENERAL));
  CHECK_CUSPARSE(cusparseSetMatIndexBase(desc, CUSPARSE_INDEX_BASE_ZERO));

  H_ERR(cudaEventRecord(s));
  for ( uint i=0; i<iters; ++i)
  {
    // legacy csrmm2 treats the dense matrices as column-major, ld = nvertex
    CHECK_CUSPARSE(cusparseScsrmm2(handle,
      CUSPARSE_OPERATION_NON_TRANSPOSE,
      CUSPARSE_OPERATION_NON_TRANSPOSE,
      g.nvertex, vlen, g.nvertex, g.nedge, &alpha,
      desc, g.edge_val, g.row_offset, g.col_idx,
      d_src+num_dense_mat*i, g.nvertex, &beta,
      d_dst+num_dense_mat*i, g.nvertex
    ));
  }
  H_ERR(cudaEventRecord(e));
  H_ERR(cudaDeviceSynchronize());
  H_ERR(TOHOST(d_dst, dst, num_dense_mat*iters));

  H_ERR(cudaEventElapsedTime(&time, s, e));

  // release descriptor, handle, and events (all leaked in the original)
  CHECK_CUSPARSE(cusparseDestroyMatDescr(desc));
  CHECK_CUSPARSE(cusparseDestroy(handle));
  H_ERR(cudaEventDestroy(s));
  H_ERR(cudaEventDestroy(e));

  H_ERR(cudaFree(d_dst));
  H_ERR(cudaFree(d_src));

  return time / iters;
}
#endif

// Read an edge list (MatrixMarket-like: comment lines first, then
// "src dst [weight]" per line) from `path` into newly allocated arrays.
// Self loops are dropped.  When `with_header` is set, the first data line
// is read as "nvertex nvertex nedge"; otherwise nvertex is derived from
// the largest vertex id seen (ids are assumed 0-based).  Returns
// (src, dst, edge_weight, nvertex, nedge); the caller owns all three
// arrays (delete[]).
template <typename T>
std::tuple<int*, int*, T*, uint, uint> read_mtx(std::string&& path, bool with_weight=false, bool with_header=false)
{
  std::vector<int> svector, dvector;
  std::vector<T> evector;
  int *src, *dst;
  int64_t nvertex = -1, nedge = -1;
  T *ew;

  int vmax = 0;
  std::ifstream fin(path);

  if(!fin.is_open()) ASSERT(false, "can not open file");

  // Skip leading comment lines (anything not starting with a digit).
  // BUG FIX: the original spun forever on a file with no data line,
  // because peek() keeps returning EOF, which is never a digit.
  while(fin.good()) {
    int c = fin.peek();
    if(c>='0' && c<='9') break;
    fin.ignore(std::numeric_limits<std::streamsize>::max(), fin.widen('\n'));
  }
  ASSERT(fin.good(), "no data lines in file");

  // if with header (meaningless, just ignore)
  if(with_header) {
    fin >> nvertex >> nvertex >> nedge;
    fin.ignore(std::numeric_limits<std::streamsize>::max(), fin.widen('\n'));
  }
  else nvertex = nedge = -1;

  while(fin.good()){
    int v0,v1;
    T w = static_cast<T>(1);
    // BUG FIX: stop as soon as extraction fails; the original read v0/v1
    // uninitialized on a trailing blank line before noticing EOF.
    if(!(fin >> v0 >> v1)) break;
    if(with_weight) fin >> w;
    else{
      fin.ignore(std::numeric_limits<std::streamsize>::max(), fin.widen('\n'));
    }

    vmax = vmax < v0 ? v0 : vmax;
    vmax = vmax < v1 ? v1 : vmax;

    if(v0 == v1) continue;  // drop self loops
    svector.push_back(v0);
    dvector.push_back(v1);

    evector.push_back(w);
  }

  // %zu matches size_t (the original %ld is wrong on LLP64 platforms)
  printf("s: %zu, d: %zu, e: %zu\n", svector.size(), dvector.size(), evector.size());
  ASSERT(svector.size() == dvector.size() && svector.size() == evector.size(),
         "file header did not match file body");
  // Only meaningful when a header actually supplied an edge count;
  // previously this printed "-1 -> N" for headerless files.
  if (nedge >= 0 && (size_t)nedge != svector.size())
    std::cout << "removed redundant edges : " << nedge << " -> "
              << svector.size() << std::endl;

  nedge = svector.size();
  src = new int[svector.size()];
  dst = new int[dvector.size()];
  ew = new T[evector.size()];

  memcpy(src, svector.data(), sizeof(int)*svector.size());
  memcpy(dst, dvector.data(), sizeof(int)*dvector.size());
  memcpy(ew, evector.data(), sizeof(T)*evector.size());

  if (nvertex<0)
  {
    nvertex = vmax+1;  // 0-based ids: count = max id + 1
  }

  return std::make_tuple(src, dst, ew, (uint)nvertex, (uint)nedge);
}

// Benchmark the hand-written _spmm_manscript SpMM kernels on CSR graph `g`.
// src/dst are row-major [nvertex x vlen]; `nfeats` independent feature
// matrices are cycled through so consecutive iterations touch different
// data.  Copies the last result into `dst` and returns the average time
// per timed iteration in milliseconds.
template<class G, class T>
float spmm_test(G& g, const T *src, T *dst, uint vlen, uint iters=1, uint nfeats=1)
{
  cudaEvent_t s,e;
  float time;
  float *d_src, *d_dst;
  size_t num_dense_ele = g.nvertex*vlen;

  uint *d_idx;
  // degree-sorted vertex permutation consumed by the kernels
  auto sorted_index = g.sortVertex();

  H_ERR(cudaMalloc(&d_idx, sizeof(uint)*g.nvertex));
  H_ERR(TODEV(d_idx, sorted_index.data(), g.nvertex));

  cudaEventCreate(&s);
  cudaEventCreate(&e);

  H_ERR(cudaMalloc(&d_src, sizeof(T)*num_dense_ele*nfeats));
  H_ERR(cudaMalloc(&d_dst, sizeof(T)*num_dense_ele*nfeats));

  H_ERR(TODEV(d_src, src, num_dense_ele*nfeats));
  H_ERR(CLEAN(d_dst, num_dense_ele*nfeats));

  std::cout << "device mem allocated" << std::endl;

  uint sizes[] = {static_cast<uint>(g.nvertex), vlen};
  uint strides[] = {vlen, 1};

  // Warmup for CEIL(iters,10) rounds.  (The original used iters/10, which
  // skipped warmup entirely for iters < 10 -- now consistent with the
  // cusparse_check warmup.)
  for (uint i=0; i<CEIL(iters,10); i++)
  {
    acd::TensorInfo<float, uint> src_tensor(d_src+num_dense_ele*(i%nfeats), 2, sizes, strides);
    acd::TensorInfo<float, uint> dst_tensor(d_dst+num_dense_ele*(i%nfeats), 2, sizes, strides);

    _spmm_manscript<TestFunc, uint, float,
                  pull_fix, shared, u_e_v, shared_red, 
                  16, 16, 1, 4, 1>
    (d_idx, reinterpret_cast<uint*>(g.row_offset), reinterpret_cast<uint*>(g.col_idx), g.edge_val, src_tensor, dst_tensor, g.nedge);
  }

  H_ERR(cudaEventRecord(s));
  for (uint i=0; i<iters; i++)
  {
    // NOTE(review): the timed loop uses a different kernel configuration
    // (local/shfl_red, raw-pointer overload) than the warmup above --
    // confirm this mismatch is intentional.
    _spmm_manscript<TestFunc, uint, float,
                    pull_fix, local, u_e_v, shfl_red,  
                    32, 8, 1, 4, 1>
    (d_idx,g.row_offset, g.col_idx, g.edge_val, d_src, vlen, d_dst, vlen, g.nvertex, g.nedge, vlen, nullptr);
  }
  H_ERR(cudaEventRecord(e));
  H_ERR(cudaEventSynchronize(e));
  H_ERR(cudaEventElapsedTime(&time, s, e));

  H_ERR(TOHOST(d_dst, dst, num_dense_ele*nfeats));
  cudaEventDestroy(s);
  cudaEventDestroy(e);

  cudaFree(d_src);
  cudaFree(d_dst);
  cudaFree(d_idx);   // was leaked in the original

  return time / iters;
}

// Benchmark the grouped-COO SpMM kernel on graph `g`.  src/dst are
// row-major [nvertex x vlen].  Copies the result into `dst` and returns
// the average time per iteration in milliseconds.
template<class G, class T>
float gcoo_test(G& g, const T *src, T *dst, uint vlen, uint iters=1)
{
  float time;
  cudaEvent_t s,e;
  float *d_src, *d_dst;
  size_t num_dense_ele = g.nvertex*vlen;

  H_ERR(cudaEventCreate(&s));
  H_ERR(cudaEventCreate(&e));

  H_ERR(cudaMalloc(&d_src, sizeof(float)*num_dense_ele));
  H_ERR(cudaMalloc(&d_dst, sizeof(float)*num_dense_ele));

  H_ERR(TODEV(d_src, src, num_dense_ele));
  H_ERR(CLEAN(d_dst, num_dense_ele));

  // explicit cast avoids a narrowing warning when nvertex is 64-bit
  // (consistent with spmm_test)
  uint sizes[] = {static_cast<uint>(g.nvertex), vlen};
  uint strides[] = {vlen, 1};

  acd::TensorInfo<float, uint> src_tensor(d_src, 2, sizes, strides);
  acd::TensorInfo<float, uint> dst_tensor(d_dst, 2, sizes, strides);

  H_ERR(cudaEventRecord(s));
  for (uint i=0; i<iters; i++)
    _gcoo_spmm<TestFunc, float>(g.src_list, g.dst_list, g.edge_val, g.grp_offset, src_tensor, dst_tensor, g.nedge, g.ngrps);
  H_ERR(cudaEventRecord(e));

  H_ERR(cudaEventSynchronize(e));
  H_ERR(cudaEventElapsedTime(&time, s, e));  // was unchecked

  H_ERR(TOHOST(d_dst, dst, num_dense_ele));
  cudaFree(d_src);
  cudaFree(d_dst);

  cudaEventDestroy(s);
  cudaEventDestroy(e);

  return time / iters;
}

// Launch a pre-compiled PTX SpMM kernel through the CUDA driver API.
// Grid x covers destination rows (block_y rows per block), grid y covers
// 32-column slices of dst, doubled.  Shared memory holds one uint (plus
// one scalar_t when edge weights are present) per thread.
template <typename F, typename scalar_t>
void _spmm_ptx(CUfunction& k, const int* row_offset, const int* col_indx, const scalar_t* edge_weight,
               const acd::TensorInfo<scalar_t, uint> src, acd::TensorInfo<scalar_t, uint> dst, const uint edge)
{
  // running launch counter, used only for log correlation
  static uint launch_cnt = 0;

  const uint coop_per_warp = 8;  // forwarded to the kernel as its first argument
  const uint lane_count = 32;    // cooperative width (one warp along x)

  const uint block_x = lane_count;
  const uint block_y = 128 / lane_count;
  const uint grid_y  = CEIL(dst.sizes[1], lane_count) * 2;
  const uint grid_x  = CEIL(dst.sizes[0], block_y);

  // driver-API argument list: addresses of each kernel parameter
  void *kparam[] = {
    (void*)&coop_per_warp,
    (void*)&row_offset,
    (void*)&col_indx,
    (void*)&edge_weight,
    (void*)&src,
    (void*)&dst,
    (void*)&edge
  };

  uint sm_bytes = block_x * block_y * sizeof(uint);
  if (edge_weight != nullptr) sm_bytes += block_x * block_y * sizeof(scalar_t);

  std::cout << "pre-call ptx kernel " << launch_cnt <<" launch OK" << std::endl;
  CUDRIVER_CHECK(cuLaunchKernel(k, grid_x, grid_y, 1, block_x, block_y, 1, sm_bytes, 0, kparam, NULL));
  std::cout << "ptx kernel " << launch_cnt++ <<" launch OK" << std::endl;
}


// Sweep benchmark driver for the _spmm_manscript kernel template: runs a
// macro-generated grid of kernel configurations over graph `g` and prints
// time and throughput for each.  `dst` receives whatever the last
// configuration produced.
template<class G, class T>
void spmm_veclib_benchmarker(G& g, const T *src, T *dst, uint vlen, uint iters=1, uint nfeats=1)
{

// NOTE(review): in a backslash-continued #define, line splicing happens
// BEFORE comment processing, so the `//` on the first body line below turns
// the entire remaining replacement list into one comment.  This definition
// therefore expands to just `do{` and is dead code; it is then redefined
// below without an intervening #undef, which triggers a macro-redefinition
// diagnostic.  Consider deleting this block or adding `#undef CALL_VECLIB`.
#define CALL_VECLIB(spgrp, dngrp, spbt, dnbt, spvlen, dnvlen, spiter, dniter)\
do{\
//   for (uint i=0; i<iters/10; i++)\
//   {\
//     acd::TensorInfo<float, uint> src_tensor(d_src+num_dense_ele*(i%nfeats), 2, sizes, strides);\
//     acd::TensorInfo<float, uint> dst_tensor(d_dst+num_dense_ele*(i%nfeats), 2, sizes, strides);\
//     _spmm_llvlib<TestFunc, uint, float, spgrp, dngrp, Alternative5::BufferType::spbt, Alternative5::BufferType::dnbt, spvlen, dnvlen, spiter, dniter>\
//     (d_idx, g.row_offset, g.col_idx, g.edge_val, src_tensor, dst_tensor, g.nedge);\
//   }\
//   H_ERR(cudaDeviceSynchronize());\
//   H_ERR(cudaEventRecord(s));\
//   for (uint i=0; i<iters; i++)\
//   {\
//     acd::TensorInfo<float, uint> src_tensor(d_src+num_dense_ele*(i%nfeats), 2, sizes, strides);\
//     acd::TensorInfo<float, uint> dst_tensor(d_dst+num_dense_ele*(i%nfeats), 2, sizes, strides);\
//     _spmm_llvlib<TestFunc, uint, float, spgrp, dngrp, Alternative5::BufferType::spbt, Alternative5::BufferType::dnbt, spvlen, dnvlen, spiter, dniter>\
//     (d_idx, g.row_offset, g.col_idx, g.edge_val, src_tensor, dst_tensor, g.nedge);\
//   }\
//   H_ERR(cudaEventRecord(e));\
//   H_ERR(cudaDeviceSynchronize());\
//   H_ERR(cudaEventElapsedTime(&time, s, e));\
//   printf("spg" #spgrp "-dng" #dngrp "-spbt:" #spbt "-dnbt:" #dnbt "-spv" #spvlen\
//   "," #spiter "-dnv" #dnvlen "," #dniter " :time=%.6f, TP=%.6f\n",\
//   time/iters, (double)2*g.nedge*iters*vlen/time/1e6);\
// }while(0)

// Active definition: warm up for CEIL(iters,10) rounds, then time `iters`
// launches of _spmm_manscript with the given template configuration and
// print the configuration tag, per-iteration time, and throughput
// (counting 2*nedge*vlen flops per iteration).
#define CALL_VECLIB(spwk, spld, dncp, red, spg, dng, spv, dnv, dnit)\
do{\
  for (uint i=0; i<CEIL(iters,10); i++)\
  {\
    _spmm_manscript<TestFunc, uint, float, spwk, spld, dncp, red, spg, dng, spv, dnv, dnit>\
    (d_idx, reinterpret_cast<uint*>(g.row_offset), reinterpret_cast<uint*>(g.col_idx), g.edge_val,\
     d_src, vlen, d_dst, vlen, g.nvertex, g.nedge, vlen, d_buffer);\
  }\
  H_ERR(cudaEventRecord(s));\
  for (uint i=0; i<iters; i++)\
  {\
    _spmm_manscript<TestFunc, uint, float, spwk, spld, dncp, red, spg, dng, spv, dnv, dnit>\
    (d_idx, reinterpret_cast<uint*>(g.row_offset), reinterpret_cast<uint*>(g.col_idx), g.edge_val,\
     d_src, vlen, d_dst, vlen, g.nvertex, g.nedge, vlen, d_buffer);\
  }\
  H_ERR(cudaEventRecord(e));\
  H_ERR(cudaEventSynchronize(e));\
  H_ERR(cudaEventElapsedTime(&time, s, e));\
  printf("spwalk=" #spwk ",spload=" #spld ",dn_comp=" #dncp\
        ",red=" #red ",sp:" #spg "v" #spv ",dn:" #dng "v" #dnv\
         "u" #dnit " :time=%.6f, TP=%.6f\n",\
  time/iters, (double)2*g.nedge*iters*vlen/time/1e6);\
}while(0)

  cudaEvent_t s,e;
  float time;
  float *d_src, *d_dst;
  size_t num_dense_ele = g.nvertex*vlen;

  uint *d_idx, *d_buffer;
  // degree-sorted vertex permutation consumed by the kernels
  auto sorted_index = g.sortVertex();

  // NOTE(review): d_idx is never cudaFree'd in this function (leak).
  H_ERR(cudaMalloc(&d_idx, sizeof(uint)*g.nvertex));
  H_ERR(TODEV(d_idx, sorted_index.data(), g.nvertex));

  cudaEventCreate(&s);
  cudaEventCreate(&e);

  H_ERR(cudaMalloc(&d_src, sizeof(T)*num_dense_ele*nfeats));
  H_ERR(cudaMalloc(&d_dst, sizeof(T)*num_dense_ele*nfeats));

  H_ERR(TODEV(d_src, src, num_dense_ele*nfeats));
  H_ERR(CLEAN(d_dst, num_dense_ele*nfeats));

  std::cout << "device mem allocated" << std::endl;

  uint sizes[] = {static_cast<uint>(g.nvertex), vlen};
  uint strides[] = {vlen, 1};

  // Allocates the auxiliary device buffer (d_buffer) used by the
  // configurations invoked through CALL_VECLIB below.
  _spmm_manscript_buffer<TestFunc, uint, float, pull_dyn, none, u_e_v, shared_red, 8, 8, 1, 1, 1>\
    (d_idx, reinterpret_cast<uint*>(g.row_offset), reinterpret_cast<uint*>(g.col_idx), g.edge_val, sizes, strides, g.nedge, &d_buffer);

// Earlier generation of the sweep, kept for reference (fully commented out;
// note the trailing backslashes splice all of these lines into one logical
// comment line):
// #define __gen_conf_4_grp(spg, dng)\
// do{\
// CALL_VECLIB(spg, dng, none, none, 1, 1, 1, 1);\
// CALL_VECLIB(spg, dng, none, none, 1, 1, 1, 2);\
// CALL_VECLIB(spg, dng, none, none, 1, 2, 1, 1);\
// CALL_VECLIB(spg, dng, none, none, 1, 2, 1, 2);\
// CALL_VECLIB(spg, dng, none, none, 1, 4, 1, 1);\
// CALL_VECLIB(spg, dng, none, none, 1, 4, 1, 2);\
// CALL_VECLIB(spg, dng, none, none, 2, 1, 1, 1);\
// CALL_VECLIB(spg, dng, none, none, 2, 1, 1, 2);\
// CALL_VECLIB(spg, dng, none, none, 2, 2, 1, 1);\
// CALL_VECLIB(spg, dng, none, none, 2, 2, 1, 2);\
// CALL_VECLIB(spg, dng, none, none, 2, 4, 1, 1);\
// CALL_VECLIB(spg, dng, none, none, 2, 4, 1, 2);\
// CALL_VECLIB(spg, dng, none, none, 4, 1, 1, 1);\
// CALL_VECLIB(spg, dng, none, none, 4, 1, 1, 2);\
// CALL_VECLIB(spg, dng, none, none, 4, 2, 1, 1);\
// CALL_VECLIB(spg, dng, none, none, 4, 2, 1, 2);\
// CALL_VECLIB(spg, dng, none, none, 4, 4, 1, 1);\
// CALL_VECLIB(spg, dng, none, none, 4, 4, 1, 2);\
// CALL_VECLIB(spg, dng, none, local, 1, 1, 1, 1);\
// CALL_VECLIB(spg, dng, none, local, 1, 1, 1, 2);\
// CALL_VECLIB(spg, dng, none, local, 1, 2, 1, 1);\
// CALL_VECLIB(spg, dng, none, local, 1, 2, 1, 2);\
// CALL_VECLIB(spg, dng, none, local, 1, 4, 1, 1);\
// CALL_VECLIB(spg, dng, none, local, 1, 4, 1, 2);\
// CALL_VECLIB(spg, dng, none, local, 2, 1, 1, 1);\
// CALL_VECLIB(spg, dng, none, local, 2, 1, 1, 2);\
// CALL_VECLIB(spg, dng, none, local, 2, 2, 1, 1);\
// CALL_VECLIB(spg, dng, none, local, 2, 2, 1, 2);\
// CALL_VECLIB(spg, dng, none, local, 2, 4, 1, 1);\
// CALL_VECLIB(spg, dng, none, local, 2, 4, 1, 2);\
// CALL_VECLIB(spg, dng, none, local, 4, 1, 1, 1);\
// CALL_VECLIB(spg, dng, none, local, 4, 1, 1, 2);\
// CALL_VECLIB(spg, dng, none, local, 4, 2, 1, 1);\
// CALL_VECLIB(spg, dng, none, local, 4, 2, 1, 2);\
// CALL_VECLIB(spg, dng, none, local, 4, 4, 1, 1);\
// CALL_VECLIB(spg, dng, none, local, 4, 4, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 1, 1, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 1, 1, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 1, 2, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 1, 2, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 1, 4, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 1, 4, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 2, 1, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 2, 1, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 2, 2, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 2, 2, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 2, 4, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 2, 4, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 4, 1, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 4, 1, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 4, 2, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 4, 2, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 4, 4, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, none, 4, 4, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 1, 1, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 1, 1, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 1, 2, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 1, 2, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 1, 4, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 1, 4, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 2, 1, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 2, 1, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 2, 2, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 2, 2, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 2, 4, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 2, 4, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 4, 1, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 4, 1, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 4, 2, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 4, 2, 1, 2);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 4, 4, 1, 1);\
// CALL_VECLIB(spg, dng, shared_buffer_g, local, 4, 4, 1, 2);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 1, 1, 1, 1);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 1, 1, 1, 2);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 1, 2, 1, 1);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 1, 2, 1, 2);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 1, 4, 1, 1);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 1, 4, 1, 2);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 2, 1, 1, 1);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 2, 1, 1, 2);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 2, 2, 1, 1);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 2, 2, 1, 2);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 2, 4, 1, 1);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 2, 4, 1, 2);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 4, 1, 1, 1);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 4, 1, 1, 2);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 4, 2, 1, 1);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 4, 2, 1, 2);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 4, 4, 1, 1);\
// CALL_VECLIB(spg, dng, shared_ptr_g, local, 4, 4, 1, 2);\
// }while (0)

// Generator: expands to a full sweep of walk strategy / load buffer /
// reduction / vector-length combinations for one (sparse-group,
// dense-group, buffer-type) triple.
#define __gen_conf_4_grp(spg, dng, bt)\
do{\
CALL_VECLIB(pull_fix, none, u_e_v, bt##_red, spg, dng, 1, 1, 1);\
CALL_VECLIB(pull_fix, none, u_e_v, bt##_red, spg, dng, 1, 2, 1);\
CALL_VECLIB(pull_fix, none, u_e_v, bt##_red, spg, dng, 1, 4, 1);\
CALL_VECLIB(pull_fix, none, u_e_v, bt##_red, spg, dng, 1, 1, 2);\
CALL_VECLIB(pull_fix, none, u_e_v, bt##_red, spg, dng, 1, 2, 2);\
CALL_VECLIB(pull_fix, none, u_e_v, bt##_red, spg, dng, 1, 4, 2);\
CALL_VECLIB(pull_fix, none, u_e_v, bt##_red, spg, dng, 1, 1, 4);\
CALL_VECLIB(pull_fix, none, u_e_v, bt##_red, spg, dng, 1, 2, 4);\
CALL_VECLIB(pull_fix, none, u_e_v, bt##_red, spg, dng, 1, 4, 4);\
CALL_VECLIB(pull_fix, local, u_e_v, bt##_red, spg, dng, 1, 1, 1);\
CALL_VECLIB(pull_fix, local, u_e_v, bt##_red, spg, dng, 1, 2, 1);\
CALL_VECLIB(pull_fix, local, u_e_v, bt##_red, spg, dng, 1, 4, 1);\
CALL_VECLIB(pull_fix, local, u_e_v, bt##_red, spg, dng, 1, 1, 2);\
CALL_VECLIB(pull_fix, local, u_e_v, bt##_red, spg, dng, 1, 2, 2);\
CALL_VECLIB(pull_fix, local, u_e_v, bt##_red, spg, dng, 1, 4, 2);\
CALL_VECLIB(pull_fix, local, u_e_v, bt##_red, spg, dng, 1, 1, 4);\
CALL_VECLIB(pull_fix, local, u_e_v, bt##_red, spg, dng, 1, 2, 4);\
CALL_VECLIB(pull_fix, local, u_e_v, bt##_red, spg, dng, 1, 4, 4);\
CALL_VECLIB(pull_fix, bt, u_e_v, bt##_red, spg, dng, 1, 1, 1);\
CALL_VECLIB(pull_fix, bt, u_e_v, bt##_red, spg, dng, 1, 2, 1);\
CALL_VECLIB(pull_fix, bt, u_e_v, bt##_red, spg, dng, 1, 4, 1);\
CALL_VECLIB(pull_fix, bt, u_e_v, bt##_red, spg, dng, 1, 1, 2);\
CALL_VECLIB(pull_fix, bt, u_e_v, bt##_red, spg, dng, 1, 2, 2);\
CALL_VECLIB(pull_fix, bt, u_e_v, bt##_red, spg, dng, 1, 4, 2);\
CALL_VECLIB(pull_fix, bt, u_e_v, bt##_red, spg, dng, 1, 1, 4);\
CALL_VECLIB(pull_fix, bt, u_e_v, bt##_red, spg, dng, 1, 2, 4);\
CALL_VECLIB(pull_fix, bt, u_e_v, bt##_red, spg, dng, 1, 4, 4);\
CALL_VECLIB(pull_dyn, none, u_e_v, bt##_red, spg, dng, 1, 1, 1);\
CALL_VECLIB(pull_dyn, none, u_e_v, bt##_red, spg, dng, 1, 2, 1);\
CALL_VECLIB(pull_dyn, none, u_e_v, bt##_red, spg, dng, 1, 4, 1);\
CALL_VECLIB(pull_dyn, none, u_e_v, bt##_red, spg, dng, 1, 1, 2);\
CALL_VECLIB(pull_dyn, none, u_e_v, bt##_red, spg, dng, 1, 2, 2);\
CALL_VECLIB(pull_dyn, none, u_e_v, bt##_red, spg, dng, 1, 4, 2);\
CALL_VECLIB(pull_dyn, none, u_e_v, bt##_red, spg, dng, 1, 1, 4);\
CALL_VECLIB(pull_dyn, none, u_e_v, bt##_red, spg, dng, 1, 2, 4);\
CALL_VECLIB(pull_dyn, none, u_e_v, bt##_red, spg, dng, 1, 4, 4);\
CALL_VECLIB(pull_dyn, bt, u_e_v, bt##_red, spg, dng, 1, 1, 1);\
CALL_VECLIB(pull_dyn, bt, u_e_v, bt##_red, spg, dng, 1, 2, 1);\
CALL_VECLIB(pull_dyn, bt, u_e_v, bt##_red, spg, dng, 1, 4, 1);\
CALL_VECLIB(pull_dyn, bt, u_e_v, bt##_red, spg, dng, 1, 1, 2);\
CALL_VECLIB(pull_dyn, bt, u_e_v, bt##_red, spg, dng, 1, 2, 2);\
CALL_VECLIB(pull_dyn, bt, u_e_v, bt##_red, spg, dng, 1, 4, 2);\
CALL_VECLIB(pull_dyn, bt, u_e_v, bt##_red, spg, dng, 1, 1, 4);\
CALL_VECLIB(pull_dyn, bt, u_e_v, bt##_red, spg, dng, 1, 2, 4);\
CALL_VECLIB(pull_dyn, bt, u_e_v, bt##_red, spg, dng, 1, 4, 4);\
CALL_VECLIB(pull_dyn, local, u_e_v, bt##_red, spg, dng, 1, 1, 1);\
CALL_VECLIB(pull_dyn, local, u_e_v, bt##_red, spg, dng, 1, 2, 1);\
CALL_VECLIB(pull_dyn, local, u_e_v, bt##_red, spg, dng, 1, 4, 1);\
CALL_VECLIB(pull_dyn, local, u_e_v, bt##_red, spg, dng, 1, 1, 2);\
CALL_VECLIB(pull_dyn, local, u_e_v, bt##_red, spg, dng, 1, 2, 2);\
CALL_VECLIB(pull_dyn, local, u_e_v, bt##_red, spg, dng, 1, 4, 2);\
CALL_VECLIB(pull_dyn, local, u_e_v, bt##_red, spg, dng, 1, 1, 4);\
CALL_VECLIB(pull_dyn, local, u_e_v, bt##_red, spg, dng, 1, 2, 4);\
CALL_VECLIB(pull_dyn, local, u_e_v, bt##_red, spg, dng, 1, 4, 4);\
}while (0)

  // Currently only the two atomic-reduction configurations below are run;
  // the __gen_conf_4_grp sweeps are disabled.
  // __gen_conf_4_grp(128,128, shared);
  // printf("----------------------------\n");
  // __gen_conf_4_grp(64, 32, shared);
  // printf("----------------------------\n");
  // __gen_conf_4_grp(32, 32, shfl);
  // printf("----------------------------\n");
  // __gen_conf_4_grp(32, 16, shfl);
  // printf("----------------------------\n");
  // __gen_conf_4_grp(16, 16, shfl);
  // printf("----------------------------\n");
  // __gen_conf_4_grp(8, 8, shfl);
  // CALL_VECLIB(128, 128, none, local, 1, 1, 1, 1);
  // CALL_VECLIB(32, 32, none, local, 1, 4, 1, 1);
  // CALL_VECLIB(32, 32, shared_buffer_g, local, 1, 4, 1, 1);
  CALL_VECLIB(pull_fix, none, u_e_v, atomic, 128, 128, 1, 1, 1);
  CALL_VECLIB(pull_dyn, none, u_e_v, atomic, 128, 128, 1, 1, 1);

  H_ERR(TOHOST(d_dst, dst, num_dense_ele*nfeats));
  cudaEventDestroy(s);
  cudaEventDestroy(e);

  cudaFree(d_src);
  cudaFree(d_dst);
  cudaFree(d_buffer);

#undef __gen_conf_4_grp
#undef CALL_VECLIB
}


// Benchmarks six SpMM kernel variants over the CSR graph given by
// (row_offset, col_indx, edge_weight) and returns the mean per-launch time
// in milliseconds for each, in order:
//   [ Alt3 flat, Alt3 merged, Alt3 2x-unroll flat, Alt3 2x-unroll merged,
//     Alt4 4x-unroll flat, Alt4 4x-unroll merged ]
// dst.sizes[0] is the row count, dst.sizes[1] the feature width.
// edge_weight may be nullptr for an unweighted graph.
template <typename F, typename scalar_t>
std::vector<float> _spmm_benchmarker(const int* row_offset, const int* col_indx, const scalar_t* edge_weight, 
                       const acd::TensorInfo<scalar_t, uint> src, acd::TensorInfo<scalar_t, uint> dst, 
                       const uint nedge, const uint iters = 1)
{
  const uint _THD = 256;
  constexpr uint coop_size = 32;  // lanes cooperating per row; also a kernel template argument

  // Block layout: coop_size lanes across the feature dimension,
  // _THD/coop_size rows per block.
  uint bx = coop_size;
  uint by = _THD / coop_size;
  uint col_blocks = CEIL(dst.sizes[1], coop_size);
  uint row_blocks = CEIL(dst.sizes[0], by);

  // Dynamic shared memory: one column index per thread, plus one edge
  // weight per thread when the graph is weighted.
  uint sm_size = bx*by*sizeof(uint);
  if (edge_weight != nullptr) sm_size += bx*by*sizeof(scalar_t);

  float time;
  std::vector<float> res(6, 0.0);
  cudaEvent_t s, e;
  cudaEventCreate(&e);
  cudaEventCreate(&s);

  // Every variant follows the same pattern: record start, launch `iters`
  // times, record stop, wait on the stop event, read the elapsed time.
  // Fix: the original mixed cudaEventSynchronize(e) (variant 0 only) with
  // cudaDeviceSynchronize() (variants 1-5); waiting on the stop event is
  // sufficient and is now used uniformly so all variants are timed alike.

  cudaEventRecord(s);
  for (uint i=0; i<iters; i++)
  {
    CUDA_LAUNCH_CHECK(Alternative3::_spmm_kernel<F, scalar_t, coop_size>
                      <<<dim3(row_blocks, col_blocks), dim3(bx,by), sm_size>>>
                      (row_offset, col_indx, edge_weight, src, dst, nedge));
  }
  cudaEventRecord(e);
  cudaEventSynchronize(e);
  cudaEventElapsedTime(&time, s, e);
  res[0] = time / iters;

  cudaEventRecord(s);
  for (uint i=0; i<iters; i++)
  {
    CUDA_LAUNCH_CHECK(Alternative3::_spmm_kernel_merged<F, scalar_t, coop_size>
                      <<<dim3(row_blocks, col_blocks), dim3(bx,by), sm_size>>>
                      (row_offset, col_indx, edge_weight, src, dst, nedge));
  }
  cudaEventRecord(e);
  cudaEventSynchronize(e);
  cudaEventElapsedTime(&time, s, e);
  res[1] = time / iters;

  // 2x-unrolled variants cover two column blocks per launch block.
  cudaEventRecord(s);
  for (uint i=0; i<iters; i++)
  {
    CUDA_LAUNCH_CHECK(Alternative3::_spmm_kernel_unroll_flat<F, scalar_t, coop_size>
                      <<<dim3(row_blocks, CEIL(col_blocks, 2)), dim3(bx,by), sm_size>>>
                      (row_offset, col_indx, edge_weight, src, dst, nedge));
  }
  cudaEventRecord(e);
  cudaEventSynchronize(e);
  cudaEventElapsedTime(&time, s, e);
  res[2] = time / iters;

  cudaEventRecord(s);
  for (uint i=0; i<iters; i++)
  {
    CUDA_LAUNCH_CHECK(Alternative3::_spmm_kernel_unroll_merged<F, scalar_t, coop_size>
                      <<<dim3(row_blocks, CEIL(col_blocks, 2)), dim3(bx,by), sm_size>>>
                      (row_offset, col_indx, edge_weight, src, dst, nedge));
  }
  cudaEventRecord(e);
  cudaEventSynchronize(e);
  cudaEventElapsedTime(&time, s, e);
  res[3] = time / iters;

  // 4x-unrolled variants cover four column blocks per launch block.
  cudaEventRecord(s);
  for (uint i=0; i<iters; i++)
  {
    CUDA_LAUNCH_CHECK(Alternative4::_spmm_kernel_unroll_flat<F, scalar_t, coop_size>
                      <<<dim3(row_blocks, CEIL(col_blocks, 4)), dim3(bx,by), sm_size>>>
                      (row_offset, col_indx, edge_weight, src, dst, nedge));
  }
  cudaEventRecord(e);
  cudaEventSynchronize(e);
  cudaEventElapsedTime(&time, s, e);
  res[4] = time / iters;

  cudaEventRecord(s);
  for (uint i=0; i<iters; i++)
  {
    CUDA_LAUNCH_CHECK(Alternative4::_spmm_kernel_unroll_merged<F, scalar_t, coop_size>
                      <<<dim3(row_blocks, CEIL(col_blocks, 4)), dim3(bx,by), sm_size>>>
                      (row_offset, col_indx, edge_weight, src, dst, nedge));
  }
  cudaEventRecord(e);
  cudaEventSynchronize(e);
  cudaEventElapsedTime(&time, s, e);
  res[5] = time / iters;

  cudaEventDestroy(e);
  cudaEventDestroy(s);

  return res;
}

// Loads the externally-compiled PTX kernel "spmm_kernel_alter4_ptx" from
// spmm_alter4.ptx via the driver API and benchmarks it: `iters` SpMM
// launches of graph g against a dense (nvertex x vlen) row-major feature
// matrix `src`, result copied back into `dst`.
// Returns the mean per-launch GPU time in milliseconds.
// NOTE(review): buffers and TensorInfo are hard-coded to float, so this is
// only meaningful for T == float — confirm before instantiating otherwise.
template<class G, class T>
float spmm_ptx_test(G& g, const T *src, T *dst, uint vlen, uint iters=1)
{
  float time;
  cudaEvent_t s,e;
  float *d_src, *d_dst;
  // Widen before multiplying so a large nvertex*vlen cannot overflow 32 bits.
  size_t num_dense_ele = (size_t)g.nvertex * vlen;

  // Load the PTX module and resolve the kernel entry point.
  CUmodule ext_mod;
  CUfunction kernel;
  CUDRIVER_CHECK(cuModuleLoad(&ext_mod, "spmm_alter4.ptx"));
  CUDRIVER_CHECK(cuModuleGetFunction(&kernel, ext_mod, "spmm_kernel_alter4_ptx"));
  cudaEventCreate(&s);
  cudaEventCreate(&e);

  H_ERR(cudaMalloc(&d_src, sizeof(float)*num_dense_ele));
  H_ERR(cudaMalloc(&d_dst, sizeof(float)*num_dense_ele));

  H_ERR(TODEV(d_src, src, num_dense_ele));
  H_ERR(CLEAN(d_dst, num_dense_ele));

  // Dense operands are (nvertex x vlen), row-major.
  uint sizes[] = {g.nvertex, vlen};
  uint strides[] = {vlen, 1};

  acd::TensorInfo<float, uint> src_tensor(d_src, 2, sizes, strides);
  acd::TensorInfo<float, uint> dst_tensor(d_dst, 2, sizes, strides);

  H_ERR(cudaEventRecord(s));
  for (uint i=0; i<iters; i++)
    _spmm_ptx<TestFunc, float>(kernel, g.row_offset, g.col_idx, g.edge_val, src_tensor, dst_tensor, g.nedge);
  H_ERR(cudaEventRecord(e));

  H_ERR(cudaDeviceSynchronize());  // ensure both events completed before reading them
  H_ERR(cudaEventElapsedTime(&time, s,e));
  H_ERR(TOHOST(d_dst, dst, num_dense_ele));

  cudaFree(d_src);
  cudaFree(d_dst);

  cudaEventDestroy(s);
  cudaEventDestroy(e);

  // Fix: the module was previously leaked — release the driver resource.
  CUDRIVER_CHECK(cuModuleUnload(ext_mod));

  return time / iters;
}

// template<class G, class T>
// void spmm_benchmark(G& g, const T *src, T *dst, uint vlen, uint iters=1)
// {
//   float time;
//   float *d_src, *d_dst;
//   size_t num_dense_ele = g.nvertex*vlen;

//   std::cout << "calling benchmarks " << std::endl;

//   H_ERR(cudaMalloc(&d_src, sizeof(T)*num_dense_ele));
//   H_ERR(cudaMalloc(&d_dst, sizeof(T)*num_dense_ele));

//   std::cout << "allocated address : d_src=" << std::hex << reinterpret_cast<uint64_t>(d_src) 
//             << " d_dst=" << reinterpret_cast<uint64_t>(d_dst) << std::endl;

//   H_ERR(TODEV(d_src, src, num_dense_ele));
//   H_ERR(CLEAN(d_dst, num_dense_ele));

//   uint sizes[] = {static_cast<uint>(g.nvertex), vlen};
//   uint strides[] = {vlen, 1};

//   acd::TensorInfo<float, uint> src_tensor(d_src, 2, sizes, strides);
//   acd::TensorInfo<float, uint> dst_tensor(d_dst, 2, sizes, strides);

//   auto res = _spmm_benchmarker<TestFunc, float>(g.row_offset, g.col_idx, g.edge_val, src_tensor, dst_tensor, g.nedge);

//   std::cout << "result: " << std::endl
//             << " j1-f: " << res[0] << std::endl
//             << " j1-m: " << res[1] << std::endl
//             << " j2-f: " << res[2] << std::endl
//             << " j2-m: " << res[3] << std::endl
//             << " j4-f: " << res[4] << std::endl
//             << " j4-m: " << res[5] << std::endl;

//   H_ERR(TOHOST(d_dst, dst, num_dense_ele));

//   cudaFree(d_src);
//   cudaFree(d_dst);
// }


// Entry point: reads an .mtx graph, runs the SpMM benchmark suite against a
// cusparse reference (and a CPU reference on pre-11 toolkits), then reports
// the average element-wise error between the benchmarked result and the
// reference.
int main(int argc, char** argv)
{
  // Fix: the banner previously said "cusparse version", but these macros
  // report the nvcc (CUDA compiler) version.
  std::cout << "nvcc version : " << __CUDACC_VER_MAJOR__ 
                         << "." << __CUDACC_VER_MINOR__ 
                         << std::endl;
  std::cout << "-- running "
  #ifdef GCOO
            << "GCOO"
  #endif
  #ifdef CSR
            << "CSR"
  #endif
            << " --" <<std::endl;

  /*
   * arguments 
   * 1. graph mtx path
   * 2. mtx with header ? 0, 1
   * 3. vlen
   * 4. iterations to run
   * 5*. device to run on
   * 6*. feature copies to test cache hit rate
   * 7*. verbose error check
   */
  // Fix: assert() disappears under NDEBUG; check the argument count
  // explicitly and print usage instead.
  if (argc < 5)
  {
    std::cerr << "usage: " << argv[0]
              << " <mtx_path> <with_header 0|1> <vlen> <iters>"
                 " [device] [nfeats] [verbose]" << std::endl;
    return 1;
  }
  bool with_header = atoi(argv[2]);
  int feat_dim = atoi(argv[3]);
  int iters = atoi(argv[4]);
  if ( argc > 5 ) cudaSetDevice(atoi(argv[5]));
  int nfeats = argc > 6 ? std::min(iters, atoi(argv[6])) : 1;
  bool verbose_check = argc > 7 ? atoi(argv[7]) : false;

  std::cout << "reading mtx file: " << argv[1] << std::endl;
  auto ginfo = read_mtx<float>(std::string(argv[1]), false, with_header);
  std::cout << "read_mtx pass, using graph(" << std::get<3>(ginfo) << "," << std::get<4>(ginfo) << ")\n";

  uint ne = std::get<4>(ginfo);
  float *ew = std::get<2>(ginfo);
  int *src = std::get<0>(ginfo);
  int *dst = std::get<1>(ginfo);

  std::cout << "calling graph construct with " << src << " " << dst << " " 
            << ew << " " << ne << std::endl;

  GraphData::CSRGraph<int, float> csr(src, dst, ew, ne);
  std::cout << "cst built" << std::endl;

  #ifdef GCOO
  GraphData::GCOOGraph<int, float> gcoo(src, dst, ew, ne);
  uint nv = gcoo.nvertex;
  #else
  uint nv = csr.nvertex;
  #endif

  // Fix: widen before multiplying so feat_dim*nv does not overflow in
  // 32-bit arithmetic before being assigned to uint64_t.
  uint64_t num_dense_ele = (uint64_t)feat_dim * nv;

  std::cout << "graph built" << std::endl;

  float *h_data = new float[nfeats*num_dense_ele];
  float* h_dst = new float[nfeats*num_dense_ele];
  float* h_eval = new float[nfeats*num_dense_ele];

  init_random_mat(h_data, num_dense_ele*nfeats);
  init_random_mat(ew, csr.nedge);

  memset(h_dst, 0, sizeof(float)*num_dense_ele*nfeats);
  memset(h_eval, 0, sizeof(float)*num_dense_ele*nfeats);

  csr.cuda();

  #ifdef GCOO
  gcoo.cuda();
  auto t_target = gcoo_test(gcoo, h_data, h_dst, feat_dim, iters);
  #endif

  // cusparse reference result goes into h_eval.
  auto t_cusparse = cusparse_check(csr, h_data, h_eval, feat_dim, iters);

  std::cout << "cusparse time consumption: " << t_cusparse << " ms\n";
  // Fix: compute the flop count in double (2.0*...) so 2*nedge*feat_dim
  // cannot overflow int on large graphs.
  std::cout << "cusparse throughput: " << 2.0*csr.nedge*feat_dim/t_cusparse/1e6 << " gflops\n";
  
  spmm_veclib_benchmarker(csr, h_data, h_dst, feat_dim, iters, nfeats);

  std::cout << "Evaluating ...\n";
  double err=0;

#if __CUDACC_VER_MAJOR__ < 11
  // On pre-11 toolkits, overwrite the reference with a CPU-computed one.
  csr.cpu();
  memset(h_eval, 0, sizeof(float)*num_dense_ele*nfeats);
  double t1 = mwtime();
  for (int it = 0; it < nfeats; ++it) cpu_check(csr, h_data+num_dense_ele*it, h_eval+num_dense_ele*it, feat_dim);
  double t_cpu = mwtime() - t1;  
  printf("CPU time : %f ms %f gflops\n", t_cpu, 2.0*csr.nedge*feat_dim/t_cpu/1e6);
#endif

  // Accumulate the absolute error between benchmark output and reference.
  for(int it = 0; it < nfeats; ++it)
  {
    if (verbose_check) printf("\n--------- iter %d -----------\n", it);
    for(uint64_t i=0; i<num_dense_ele; ++i)
    {
      float cur_err=fabs(h_dst[it*num_dense_ele+i] - h_eval[it*num_dense_ele+i]);
      if (verbose_check)
      {
        // Fix: i is uint64_t — printing it with %d was undefined behavior;
        // cast to unsigned long long and use %llu.
        if ( i%feat_dim==0 ) printf("\n row %llu: ", (unsigned long long)(i/feat_dim));
        if (cur_err>1e-4) 
          printf("c%llu: %f %s %f\t", 
                 (unsigned long long)(i%feat_dim), 
                 h_eval[it*num_dense_ele+i], 
                 (h_eval[it*num_dense_ele+i] > h_dst[it*num_dense_ele+i] ? ">" : "<"), 
                 h_dst[it*num_dense_ele+i]);
      }
      err+=cur_err;
    }
  }
  std::cout << std::endl << "Average error : " << err/(num_dense_ele*nfeats) << std::endl;

  delete [] h_data;
  delete [] h_dst;
  delete [] h_eval;
  delete [] src;
  delete [] dst;
  delete [] ew;
  return 0;
}
