#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <limits>
#include <string>
#include <tuple>
#include <type_traits>
#include <vector>

#include <cuda.h>
#include <cusparse.h>
// #include "ge-spmm/gespmm.h"

#include "format/CsrGraph.cuh"
// #include "../format/GcooGraph.cuh"
// #include "../format/EllGraph.cuh"

#if __CUDACC_VER_MAJOR__ >= 11
// Wrap a cuSPARSE call: on any status other than CUSPARSE_STATUS_SUCCESS,
// print the failing source line plus the cuSPARSE error string and abort
// the process with exit(-1).
#define CHECK_CUSPARSE(func)                                                   \
do {                                                                           \
    cusparseStatus_t status = (func);                                          \
    if (status != CUSPARSE_STATUS_SUCCESS) {                                   \
        printf("CUSPARSE API failed at line %d with error: %s (%d)\n",         \
               __LINE__, cusparseGetErrorString(status), status);              \
        exit(-1);                                                              \
    }                                                                          \
} while (0)
#else
// Pre-CUDA-11 toolchains: the call is passed through UNCHECKED, so cuSPARSE
// failures on this path are silently ignored.
#define CHECK_CUSPARSE(func) func
#endif 

// Fill `buffer` with `numel` pseudo-random values in [0, 100/13), drawn from
// the C library random() stream (seed with srandom() for reproducibility).
template <typename scalar_t>
void init_random_mat(scalar_t* buffer, int numel)
{
  for (scalar_t* p = buffer; p != buffer + numel; ++p)
  {
    const long r = random() % 100;        // uniform in 0..99
    *p = ((scalar_t)r) / 13.;             // scale into [0, ~7.7)
  }
}

// Load an edge-list / MatrixMarket-style graph file.
//
// path        : file to read; leading lines that do not start with a digit
//               are skipped as comments.
// with_weight : when true, a third column per line is read as the edge
//               weight; otherwise every edge gets weight 1.
// with_header : when true, the first data line is "rows cols nnz" (rows is
//               read twice, i.e. the matrix is assumed square).
//
// Self-loops (v0 == v1) are dropped. Returns {src, dst, weights, nvertex,
// nedge}; the three arrays are new[]-allocated and owned by the caller.
// nvertex comes from the header, or is inferred as max_vertex_id + 1.
template <typename T>
std::tuple<int*, int*, T*, int, int> 
read_mtx(std::string&& path, bool with_weight=false, bool with_header=false)
{
  std::vector<int> svector, dvector;
  std::vector<T> evector;
  int64_t nvertex = -1, nedge = -1;
  int vmax = 0;  // largest vertex id seen; used to infer nvertex

  std::ifstream fin(path);
  if(!fin.is_open()) ASSERT(false, "can not open file");

  // skip comments: any leading line not starting with a digit
  while(1) {
    char c = fin.peek();
    if(c>='0' && c<='9') break;
    fin.ignore(std::numeric_limits<std::streamsize>::max(), fin.widen('\n'));
  }

  if(with_header) {
    fin >> nvertex >> nvertex >> nedge;  // square matrix: rows == cols
    fin.ignore(std::numeric_limits<std::streamsize>::max(), fin.widen('\n'));
  }

  while(fin.good()){
    int v0, v1;
    T w = static_cast<T>(1);
    fin >> v0 >> v1;
    // FIX: a failed extraction (trailing blank line / EOF) previously fell
    // through and could record a spurious edge; stop immediately instead.
    if(!fin) break;
    if(with_weight){
      fin >> w;
      if(!fin) break;
    }
    else{
      // no weight column: discard the rest of the line
      fin.ignore(std::numeric_limits<std::streamsize>::max(), fin.widen('\n'));
    }

    vmax = vmax < v0 ? v0 : vmax;
    vmax = vmax < v1 ? v1 : vmax;

    if(v0 == v1) continue;  // drop self-loops
    svector.push_back(v0);
    dvector.push_back(v1);
    evector.push_back(w);

    // with a header, stop once the advertised edge count has been reached
    if(nedge >= 0 && (size_t)nedge == svector.size()) break;
  }

  // FIX: use %zu for size_t (was %ld, which is undefined behavior on
  // platforms where size_t != long)
  printf("s: %zu, d: %zu, e: %zu\n", svector.size(), dvector.size(), evector.size());
  ASSERT(svector.size() == dvector.size() && svector.size() == evector.size(), 
         "file header did not match file body");
  // only meaningful when a header announced an edge count (was also printed
  // with nedge == -1 before)
  if (nedge >= 0 && (size_t)nedge != svector.size()) 
    std::cout << "removed redundant edges : " << nedge << " -> " 
              << svector.size() << std::endl;

  nedge = (int64_t)svector.size();
  if (nvertex < 0) nvertex = vmax + 1;  // no header: infer vertex count

  int* src = new int[svector.size()];
  int* dst = new int[dvector.size()];
  T*   ew  = new T[evector.size()];
  memcpy(src, svector.data(), sizeof(int)*svector.size());
  memcpy(dst, dvector.data(), sizeof(int)*dvector.size());
  memcpy(ew,  evector.data(), sizeof(T)*evector.size());

  return std::make_tuple(src, dst, ew, (int)nvertex, (int)nedge);
}

// Wrap an on-device CSR graph in a cuSPARSE generic sparse-matrix descriptor
// (32-bit indices, zero-based, CUDA_R_32F values).
// The descriptor aliases g's device buffers; g.independent is cleared,
// presumably to mark that storage as shared — confirm against CSRGraph.
template<typename T>
cusparseSpMatDescr_t build_csr(GraphData::CSRGraph<int, T> &g) {
  g.cuda();  // ensure row_offset / col_idx / edge_val live on the device

  cusparseSpMatDescr_t descr;
  CHECK_CUSPARSE(cusparseCreateCsr(&descr, g.nvertex, g.nvertex, g.nedge,
                                   g.row_offset, g.col_idx, g.edge_val,
                                   CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
                                   CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F));
  g.independent = false;
  return descr;
}

// Create a cuSPARSE generic COO descriptor over three DEVICE arrays.
//
// NOTE(review): the parameter named `dst` is passed to cusparseCreateCoo as
// the ROW index array and `src` as the COLUMN index array. The caller in
// main() passes its arguments pre-swapped, so the two swaps cancel —
// confirm the intended orientation before reusing this helper elsewhere.
//
// `data` is described to cuSPARSE as CUDA_R_32F regardless of T, so callers
// must supply float-sized elements. The descriptor aliases the buffers;
// they must outlive it.
template<typename T>
cusparseSpMatDescr_t build_coo(int *src,  int *dst, T *data, 
                               const uint nvertex, const uint nedge) {
  cusparseSpMatDescr_t cooMat;

  CHECK_CUSPARSE(cusparseCreateCoo(&cooMat, nvertex, nvertex, nedge, 
                                   dst, src, data, CUSPARSE_INDEX_32I,
                                   CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F));

  return cooMat;
}

#if __CUDACC_VER_MAJOR__ >= 11
// Convert a COO matrix into Blocked-ELL format by round-tripping through a
// DENSE nvertex x nvertex temporary (O(n^2) device memory — only viable for
// small graphs). `ell_cols` / `ell_vals` are caller-allocated device buffers
// sized for blk_size x ell_width blocking; the returned descriptor aliases
// them. Requires CUDA 11+ (generic Sparse<->Dense conversion APIs).
// NOTE(review): currently unused — the call site in main() is commented out
// ("not supported on v100").
template <typename T>
cusparseSpMatDescr_t build_bell(cusparseSpMatDescr_t coo_mat, int *ell_cols, T *ell_vals,
                                const int nvertex, const int blk_size, const int ell_width) {
  using namespace GraphData;

  cusparseHandle_t sparse_h;
  cusparseDnMatDescr_t tmpDn;
  cusparseSpMatDescr_t bellMat;

  T* tmpdn_buffer;
  size_t ext_buffer_size;
  void *ext_buffer;
  
  // dense scratch buffer that will hold the fully materialized matrix
  MEMALLOC(T, tmpdn_buffer, nvertex*nvertex, DeviceTag::CUDA);

  CHECK_CUSPARSE( cusparseCreateDnMat(&tmpDn, nvertex, nvertex, nvertex, tmpdn_buffer,
                                    CUDA_R_32F, CUSPARSE_ORDER_ROW) );

  // step 1: COO -> dense (size query, workspace alloc, then conversion)
  CHECK_CUSPARSE(cusparseCreate(&sparse_h));
  CHECK_CUSPARSE(cusparseSparseToDense_bufferSize(sparse_h,
                                                  coo_mat,
                                                  tmpDn,
                                                  CUSPARSE_SPARSETODENSE_ALG_DEFAULT,
                                                  &ext_buffer_size));
  H_ERR( cudaMalloc(&ext_buffer, ext_buffer_size) );

  CHECK_CUSPARSE(cusparseSparseToDense(sparse_h,
                                       coo_mat,
                                       tmpDn,
                                       CUSPARSE_SPARSETODENSE_ALG_DEFAULT,
                                       ext_buffer));
  H_ERR( cudaFree(ext_buffer) );
  
  // Create sparse matrix B in Blocked ELL format
  CHECK_CUSPARSE( cusparseCreateBlockedEll(&bellMat, nvertex, nvertex,
                                            blk_size, ell_width,
                                            ell_cols, ell_vals,
                                            CUSPARSE_INDEX_32I,
                                            CUSPARSE_INDEX_BASE_ZERO,
                                            CUDA_R_32F) );
  // step 2: dense -> Blocked-ELL (size query, analysis, then conversion)
  CHECK_CUSPARSE( cusparseDenseToSparse_bufferSize(
                                    sparse_h, tmpDn, bellMat,
                                    CUSPARSE_DENSETOSPARSE_ALG_DEFAULT,
                                    &ext_buffer_size) );
  H_ERR( cudaMalloc(&ext_buffer, ext_buffer_size) );
  // execute Dense to Sparse conversion
  CHECK_CUSPARSE( cusparseDenseToSparse_analysis(sparse_h, tmpDn, bellMat,
                                      CUSPARSE_DENSETOSPARSE_ALG_DEFAULT,
                                      ext_buffer) );

  CHECK_CUSPARSE( cusparseDenseToSparse_convert(sparse_h, tmpDn, bellMat,
                                      CUSPARSE_DENSETOSPARSE_ALG_DEFAULT,
                                      ext_buffer) );

  // release the local handle, dense descriptor and scratch buffers; only
  // bellMat (aliasing ell_cols/ell_vals) survives
  CHECK_CUSPARSE(cusparseDestroy(sparse_h));
  CHECK_CUSPARSE(cusparseDestroyDnMat(tmpDn));
  MEMFREE(tmpdn_buffer, DeviceTag::CUDA);
  H_ERR( cudaFree(ext_buffer) );

  return bellMat;
}
#endif

// Convert a CSR graph to BSR (block sparse row) with square blocks of edge
// `blksize`, using the cuSPARSE legacy csr2bsr API.
//
// Returns {nnzb, bsrRowPtr, bsrColInd, bsrVal}:
//   nnzb      : number of non-zero BLOCKS
//   bsrRowPtr : mblk+1 offsets, mblk = CEIL(nvertex, blksize)
//   bsrColInd : nnzb block-column indices
//   bsrVal    : nnzb * blksize * blksize values
// All three arrays are cudaMalloc'd device buffers owned by the caller.
template <typename T>
std::tuple<int, int*, int*, T*> 
build_bsr(cusparseHandle_t sparse_h, GraphData::CSRGraph<int, T> &g, const int blksize) {
  // cusparseScsr2bsr below is the single-precision entry point, so this
  // helper only works for float graphs; fail at compile time otherwise.
  static_assert(std::is_same<T, float>::value,
                "build_bsr only supports T = float (cusparseScsr2bsr)");

  using GraphData::DeviceTag;

  int nnz_bsr;  // written by cusparseXcsr2bsrNnz (host pointer mode, the default)
  int *bsrRowPtr, *bsrColInd;
  T *bsrVal;
  int mblk = CEIL(g.nvertex, blksize);
  H_ERR( cudaMalloc(&bsrRowPtr, sizeof(int) * (mblk + 1)) );

  // legacy-API matrix descriptors: general type, zero-based indexing
  cusparseMatDescr_t csr_desc, bsr_desc;
  CHECK_CUSPARSE( cusparseCreateMatDescr(&csr_desc) );
  CHECK_CUSPARSE( cusparseSetMatType(csr_desc, CUSPARSE_MATRIX_TYPE_GENERAL) );
  CHECK_CUSPARSE( cusparseSetMatIndexBase(csr_desc, CUSPARSE_INDEX_BASE_ZERO) );

  CHECK_CUSPARSE( cusparseCreateMatDescr(&bsr_desc) );
  CHECK_CUSPARSE( cusparseSetMatType(bsr_desc, CUSPARSE_MATRIX_TYPE_GENERAL) );
  CHECK_CUSPARSE( cusparseSetMatIndexBase(bsr_desc, CUSPARSE_INDEX_BASE_ZERO) );

  cusparseDirection_t dir = CUSPARSE_DIRECTION_ROW;
  g.cuda();  // make sure the CSR arrays are on the device

  // pass 1: count non-zero blocks and fill bsrRowPtr
  CHECK_CUSPARSE ( cusparseXcsr2bsrNnz(sparse_h, dir, g.nvertex, g.nvertex,
        csr_desc, g.row_offset, g.col_idx, blksize,
        bsr_desc, bsrRowPtr, &nnz_bsr) );
  H_ERR( cudaDeviceSynchronize() );  // nnz_bsr is consumed on the host below

  H_ERR( cudaMalloc(&bsrColInd, sizeof(int) * nnz_bsr ) );
  H_ERR( cudaMalloc(&bsrVal, sizeof(T) * nnz_bsr * blksize * blksize) );

  // pass 2: fill block-column indices and the dense per-block values
  CHECK_CUSPARSE ( cusparseScsr2bsr(sparse_h, dir, g.nvertex, g.nvertex,
                   csr_desc, g.edge_val, g.row_offset, g.col_idx,
                   blksize, 
                   bsr_desc, bsrVal, bsrRowPtr, bsrColInd ) );
  H_ERR( cudaDeviceSynchronize() );

  CHECK_CUSPARSE ( cusparseDestroyMatDescr(csr_desc) );
  CHECK_CUSPARSE ( cusparseDestroyMatDescr(bsr_desc) );

  return std::make_tuple(nnz_bsr, bsrRowPtr, bsrColInd, bsrVal);
}


// Time `iters` runs of generic-API cusparseSpMM (C = 1*A*B + 0*C) and return
// the average time in milliseconds, measured with CUDA events.
// The result in matC is never read back; this is a pure timing helper.
float cusparse_spmm(cusparseHandle_t sparse_h, cusparseSpMatDescr_t csr_mat, cusparseDnMatDescr_t matB,
                    cusparseDnMatDescr_t matC, uint iters=1)
{
  cudaEvent_t s, e;
  #if __CUDACC_VER_MAJOR__ >= 11
  auto cualg = CUSPARSE_SPMM_ALG_DEFAULT;
  #else
  auto cualg = CUSPARSE_COOMM_ALG1;  // pre-11 name for the default algorithm
  #endif

  size_t ext_buf_size;
  void * ext_buffer;
  float alpha = 1.0;
  float beta = 0.0;
  float time;

  H_ERR(cudaEventCreate(&s));
  H_ERR(cudaEventCreate(&e));

  // query and allocate the external workspace once, outside the timed loop
  CHECK_CUSPARSE(cusparseSpMM_bufferSize(
    sparse_h,
    CUSPARSE_OPERATION_NON_TRANSPOSE,
    CUSPARSE_OPERATION_NON_TRANSPOSE,
    &alpha, csr_mat, matB, &beta, matC, CUDA_R_32F,
    cualg, &ext_buf_size
  )); 
  H_ERR(cudaMalloc(&ext_buffer, ext_buf_size));

  H_ERR(cudaEventRecord(s, 0));
  for ( uint i=0; i<iters; i++)
  {
    // FIX: the return status was previously ignored, so SpMM failures went
    // unnoticed and the reported time was meaningless
    CHECK_CUSPARSE(cusparseSpMM(
      sparse_h, 
      CUSPARSE_OPERATION_NON_TRANSPOSE,
      CUSPARSE_OPERATION_NON_TRANSPOSE,
      &alpha, csr_mat, matB, &beta, matC, CUDA_R_32F,
      cualg, ext_buffer
    ));
  }
  H_ERR(cudaEventRecord(e, 0));
  H_ERR(cudaEventSynchronize(e));  // wait for every queued iteration

  H_ERR(cudaEventElapsedTime(&time, s, e));

  H_ERR(cudaFree(ext_buffer));
  H_ERR(cudaEventDestroy(s));
  H_ERR(cudaEventDestroy(e));

  return time / iters;
}


// Time `iters` runs of the legacy BSR SpMM (cusparseSbsrmm) and return the
// average time in milliseconds. B and C are expected in COLUMN-major layout
// with leading dimension nvertex (see the comment on the call below).
// NOTE(review): ldb/ldc are passed as nvertex, but bsrmm requires
// ld >= CEIL(nvertex, blk_size) * blk_size; when nvertex is not a multiple
// of blk_size this is too small — confirm inputs are padded accordingly.
float cusparse_spmm_old(cusparseHandle_t sparse_h, int nvertex, int blk_size, int bnnz, 
                        int *bsrRowPtr, int *bsrColInd, float *bsrVal,
                        float *B, float* C, int vlen, const int iters=1) {
  cusparseMatDescr_t descA;
  cudaEvent_t s, e;
  cudaEventCreate(&s);  
  cudaEventCreate(&e);

  // C = 1 * A * B + 0 * C; results are never read back (timing only)
  float alpha = 1.0;
  float beta = 0.0;
  float time;

  // number of block-rows/columns of the BSR matrix
  int bnvertex = CEIL(nvertex, blk_size);
  CHECK_CUSPARSE(cusparseCreateMatDescr(&descA));
  CHECK_CUSPARSE(cusparseSetMatType(descA, CUSPARSE_MATRIX_TYPE_GENERAL));
  H_ERR(cudaEventRecord(s, 0));
  for ( int i=0; i<iters; i++ ) {
    CHECK_CUSPARSE( cusparseSbsrmm(sparse_h,
                    CUSPARSE_DIRECTION_COLUMN,
                    CUSPARSE_OPERATION_NON_TRANSPOSE,
                    CUSPARSE_OPERATION_NON_TRANSPOSE,
                    bnvertex, vlen, bnvertex, bnnz,
                    &alpha, descA, bsrVal, bsrRowPtr, bsrColInd,
                    blk_size, B, nvertex, &beta, C, nvertex) ); // this api expect B\C in col major format
    // H_ERR(cudaDeviceSynchronize());
  }
  H_ERR(cudaEventRecord(e, 0));
  H_ERR(cudaEventSynchronize(e));
  H_ERR(cudaEventElapsedTime(&time, s, e));

  cusparseDestroyMatDescr(descA);
  cudaEventDestroy(s);
  cudaEventDestroy(e);

  return time / iters;
}


// Benchmark cuSPARSE SpMM (sparse A x dense B -> dense C) in three sparse
// formats: COO and CSR via the generic API, and BSR (block sizes 2/4/8) via
// the legacy bsrmm API.
//
// usage: <prog> <mtx_file> <with_header:0|1> <vlen> <iters>
// Prints five throughput numbers: coo csr bsr2 bsr4 bsr8.
int main(int argc, char** argv) {

  assert(argc >= 5);
  char* mtx_file = argv[1];
  bool with_header = atoi(argv[2]);
  int vlen = atoi(argv[3]);   // dense column count (feature length)
  int iters = atoi(argv[4]);  // timed iterations per format

  std::cout << "reading mtx file: " << mtx_file << std::endl;
  auto ginfo = read_mtx<float>(std::string(mtx_file), false, with_header);
  std::cout << "read_mtx pass, using graph(" << std::get<3>(ginfo) << "," << std::get<4>(ginfo) << ")\n";

  int    *src = std::get<0>(ginfo);
  int    *dst = std::get<1>(ginfo);
  float *data = std::get<2>(ginfo);
  int nvertex = std::get<3>(ginfo);
  int   nedge = std::get<4>(ginfo);

  // FIX: the device-side COO arrays were never initialized (SpMM previously
  // ran on garbage indices) and d_data was declared int* while holding float
  // edge weights. Upload the edge list now, before CSRGraph below possibly
  // reorders/consumes the host arrays.
  int *d_dst, *d_src;
  float *d_data;
  H_ERR( cudaMalloc(&d_dst, sizeof(int)*nedge) );
  H_ERR( cudaMalloc(&d_src, sizeof(int)*nedge) );
  H_ERR( cudaMalloc(&d_data, sizeof(float)*nedge) );
  H_ERR( cudaMemcpy(d_src, src, sizeof(int)*nedge, cudaMemcpyHostToDevice) );
  H_ERR( cudaMemcpy(d_dst, dst, sizeof(int)*nedge, cudaMemcpyHostToDevice) );
  H_ERR( cudaMemcpy(d_data, data, sizeof(float)*nedge, cudaMemcpyHostToDevice) );

  // NOTE(review): CSRGraph presumably takes over src/dst/data — they are
  // intentionally not delete[]'d here; confirm against CSRGraph's dtor.
  GraphData::CSRGraph<int, float> g(src, dst, data, nedge, false);
  nvertex = g.nvertex;

  // dense input B (random) and output C (zeroed), nvertex x vlen, row-major
  int num_dense_ele = vlen * nvertex;
  float csr_time=0, coo_time=0, bsr_time=0;
  cusparseDnMatDescr_t matB, matC;
  float *h_in, *h_out;
  float *d_in, *d_out;

  h_in = new float[num_dense_ele]; 
  h_out = new float[num_dense_ele]; 
  init_random_mat(h_in, num_dense_ele);
  memset(h_out, 0, sizeof(float)*num_dense_ele);

  H_ERR( cudaMalloc(&d_in, sizeof(float)*num_dense_ele) );
  H_ERR( cudaMalloc(&d_out, sizeof(float)*num_dense_ele) );
  H_ERR(TODEV(d_in, h_in, num_dense_ele));
  H_ERR(TODEV(d_out, h_out, num_dense_ele));

  CHECK_CUSPARSE( cusparseCreateDnMat(&matB, nvertex, vlen, vlen, d_in,
                                      CUDA_R_32F, CUSPARSE_ORDER_ROW) );
  CHECK_CUSPARSE( cusparseCreateDnMat(&matC, nvertex, vlen, vlen, d_out,
                                      CUDA_R_32F, CUSPARSE_ORDER_ROW) );

  cusparseHandle_t handle;
  CHECK_CUSPARSE( cusparseCreate(&handle) );

  // ---- COO timing (build_coo swaps row/col internally; passing
  // (d_dst, d_src) here cancels that swap) ----
  auto cooMat = build_coo(d_dst, d_src, d_data, nvertex, nedge);
  coo_time = cusparse_spmm(handle, cooMat, matB, matC, iters);

  CHECK_CUSPARSE( cusparseDestroySpMat(cooMat) );
  H_ERR( cudaFree(d_src) );
  H_ERR( cudaFree(d_dst) );
  H_ERR( cudaFree(d_data) );

  // ---- CSR timing (generic SpMM on CSR needs CUDA 11+; otherwise csr_time
  // stays 0 and the printed CSR throughput is meaningless) ----
  auto csrMat = build_csr(g);
#if __CUDACC_VER_MAJOR__ >= 11
  csr_time = cusparse_spmm(handle, csrMat, matB, matC, iters);
#endif
  CHECK_CUSPARSE( cusparseDestroySpMat(csrMat) );

  // ---- BSR timing for block sizes 2, 4, 8 ----
  // (FIX: removed a shadowed, dead `const int bs = 4;` that sat before the loop)
  std::vector<float> bsr_times;
  for ( int bs = 2; bs <= 8; bs *= 2 ) {
    auto bsr_info = build_bsr(handle, g, bs);

    int   bnnz       = std::get<0>(bsr_info);
    int   *bsrRowPtr = std::get<1>(bsr_info);
    int   *bsrColInd = std::get<2>(bsr_info);
    float *bsrVal    = std::get<3>(bsr_info);
    bsr_time = cusparse_spmm_old(handle, nvertex, bs, bnnz, bsrRowPtr, bsrColInd, bsrVal, 
                                      d_in, d_out, vlen, iters);
    H_ERR( cudaFree(bsrRowPtr) );
    H_ERR( cudaFree(bsrColInd) );
    H_ERR( cudaFree(bsrVal) );
    bsr_times.push_back(bsr_time);
  }

  // 2*nnz*vlen flops with a 1e-6 factor; divided by a time in ms this
  // yields GFLOP/s
  double gflop = (int64_t)(2) * nedge * vlen * 1e-6;
  printf("%.4f %.4f %.4f %.4f %.4f", gflop/coo_time, gflop/csr_time, 
          gflop/bsr_times[0], gflop/bsr_times[1], gflop/bsr_times[2]);

  // FIX: matB/matC descriptors were leaked before
  CHECK_CUSPARSE( cusparseDestroyDnMat(matB) );
  CHECK_CUSPARSE( cusparseDestroyDnMat(matC) );
  CHECK_CUSPARSE( cusparseDestroy(handle) );
  delete [] h_in;
  delete [] h_out;
  H_ERR( cudaFree(d_in) );
  H_ERR( cudaFree(d_out) );
}