#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <limits>
#include <string>
#include <tuple>
#include <vector>

#include <omp.h>

#include <cublas_v2.h>
#include <cusparse.h>

#include "format/CsrGraph.cuh"
#include "format/GcooGraph.cuh"

#include "cuda/ops/gat_ops_impl.cuh"

#ifndef CSR
#ifndef GCOO
#define CSR
#endif
#endif

// Global knobs for this test driver.
bool VERBOSE = false;    // when true, check_cpu() dumps every per-element mismatch
uint _thd = 256;         // thread-count knob — not referenced in this chunk; TODO confirm consumer
uint _bsz = 128*4;       // block-size knob — not referenced in this chunk; TODO confirm consumer
uint _h_thresh = 128*18; // host-side threshold — not referenced in this chunk; TODO confirm consumer
uint _d_thresh = 1<<30;  // device-side threshold — not referenced in this chunk; TODO confirm consumer

// Operator pack handed to the SDDMM kernel templates (see _sddmm_alt1 below):
//  - binary:       element-wise combine of the two dense operands (product).
//  - reduce:       non-atomic read-modify-write accumulate into *addr; only
//                  safe when a single thread owns addr (no synchronization).
//  - reduceAtomic: contention-safe accumulate; returns the OLD value at addr
//                  (atomicAdd semantics), unlike reduce which returns the new.
struct TestFunc : Func<float>{
  static __device__ __forceinline__ float binary(float a, float b) {return a*b;}
  static __device__ __forceinline__ float reduce(float *addr, float n) {*addr += n; return *addr;}
  static __device__ __forceinline__ float reduceAtomic(float *addr, float n) {return atomicAdd(addr, n);}
};

// Abort with the failing source line and status code when a cuBLAS call does
// not return CUBLAS_STATUS_SUCCESS. Wrap every cuBLAS API call with this.
// (Bug fix: the message previously said "CUSPARSE API failed".)
#define CHECK_CUBLAS(func)                                          \
do {                                                                \
    cublasStatus_t status = (func);                                 \
    if (status != CUBLAS_STATUS_SUCCESS) {                          \
        printf("cuBLAS API failed at line %d with error: (%d)\n",   \
               __LINE__, status);                                   \
        exit(-1);                                                   \
    }                                                               \
} while (0)

// Terminate the process when a cuSPARSE call fails, reporting the failing
// source line, the human-readable error string, and the raw status code.
#define CHECK_CUSPARSE(func)                                                   \
do {                                                                           \
    const cusparseStatus_t err_ = (func);                                      \
    if (CUSPARSE_STATUS_SUCCESS != err_) {                                     \
        printf("CUSPARSE API failed at line %d with error: %s (%d)\n",         \
               __LINE__, cusparseGetErrorString(err_), err_);                  \
        exit(-1);                                                              \
    }                                                                          \
} while (0)

// Fill `buffer` with `numel` pseudo-random values in [0, 99/13].
// Uses POSIX random(); the sequence is deterministic unless srandom() is
// called beforehand.
template <typename scalar_t>
void init_random_mat(scalar_t* buffer, int numel)
{
  for (scalar_t *p = buffer, *end = buffer + numel; p != end; ++p)
  {
    *p = static_cast<scalar_t>(random() % 100) / 13.;
  }
}

// Host reference SDDMM over a CSR graph: for the edge (r, c) stored at
// position e, dst[e] = <src[r,:], src[c,:]> * g.edge_val[e], where src is a
// row-major dense matrix with `vlen` columns.
template<typename Graph>
void cpu_run(Graph g, const float *src, float *dst, int vlen)
{
  size_t r = 0;
  for (size_t e = 0; e < g.nedge; ++e)
  {
    // advance r across every row boundary that coincides with edge e
    // (handles empty rows, whose offsets repeat)
    while (g.row_offset[r + 1] == e) ++r;
    const size_t c = g.col_idx[e];
    float dot = 0;
    for (int k = 0; k < vlen; ++k)
      dot += src[r * vlen + k] * src[c * vlen + k];
    dst[e] = dot * g.edge_val[e];
  }
}

// Compare a computed result against a reference answer element-by-element and
// print the mean absolute error over all `nfeats` feature copies of length
// `len`. When the global VERBOSE is set, additionally dumps every element
// whose error exceeds 1e-6, grouped under CSR row headers via `row_offset`.
void check_cpu(const float *res, const float *ans, const int *row_offset, const int len, const int nfeats)
{
  double total = 0;
  for (int it = 0; it < nfeats; ++it)
  {
    if (VERBOSE) printf("\n--------- iter %d -----------\n", it);
    int row_cnt = 0;
    const float *r = res + it * len;
    const float *a = ans + it * len;
    for (int i = 0; i < len; ++i)
    {
      const float delta = fabs(r[i] - a[i]);
      if (VERBOSE)
      {
        if (row_offset[row_cnt] == i) printf("\n row %d: ", row_cnt++);
        if (delta > 1e-6)
          printf("c%d: %f %s %f\t", i % len, a[i],
              (a[i] > r[i] ? ">" : "<"),
              r[i]);
      }
      total += delta;
    }
  }
  std::cout << std::endl << "Average error : " << total/(len*nfeats) << std::endl;
}


// Naive SDDMM kernel: one thread block per sparse row; threads of the block
// stride over that row's nonzeros. For the nonzero at (row, col) stored at
// CSR offset nz:
//   A_val[nz] = <C[row, :], B[col, :]>    (dense_col-length dot product)
// B and C are row-major with `dense_col` columns. Launch with
// grid.x == number of sparse rows; any 1-D block size works.
__global__ void sddmm_naive(
    const int* __restrict__ A_row,
    const int* __restrict__ A_col,
    float* __restrict__ A_val,
    const float* __restrict__ B,
    const float* __restrict__ C,
    int dense_col)
{
    const int row = blockIdx.x;
    const int begin = A_row[row];
    const int end = A_row[row + 1];
    for (int nz = begin + threadIdx.x; nz < end; nz += blockDim.x)
    {
        const int col = A_col[nz];
        float dot = 0.0f;
        for (int k = 0; k < dense_col; ++k)
            dot += B[col * dense_col + k] * C[row * dense_col + k];
        A_val[nz] = dot;
    }
}

// Time `launch_sddmm` over `iters` runs on device copies of the dense input
// `src` (nfeats replicas) and copy the produced edge values back into the
// host buffer `new_edge_val`. Returns average milliseconds per iteration.
//
// NOTE(review): the sorted vertex index d_idx built below is never passed to
// launch_sddmm — presumably kept for parity with sddmm_test; confirm before
// removing.
template <typename G, typename T>
float check_naive(G& g, const T *src, T *new_edge_val, int vlen, int iters=1, int nfeats=1)
{
  cudaEvent_t s, e;
  float time;
  float *d_src, *d_new_edge_val;
  size_t num_dense_ele = g.nvertex*vlen;

  using kv_t = std::tuple<int, int>;
  std::vector<kv_t> sorter;
  std::vector<int> sorted_index;
  int *d_idx, *h_idx;

  h_idx = (int*)malloc(sizeof(int)*(1+g.nvertex));
  H_ERR(TOHOST(g.row_offset, h_idx, g.nvertex+1));

  // order vertices by descending END offset of their row (h_idx[i+1]);
  // NOTE(review): a true degree sort would use h_idx[i+1]-h_idx[i] — confirm intent
  for (size_t i=0; i<g.nvertex; ++i)
    sorter.push_back(std::make_tuple(i, h_idx[i+1]));
  std::sort(sorter.begin(), sorter.end(), [](const kv_t &a, const kv_t &b){
    return std::get<1>(a) > std::get<1>(b);
  });

  for (auto kv : sorter)
    sorted_index.push_back(std::get<0>(kv));

  H_ERR(cudaMalloc(&d_idx, sizeof(int)*g.nvertex));
  H_ERR(TODEV(d_idx, sorted_index.data(), g.nvertex));

  cudaEventCreate(&s);
  cudaEventCreate(&e);

  H_ERR(cudaMalloc(&d_src, sizeof(T)*num_dense_ele*nfeats));
  H_ERR(cudaMalloc(&d_new_edge_val, sizeof(T)*g.nedge*nfeats));

  H_ERR(TODEV(d_src, src, num_dense_ele*nfeats));
  H_ERR(CLEAN(d_new_edge_val, g.nedge*nfeats));

  std::cout << "device mem allocated" << std::endl;

  H_ERR(cudaEventRecord(s));
  for (int i=0; i<iters; i++)
  {
    launch_sddmm(reinterpret_cast<uint32_t*>(g.row_offset),
                 reinterpret_cast<uint32_t*>(g.col_idx),
                 d_new_edge_val, d_src, d_src,
                 g.nvertex, g.nvertex, vlen);
  }
  H_ERR(cudaEventRecord(e));

  // synchronize so both events have completed before reading the elapsed time
  H_ERR(cudaDeviceSynchronize());
  H_ERR(cudaEventElapsedTime(&time, s, e));

  H_ERR(TOHOST(d_new_edge_val, new_edge_val, g.nedge*nfeats));
  cudaEventDestroy(s);
  cudaEventDestroy(e);

  // release everything we allocated (h_idx and d_idx leaked in the original)
  cudaFree(d_idx);
  cudaFree(d_src);
  cudaFree(d_new_edge_val);
  free(h_idx);

  return time / iters;
}

// Benchmark cuSPARSE SDDMM on the sparsity pattern of `g` (CSR):
//   edge_val[e at (i,j)] = alpha * <src_l[i,:], src_r[j,:]>
// averaged over `iters` runs; the device result is copied back into the host
// buffer `new_edge_val`. Returns average milliseconds per iteration.
// NOTE: `nfeats` is accepted for signature parity with the other drivers but
// is not used here.
template<typename Graph>
float cusparse_check_sddmm(Graph g, 
                           const float *src_l, 
                           const float *src_r, 
                           float* new_edge_val, 
                           int vlen, int iters=1, int nfeats=1)
{
  cusparseHandle_t sparse_h;
  cusparseSpMatDescr_t matC;
  cusparseDnMatDescr_t matL, matR;
  cudaEvent_t s, e;

  size_t ext_buf_size;
  void * ext_buffer;

  float *d_src_l, *d_src_r, *d_new_edge_val;

  float alpha = 1.0;
  float beta = 0.0;

  float time;

  cudaEventCreate(&s);
  cudaEventCreate(&e);

  H_ERR(cudaMalloc(&d_src_l, sizeof(float)*g.nvertex*vlen));
  H_ERR(cudaMalloc(&d_src_r, sizeof(float)*g.nvertex*vlen));
  H_ERR(cudaMalloc(&d_new_edge_val, sizeof(float)*g.nedge));

  H_ERR(TODEV(d_src_l, src_l, g.nvertex*vlen));
  H_ERR(TODEV(d_src_r, src_r, g.nvertex*vlen));
  H_ERR(CLEAN(d_new_edge_val, g.nedge));

  CHECK_CUSPARSE(cusparseCreate(&sparse_h));
  CHECK_CUSPARSE(cusparseCreateCsr(&matC, g.nvertex, g.nvertex, g.nedge, g.row_offset, g.col_idx, d_new_edge_val,
                          CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F));

  CHECK_CUSPARSE(cusparseCreateDnMat(&matL, g.nvertex, vlen, vlen, (void*)d_src_l, CUDA_R_32F, CUSPARSE_ORDER_ROW));
  CHECK_CUSPARSE(cusparseCreateDnMat(&matR, g.nvertex, vlen, vlen, (void*)d_src_r, CUDA_R_32F, CUSPARSE_ORDER_ROW));

  CHECK_CUSPARSE(cusparseSDDMM_bufferSize(
    sparse_h,
    CUSPARSE_OPERATION_NON_TRANSPOSE,
    CUSPARSE_OPERATION_TRANSPOSE,
    &alpha, matL, matR, &beta, matC, CUDA_R_32F,
    CUSPARSE_SDDMM_ALG_DEFAULT, &ext_buf_size
  )); 
  H_ERR(cudaMalloc(&ext_buffer, ext_buf_size));
  std::cout << "temp buffer 4 cusparse size = " << ext_buf_size << std::endl;

  // bug fix: preprocess takes the device buffer pointer itself (void*),
  // not the address of the host-side pointer variable (&ext_buffer)
  CHECK_CUSPARSE(cusparseSDDMM_preprocess(
    sparse_h,
    CUSPARSE_OPERATION_NON_TRANSPOSE,
    CUSPARSE_OPERATION_TRANSPOSE,
    &alpha, matL, matR, &beta, matC, CUDA_R_32F,
    CUSPARSE_SDDMM_ALG_DEFAULT, ext_buffer
  )); 

  H_ERR(cudaEventRecord(s));
  for (int i=0; i<iters; i++)
  {
    CHECK_CUSPARSE(cusparseSDDMM(
      sparse_h, 
      CUSPARSE_OPERATION_NON_TRANSPOSE,
      CUSPARSE_OPERATION_TRANSPOSE,
      &alpha, matL, matR, &beta, matC, CUDA_R_32F,
      CUSPARSE_SDDMM_ALG_DEFAULT, ext_buffer
    ));
  }
  H_ERR(cudaEventRecord(e));
  // make sure both events (and all SDDMM launches) have finished
  H_ERR(cudaDeviceSynchronize());

  H_ERR(cudaEventElapsedTime(&time, s, e));

  // copy the final iteration's edge values back to the host
  H_ERR(TOHOST(d_new_edge_val, new_edge_val, g.nedge));

  cudaFree(ext_buffer);
  cusparseDestroyDnMat(matL);
  cusparseDestroyDnMat(matR);
  cusparseDestroySpMat(matC);
  CHECK_CUSPARSE(cusparseDestroy(sparse_h));  // handle was leaked in the original

  cudaFree(d_src_r);
  cudaFree(d_src_l);
  cudaFree(d_new_edge_val);

  cudaEventDestroy(s);
  cudaEventDestroy(e);

  return time / iters;
}


// Time the custom _sddmm_alt1 kernel over `iters` runs. `h_ew` is the host
// edge-weight array (copied to the device and fed to the kernel), `src` the
// row-major dense input (nfeats replicas of nvertex*vlen values). The device
// result is copied back into the host buffer `new_edge_val`.
// Returns average milliseconds per iteration.
template<class G, class T>
float sddmm_test(G& g, const T *h_ew, const T *src, T *new_edge_val, uint vlen, uint iters=1, uint nfeats=1)
{
  cudaEvent_t s,e;
  float time;
  float *d_src, *d_new_edge_val;
  size_t num_dense_ele = g.nvertex*vlen;

  using kv_t = std::tuple<uint, uint>;
  std::vector<kv_t> sorter;
  std::vector<uint> sorted_index;
  uint *d_idx, *h_idx;
  T *edge_val;

  h_idx = (uint*)malloc(sizeof(uint)*(1+g.nvertex));
  H_ERR(TOHOST(g.row_offset, h_idx, g.nvertex+1));

  // order vertices by descending END offset of their row (h_idx[i+1]);
  // NOTE(review): a true degree sort would use h_idx[i+1]-h_idx[i] — confirm intent
  for (size_t i=0; i<g.nvertex; ++i) 
    sorter.push_back(std::make_tuple(i, h_idx[i+1]));
  std::sort(sorter.begin(), sorter.end(), [](const kv_t&a ,const kv_t &b){
    return std::get<1>(a) > std::get<1>(b);
  });

  for (auto kv : sorter)
    sorted_index.push_back(std::get<0>(kv));

  H_ERR(cudaMalloc(&d_idx, sizeof(uint)*g.nvertex));
  H_ERR(cudaMalloc(&edge_val, sizeof(T)*g.nedge));
  H_ERR(TODEV(d_idx, sorted_index.data(), g.nvertex));
  H_ERR(TODEV(edge_val, h_ew, g.nedge));

  cudaEventCreate(&s);
  cudaEventCreate(&e);

  H_ERR(cudaMalloc(&d_src, sizeof(T)*num_dense_ele*nfeats));
  H_ERR(cudaMalloc(&d_new_edge_val, sizeof(T)*g.nedge*nfeats));

  H_ERR(TODEV(d_src, src, num_dense_ele*nfeats));
  H_ERR(CLEAN(d_new_edge_val, g.nedge*nfeats));

  std::cout << "device mem allocated" << std::endl;

  H_ERR(cudaEventRecord(s));
  for (uint i=0; i<iters; i++)
  {
    _sddmm_alt1<TestFunc, uint, float, push_seq, none, u_e_v, shfl_red, 256, 1, 1, 1, 1>
    (d_idx, reinterpret_cast<uint*>(g.row_offset), reinterpret_cast<uint*>(g.col_idx), edge_val,
     d_src, d_src, vlen, d_new_edge_val, g.nvertex, g.nedge, vlen);
  }
  H_ERR(cudaEventRecord(e));

  // synchronize so both events have completed before reading the elapsed time
  H_ERR(cudaDeviceSynchronize());
  H_ERR(cudaEventElapsedTime(&time, s, e));

  H_ERR(TOHOST(d_new_edge_val, new_edge_val, g.nedge*nfeats));
  cudaEventDestroy(s);
  cudaEventDestroy(e);

  // release everything we allocated (h_idx and d_idx leaked in the original)
  cudaFree(d_src);
  cudaFree(d_new_edge_val);
  cudaFree(edge_val);
  cudaFree(d_idx);
  free(h_idx);

  return time / iters;
}


// Read an edge list from a MatrixMarket-style file at `path`.
// Each data line is "src dst [weight]"; self-loops are dropped (the message
// calls them "redundant edges"). Returns (src, dst, weight, nvertex, nedge);
// the three arrays are new[]'d and owned by the caller. When no header is
// present, nvertex is inferred as max vertex id + 1.
template <typename T>
std::tuple<int*, int*, T*, int, int> 
read_mtx(std::string&& path, bool with_weight=false, bool with_header=false)
{
  std::vector<int> svector, dvector;
  std::vector<T> evector;
  int *src, *dst;
  int64_t nvertex = -1, nedge = -1;
  T *ew;

  int vmax = 0;
  std::ifstream fin(path);

  if(!fin.is_open()) ASSERT(false, "can not open file");

  // skip leading comment lines ('%' banners etc.) until a line starts with a
  // digit; bail out on EOF instead of spinning forever (original bug)
  while(fin.good()) {
    char c = fin.peek();
    if(c>='0' && c<='9') break;
    fin.ignore(std::numeric_limits<std::streamsize>::max(), fin.widen('\n'));
  }

  // optional "nrows ncols nnz" header; the graph is assumed square so both
  // dimension fields land in nvertex
  if(with_header) {
    fin >> nvertex >> nvertex >> nedge;
    fin.ignore(std::numeric_limits<std::streamsize>::max(), fin.widen('\n'));
  }

  while(fin.good()){
    int v0, v1;
    T w = static_cast<T>(1);
    // guard against trailing whitespace / truncated lines: stop on a failed read
    if(!(fin >> v0 >> v1)) break;
    if(with_weight) {
      if(!(fin >> w)) break;
    }
    else{
      fin.ignore(std::numeric_limits<std::streamsize>::max(), fin.widen('\n'));
    }

    vmax = vmax < v0 ? v0 : vmax;
    vmax = vmax < v1 ? v1 : vmax;

    if(v0 == v1) continue;  // drop self-loops
    svector.push_back(v0);
    dvector.push_back(v1);
    evector.push_back(w);

    if(fin.eof() || (size_t)nedge == svector.size()) break;
  }

  printf("s: %zu, d: %zu, e: %zu\n", svector.size(), dvector.size(), evector.size());
  ASSERT(svector.size() == dvector.size() && svector.size() == evector.size(), 
         "file header did not match file body");
  // only meaningful when a header declared an edge count (nedge >= 0);
  // the original printed a bogus "-1 -> N" message for headerless files
  if (nedge >= 0 && (size_t)nedge != svector.size()) 
    std::cout << "removed redundant edges : " << nedge << " -> " 
              << svector.size() << std::endl;

  nedge = svector.size();
  src = new int[svector.size()];
  dst = new int[dvector.size()];
  ew = new T[evector.size()];

  memcpy(src, svector.data(), sizeof(int)*svector.size());
  memcpy(dst, dvector.data(), sizeof(int)*dvector.size());
  memcpy(ew, evector.data(), sizeof(T)*evector.size());

  // infer the vertex count when no header supplied one
  if (nvertex < 0) nvertex = vmax + 1;

  return std::make_tuple(src, dst, ew, (int)nvertex, (int)nedge);
}

int main(int argc, char** argv)
{
  // __CUDACC_VER_* report the nvcc toolkit version this file was compiled
  // with (the original banner mislabeled it as the cusparse version)
  std::cout << "nvcc version : " << __CUDACC_VER_MAJOR__ 
                          << "." << __CUDACC_VER_MINOR__ 
                          << std::endl;
  std::cout << "-- running "
  #ifdef GCOO
            << "GCOO"
  #endif
  #ifdef CSR
            << "CSR"
  #endif
            << " --" <<std::endl;
  /*
   * arguments 
   * 1. graph mtx path
   * 2. mtx with header ? 0, 1
   * 3. vlen
   * 4. iterations to run
   * 5*. device to run on
   * 6*. feature copys to test cache hit rate
   * 7*. verbose error check
   */

  assert(argc >= 5);
  char* mtx_file = argv[1];
  bool with_header = atoi(argv[2]);
  int vlen = atoi(argv[3]);
  int iters = atoi(argv[4]);
  int dev = argc > 5 ? atoi(argv[5]) : 0;
  int nfeats = argc > 6 ? atoi(argv[6]) : 1;
  if (argc > 7) VERBOSE = atoi(argv[7]);

  std::cout << "reading mtx file: " << argv[1] << std::endl;
  auto ginfo = read_mtx<float>(std::string(argv[1]), false, with_header);
  std::cout << "read_mtx pass, using graph(" << std::get<3>(ginfo) << "," << std::get<4>(ginfo) << ")\n";

  int ne = std::get<4>(ginfo);
  float *ew = std::get<2>(ginfo);
  int *src = std::get<0>(ginfo);
  int *dst = std::get<1>(ginfo);

  std::cout << "calling graph construct with " << src << " " << dst << " " 
            << ew << " " << ne << std::endl;
  GraphData::CSRGraph<int> csr(src, dst, nullptr, ne);
  std::cout << "cst built" << std::endl;
  int nv = csr.nvertex;

  #ifdef GCOO
  GraphData::GCOOGraph<int, float> gcoo(src, dst, ew, ne);
  assert(gcoo.nvertex == nv);
  nv = gcoo.nvertex;
  #endif

  std::cout << "graph built" << std::endl;

  int num_dense_ele = vlen*nv;  // NOTE(review): may overflow int for large graphs — confirm sizes
  float *h_data = new float[nfeats*num_dense_ele];
  float *h_res = new float[csr.nedge*nfeats];
  float *h_ans = new float[csr.nedge*nfeats];

  init_random_mat(h_data, num_dense_ele*nfeats);
  // bug fix: clear ALL nfeats feature copies (original cleared only the first)
  memset(h_res, 0, sizeof(float)*csr.nedge*nfeats);
  memset(h_ans, 0, sizeof(float)*csr.nedge*nfeats);
 
  double time=-1;
  csr.cuda();

  #ifdef CSR
  // time = check_naive(csr, h_data, h_res, vlen, iters, nfeats);
  time = sddmm_test(csr, ew, h_data, h_res, vlen, iters, nfeats);
  #endif

  #ifdef GCOO
  #endif

  std::cout << "my time consumption: " << time << " ms\n";
  std::cout << "my throughput: " << (double)2*csr.nedge*vlen/time/1e6 << " gflops\n";
  std::cout << "----------------------------------" << std::endl;

  time = cusparse_check_sddmm(csr, h_data, h_data, h_ans, vlen, iters, nfeats);

  std::cout << "cusparse time consumption: " << time << " ms\n";
  std::cout << "cusparse throughput: " << (double)2*csr.nedge*vlen/time/1e6 << " gflops\n";

  csr.cpu();
  // for(int i=0; i<nfeats; i++) cpu_run(csr, h_data+csr.nedge*i, h_ans+csr.nedge*i, vlen);

  // NOTE(review): h_ans comes from cuSPARSE, which does not scale by the edge
  // weights that sddmm_test feeds the custom kernel — confirm the two results
  // are comparable before trusting the reported error.
  check_cpu(h_res, h_ans, csr.row_offset, csr.nedge, nfeats);

  delete [] h_data;
  delete [] h_res;
  delete [] h_ans;
  delete [] src;
  delete [] dst;
  delete [] ew;
  return 0;
}