
#include <bits/stdc++.h>
#include <cuda.h>
#include <time.h>
#include <sys/time.h>
#include <cuda_runtime_api.h> 
#include <cuda_runtime.h>
#include <cmath>
#include <numeric>
#include <algorithm>

#include <cusparse.h> 
using namespace std;

// Lightweight CUDA runtime error check: on failure prints file/line and the
// error string, then continues (it does NOT abort or return).
// Wrapped in do { } while (0) so the macro expands to a single statement and
// is safe after an un-braced `if`/`else`; every call site in this file already
// ends with a semicolon, so this is a drop-in replacement for the old bare
// { } block.
#define checkCudaErrors(func)				\
do {									\
    cudaError_t e = (func);			\
    if(e != cudaSuccess)						                \
        printf ("%s %d CUDA: %s\n", __FILE__,  __LINE__, cudaGetErrorString(e));		\
} while (0)

// Checks a CUDA runtime call; on failure prints line/error and executes
// `return EXIT_FAILURE` from the ENCLOSING function, so it may only be used
// inside functions returning int (here: main).
// NOTE(review): deliberately a bare { } block, not do { } while (0) — call
// sites in this file invoke it WITHOUT a trailing semicolon, which a do-while
// form would break.
#define CHECK_CUDA(func)                                                       \
{                                                                              \
    cudaError_t status = (func);                                               \
    if (status != cudaSuccess) {                                               \
        printf("CUDA API failed at line %d with error: %s (%d)\n",             \
               __LINE__, cudaGetErrorString(status), status);                  \
        return EXIT_FAILURE;                                                   \
    }                                                                          \
}

// Checks a cuSPARSE call (cusparseStatus_t, not cudaError_t); on failure
// prints line/error and executes `return EXIT_FAILURE` from the ENCLOSING
// function, so it may only be used inside functions returning int.
// NOTE(review): kept as a bare { } block on purpose — call sites omit the
// trailing semicolon, so a do { } while (0) wrapper would not compile.
#define CHECK_CUSPARSE(func)                                                   \
{                                                                              \
    cusparseStatus_t status = (func);                                          \
    if (status != CUSPARSE_STATUS_SUCCESS) {                                   \
        printf("CUSPARSE API failed at line %d with error: %s (%d)\n",         \
               __LINE__, cusparseGetErrorString(status), status);              \
    }                                                                          \
}


// CPU reference SpMV: computes y = A * x for a CSR matrix.
//
// row_offset : CSR row pointer, length row_num + 1 (row_offset[0] == 0)
// col_index  : column index of each stored nonzero, length row_offset[row_num]
// value      : value of each stored nonzero, same length as col_index
// x          : dense input vector (indexed by col_index entries)
// y          : dense output vector, length >= row_num; overwritten per row
// row_num    : number of matrix rows to process
//
// Read-only inputs are taken by const reference (the original signature took
// them by mutable reference despite never writing them); only y is modified.
template <typename IndexType, typename ValueType>
void spmv_cpu_kernel(const std::vector<IndexType> &row_offset,
                const std::vector<IndexType> &col_index,
                const std::vector<ValueType> &value,
                const std::vector<ValueType> &x,
                std::vector<ValueType> &y,
                IndexType row_num)
{
    for (IndexType i = 0; i < row_num; i++) {
        ValueType res = 0;
        // Accumulate the dot product of row i with x over its nonzeros.
        for (IndexType k = row_offset[i]; k < row_offset[i + 1]; k++) {
            res += value[k] * x[col_index[k]];
        }
        y[i] = res;
    }
}

// Pushes entry (column b, value c) onto the head of row a's linked list.
// h[a] holds the index of the most recently inserted entry for row a
// (-1 when the list is empty); e/w store column and value per entry,
// ne stores the next-entry link, and idx is the next free slot (advanced
// by one on return).
void add(unsigned int a, unsigned int b, float c,
    int *h, unsigned int *e, int *ne, float *w, int &idx)
{
    const int slot = idx++;   // claim the next free storage slot
    e[slot] = b;              // column index of the new entry
    w[slot] = c;              // its value
    ne[slot] = h[a];          // link to the previous head of row a
    h[a] = slot;              // the new entry becomes the head
}

// Converts nnz_num COO triples (original_row[i], original_col[i],
// original_value[i]) into CSR form: row_offset (length row_num + 1),
// col_index and val (each length nnz_num).  The caller pre-allocates the
// three output arrays.  Triples are bucketed per row via head-inserted
// linked lists, so within each row the columns come out in REVERSE
// insertion order — the CSR is valid but column indices are unsorted.
// Inputs are taken by const reference (the original copied all three
// vectors by value).  Row indices are assumed zero-based and < row_num.
void readMtxFile_csr(unsigned int row_num, unsigned int nnz_num,
            unsigned int *row_offset, unsigned int *col_index, float *val, 
            const vector<unsigned int> &original_row, const vector<unsigned int> &original_col, const vector<float> &original_value)
{
    // h[r]: index of the most recent entry for row r (-1 = empty list);
    // e/w: column index / value per entry; ne: next-entry link.
    int *h = (int *)malloc((row_num + 10) * sizeof(int));
    memset(h, -1, sizeof(int) * (row_num + 10));

    // was sizeof(int); same size in practice, but use the element type
    unsigned int *e = (unsigned int *)malloc((nnz_num + 10) * sizeof(unsigned int));
    int *ne = (int *)malloc((nnz_num + 10) * sizeof(int));
    float *w = (float *)malloc((nnz_num + 10) * sizeof(float));
    int idx = 0;

    // Bucket every COO triple into its row's list.
    for (unsigned int i = 0; i < nnz_num; i++)
    {
        add(original_row[i], original_col[i], original_value[i], h, e, ne, w, idx);
    }

    // Walk the lists row by row, emitting CSR entries and the prefix sums.
    row_offset[0] = 0;
    unsigned int nnz_num_id = 0;

    for (unsigned int i = 0; i < row_num; i++)
    {
        unsigned int count = 0;
        for (int j = h[i]; j != -1; j = ne[j])
        {
            col_index[nnz_num_id] = e[j];
            val[nnz_num_id] = w[j];
            nnz_num_id++;
            count++;
        }
        row_offset[i + 1] = row_offset[i] + count;
    }

    free(h);
    free(e);
    free(ne);
    free(w);
}


// Warp-level tree reduction: sums `sum` across each group of `WarpSize`
// consecutive lanes using shuffle-down steps (16, 8, 4, 2, 1, truncated to
// the steps enabled by WarpSize); the group total lands in the group's
// lowest lane.  Intended for power-of-two WarpSize <= 32 — for other values
// the if-chain still runs but the result is not a clean per-group sum.
// NOTE(review): the full 0xffffffff mask assumes all 32 lanes of the
// hardware warp reach these shuffles together; callers whose sub-groups can
// exit a loop at different trip counts should confirm this — TODO verify.
__device__ __forceinline__ float warpReduceSum_notemplate(float sum, unsigned int WarpSize) {

    if (WarpSize >= 32)sum += __shfl_down_sync(0xffffffff, sum, 16); // 0-16, 1-17, 2-18, etc.
    if (WarpSize >= 16)sum += __shfl_down_sync(0xffffffff, sum, 8);// 0-8, 1-9, 2-10, etc.
    if (WarpSize >= 8)sum += __shfl_down_sync(0xffffffff, sum, 4);// 0-4, 1-5, 2-6, etc.
    if (WarpSize >= 4)sum += __shfl_down_sync(0xffffffff, sum, 2);// 0-2, 1-3, 4-6, 5-7, etc.
    if (WarpSize >= 2)sum += __shfl_down_sync(0xffffffff, sum, 1);// 0-1, 2-3, 4-5, etc.
    return sum;
}

// Generic CSR-vector SpMV: a group of `threads_per_row` consecutive lanes
// cooperates on one row, and groups grid-stride over all rows.  Within a row
// the lanes stride over its nonzeros, then warpReduceSum_notemplate folds the
// per-lane partials into the group's lane 0, which writes y[row].
// threads_per_row must be a power of two that divides blockDim.x.
__global__ void spmv_csr_kernel_unified(unsigned int row_num, unsigned int col_num, unsigned int* offset, unsigned int* col_index, float* value, float* x, float* y,
                                       unsigned int threads_per_row)
{
    int groups_per_block = blockDim.x / threads_per_row;
    int lane = threadIdx.x % threads_per_row;
    int first_row = blockIdx.x * groups_per_block + threadIdx.x / threads_per_row;
    int row_stride = groups_per_block * gridDim.x;

    for (int row = first_row; row < row_num; row += row_stride)
    {
        int nz_begin = offset[row];
        int nz_end = offset[row + 1];
        float partial = 0;
        // Each lane of the group handles every threads_per_row-th nonzero.
        for (int nz = nz_begin + lane; nz < nz_end; nz += threads_per_row)
        {
            partial += value[nz] * __ldg(&x[col_index[nz]]);   // x via read-only cache
        }
        // Fold the per-lane partial sums into lane 0 of the group.
        partial = warpReduceSum_notemplate(partial, threads_per_row);
        if (lane == 0)
        {
            y[row] = partial;
        }
    }
}


// CSR-vector SpMV specialized for threads_per_row == 32 (one full warp per
// row).  Warps grid-stride over rows; within a row the 32 lanes stride over
// its nonzeros, then an inlined shuffle tree (offsets 16, 8, 4, 2, 1 — the
// same sequence the generic helper performs) folds the partials into lane 0,
// which writes y[row].
__global__ void spmv_csr_kernel_unified32(unsigned int row_num, unsigned int col_num, unsigned int* offset, unsigned int* col_index, float* value, float* x, float* y,
                                       unsigned int threads_per_row)
{
    int groups_per_block = blockDim.x / threads_per_row;
    int lane = threadIdx.x % threads_per_row;
    int first_row = blockIdx.x * groups_per_block + threadIdx.x / threads_per_row;
    int row_stride = groups_per_block * gridDim.x;

    for (int row = first_row; row < row_num; row += row_stride)
    {
        int nz_begin = offset[row];
        int nz_end = offset[row + 1];
        float partial = 0;
        for (int nz = nz_begin + lane; nz < nz_end; nz += threads_per_row)
        {
            partial += value[nz] * __ldg(&x[col_index[nz]]);   // x via read-only cache
        }

        // Full-warp tree reduction: identical shuffle sequence to the
        // hand-unrolled original (16, 8, 4, 2, 1).
        #pragma unroll
        for (int delta = 16; delta > 0; delta >>= 1)
        {
            partial += __shfl_down_sync(0xffffffff, partial, delta);
        }

        if (lane == 0)
        {
            y[row] = partial;
        }
    }
}


// CSR-vector SpMV specialized for threads_per_row == 16 (half-warp groups).
// Groups grid-stride over rows; the 16 lanes of a group stride over the
// row's nonzeros, then a shuffle tree with offsets 8, 4, 2, 1 (same sequence
// as the original unrolled code) folds the partials into the group's lane 0.
__global__ void spmv_csr_kernel_unified16(unsigned int row_num, unsigned int col_num, unsigned int* offset, unsigned int* col_index, float* value, float* x, float* y,
                                       unsigned int threads_per_row)
{
    int groups_per_block = blockDim.x / threads_per_row;
    int lane = threadIdx.x % threads_per_row;
    int first_row = blockIdx.x * groups_per_block + threadIdx.x / threads_per_row;
    int row_stride = groups_per_block * gridDim.x;

    for (int row = first_row; row < row_num; row += row_stride)
    {
        int nz_begin = offset[row];
        int nz_end = offset[row + 1];
        float partial = 0;
        for (int nz = nz_begin + lane; nz < nz_end; nz += threads_per_row)
        {
            partial += value[nz] * __ldg(&x[col_index[nz]]);   // x via read-only cache
        }

        // Sub-warp tree reduction over 16 lanes (offsets 8, 4, 2, 1).
        #pragma unroll
        for (int delta = 8; delta > 0; delta >>= 1)
        {
            partial += __shfl_down_sync(0xffffffff, partial, delta);
        }

        if (lane == 0)
        {
            y[row] = partial;
        }
    }
}


// CSR-vector SpMV specialized for threads_per_row == 8.  Groups of 8 lanes
// grid-stride over rows; within a row the lanes stride over its nonzeros,
// then a shuffle tree with offsets 4, 2, 1 (the original unrolled sequence)
// folds the partials into the group's lane 0, which writes y[row].
__global__ void spmv_csr_kernel_unified8(unsigned int row_num, unsigned int col_num, unsigned int* offset, unsigned int* col_index, float* value, float* x, float* y,
                                       unsigned int threads_per_row)
{
    int groups_per_block = blockDim.x / threads_per_row;
    int lane = threadIdx.x % threads_per_row;
    int first_row = blockIdx.x * groups_per_block + threadIdx.x / threads_per_row;
    int row_stride = groups_per_block * gridDim.x;

    for (int row = first_row; row < row_num; row += row_stride)
    {
        int nz_begin = offset[row];
        int nz_end = offset[row + 1];
        float partial = 0;
        for (int nz = nz_begin + lane; nz < nz_end; nz += threads_per_row)
        {
            partial += value[nz] * __ldg(&x[col_index[nz]]);   // x via read-only cache
        }

        // Sub-warp tree reduction over 8 lanes (offsets 4, 2, 1).
        #pragma unroll
        for (int delta = 4; delta > 0; delta >>= 1)
        {
            partial += __shfl_down_sync(0xffffffff, partial, delta);
        }

        if (lane == 0)
        {
            y[row] = partial;
        }
    }
}

// CSR-vector SpMV specialized for threads_per_row == 4.  Groups of 4 lanes
// grid-stride over rows; within a row the lanes stride over its nonzeros,
// then a shuffle tree with offsets 2, 1 (the original unrolled sequence)
// folds the partials into the group's lane 0, which writes y[row].
__global__ void spmv_csr_kernel_unified4(unsigned int row_num, unsigned int col_num, unsigned int* offset, unsigned int* col_index, float* value, float* x, float* y,
                                       unsigned int threads_per_row)
{
    int groups_per_block = blockDim.x / threads_per_row;
    int lane = threadIdx.x % threads_per_row;
    int first_row = blockIdx.x * groups_per_block + threadIdx.x / threads_per_row;
    int row_stride = groups_per_block * gridDim.x;

    for (int row = first_row; row < row_num; row += row_stride)
    {
        int nz_begin = offset[row];
        int nz_end = offset[row + 1];
        float partial = 0;
        for (int nz = nz_begin + lane; nz < nz_end; nz += threads_per_row)
        {
            partial += value[nz] * __ldg(&x[col_index[nz]]);   // x via read-only cache
        }

        // Sub-warp tree reduction over 4 lanes (offsets 2, 1).
        #pragma unroll
        for (int delta = 2; delta > 0; delta >>= 1)
        {
            partial += __shfl_down_sync(0xffffffff, partial, delta);
        }

        if (lane == 0)
        {
            y[row] = partial;
        }
    }
}


// CSR-vector SpMV specialized for threads_per_row == 2.  Lane pairs
// grid-stride over rows; the two lanes split the row's nonzeros, then a
// single shuffle-down of 1 (as in the original) adds lane 1's partial into
// lane 0, which writes y[row].
__global__ void spmv_csr_kernel_unified2(unsigned int row_num, unsigned int col_num, unsigned int* offset, unsigned int* col_index, float* value, float* x, float* y,
                                       unsigned int threads_per_row)
{
    int groups_per_block = blockDim.x / threads_per_row;
    int lane = threadIdx.x % threads_per_row;
    int first_row = blockIdx.x * groups_per_block + threadIdx.x / threads_per_row;
    int row_stride = groups_per_block * gridDim.x;

    for (int row = first_row; row < row_num; row += row_stride)
    {
        int nz_begin = offset[row];
        int nz_end = offset[row + 1];
        float partial = 0;
        for (int nz = nz_begin + lane; nz < nz_end; nz += threads_per_row)
        {
            partial += value[nz] * __ldg(&x[col_index[nz]]);   // x via read-only cache
        }

        // Two-lane reduction: pull the odd lane's partial into the even lane.
        partial += __shfl_down_sync(0xffffffff, partial, 1);

        if (lane == 0)
        {
            y[row] = partial;
        }
    }
}

// CSR-scalar SpMV specialized for threads_per_row == 1: each thread owns an
// entire row and grid-strides over rows, so no cross-lane reduction is
// needed (lane is always 0 when threads_per_row == 1).
__global__ void spmv_csr_kernel_unified1(unsigned int row_num, unsigned int col_num, unsigned int* offset, unsigned int* col_index, float* value, float* x, float* y,
                                       unsigned int threads_per_row)
{
    int groups_per_block = blockDim.x / threads_per_row;
    int lane = threadIdx.x % threads_per_row;
    int first_row = blockIdx.x * groups_per_block + threadIdx.x / threads_per_row;
    int row_stride = groups_per_block * gridDim.x;

    for (int row = first_row; row < row_num; row += row_stride)
    {
        int nz_begin = offset[row];
        int nz_end = offset[row + 1];
        float partial = 0;
        for (int nz = nz_begin + lane; nz < nz_end; nz += threads_per_row)
        {
            partial += value[nz] * __ldg(&x[col_index[nz]]);   // x via read-only cache
        }

        // No reduction needed: the whole row was accumulated by this thread.
        if (lane == 0)
        {
            y[row] = partial;
        }
    }
}





// Benchmarks one of the hand-written CSR SpMV kernels.
// Uploads the CSR matrix (row_offset/col_index/value) and dense vector x to
// the device, launches the kernel variant matching `threads_per_row`
// repeat_num times with <<<block_num, thread_num>>>, prints wall time and
// GFLOPS, and downloads the final y.
// Input vectors are taken by const reference (the original copied them all
// by value); only y is written.  threads_per_row must be in {1,2,4,8,16,32}.
void spmv_csr_matmul(unsigned int row_num, unsigned int col_num, unsigned int nnz_num, 
                    const vector<unsigned int> &row_offset, const vector<unsigned int> &col_index, const vector<float> &value,
                     const vector<float> &x, vector<float> &y, int block_num, int thread_num, unsigned int threads_per_row)
{
    // Select the specialized kernel for this threads_per_row.
    void (*fp)(unsigned int, unsigned int, unsigned int*, unsigned int*, float*, float*, float*, unsigned int) = NULL;
    switch (threads_per_row)
    {
        case 32: fp = spmv_csr_kernel_unified32; break;
        case 16: fp = spmv_csr_kernel_unified16; break;
        case 8:  fp = spmv_csr_kernel_unified8;  break;
        case 4:  fp = spmv_csr_kernel_unified4;  break;
        case 2:  fp = spmv_csr_kernel_unified2;  break;
        case 1:  fp = spmv_csr_kernel_unified1;  break;
        default: assert(false);
    }

    // Device buffers for the CSR matrix and the two dense vectors.
    unsigned int* d_row_offset;
    unsigned int* d_col_index;
    float* d_value;
    float* d_x;
    float* d_y;

    checkCudaErrors(cudaMalloc(&d_row_offset, (row_num + 1) * sizeof(unsigned int)));
    checkCudaErrors(cudaMalloc(&d_col_index, nnz_num * sizeof(unsigned int)));
    checkCudaErrors(cudaMalloc(&d_value, nnz_num * sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_x, col_num * sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_y, row_num * sizeof(float)));

    checkCudaErrors(cudaMemcpy(d_row_offset, row_offset.data(), (row_num + 1) * sizeof(unsigned int), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_col_index, col_index.data(), nnz_num * sizeof(unsigned int), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_value, value.data(), nnz_num * sizeof(float), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_x, x.data(), col_num * sizeof(float), cudaMemcpyHostToDevice));

    unsigned int repeat_num = 1500;
    struct timeval start, end;
    gettimeofday(&start, NULL);

    for (unsigned int i = 0; i < repeat_num; i++)
    {
        checkCudaErrors(cudaMemset(d_y, 0, y.size() * sizeof(float)));
        fp<<< block_num, thread_num>>>(row_num, col_num, d_row_offset, d_col_index, d_value, d_x, d_y, threads_per_row);
        checkCudaErrors(cudaGetLastError());      // catch bad launch configuration
        checkCudaErrors(cudaDeviceSynchronize()); // catch async execution errors
    }
    gettimeofday(&end, NULL);

    long timeuse = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
    // 2 flops (multiply + add) per stored nonzero per repetition.
    double gflops = ((double)2.0 * value.size() * repeat_num / ((double)timeuse / 1000000)) / 1000000000;

    printf("time=%fms, gflops=%f\n", timeuse / 1000.0, gflops);
    checkCudaErrors(cudaMemcpy(&y[0], d_y, y.size() * sizeof(float), cudaMemcpyDeviceToHost));

    // Free device memory.
    checkCudaErrors(cudaFree(d_row_offset));
    checkCudaErrors(cudaFree(d_col_index));
    checkCudaErrors(cudaFree(d_value));
    checkCudaErrors(cudaFree(d_x));
    checkCudaErrors(cudaFree(d_y));
}

// Usage: <exe> <matrix_base_name> <block_num> <thread_num> <threads_per_row>
// Reads "<matrix_base_name>_binary_csr.mtx" — a binary dump laid out as
// row_num, col_num, nnz, row_offset[row_num+1], col_index[nnz], value[nnz] —
// then benchmarks cuSPARSE SpMV (y = A*x with x = all ones) over 1500 runs.
int main(int argc, char **argv)
{
    if (argc < 5)
    {
        fprintf(stderr, "usage: %s <matrix_base_name> <block_num> <thread_num> <threads_per_row>\n", argv[0]);
        return EXIT_FAILURE;
    }

    string file_name = argv[1];
    cout << file_name << endl;

    // Launch parameters are only consumed by the (currently disabled)
    // hand-written-kernel benchmark below.
    int block_num = std::atoi(argv[2]);
    int thread_num = std::atoi(argv[3]);
    unsigned int threads_per_row = std::atoi(argv[4]);
    (void)block_num;
    (void)thread_num;
    (void)threads_per_row;

    unsigned int row_num = 0;
    unsigned int col_num = 0;
    unsigned int nnz_num = 0;

    string out_name = file_name + "_binary_csr.mtx";
    ifstream inFile(out_name, ios::in | ios::binary);
    if (!inFile)
    {
        fprintf(stderr, "cannot open %s\n", out_name.c_str());
        return EXIT_FAILURE;
    }

    inFile.read((char *)&row_num, sizeof(unsigned int));
    inFile.read((char *)&col_num, sizeof(unsigned int));
    inFile.read((char *)&nnz_num, sizeof(unsigned int));

    cout << "finish read file" << endl;

    vector<unsigned int> csr_row_offset(row_num + 1);
    vector<unsigned int> csr_col_index(nnz_num);
    vector<float> csr_value(nnz_num);

    inFile.read((char *)&csr_row_offset[0], (row_num + 1) * sizeof(unsigned int));
    inFile.read((char *)&csr_col_index[0], nnz_num * sizeof(unsigned int));
    inFile.read((char *)&csr_value[0], nnz_num * sizeof(float));

    vector<float> x(col_num, 1.0);
    vector<float> y(row_num, 0.0);

    // Hand-written kernel benchmark (disabled; enable to compare with cuSPARSE):
    // printf("block_num: %d  thread_num: %d threads_per_row: %u \n", block_num, thread_num, threads_per_row);
    // spmv_csr_matmul(row_num, col_num, nnz_num, csr_row_offset, csr_col_index, 
    //                 csr_value, x, y, block_num, thread_num, threads_per_row);

    unsigned int* d_row_offset;
    unsigned int* d_col_index;
    float* d_value;
    float* d_x;
    float* d_y;

    checkCudaErrors(cudaMalloc(&d_row_offset, (row_num + 1) * sizeof(unsigned int)));
    checkCudaErrors(cudaMalloc(&d_col_index, nnz_num * sizeof(unsigned int)));
    checkCudaErrors(cudaMalloc(&d_value, nnz_num * sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_x, col_num * sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_y, row_num * sizeof(float)));

    checkCudaErrors(cudaMemcpy( d_row_offset, &csr_row_offset[0], (row_num + 1) * sizeof(unsigned int), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy( d_col_index, &csr_col_index[0], nnz_num * sizeof(unsigned int), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy( d_value, &csr_value[0], nnz_num * sizeof(float), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy( d_x, &x[0], col_num * sizeof(float), cudaMemcpyHostToDevice));

    //--------------------------------------------------------------------------
    // cuSPARSE SpMV: y = alpha * A * x + beta * y
    float     alpha           = 1.0f;
    float     beta            = 0.0f;

    cusparseHandle_t     handle = NULL;
    cusparseSpMatDescr_t matA;
    cusparseDnVecDescr_t vecX, vecY;
    void*                dBuffer    = NULL;
    size_t               bufferSize = 0;
    CHECK_CUSPARSE( cusparseCreate(&handle) )
    // Sparse matrix A in CSR format (32-bit indices, zero-based, float values)
    CHECK_CUSPARSE( cusparseCreateCsr(&matA, row_num, col_num, nnz_num,
                                      d_row_offset, d_col_index, d_value,
                                      CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
                                      CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F) )
    // Dense vectors X and Y
    CHECK_CUSPARSE( cusparseCreateDnVec(&vecX, col_num, d_x, CUDA_R_32F) )
    CHECK_CUSPARSE( cusparseCreateDnVec(&vecY, row_num, d_y, CUDA_R_32F) )
    // Query and allocate the external workspace
    CHECK_CUSPARSE( cusparseSpMV_bufferSize(
                                 handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                 &alpha, matA, vecX, &beta, vecY, CUDA_R_32F,
                                 CUSPARSE_MV_ALG_DEFAULT, &bufferSize) )
    CHECK_CUDA( cudaMalloc(&dBuffer, bufferSize) )

    unsigned int repeat_num = 1500;
    struct timeval start, end;
    gettimeofday(&start, NULL);

    // Execute SpMV repeat_num times (the loop previously hard-coded 1500).
    for (unsigned int i = 0; i < repeat_num; i++)
    {
        CHECK_CUSPARSE( cusparseSpMV(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                    &alpha, matA, vecX, &beta, vecY, CUDA_R_32F,
                                    CUSPARSE_MV_ALG_DEFAULT, dBuffer) )
        cudaDeviceSynchronize();
    }
    gettimeofday(&end, NULL);

    long timeuse = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
    // 2 flops (multiply + add) per stored nonzero per repetition.
    double gflops = ((double)2.0 * csr_value.size() * repeat_num / ((double)timeuse / 1000000)) / 1000000000;

    printf("time=%fms, gflops=%f\n", timeuse / 1000.0, gflops);

    // Destroy matrix/vector descriptors and the handle.
    CHECK_CUSPARSE( cusparseDestroySpMat(matA) )
    CHECK_CUSPARSE( cusparseDestroyDnVec(vecX) )
    CHECK_CUSPARSE( cusparseDestroyDnVec(vecY) )
    CHECK_CUSPARSE( cusparseDestroy(handle) )
    //--------------------------------------------------------------------------
    // Copy the result back to the host.
    CHECK_CUDA( cudaMemcpy(y.data(), d_y, row_num * sizeof(float),
                           cudaMemcpyDeviceToHost) )

    // Optional verification against the CPU reference:
    // vector<float> y_res(row_num, 0.0);
    // spmv_cpu_kernel<unsigned int, float>(csr_row_offset, csr_col_index, csr_value, x, y_res, row_num);
    // for (unsigned int i = 0; i < row_num; i++)
    // {
    //     if (y_res[i] - y[i] > 1e-3)   // compare GPU result with CPU reference
    //     {
    //         printf("y_res: %.2f y: %.2f\n", y_res[i], y[i]);
    //         printf("Row num: %d\n", i);
    //         assert(false);
    //     }
    // }
    // printf("Right!\n");

    // Release device resources (previously leaked).
    CHECK_CUDA( cudaFree(dBuffer) )
    CHECK_CUDA( cudaFree(d_row_offset) )
    CHECK_CUDA( cudaFree(d_col_index) )
    CHECK_CUDA( cudaFree(d_value) )
    CHECK_CUDA( cudaFree(d_x) )
    CHECK_CUDA( cudaFree(d_y) )

    return EXIT_SUCCESS;
}
