#include "../include/SSS_utils.cuh"
#include "../include/CUDA_error.cuh"
#include "../include/SSS_matvec.h"
#include <cuda_runtime_api.h>
#include <cublas_v2.h>
#include <cuda_runtime_api.h>


#define gridsize 65535
#define blocksize 64
#define LONGROW_THRESHOLD 2048


extern double SpGEMM_count ;

//axpy
extern double axpy_comp_time ;
extern double axpy_time ;
//ax
extern double ax_comp_time ;
extern double ax_time ;
//dot
extern double dot_comp_time ;
extern double dot_time ;
//axpby
extern double axpby_comp_time ;
extern double axpby_time ;

//mxy
extern double mxy_time ;

//amxpy
extern double amxpy_time ;


//Return the current wall-clock time in milliseconds (gettimeofday based).
double SSS_get_time(void)
{
    struct timeval tv;

    gettimeofday(&tv, (struct timezone *)0);

    // seconds + microseconds, converted to milliseconds
    return ((double)tv.tv_sec + (double)tv.tv_usec * 1e-6) * 1000.0;
}

//Print a diagnostic message for a negative status code and terminate the
//process with that status.  Non-negative codes are treated as success and
//the function returns without doing anything.
void SSS_exit_on_errcode(const int status, const char *fctname)
{
    if (status >= 0) return;

    // Map the error code to its message; unknown codes print nothing.
    const char *msg = NULL;

    switch (status) {
        case ERROR_OPEN_FILE:       msg = "Cannot open file!";                       break;
        case ERROR_WRONG_FILE:      msg = "Wrong file format!";                      break;
        case ERROR_INPUT_PAR:       msg = "Wrong input arguments!";                  break;
        case ERROR_ALLOC_MEM:       msg = "Cannot allocate memory!";                 break;
        case ERROR_DATA_STRUCTURE:  msg = "Data structure mismatch!";                break;
        case ERROR_DATA_ZERODIAG:   msg = "Matrix has zero diagonal entries!";       break;
        case ERROR_DUMMY_VAR:       msg = "Unexpected input argument!";              break;
        case ERROR_AMG_interp_type: msg = "Unknown AMG interPolation type!";         break;
        case ERROR_AMG_COARSE_TYPE: msg = "Unknown AMG coarsening type!";            break;
        case ERROR_AMG_SMOOTH_TYPE: msg = "Unknown AMG smoother type!";              break;
        case ERROR_SOLVER_STAG:     msg = "Solver stagnation error!";                break;
        case ERROR_SOLVER_SOLSTAG:  msg = "Solution is close to zero!";              break;
        case ERROR_SOLVER_TOLSMALL: msg = "Tol is too small for the solver!";        break;
        case ERROR_SOLVER_matrix:   msg = "max iteration number reached!";           break;
        case ERROR_SOLVER_EXIT:     msg = "Solver exited unexpected!";               break;
        case ERROR_MISC:            msg = "Unknown error occurred!";                 break;
        case ERROR_UNKNOWN:         msg = "Function does not exit successfully!";    break;
        default:                    break;
    }

    if (msg != NULL)
        printf("### ERROR: %s -- %s\n", fctname, msg);

    exit(status);
}


//Free a heap pointer; NULL is accepted and ignored.
void SSS_free(void *mem)
{
    if (mem != NULL) {
        free(mem);
    }
}

 
//Print one line of iteration information for an iterative solver.
//For iter > 0, prints the residual row; for iter <= 0 (initial guess),
//prints the table header selected by stop_type first.
void SSS_print_itinfo(const int stop_type, const int iter, const double relres,
        const double absres, const double factor)
{
    if (iter > 0) {
        printf("%6d | %13.6e   | %13.6e  | %10.4lf\n", iter, relres, absres,
                factor);
        return;
    }

    // iter <= 0: header plus the initial residual row (no factor yet)
    printf("-----------------------------------------------------------\n");

    switch (stop_type) {
        case STOP_REL_RES:
            printf("It Num |   ||r||/||b||   |     ||r||      |  Conv. Factor\n");
            break;
        case STOP_REL_PRECRES:
            printf("It Num | ||r||_B/||b||_B |    ||r||_B     |  Conv. Factor\n");
            break;
        case STOP_MOD_REL_RES:
            printf("It Num |   ||r||/||x||   |     ||r||      |  Conv. Factor\n");
            break;
    }

    printf("-----------------------------------------------------------\n");
    printf("%6d | %13.6e   | %13.6e  |     -.-- \n", iter, relres, absres);
}

//Zero-fill a device array of n doubles.
//FIX: the previous guard was `tid <= n`, which wrote one element past the
//end of the array.  A grid-stride loop also makes the kernel correct for
//any n, independent of the launch configuration.
__global__ void SSS_device_array_zero_kernel(int n, double *d_val)
{
    const int stride = blockDim.x * gridDim.x;

    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += stride)
    {
        d_val[i] = 0.0;
    }
}

//Launch the zero-fill kernel with a grid sized to n (capped at gridsize),
//instead of unconditionally launching 65535 blocks for any input size.
void SSS_device_array_zero(int n,double *d_val)
{
    if (n <= 0) return;                          // nothing to clear

    int grid = (n + blocksize - 1) / blocksize;  // ceil(n / blocksize)
    if (grid > gridsize) grid = gridsize;

    SSS_device_array_zero_kernel<<<grid,blocksize>>>(n,d_val);
}


//Zero-fill a device array of n ints.
//FIX: the previous guard was `tid <= n`, which wrote one element past the
//end of the array.  A grid-stride loop also makes the kernel correct for
//any n, independent of the launch configuration.
__global__ void SSS_device_matrix_zero_kernel(int n, int *d_val)
{
    const int stride = blockDim.x * gridDim.x;

    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += stride)
    {
        d_val[i] = 0;
    }
}

//Launch the int zero-fill kernel with a grid sized to n (capped at
//gridsize), instead of unconditionally launching 65535 blocks.
void SSS_device_matrix_zero(int n,int *d_val)
{
    if (n <= 0) return;                          // nothing to clear

    int grid = (n + blocksize - 1) / blocksize;  // ceil(n / blocksize)
    if (grid > gridsize) grid = gridsize;

    SSS_device_matrix_zero_kernel<<<grid,blocksize>>>(n,d_val);
}


//Compute the Euclidean (L2) norm of vector x on the host.
double SSS_blas_vec_norm2(const SSS_VEC *x)
{
    double sum_sq = 0;

    for (int i = 0; i < x->n; ++i) {
        sum_sq += x->d[i] * x->d[i];
    }

    return sqrt(sum_sq);
}



//Compute the Euclidean (L2) norm of a length-n array on the host.
double SSS_blas_array_norm2(int n, const double *x)
{
    double sum_sq = 0.;

    for (int i = 0; i < n; ++i) {
        sum_sq += x[i] * x[i];
    }

    return sqrt(sum_sq);
}

/*
__device__ void SSS_blas_array_norm2_reduce_kernel_device(volatile double *sdata, int tid)
{
    sdata[tid] += sdata[tid + 32];
    sdata[tid] += sdata[tid + 16];
    sdata[tid] += sdata[tid + 8];
    sdata[tid] += sdata[tid + 4];
    sdata[tid] += sdata[tid + 2];
    sdata[tid] += sdata[tid + 1];
}

__global__ void SSS_blas_array_norm2_reduce_kernel(double *d_y, double *d_res, int n)
{
    //shared mem 1024 
    __shared__ double sdata[64];

    // 坐标索引
    unsigned int tid = threadIdx.x;
    unsigned int index = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
    unsigned int indexWithOffset = index + blockDim.x;

    if (index >= n) sdata[tid] = 0;
    else if (indexWithOffset >= n) sdata[tid] = d_y[index];
    else sdata[tid] = d_y[index] + d_y[indexWithOffset];

    __syncthreads();

    // 在共享内存中对每一个块进行规约计算
    for (unsigned int s = blockDim.x / 2; s>32; s >>= 1)
    {
        if (tid < s) sdata[tid] += sdata[tid + s];

        __syncthreads();
    }
    if (tid < 32) SSS_blas_array_norm2_reduce_kernel_device(sdata, tid);

    // 把计算结果从共享内存写回全局内存
    if (tid == 0) 
    {
        d_res[blockIdx.x] = sdata[0];
        //printf("%d\n",blockIdx.x);
    }
}

__global__ void SSS_blas_array_norm2_muladd_kernel(int n, double *d_x ,double *d_y)
{
    int tid = blockDim.x * blockIdx.x +threadIdx.x;
    
    if(tid<n)
    {
        d_y[tid] = d_x[tid] * d_x[tid] ;
    }
}

double SSS_blas_array_norm2_cuda(int n, double *d_x)
{

    double *d_y=NULL;
    CHECK(cudaMalloc((void **)&d_y,n * sizeof(double)));
    //Step 1 ,muladd
    SSS_blas_array_norm2_muladd_kernel<<<64,64>>>(n,d_x,d_y);
    
    double *d_res = NULL;
    CHECK(cudaMalloc((void **)&d_res,1 * sizeof(double)));
    
    //Step 2 ,reduce
    SSS_blas_array_norm2_reduce_kernel<<<64,64>>>(d_y,d_res,n);
    
    double *res = (double*)malloc(1*sizeof(double));
    CHECK(cudaMemcpy(res,d_res,1*sizeof(double),cudaMemcpyDeviceToHost));

    return sqrt(res[0]);
}


double SSS_blas_vec_norm2_cuda(const SSS_VEC *x)
{
    
    int n = x->n;

    double *d_x =NULL;
    cudaMalloc((void **)&d_x,n * sizeof(double));
    cudaMemcpy(d_x, x->d, n * sizeof(double), cudaMemcpyHostToDevice);

    double *d_y=NULL;
    cudaMalloc((void **)&d_y,n * sizeof(double));
    //Step 1 ,muladd
    SSS_blas_array_norm2_muladd_kernel<<<64,64>>>(n,d_x,d_y);
    
    double *d_res = NULL;
    cudaMalloc((void **)&d_res,1 * sizeof(double));
    
    //Step 2 ,reduce
    SSS_blas_array_norm2_reduce_kernel<<<64,64>>>(d_y,d_res,n);
    
    double *res = (double*)malloc(1*sizeof(double));
    cudaMemcpy(res,d_res,1*sizeof(double),cudaMemcpyDeviceToHost);

    return sqrt(res[0]);
}
*/

//amxpy: y += alpha * A * x (CSR SpMV accumulate on the host), with the
//elapsed wall-clock time added to the global amxpy_time counter.
void SSS_blas_mv_amxpy(double alpha, const SSS_MAT *A, const SSS_VEC *x, SSS_VEC *y)
{
    double t0 = SSS_get_time();

    const int m = A->num_rows;
    const int *ia = A->row_ptr;
    const int *ja = A->col_idx;
    const double *av = A->val;

    for (int i = 0; i < m; ++i) {
        double row_sum = 0.0;
        const int first = ia[i];
        const int last  = ia[i + 1];

        for (int k = first; k < last; ++k)
            row_sum += av[k] * x->d[ja[k]];

        y->d[i] += row_sum * alpha;
    }

    amxpy_time += SSS_get_time() - t0;
}

//CUDA kernel: y += alpha * A * x (CSR, one thread per row).
//FIX: alpha was declared `const int`, silently truncating the double
//scalar passed by alpha_spmv_cuda (e.g. alpha = 0.5 became 0); it is now
//a double, matching the caller and the host reference SSS_blas_mv_amxpy.
__global__ void SSS_blas_mv_amxpy_kernel(const double alpha ,const int m,int *row_ptr,int *col_idx,double *A_val,double *x_val,double *y_val)
{
    const int row = blockDim.x * blockIdx.x + threadIdx.x;

    if (row < m)
    {
        double acc = 0;
        const int first = row_ptr[row];
        const int last  = row_ptr[row + 1];

        for (int k = first; k < last; k++)
        {
            acc += A_val[k] * x_val[col_idx[k]];
        }

        y_val[row] += acc * alpha;
    }
}

//Host wrapper: y += alpha * A * x on the device (CSR, one thread per row).
//FIX: the grid is now sized to the n rows instead of always launching
//65535 blocks, and n <= 0 no longer launches an empty kernel.
void alpha_spmv_cuda(double alpha, int n,int *d_row_ptr,int * d_col_idx,double * d_A_val,double * d_x_val,double * d_y_val)
{
    if (n <= 0) return;

    struct timeval spmv1, spmv2;
    gettimeofday(&spmv1,NULL);

    const int grid = (n + blocksize - 1) / blocksize;   // ceil(n / blocksize)
    SSS_blas_mv_amxpy_kernel<<<grid,blocksize>>>(alpha,n,d_row_ptr,d_col_idx,d_A_val,d_x_val,d_y_val);
    cudaDeviceSynchronize();   // launch is async; wait so the timer is meaningful

    gettimeofday(&spmv2,NULL);
    double spmv_t = (spmv2.tv_sec - spmv1.tv_sec) * 1000.0 + (spmv2.tv_usec - spmv1.tv_usec) / 1000.0;
    (void)spmv_t;   // not accumulated anywhere at present (spmv_time is commented out)
}

//mxy: y = A * x (CSR SpMV on the host), with the elapsed wall-clock time
//added to the global mxy_time counter.
void SSS_blas_mv_mxy(const SSS_MAT *A, const SSS_VEC *x, SSS_VEC *y)
{
    double t0 = SSS_get_time();

    const int m = A->num_rows;
    const int *ia = A->row_ptr;
    const int *ja = A->col_idx;
    const double *av = A->val;

    for (int i = 0; i < m; ++i) {
        double row_sum = 0.0;
        const int first = ia[i];
        const int last  = ia[i + 1];

        for (int k = first; k < last; ++k)
            row_sum += av[k] * x->d[ja[k]];

        y->d[i] = row_sum;
    }

    mxy_time += SSS_get_time() - t0;
}

// Long rows: one thread per row over the first `longrow` rows.
// NOTE(review): despite the name, the guard below only computes rows whose
// length is <= LONGROW_THRESHOLD and writes 0 for longer rows — presumably
// the truly long rows are handled by another kernel; confirm against the
// call sites before changing this condition.
__global__
void SSS_blas_spmv_kernel_thread_long(const int          longrow,
                                 const int         *d_csrRowPtr,
                                 const int         *d_csrColIdx,
                                 const double      *d_csrVal,
                                 const double      *d_x,
                                 double            *d_y)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    
    if (tid < longrow)
    {
        double sum = 0;
        int start = d_csrRowPtr[tid];
        int stop  = d_csrRowPtr[tid+1];
        if (stop - start <= LONGROW_THRESHOLD)
        {
            for (int j = start; j < stop; j++)
            {
                sum += d_x[d_csrColIdx[j]] * d_csrVal[j];
            } 
        }
        // rows skipped by the guard get an explicit 0
        d_y[tid] = sum;
    }
}

//Short rows: CSR SpMV with one thread per row.  Rows longer than
//LONGROW_THRESHOLD are skipped and receive an explicit 0 in d_y.
__global__
void SSS_blas_spmv_kernel_thread(const int          num_rows,
                                 const int         *d_csrRowPtr,
                                 const int         *d_csrColIdx,
                                 const double      *d_csrVal,
                                 const double      *d_x,
                                 double            *d_y)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= num_rows) return;

    const int lo = d_csrRowPtr[row];
    const int hi = d_csrRowPtr[row + 1];

    double acc = 0;
    if (hi - lo <= LONGROW_THRESHOLD)
    {
        for (int j = lo; j < hi; j++)
            acc += d_x[d_csrColIdx[j]] * d_csrVal[j];
    }

    d_y[row] = acc;   // long rows are left as 0 here
}

//CSR SpMV: one warp (32 threads) per row, reduction through shared memory.
//FIX: added __syncwarp() between the shared-memory reduction steps.  On
//Volta and later, threads of a warp no longer execute in lockstep, so the
//unsynchronized read-after-write on sum[] was a data race (the file already
//uses __shfl_xor_sync, i.e. it targets these architectures).
__global__ 
void SSS_blas_spmv_kernel_warp ( const int       num_rows, 
                                 const int      *d_csrRowPtr,
                                 const int      *d_csrColIdx, 
                                 const double   *d_csrVal, 
                                 const double   *d_x, 
                                 double         *d_y)
{
    __shared__ double sum[blocksize];

    // global thread index / warp index / lane index within the warp [0,31]
    int tid  = blockDim.x * blockIdx.x + threadIdx.x;
    int row  = tid / 32;          // one warp per row
    int lane = tid & (32 - 1);

    // all 32 lanes of a warp share the same row, so the warp exits together
    if (row >= num_rows) return;

    int row_start = d_csrRowPtr[row];
    int row_end   = d_csrRowPtr[row + 1];

    if (row_start == row_end)
    {
        if (lane == 0) d_y[row] = 0;   // empty row
        return;                        // whole warp returns together
    }

    // per-lane partial sum over a 32-strided slice of the row
    sum[threadIdx.x] = 0;
    for (int i = row_start + lane; i < row_end; i += 32)
    {
        sum[threadIdx.x] += d_csrVal[i] * d_x[d_csrColIdx[i]];
    }
    __syncwarp();

    // tree reduction in shared memory, one warp barrier per step
    if (lane < 16) sum[threadIdx.x] += sum[threadIdx.x + 16];
    __syncwarp();
    if (lane < 8)  sum[threadIdx.x] += sum[threadIdx.x + 8];
    __syncwarp();
    if (lane < 4)  sum[threadIdx.x] += sum[threadIdx.x + 4];
    __syncwarp();
    if (lane < 2)  sum[threadIdx.x] += sum[threadIdx.x + 2];
    __syncwarp();
    if (lane < 1)  sum[threadIdx.x] += sum[threadIdx.x + 1];

    // lane 0 holds the full row sum
    if (lane == 0)
    {
        d_y[row] = sum[threadIdx.x];
    }
}


//Butterfly (XOR-shuffle) reduction: returns the sum of `sum` across all
//32 lanes of the warp; every lane receives the full result.
__forceinline__ __device__
double sum_32_shfl(double sum)
{
    sum += __shfl_xor_sync(0xffffffff, sum, 16);
    sum += __shfl_xor_sync(0xffffffff, sum, 8);
    sum += __shfl_xor_sync(0xffffffff, sum, 4);
    sum += __shfl_xor_sync(0xffffffff, sum, 2);
    sum += __shfl_xor_sync(0xffffffff, sum, 1);
    return sum;
}

/*
    Warp-level primitives use shuffle instructions to let one thread read
    another thread's register value directly, as long as both threads
    belong to the same warp.  This has lower latency than communicating
    through shared memory and consumes no extra memory for the exchange.
*/

//CSR SpMV: one warp (32 threads) per row, warp reduction via XOR shuffles
//(no shared memory needed for the reduction).
__global__ 
void SSS_blas_spmv_kernel_warp_shfl (   const int     num_rows , 
                                        const int    *d_csrRowPtr ,
                                        const int    *d_csrColIdx , 
                                        const double *d_csrVal , 
                                        const double *d_x, 
                                        double       *d_y)
{
    const int global_id = blockDim.x * blockIdx.x + threadIdx.x;
    const int row       = global_id / 32;         // one warp per row
    const int lane      = global_id & (32 - 1);   // lane index in [0,31]

    // every lane of a warp computes the same row, so warps exit together
    if (row >= num_rows) return;

    const int lo = d_csrRowPtr[row];
    const int hi = d_csrRowPtr[row + 1];

    // empty row: lane 0 records an explicit zero, warp is done
    if (lo == hi)
    {
        if (lane == 0) d_y[row] = 0;
        return;
    }

    // each lane accumulates a 32-strided slice of the row
    double partial = 0;
    for (int j = lo + lane; j < hi; j += 32)
    {
        partial += d_csrVal[j] * d_x[d_csrColIdx[j]];
    }

    // butterfly reduction across the warp
    for (int offset = 32 / 2; offset > 0; offset >>= 1)
    {
        partial += __shfl_xor_sync(0xffffffff, partial, offset);
    }

    // lane 0 writes the row result
    if (lane == 0)
    {
        d_y[row] = partial;
    }
}


//Device CSR SpMV wrapper: y = A * x using the 1-thread-per-row kernel.
//The warp-per-row variants are kept below (commented out) for tuning.
//FIX: removed the redundant leading cudaDeviceSynchronize, guarded n <= 0
//(a zero-sized grid is an invalid launch configuration), and launch errors
//are now surfaced via cudaGetLastError instead of being silently dropped.
void spmv_cuda(int n,int *d_A_row_ptr,int *d_A_col_idx,double *d_A_val,double *d_x_val,double *d_y_val)
{
    if (n <= 0) return;

    // blocksize may range 64 ~ 1024
    int grid_size      = (n + blocksize - 1) / blocksize;            // 1 thread / row
    int grid_size_warp = ceil ((double)n / (double)(blocksize / 32)); // 1 warp / row
    (void)grid_size_warp;   // used only by the commented-out variants below

    // 1 thread 1 row
    SSS_blas_spmv_kernel_thread<<<grid_size,blocksize>>>(n,d_A_row_ptr,d_A_col_idx,d_A_val,d_x_val,d_y_val);

    // 1 warp 1 row (shared memory)
    //SSS_blas_spmv_kernel_warp<<<grid_size_warp,blocksize>>>(n,d_A_row_ptr,d_A_col_idx,d_A_val,d_x_val,d_y_val);

    // 1 warp 1 row (shuffle)
    //SSS_blas_spmv_kernel_warp_shfl<<<grid_size_warp,blocksize>>>(n,d_A_row_ptr,d_A_col_idx,d_A_val,d_x_val,d_y_val);

    // surface launch-configuration errors, then wait for completion
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("spmv_cuda: kernel launch failed: %s\n", cudaGetErrorString(err));

    cudaDeviceSynchronize();
}








// dot: inner product of two length-n host arrays.
double SSS_blas_array_dot(int n, const double *x, const double *y)
{
    double acc = 0.0;

    for (int i = 0; i < n; ++i) {
        acc += x[i] * y[i];
    }

    return acc;
}


//Dot product of two length-n host arrays, computed with cuBLAS.
//Copies x and y to the device, runs cublasDdot, and accumulates the
//elapsed wall-clock times (milliseconds) into the globals dot_time
//(total) and dot_comp_time (cublasDdot call only).
//NOTE(review): a cuBLAS handle and two device buffers are created and
//destroyed on every call — expensive if this sits in a solver loop;
//SSS_blas_array_dot_cublas below is the reusable-handle variant.
double SSS_blas_array_dot_cuda(int n, const double *x, const double *y)
{
    struct timeval dot1, dot2;
    gettimeofday(&dot1,NULL);

    double value = 0.0;
    double *d_x = NULL, *d_y = NULL;

    
    cublasHandle_t handle;
    cublasCreate_v2(&handle); // create the cuBLAS handle
    cudaMalloc(&d_x, n *sizeof(double));
    cublasSetVector(n, sizeof(double), x, 1, d_x, 1);
    cudaMalloc(&d_y, n *sizeof(double));
    cublasSetVector(n, sizeof(double), y, 1, d_y, 1);


    // time only the dot computation itself
    struct timeval dot_comp1, dot_comp2;
    gettimeofday(&dot_comp1,NULL);
    
    cublasDdot_v2(handle, n, d_x, 1, d_y, 1, &value);
    
    gettimeofday(&dot_comp2,NULL);
    double dot_comp_t = (dot_comp2.tv_sec - dot_comp1.tv_sec) * 1000.0 + (dot_comp2.tv_usec - dot_comp1.tv_usec) / 1000.0;
    dot_comp_time+=dot_comp_t;

    cudaFree(d_x);
    cudaFree(d_y);

    cublasDestroy(handle); // destroy the cuBLAS handle

    gettimeofday(&dot2,NULL);
    double dot_t = (dot2.tv_sec - dot1.tv_sec) * 1000.0 + (dot2.tv_usec - dot1.tv_usec) / 1000.0;
    dot_time += dot_t;
    return value;

}

//Dot product of two device vectors using a caller-owned cuBLAS handle.
//d_x and d_y are device pointers; the caller keeps ownership of the
//handle and both buffers (nothing is allocated or freed here).
double SSS_blas_array_dot_cublas(cublasHandle_t handle , int n,  double *d_x, const double *d_y)
{
    double result = 0.0;

    cublasDdot_v2(handle, n, d_x, 1, d_y, 1, &result);

    return result;
}

//axpy (dense): y += a * x   (accumulates into y; the old comment
//incorrectly described this as y = a * x)
void SSS_blas_array_axpy(int n, double a, const double *x, double *y)
{
    for (int i = 0; i < n; ++i) {
        y[i] += a * x[i];
    }
}

// __global__ void SSS_blas_array_axpy_kernel(int n, double a,const double *d_x,  double *d_y)
// {
//     int tid = blockDim.x * blockIdx.x +threadIdx.x;
//     if(tid<n)
//     {
//         d_y[tid] += a * d_x[tid];
//     }
// }

//axpy on host arrays via cuBLAS: y += a * x.
//Copies both vectors to the device, runs cublasDaxpy, copies y back, and
//accumulates wall-clock times (ms) into the globals axpy_time (total) and
//axpy_comp_time (cublasDaxpy call only).
void SSS_blas_array_axpy_cuda(int n, double a, const double *x, double *y)
{
    double t_total0 = SSS_get_time();

    double *d_x, *d_y;

    cublasHandle_t handle;
    cublasCreate_v2(&handle);                 // create the cuBLAS handle
    cudaMalloc(&d_x, sizeof(double) * n);
    cudaMalloc(&d_y, sizeof(double) * n);

    cublasSetVector(n, sizeof(double), x, 1, d_x, 1);   // H2D
    cublasSetVector(n, sizeof(double), y, 1, d_y, 1);

    // time only the axpy computation itself
    double t_comp0 = SSS_get_time();
    cublasDaxpy_v2(handle, n, &a, d_x, 1, d_y, 1);
    double t_comp1 = SSS_get_time();
    axpy_comp_time += t_comp1 - t_comp0;

    cublasGetVector(n, sizeof(double), d_y, 1, y, 1);   // D2H

    cudaFree(d_x);
    cudaFree(d_y);
    cublasDestroy(handle);                    // destroy the cuBLAS handle

    axpy_time += SSS_get_time() - t_total0;
}


//Compute the infinity (max-absolute-value) norm of a length-n array.
double SSS_blas_array_norminf(int n, const double *x)
{
    double best = 0.0;

    for (int i = 0; i < n; ++i) {
        best = SSS_max(best, SSS_ABS(x[i]));
    }

    return best;
}




//Fill every element of the length-n array x with the scalar value Ax.
void SSS_blas_array_set( int n, double *x, double Ax)
{
    for (int i = 0; i < n; ++i) {
        x[i] = Ax;
    }
}

//axpby (dense): y = a*x + b*y, element-wise on host arrays.
void SSS_blas_array_axpby(int n, double a, const double *x, double b, double *y)
{
    for (int i = 0; i < n; ++i) {
        y[i] = a * x[i] + b * y[i];
    }
}

//CUDA kernel: element-wise d_y = a*d_x + b*d_y for indices below n.
__global__ void SSS_blas_array_axpby_kernel(int n, double a, double b,const double *d_x,  double *d_y)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;

    d_y[idx] = a * d_x[idx] + b * d_y[idx];
}

//axpby on host arrays via the GPU: y = a*x + b*y.
//Accumulates wall-clock times (ms) into axpby_time (total) and
//axpby_comp_time (kernel only).
//FIX: the grid is sized to n instead of always launching 65535 blocks,
//and the kernel is synchronized before the compute timer stops — kernel
//launches are asynchronous, so the old code only timed launch overhead.
void SSS_blas_array_axpby_cuda(int n, double a, const double *x, double b, double *y)
{
    if (n <= 0) return;

    struct timeval axpby1, axpby2;
    gettimeofday(&axpby1,NULL);

    double *d_x = NULL;
    cudaMalloc((void **)&d_x, n * sizeof(double));
    cudaMemcpy(d_x, x, n * sizeof(double), cudaMemcpyHostToDevice);

    double *d_y = NULL;
    cudaMalloc((void **)&d_y, n * sizeof(double));
    cudaMemcpy(d_y, y, n * sizeof(double), cudaMemcpyHostToDevice);

    const int grid = (n + blocksize - 1) / blocksize;   // ceil(n / blocksize)

    double a1 = SSS_get_time();
    SSS_blas_array_axpby_kernel<<<grid,blocksize>>>(n, a, b, d_x, d_y);
    cudaDeviceSynchronize();   // wait for the kernel so the timing is real
    double a2 = SSS_get_time();
    axpby_comp_time += a2 - a1;

    cudaMemcpy(y, d_y, n * sizeof(double), cudaMemcpyDeviceToHost);

    cudaFree(d_x);
    cudaFree(d_y);

    gettimeofday(&axpby2,NULL);
    double axpby_t = (axpby2.tv_sec - axpby1.tv_sec) * 1000.0 + (axpby2.tv_usec - axpby1.tv_usec) / 1000.0;

    axpby_time += axpby_t;
}

//ax (dense): scale the length-n array x in place by a.
void SSS_blas_array_ax(int n, double a, double *x)
{
    for (int i = 0; i < n; ++i) {
        x[i] *= a;
    }
}

//CUDA kernel: scale d_x in place by a for indices below n.
void __global__ SSS_blas_array_ax_kernel(const int n,const double a,double *d_x)
{   
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;

    d_x[idx] *= a;
}

//Scale a host array in place by a using the GPU: x *= a.
//Accumulates wall-clock times (ms) into ax_time (total) and ax_comp_time
//(kernel only).
//FIX: the grid is sized to n instead of always launching 65535 blocks,
//and the kernel is synchronized before the compute timer stops — the
//launch is asynchronous, so the old code only timed launch overhead.
void SSS_blas_array_ax_cuda(int n,double a,double *x)
{    
    if (n <= 0) return;

    double t_total0 = SSS_get_time();

    double *d_x = NULL;
    cudaMalloc((void **)&d_x, n * sizeof(double));
    cudaMemcpy(d_x, x, n * sizeof(double), cudaMemcpyHostToDevice);

    const int grid = (n + blocksize - 1) / blocksize;   // ceil(n / blocksize)

    double t_comp0 = SSS_get_time();
    SSS_blas_array_ax_kernel<<<grid,blocksize>>>(n, a, d_x);
    cudaDeviceSynchronize();   // wait for the kernel so the timing is real
    double t_comp1 = SSS_get_time();

    CHECK(cudaMemcpy(x, d_x, n * sizeof(double), cudaMemcpyDeviceToHost));
    cudaFree(d_x);

    ax_comp_time += t_comp1 - t_comp0;
    ax_time += SSS_get_time() - t_total0;
}



// SSS_MAT SSS_blas_spgemm_device(const SSS_MAT *A,const SSS_MAT *B)
// {
//     SSS_MAT C;
//     int m = A->num_rows;
//     int n = B->num_cols;
//     int k = A->num_cols;

//     int nnzA = A->num_nnzs;
//     int nnzB = B->num_nnzs;

//     int nnzC;
//     int baseC;
//     //handle
//     cusparseHandle_t cusparsehandle=0;
//     cusparseMatDescr_t descrA=0, descrB=0, descrC=0;

//     /* initialize cusparse library */
//     cusparseCreate(&cusparsehandle);

//     /* create and setup matrix descriptor */
//     cusparseCreateMatDescr(&descrA);
//     cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
//     cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO);

//     cusparseCreateMatDescr(&descrB);
//     cusparseSetMatType(descrB, CUSPARSE_MATRIX_TYPE_GENERAL);
//     cusparseSetMatIndexBase(descrB, CUSPARSE_INDEX_BASE_ZERO);

//     cusparseCreateMatDescr(&descrC);
//     cusparseSetMatType(descrC, CUSPARSE_MATRIX_TYPE_GENERAL);
//     cusparseSetMatIndexBase(descrC, CUSPARSE_INDEX_BASE_ZERO);

//     cusparseOperation_t transA = CUSPARSE_OPERATION_NON_TRANSPOSE;
//     cusparseOperation_t transB = CUSPARSE_OPERATION_NON_TRANSPOSE;

//     // nnzTotalDevHostPtr points to host memory
//     int *nnzTotalDevHostPtr = &nnzC;
//     cusparseSetPointerMode(cusparsehandle, CUSPARSE_POINTER_MODE_HOST);
    
//     cudaMalloc((void **)&C.d_row_ptr,(m+1) * sizeof(int));
//     //Step 1: nnzC
//     cusparseXcsrgemmNnz(cusparsehandle, transA, transB,
//         m, n, k,
//         descrA, nnzA, A->d_row_ptr, A->d_col_idx,
//         descrB, nnzB, B->d_row_ptr, B->d_col_idx,
//         descrC,       C.d_row_ptr, nnzTotalDevHostPtr );
    
//     if (NULL != nnzTotalDevHostPtr)
//     {
//         nnzC = *nnzTotalDevHostPtr;
//     }
//     else
//     {
//         cudaMemcpy(&nnzC,  C.d_row_ptr + m, sizeof(int), cudaMemcpyDeviceToHost);
//         cudaMemcpy(&baseC, C.d_row_ptr,     sizeof(int), cudaMemcpyDeviceToHost);
//         nnzC -= baseC;
//     }

//     printf("nnzC = %d\n",nnzC);

//     cudaMalloc((void **)&C.d_col_idx, nnzC * sizeof(int));
//     cudaMalloc((void **)&C.d_val,     nnzC * sizeof(double));
//     //Step 2: computer 
//     cusparseDcsrgemm(cusparsehandle, transA, transB, m, n, k,
//         descrA, nnzA, A->d_val, A->d_row_ptr, A->d_col_idx,
//         descrB, nnzB, B->d_val, B->d_row_ptr, B->d_col_idx,
//         descrC,       C.d_val, C.d_row_ptr, C.d_col_idx);
        
//     C.num_rows = m;
//     C.num_cols = n;
//     C.num_nnzs = nnzC;
    
//     return C;
// }




//In-place exclusive prefix sum: input[i] becomes the sum of
//input[0..i-1] and input[0] becomes 0.
//Note: a length-1 array is deliberately left unchanged (original contract).
template<typename T>
void exclusive_scan(T *input, int length)
{
    if (length == 0 || length == 1)
        return;

    T carried = input[0];   // original value displaced by the previous write
    input[0] = 0;

    for (int i = 1; i < length; i++)
    {
        T current = input[i];
        input[i] = input[i - 1] + carried;
        carried = current;
    }
}


// Symbolic phase of hash-based SpGEMM (C = A * B): counts the nonzeros of
// every row of C in d_csrRowPtrC (which the caller must zero-initialize),
// converts the counts into row pointers via an exclusive scan, and returns
// nnz(C).  d_csrColIdxC / d_csrValC are not touched in this phase.
// Returns -1 when the matrix dimensions are incompatible.
// FIX: the per-thread hash-table buffer tmpIdx2D0_g was never freed (leak).
int spgemm_hash_s( const int           *d_csrRowPtrA,
    const int           *d_csrColIdxA,
    const double    *d_csrValA,
    const int            mA,
    const int            nA,
    const int            nnzA,
    const int           *d_csrRowPtrB,
    const int           *d_csrColIdxB,
    const double    *d_csrValB,
    const int            mB,
    const int            nB,
    const int            nnzB,
          int           *d_csrRowPtrC,
          int           *d_csrColIdxC,
          double    *d_csrValC,
    const int            mC,
    const int            nC,
          int           nnzC,
    const int           nthreads)
{
    if (nA != mB)
    {
        printf("Cannot multiply matrix A of size %i x %i and matrix B of size %i x %i, return.\n",
               mA, nA, mB, nB);
        return -1;
    }

    // Hash-table size: the largest possible number of intermediate products
    // of any single row of C (an upper bound, so probing cannot overflow).
    int hashsize_full_reg = 0;
    for (int blki = 0; blki < mA; blki++)
    {
        int row_bound = 0;
        for (int l = d_csrRowPtrA[blki]; l < d_csrRowPtrA[blki + 1]; l++)
        {
            int cola = d_csrColIdxA[l];
            row_bound += d_csrRowPtrB[cola + 1] - d_csrRowPtrB[cola];
        }
        if (row_bound > hashsize_full_reg)
            hashsize_full_reg = row_bound;
    }

    // One private hash table (column-index slots only) per OpenMP thread.
    int *tmpIdx2D0_g = (int *)malloc(nthreads * hashsize_full_reg * sizeof(int));

    #pragma omp parallel for
    for (int blki = 0; blki < mA; blki++)
    {
        int thread_id = omp_get_thread_num();
        int *tmpIdx2D0 = tmpIdx2D0_g + thread_id * hashsize_full_reg;

        // -1 marks an empty slot; reset this thread's table for the new row.
        for (int l = 0; l < hashsize_full_reg; l++)
            tmpIdx2D0[l] = -1;

        for (int blkj = d_csrRowPtrA[blki]; blkj < d_csrRowPtrA[blki + 1]; blkj++)
        {
            int col = d_csrColIdxA[blkj];
            for (int l = d_csrRowPtrB[col]; l < d_csrRowPtrB[col + 1]; l++)
            {
                const int key = d_csrColIdxB[l];
                int hashadr = key % hashsize_full_reg;

                // Linear probing; the table is an upper bound so it cannot overflow.
                while (1)
                {
                    const int keyexist = tmpIdx2D0[hashadr];
                    if (keyexist == key)
                    {
                        break;                       // column already counted
                    }
                    else if (keyexist == -1)
                    {
                        tmpIdx2D0[hashadr] = key;    // first time this column appears
                        d_csrRowPtrC[blki]++;
                        break;
                    }
                    else
                    {
                        hashadr = (hashadr + 1) % hashsize_full_reg;
                    }
                }
            }
        }
    }

    free(tmpIdx2D0_g);   // was leaked before

    // Turn per-row counts into row pointers; the last entry is nnz(C).
    exclusive_scan(d_csrRowPtrC, mC + 1);
    nnzC = d_csrRowPtrC[mC];

    return nnzC;
}

//Exchange the values pointed to by a and b.
template<typename T>
void swap(T *a , T *b)
{
    T held = *a;
    *a = *b;
    *b = held;
}

// quick sort key-value pair (child function)
// Partition key[ ] (and the parallel val[ ] array) around the element that
// initially sits at pivot_index: the pivot is first parked at the end of the
// range, elements smaller than it are swapped toward the front, and the
// pivot is finally placed between the two groups.  Returns the pivot's
// final index.
// NOTE(review): callers in this file always pass pivot_index == 0; for a
// nonzero pivot_index the arithmetic below mixes pivot-relative offsets
// (key[pivot_index+i]) with absolute ones (key[small_length]) — confirm
// before reusing with pivot_index != 0.
template<typename iT, typename vT>
int partition(iT *key, vT *val, int length, int pivot_index)
{
    int i  = 0 ;
    int small_length = pivot_index;   // next write slot for "smaller than pivot"

    iT pivot = key[pivot_index];
    // park the pivot at the end of the range
    swap<iT>(&key[pivot_index], &key[pivot_index + (length - 1)]);
    swap<vT>(&val[pivot_index], &val[pivot_index + (length - 1)]);

    for(; i < length; i++)
    {
        if(key[pivot_index+i] < pivot)
        {
            swap<iT>(&key[pivot_index+i],  &key[small_length]);
            swap<vT>(&val[pivot_index+i],&val[small_length]);
            small_length++;
        }
    }

    // move the pivot into its final position
    swap<iT>(&key[pivot_index + length - 1],  &key[small_length]);
    swap<vT>(&val[pivot_index + length - 1],&val[small_length]);

    return small_length;
}

//Quick-sort key[0..length-1] ascending, permuting the parallel val array
//in lockstep (keys and values stay paired).
template<typename iT, typename vT>
void quick_sort_key_val_pair(iT *key, vT *val, int length)
{
    if (length == 0 || length == 1)
        return;

    // partition around the first element, then recurse on both halves
    const int mid = partition<iT, vT>(key, val, length, 0);
    quick_sort_key_val_pair<iT, vT>(key, val, mid);
    quick_sort_key_val_pair<iT, vT>(key + mid + 1, val + mid + 1, length - mid - 1);
}

// Numeric phase of hash-based SpGEMM (C = A * B).  d_csrRowPtrC must hold
// the row pointers produced by the symbolic phase (spgemm_hash_s); this
// phase fills d_csrColIdxC / d_csrValC and finally sorts every row of C by
// column index.
// FIX: on a dimension mismatch the function used to print the error and
// then fall through and compute anyway; it now returns early, matching
// spgemm_hash_s.
void spgemm_hash_n( const int           *d_csrRowPtrA,
    const int           *d_csrColIdxA,
    const double    *d_csrValA,
    const int            mA,
    const int            nA,
    const int            nnzA,
    const int           *d_csrRowPtrB,
    const int           *d_csrColIdxB,
    const double    *d_csrValB,
    const int            mB,
    const int            nB,
    const int            nnzB,
          int           *d_csrRowPtrC,
          int           *d_csrColIdxC,
          double    *d_csrValC,
    const int            mC,
    const int            nC,
          int           nnzC,
    const int           nthreads)
{
    if (nA != mB)
    {
        printf("Cannot multiply matrix A of size %i x %i and matrix B of size %i x %i, return.\n",
               mA, nA, mB, nB);
        return;   // previously fell through and computed with mismatched dims
    }

    // Hash-table size: upper bound on the intermediate products of any row.
    int hashsize_full_reg = 0;
    for (int blki = 0; blki < mA; blki++)
    {
        int row_bound = 0;
        for (int l = d_csrRowPtrA[blki]; l < d_csrRowPtrA[blki + 1]; l++)
        {
            int cola = d_csrColIdxA[l];
            row_bound += d_csrRowPtrB[cola + 1] - d_csrRowPtrB[cola];
        }
        if (row_bound > hashsize_full_reg)
            hashsize_full_reg = row_bound;
    }

    // Per-thread hash tables: column keys and accumulated values.
    int *tmpIdx2D0_g = (int *)malloc(nthreads * hashsize_full_reg * sizeof(int));
    double *tmpVal2D0_g = (double *)malloc(nthreads * hashsize_full_reg * sizeof(double));

    #pragma omp parallel for
    for (int blki = 0; blki < mA; blki++)
    {
        int thread_id = omp_get_thread_num();
        int *tmpIdx2D0 = tmpIdx2D0_g + thread_id * hashsize_full_reg;
        double *tmpVal2D0 = tmpVal2D0_g + thread_id * hashsize_full_reg;

        // Reset this thread's table for the new row; -1 marks an empty slot.
        for (int l = 0; l < hashsize_full_reg; l++)
            tmpIdx2D0[l] = -1;
        memset(tmpVal2D0, 0, hashsize_full_reg * sizeof(double));

        for (int blkj = d_csrRowPtrA[blki]; blkj < d_csrRowPtrA[blki + 1]; blkj++)
        {
            int col = d_csrColIdxA[blkj];
            for (int l = d_csrRowPtrB[col]; l < d_csrRowPtrB[col + 1]; l++)
            {
                const int key = d_csrColIdxB[l];
                int hashadr = (key * 107) % hashsize_full_reg;

                // Linear probing; the table is an upper bound so it cannot overflow.
                while (1)
                {
                    const int keyexist = tmpIdx2D0[hashadr];
                    if (keyexist == key)
                    {
                        tmpVal2D0[hashadr] += d_csrValB[l] * d_csrValA[blkj];
                        break;
                    }
                    else if (keyexist == -1)
                    {
                        tmpIdx2D0[hashadr] = key;
                        tmpVal2D0[hashadr] = d_csrValB[l] * d_csrValA[blkj];
                        break;
                    }
                    else
                    {
                        hashadr = (hashadr + 1) % hashsize_full_reg;
                    }
                }
            }
        }

        // Compact the occupied slots into C's CSR arrays (unsorted for now).
        int cptr = d_csrRowPtrC[blki];
        for (int k = 0; k < hashsize_full_reg; k++)
        {
            if (tmpIdx2D0[k] != -1)
            {
                d_csrColIdxC[cptr] = tmpIdx2D0[k];
                d_csrValC[cptr] = tmpVal2D0[k];
                cptr++;
            }
        }
    }

    free(tmpIdx2D0_g);
    free(tmpVal2D0_g);

    // Sort every row of C by column index (values permuted in lockstep).
    #pragma omp parallel for
    for (int i = 0; i < mC; i++)
    {
        int nnzcnt = d_csrRowPtrC[i + 1] - d_csrRowPtrC[i];
        quick_sort_key_val_pair(d_csrColIdxC + d_csrRowPtrC[i], d_csrValC + d_csrRowPtrC[i], nnzcnt);
    }
}




SSS_MAT spgemm_hash( SSS_MAT *A, SSS_MAT *B,SSS_MAT *C)
{
    /* Hash-based SpGEMM on the host: C = A * B in CSR format.
     *
     * Runs a symbolic pass (spgemm_hash_s) that fills csrRowPtrC and
     * returns the total number of nonzeros of C, then allocates C's
     * column-index/value arrays and runs the numeric pass (spgemm_hash_n)
     * to fill them.  The result is stored into *C and also returned by
     * value; *C owns the three malloc'd CSR arrays afterwards. */
    int n_threads = 1;   /* thread count forwarded to both phases */

    int mA = A->num_rows;
    int nA = A->num_cols;
    int nnzA = A->num_nnzs;
    int *csrRowPtrA = A->row_ptr;
    int *csrColIdxA = A->col_idx;
    double *csrValA = A->val;

    int mB = B->num_rows;
    int nB = B->num_cols;
    int nnzB = B->num_nnzs;
    int *csrRowPtrB = B->row_ptr;
    int *csrColIdxB = B->col_idx;
    double *csrValB = B->val;

    int mC = mA;
    int nC = nB;
    int nnzc = 0;

    int *csrRowPtrC = (int *)malloc((mA + 1) * sizeof(int));
    memset(csrRowPtrC, 0, (mA + 1) * sizeof(int));

    /* NULL-initialize so the symbolic call never receives indeterminate
     * pointer values (reading uninitialized pointers is undefined
     * behavior; the original passed them uninitialized). */
    int *csrColIdxC = NULL;
    double *csrValC = NULL;

    /* symbolic phase: counts nnz per row of C, returns total nnz */
    nnzc = spgemm_hash_s(csrRowPtrA, csrColIdxA, csrValA, mA, nA, nnzA,
                         csrRowPtrB, csrColIdxB, csrValB, mB, nB, nnzB,
                         csrRowPtrC, csrColIdxC, csrValC, mC, nC, nnzc, n_threads);

    csrColIdxC = (int *)malloc(nnzc * sizeof(int));
    csrValC    = (double *)malloc(nnzc * sizeof(double));

    /* numeric phase: fills csrColIdxC / csrValC */
    spgemm_hash_n(csrRowPtrA, csrColIdxA, csrValA, mA, nA, nnzA,
                  csrRowPtrB, csrColIdxB, csrValB, mB, nB, nnzB,
                  csrRowPtrC, csrColIdxC, csrValC, mC, nC, nnzc, n_threads);

    C->num_rows = mC;
    C->num_cols = nC;
    C->num_nnzs = nnzc;
    C->row_ptr = csrRowPtrC;
    C->col_idx = csrColIdxC;
    C->val = csrValC;

    return *C;
}


SSS_MAT spgemm_spa( SSS_MAT *A, SSS_MAT *B,SSS_MAT *C)
{
    /* Dense-accumulator (SPA) SpGEMM on the host: C = A * B in CSR format.
     *
     * Pass 1 (symbolic) marks, for each row of C, every column touched by
     * the product in a dense 0/1 flag array and counts them; an exclusive
     * scan turns the per-row counts into row pointers.  Pass 2 (numeric)
     * accumulates the products into a dense row buffer and compacts the
     * marked columns (in ascending order) into C's index/value arrays.
     * The result is stored into *C and also returned by value. */
    int mA = A->num_rows;
    int *csrRowPtrA = A->row_ptr;
    int *csrColIdxA = A->col_idx;
    double *csrValA = A->val;

    int nB = B->num_cols;
    int *csrRowPtrB = B->row_ptr;
    int *csrColIdxB = B->col_idx;
    double *csrValB = B->val;

    int mC = mA;
    int nC = nB;
    int nnzc = 0;

    int *csrRowPtrC = (int *)malloc((mA + 1) * sizeof(int));
    memset(csrRowPtrC, 0, (mA + 1) * sizeof(int));
    int *csrColIdxC;
    double *csrValC;

    /* ---- symbolic phase ----
     * BUGFIX: the flag array is indexed by column indices of B, which lie
     * in [0, nB), and is scanned with cid < nB below; the original sized
     * it by mB (rows of B), overflowing the heap whenever nB > mB.
     * The buffer is also hoisted out of the row loop (the original did a
     * malloc/free per row); re-cleared per row instead. */
    int *d_dense_row_column_flag = (int *)malloc(nB * sizeof(int));

    for (int iid = 0; iid < mA; iid++)
    {
        memset(d_dense_row_column_flag, 0, nB * sizeof(int));

        for (int i = csrRowPtrA[iid]; i < csrRowPtrA[iid + 1]; i++)
        {
            int col = csrColIdxA[i];
            for (int l = csrRowPtrB[col]; l < csrRowPtrB[col + 1]; l++)
            {
                int key = csrColIdxB[l];
                d_dense_row_column_flag[key] = 1;
            }
        }

        /* count distinct columns touched by this row of C */
        int nnzr = 0;
        for (int cid = 0; cid < nB; cid++)
        {
            if (d_dense_row_column_flag[cid] == 1)
            {
                nnzr++;
            }
        }

        csrRowPtrC[iid] = nnzr;
    }
    free(d_dense_row_column_flag);

    /* counts -> row pointers; csrRowPtrC[mC] becomes the total nnz */
    exclusive_scan(csrRowPtrC, mC + 1);
    nnzc = csrRowPtrC[mA];

    csrColIdxC = (int *)malloc(nnzc * sizeof(int));
    csrValC = (double *)malloc(nnzc * sizeof(double));

    /* ---- numeric phase ----
     * Same traversal, but products are accumulated; the flag and value
     * buffers are likewise allocated once and re-cleared per row. */
    char *d_dense_row_column_flag1 = (char *)malloc(nB * sizeof(char));
    double *d_dense_row_value = (double *)malloc(nB * sizeof(double));

    for (int iid = 0; iid < mA; iid++)
    {
        memset(d_dense_row_column_flag1, 0, nB * sizeof(char));
        memset(d_dense_row_value, 0, nB * sizeof(double));

        for (int i = csrRowPtrA[iid]; i < csrRowPtrA[iid + 1]; i++)
        {
            int col = csrColIdxA[i];
            for (int l = csrRowPtrB[col]; l < csrRowPtrB[col + 1]; l++)
            {
                const int key = csrColIdxB[l];
                d_dense_row_column_flag1[key] = 1;
                d_dense_row_value[key] += csrValB[l] * csrValA[i];
            }
        }

        /* compact the marked columns into C, ascending column order */
        int nnzr = csrRowPtrC[iid];
        for (int cid = 0; cid < nB; cid++)
        {
            if (d_dense_row_column_flag1[cid] == 1)
            {
                csrValC[nnzr] = d_dense_row_value[cid];
                csrColIdxC[nnzr] = cid;
                nnzr++;
            }
        }
    }
    free(d_dense_row_column_flag1);
    free(d_dense_row_value);

    C->num_rows = mC;
    C->num_cols = nC;
    C->num_nnzs = nnzc;
    C->row_ptr = csrRowPtrC;
    C->col_idx = csrColIdxC;
    C->val = csrValC;

    return *C;
}










SSS_MAT SSS_blas_spgemm_device( SSS_MAT *A,  SSS_MAT *B,SSS_MAT *C)
{
    /* Device SpGEMM via the cuSPARSE generic API: C = A * B
     * (CSR, 32-bit indices, double values).
     *
     * A and B must already hold valid device CSR arrays (d_row_ptr,
     * d_col_idx, d_val).  C's device arrays are allocated here and its
     * dimensions/nnz are filled in; descriptors are created into the
     * SSS_MAT::descrMAT fields and intentionally kept alive for later
     * reuse (only the local handle and work buffers are released). */
    SpGEMM_count++;   /* global SpGEMM invocation counter */

    cudaError_t EE;
    double alpha = 1.0;
    double beta  = 0.0;
    cusparseOperation_t transA = CUSPARSE_OPERATION_NON_TRANSPOSE;
    cusparseOperation_t transB = CUSPARSE_OPERATION_NON_TRANSPOSE;
    cudaDataType computeType = CUDA_R_64F;

    EE = cudaMalloc((void **)&C->d_row_ptr, (A->num_rows + 1) * sizeof(int));
    if (EE != cudaSuccess)
    {
        printf("SSS_blas_spgemm_device: cudaMalloc C->d_row_ptr failed: %s\n",
               cudaGetErrorString(EE));
    }

    /* cuSPARSE handle and external work buffers */
    cusparseHandle_t cusparsehandle = NULL;
    void  *dBuffer1 = NULL, *dBuffer2 = NULL;
    size_t bufferSize1 = 0, bufferSize2 = 0;
    cusparseCreate(&cusparsehandle);

    /* descriptors over the existing device CSR arrays */
    cusparseCreateCsr(&A->descrMAT, A->num_rows, A->num_cols, A->num_nnzs,
        A->d_row_ptr, A->d_col_idx, A->d_val,
        CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
        CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F) ;

    cusparseCreateCsr(&B->descrMAT, B->num_rows, B->num_cols, B->num_nnzs,
        B->d_row_ptr, B->d_col_idx, B->d_val,
        CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
        CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F) ;

    /* C starts empty; its arrays are attached after the size is known */
    cusparseCreateCsr(&C->descrMAT, A->num_rows, B->num_cols, 0,
        NULL, NULL, NULL,
        CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
        CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F) ;

    /* SpGEMM computation */
    cusparseSpGEMMDescr_t spgemmDesc;
    cusparseSpGEMM_createDescr(&spgemmDesc) ;

    /* query bufferSize1 for the work-estimation step */
    cusparseSpGEMM_workEstimation(cusparsehandle, transA, transB,
        &alpha, A->descrMAT, B->descrMAT, &beta, C->descrMAT,
        computeType, CUSPARSE_SPGEMM_DEFAULT,
        spgemmDesc, &bufferSize1, NULL) ;

    cudaMalloc((void**) &dBuffer1, bufferSize1) ;

    /* inspect A and B to determine the memory needed by the next step */
    cusparseSpGEMM_workEstimation(cusparsehandle, transA, transB,
        &alpha, A->descrMAT, B->descrMAT, &beta, C->descrMAT,
        computeType, CUSPARSE_SPGEMM_DEFAULT,
        spgemmDesc, &bufferSize1, dBuffer1);

    /* query bufferSize2 for the compute step */
    cusparseSpGEMM_compute(cusparsehandle, transA, transB,
        &alpha, A->descrMAT, B->descrMAT, &beta, C->descrMAT,
        computeType, CUSPARSE_SPGEMM_DEFAULT,
        spgemmDesc, &bufferSize2, NULL);

    cudaMalloc((void**) &dBuffer2, bufferSize2);

    /* compute the intermediate product of A * B */
    cusparseSpGEMM_compute(cusparsehandle, transA, transB,
        &alpha, A->descrMAT, B->descrMAT, &beta, C->descrMAT,
        computeType, CUSPARSE_SPGEMM_DEFAULT,
        spgemmDesc, &bufferSize2, dBuffer2);

    /* retrieve the size of C */
    int64_t C_num_rows1, C_num_cols1, C_nnz1;
    cusparseSpMatGetSize(C->descrMAT, &C_num_rows1, &C_num_cols1, &C_nnz1);

    /* allocate C's index/value arrays */
    EE = cudaMalloc((void **)&C->d_col_idx, C_nnz1 * sizeof(int));
    if (EE != cudaSuccess)
    {
        printf("SSS_blas_spgemm_device: cudaMalloc C->d_col_idx failed: %s\n",
               cudaGetErrorString(EE));
    }
    EE = cudaMalloc((void **)&C->d_val, C_nnz1 * sizeof(double));
    if (EE != cudaSuccess)
    {
        printf("SSS_blas_spgemm_device: cudaMalloc C->d_val failed: %s\n",
               cudaGetErrorString(EE));
    }

    /* C_nnz1 is int64_t: %d is undefined behavior, print as long long */
    printf("C_nnz1 = %lld\n", (long long)C_nnz1);

    /* update matC with the new pointers */
    C->num_rows = C_num_rows1;
    C->num_cols = C_num_cols1;
    C->num_nnzs = C_nnz1;
    cusparseCsrSetPointers(C->descrMAT, C->d_row_ptr, C->d_col_idx, C->d_val) ;

    /* copy the final product into C's arrays */
    cusparseSpGEMM_copy(cusparsehandle, transA, transB,
        &alpha, A->descrMAT, B->descrMAT, &beta, C->descrMAT,
        computeType, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc);

    /* release local resources (the original leaked both buffers);
     * the matrix descriptors stay alive in the SSS_MAT structs */
    cusparseSpGEMM_destroyDescr(spgemmDesc);
    cudaFree(dBuffer1);
    cudaFree(dBuffer2);
    cusparseDestroy(cusparsehandle);

    return *C;
}

// SSS_MAT SSS_RAP_device(const SSS_MAT *R, const SSS_MAT *A, const SSS_MAT *P)
// {
//     SSS_MAT tmp;
//     tmp = SSS_blas_spgemm_device(R,A);

//     SSS_MAT RAP;
//     RAP = SSS_blas_spgemm_device(&tmp, P);

//     return RAP;
// }


void SSS_mat_test_printf(SSS_MAT *A,int n)
{
    /* Debug helper: print the matrix dimensions/nnz, then the first n
     * entries of the host-side CSR arrays (row_ptr, col_idx, val). */
    printf("test MAT: row = %d , col =%d ,n = %d \n",A->num_rows,A->num_cols,A->num_nnzs);

    int idx = 0;
    while (idx < n)
    {
        printf("test MAT(%d) :row_ptr[%d] = %d,col_idx[%d] = %d , val[%d] = %lf \n",
               n, idx, A->row_ptr[idx], idx, A->col_idx[idx], idx, A->val[idx]);
        idx++;
    }
}

void SSS_vec_test_printf(SSS_VEC *A,int n)
{
    /* Debug helper: print the vector length, then its first n host-side
     * entries. */
    printf("test VEC(%d): n = %d \n",n,A->n);

    int idx = 0;
    while (idx < n)
    {
        printf("val[%d] = %lf \n", idx, A->d[idx]);
        idx++;
    }
}

void SSS_mat_d2h(SSS_MAT *P)
{
    /* Copy a matrix's CSR arrays from device to host.
     *
     * Assumes the host buffers P->row_ptr / P->col_idx / P->val are already
     * allocated with num_rows+1, num_nnzs and num_nnzs entries respectively,
     * and that the d_* counterparts hold valid device allocations.
     * Errors are reported but not fatal (matches the file's style). */
    cudaError_t EE;

    EE = cudaMemcpy(P->row_ptr, P->d_row_ptr, (P->num_rows + 1) * sizeof(int), cudaMemcpyDeviceToHost);
    if (EE != cudaSuccess)
    {
        /* fixed "faild" typo and added the CUDA error reason */
        printf("SSS_mat_d2h failed (row_ptr): %s\n", cudaGetErrorString(EE));
    }

    EE = cudaMemcpy(P->col_idx, P->d_col_idx, (P->num_nnzs) * sizeof(int), cudaMemcpyDeviceToHost);
    if (EE != cudaSuccess)
    {
        printf("SSS_mat_d2h failed (col_idx): %s\n", cudaGetErrorString(EE));
    }

    EE = cudaMemcpy(P->val, P->d_val, (P->num_nnzs) * sizeof(double), cudaMemcpyDeviceToHost);
    if (EE != cudaSuccess)
    {
        printf("SSS_mat_d2h failed (val): %s\n", cudaGetErrorString(EE));
    }
}

void SSS_vec_d2h(SSS_VEC *A)
{
    /* Copy a vector's values from device to host.
     *
     * Assumes the host buffer A->d is allocated with A->n entries and
     * A->d_d holds a valid device allocation of the same length. */
    cudaError_t EE;

    EE = cudaMemcpy(A->d, A->d_d, (A->n) * sizeof(double), cudaMemcpyDeviceToHost);
    if (EE != cudaSuccess)
    {
        /* fixed the copy-pasted "faild3" message and added the reason */
        printf("SSS_vec_d2h failed: %s\n", cudaGetErrorString(EE));
    }
}



SSS_MAT SSS_blas_mat_rap_device(const SSS_MAT *R, const SSS_MAT *A, const SSS_MAT *P)
{
    /* Galerkin triple product on the device via cuSPARSE generic SpGEMM:
     *   RAP = R * A * P   (CSR, 32-bit indices, double values)
     * computed in two steps: tmp = R * A, then RAP = tmp * P.
     *
     * R, A and P must hold valid device CSR arrays.  The returned RAP owns
     * freshly cudaMalloc'd d_row_ptr / d_col_idx / d_val; all intermediate
     * device memory, descriptors and the handle are released here.
     *
     * Fixes over the original:
     *  - the final cusparseSpGEMM_copy for RAP was missing, so RAP's
     *    index/value arrays were never filled;
     *  - RAP/tmp nnz counts were printed with %d although they are int64_t;
     *  - tmp's nnz was read from an uninitialized variable;
     *  - a separate SpGEMM descriptor is used for the second product
     *    instead of reusing the first one;
     *  - handle, descriptors, work buffers and tmp arrays are now freed. */
    cudaError_t EE;
    cusparseStatus_t cusparse_status;

    /* device CSR arrays of the intermediate product tmp = R * A */
    int *tmp_d_row_ptr = NULL;
    int *tmp_d_col_idx = NULL;
    double *tmp_d_val = NULL;

    EE = cudaMalloc((void **)&tmp_d_row_ptr, (R->num_rows + 1) * sizeof(int));
    if (EE != cudaSuccess)
    {
        printf("SSS_blas_mat_rap_device: cudaMalloc tmp row_ptr failed: %s\n",
               cudaGetErrorString(EE));
    }

    SSS_MAT RAP;

    cusparseHandle_t cusparsehandle = NULL;
    cusparseSpMatDescr_t descrR, descrA, descrtmp, descrP, descrRAP;
    void  *dBuffer1 = NULL, *dBuffer2 = NULL;
    size_t bufferSize1 = 0, bufferSize2 = 0;
    void  *dBuffer3 = NULL, *dBuffer4 = NULL;
    size_t bufferSize3 = 0, bufferSize4 = 0;

    double alpha = 1.0;
    double beta  = 0.0;
    cudaDataType computeType = CUDA_R_64F;

    /* initialize cusparse library */
    cusparseCreate(&cusparsehandle);

    /* descriptors over the input device CSR arrays */
    cusparseCreateCsr(&descrR, R->num_rows, R->num_cols, R->num_nnzs,
        R->d_row_ptr, R->d_col_idx, R->d_val,
        CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
        CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F) ;

    cusparseCreateCsr(&descrA, A->num_rows, A->num_cols, A->num_nnzs,
        A->d_row_ptr, A->d_col_idx, A->d_val,
        CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
        CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F) ;

    /* tmp and RAP start empty; arrays are attached once sizes are known */
    cusparseCreateCsr(&descrtmp, R->num_rows, A->num_cols, 0,
        NULL, NULL, NULL,
        CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
        CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F) ;

    cusparseCreateCsr(&descrP, P->num_rows, P->num_cols, P->num_nnzs,
        P->d_row_ptr, P->d_col_idx, P->d_val,
        CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
        CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F) ;

    cusparseCreateCsr(&descrRAP, R->num_rows, P->num_cols, 0,
        NULL, NULL, NULL,
        CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
        CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F) ;

    cusparseOperation_t transR = CUSPARSE_OPERATION_NON_TRANSPOSE;
    cusparseOperation_t transA = CUSPARSE_OPERATION_NON_TRANSPOSE;
    cusparseOperation_t transtemp = CUSPARSE_OPERATION_NON_TRANSPOSE;
    cusparseOperation_t transP = CUSPARSE_OPERATION_NON_TRANSPOSE;

    /* ---------------- step 1: tmp = R * A ---------------- */
    cusparseSpGEMMDescr_t spgemmDesc;
    cusparseSpGEMM_createDescr(&spgemmDesc) ;

    /* query bufferSize1 for work estimation */
    cusparseSpGEMM_workEstimation(cusparsehandle, transR, transA,
        &alpha, descrR, descrA, &beta, descrtmp,
        computeType, CUSPARSE_SPGEMM_DEFAULT,
        spgemmDesc, &bufferSize1, NULL) ;

    cudaMalloc((void**) &dBuffer1, bufferSize1) ;

    /* inspect R and A to size the compute step */
    cusparseSpGEMM_workEstimation(cusparsehandle, transR, transA,
        &alpha, descrR, descrA, &beta, descrtmp,
        computeType, CUSPARSE_SPGEMM_DEFAULT,
        spgemmDesc, &bufferSize1, dBuffer1) ;

    /* query bufferSize2 for the compute step */
    cusparseSpGEMM_compute(cusparsehandle, transR, transA,
        &alpha, descrR, descrA, &beta, descrtmp,
        computeType, CUSPARSE_SPGEMM_DEFAULT,
        spgemmDesc, &bufferSize2, NULL);

    cudaMalloc((void**) &dBuffer2, bufferSize2);

    /* compute the intermediate product R * A */
    cusparseSpGEMM_compute(cusparsehandle, transR, transA,
        &alpha, descrR, descrA, &beta, descrtmp,
        computeType, CUSPARSE_SPGEMM_DEFAULT,
        spgemmDesc, &bufferSize2, dBuffer2);

    /* retrieve the size of tmp */
    int64_t tmp_num_rows1, tmp_num_cols1, tmp_nnz1;
    cusparseSpMatGetSize(descrtmp, &tmp_num_rows1, &tmp_num_cols1, &tmp_nnz1);

    /* tmp_nnz1 is int64_t: print as long long, not %d */
    printf("tmp_nnz1 = %lld\n", (long long)tmp_nnz1);

    EE = cudaMalloc((void **)&tmp_d_col_idx, tmp_nnz1 * sizeof(int));
    if (EE != cudaSuccess)
    {
        printf("SSS_blas_mat_rap_device: cudaMalloc tmp col_idx failed: %s\n",
               cudaGetErrorString(EE));
    }
    EE = cudaMalloc((void **)&tmp_d_val, tmp_nnz1 * sizeof(double));
    if (EE != cudaSuccess)
    {
        printf("SSS_blas_mat_rap_device: cudaMalloc tmp val failed: %s\n",
               cudaGetErrorString(EE));
    }

    cusparseCsrSetPointers(descrtmp, tmp_d_row_ptr, tmp_d_col_idx, tmp_d_val) ;

    /* materialize tmp into its CSR arrays */
    cusparseSpGEMM_copy(cusparsehandle, transR, transA,
                        &alpha, descrR, descrA, &beta, descrtmp,
                        computeType, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc) ;

    /* step 1 resources no longer needed */
    cusparseSpGEMM_destroyDescr(spgemmDesc);
    cudaFree(dBuffer1);
    cudaFree(dBuffer2);

    /* ---------------- step 2: RAP = tmp * P ---------------- */
    EE = cudaMalloc((void **)&RAP.d_row_ptr, (R->num_rows + 1) * sizeof(int));
    if (EE != cudaSuccess)
    {
        printf("SSS_blas_mat_rap_device: cudaMalloc RAP row_ptr failed: %s\n",
               cudaGetErrorString(EE));
    }

    /* a fresh SpGEMM descriptor: the first one already holds the state of
     * the R*A product and must not be reused for a second computation */
    cusparseSpGEMMDescr_t spgemmDesc2;
    cusparse_status = cusparseSpGEMM_createDescr(&spgemmDesc2) ;
    if (cusparse_status != CUSPARSE_STATUS_SUCCESS)
    {
        printf("cusparseSpGEMM_createDescr error !!!\n");
    }

    cusparse_status = cusparseSpGEMM_workEstimation(cusparsehandle, transtemp, transP,
        &alpha, descrtmp, descrP, &beta, descrRAP,
        computeType, CUSPARSE_SPGEMM_DEFAULT,
        spgemmDesc2, &bufferSize3, NULL) ;
    if (cusparse_status != CUSPARSE_STATUS_SUCCESS)
    {
        printf("cusparseSpGEMM_workEstimation1 error !!!\n");
    }

    cudaMalloc((void**) &dBuffer3, bufferSize3) ;

    cusparse_status = cusparseSpGEMM_workEstimation(cusparsehandle, transtemp, transP,
        &alpha, descrtmp, descrP, &beta, descrRAP,
        computeType, CUSPARSE_SPGEMM_DEFAULT,
        spgemmDesc2, &bufferSize3, dBuffer3) ;
    if (cusparse_status != CUSPARSE_STATUS_SUCCESS)
    {
        printf("cusparseSpGEMM_workEstimation2 error !!!\n");
    }

    cusparse_status = cusparseSpGEMM_compute(cusparsehandle, transtemp, transP,
        &alpha, descrtmp, descrP, &beta, descrRAP,
        computeType, CUSPARSE_SPGEMM_DEFAULT,
        spgemmDesc2, &bufferSize4, NULL);
    if (cusparse_status != CUSPARSE_STATUS_SUCCESS)
    {
        printf("cusparseSpGEMM_compute1 error !!!\n");
    }

    cudaMalloc((void**) &dBuffer4, bufferSize4);

    cusparse_status = cusparseSpGEMM_compute(cusparsehandle, transtemp, transP,
        &alpha, descrtmp, descrP, &beta, descrRAP,
        computeType, CUSPARSE_SPGEMM_DEFAULT,
        spgemmDesc2, &bufferSize4, dBuffer4);
    if (cusparse_status != CUSPARSE_STATUS_SUCCESS)
    {
        printf("cusparseSpGEMM_compute2 error !!!\n");
    }

    /* retrieve the size of RAP */
    int64_t RAP_num_rows1, RAP_num_cols1, RAP_nnz1;
    cusparse_status = cusparseSpMatGetSize(descrRAP, &RAP_num_rows1, &RAP_num_cols1,
            &RAP_nnz1);
    if (cusparse_status != CUSPARSE_STATUS_SUCCESS)
    {
        printf("cusparseSpMatGetSize error !!!\n");
    }

    printf("RAP_nnz1 = %lld \n", (long long)RAP_nnz1);

    EE = cudaMalloc((void **)&RAP.d_col_idx, RAP_nnz1 * sizeof(int));
    if (EE != cudaSuccess)
    {
        printf("SSS_blas_mat_rap_device: cudaMalloc RAP col_idx failed: %s\n",
               cudaGetErrorString(EE));
    }
    EE = cudaMalloc((void **)&RAP.d_val, RAP_nnz1 * sizeof(double));
    if (EE != cudaSuccess)
    {
        printf("SSS_blas_mat_rap_device: cudaMalloc RAP val failed: %s\n",
               cudaGetErrorString(EE));
    }

    cusparse_status = cusparseCsrSetPointers(descrRAP, RAP.d_row_ptr, RAP.d_col_idx, RAP.d_val) ;
    if (cusparse_status != CUSPARSE_STATUS_SUCCESS)
    {
        printf("cusparseCsrSetPointers error !!!\n");
    }

    /* BUGFIX: the final copy was missing in the original, leaving
     * RAP.d_col_idx / RAP.d_val unwritten */
    cusparse_status = cusparseSpGEMM_copy(cusparsehandle, transtemp, transP,
        &alpha, descrtmp, descrP, &beta, descrRAP,
        computeType, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc2);
    if (cusparse_status != CUSPARSE_STATUS_SUCCESS)
    {
        printf("cusparseSpGEMM_copy error !!!\n");
    }

    RAP.num_rows = R->num_rows;
    RAP.num_cols = P->num_cols;
    RAP.num_nnzs = RAP_nnz1;
    /* host-side arrays are not populated by this routine */
    RAP.row_ptr = NULL;
    RAP.col_idx = NULL;
    RAP.val = NULL;

    /* release everything except RAP's own device arrays */
    cusparseSpGEMM_destroyDescr(spgemmDesc2);
    cudaFree(dBuffer3);
    cudaFree(dBuffer4);
    cusparseDestroySpMat(descrR);
    cusparseDestroySpMat(descrA);
    cusparseDestroySpMat(descrtmp);
    cusparseDestroySpMat(descrP);
    cusparseDestroySpMat(descrRAP);
    cusparseDestroy(cusparsehandle);
    cudaFree(tmp_d_row_ptr);
    cudaFree(tmp_d_col_idx);
    cudaFree(tmp_d_val);

    return RAP;
}