#include "../include/SSS_cycle.h"
#include <cstdlib>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cusparse.h>
#include <cusolverDn.h>
#include <driver_types.h>

#include <iostream>


#include <helper_cuda.h>

#include <nsparse.cuh>
#include <nsparse_asm.h>


using namespace std;

#ifndef VALUE_TYPE
#define VALUE_TYPE double
#endif


#define min(a,b) (a<b ? a:b)
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)

#define gridsize 65535
#define blocksize 256

//#include "../../CSR5_cuda/anonymouslib_cuda.h"
//#include </home/txt/Downloads/Benchmark_SpMV_using_CSR5-master/CSR5_cuda/anonymouslib_cuda.h>

extern double residual_time ; 
extern double restriction_time ; 
extern double prolongation_time ;
extern double direct_LU_time ; 
extern double cuda_malloc_memcpy_time ;
extern double csr2dense_time ;


/*
 * SSS_axpy: y[i] += alpha * x[i] for i in [0, n).
 *
 * x, y : device pointers of length >= n.
 * Launch with any 1-D grid covering n threads; extra threads are masked off.
 */
__global__ void SSS_axpy(int n, double alpha, double *x, double *y)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    // Bounds guard: the grid rarely divides n evenly, and the original
    // unguarded write ran off the end of x/y for the tail threads.
    if (tid < n)
        y[tid] += alpha * x[tid];
}

/*
 * SSS_alpha_y: y[i] = alpha * x[i] for i in [0, n).
 *
 * x, y : device pointers of length >= n (x == y is safe: each thread
 *        reads and writes only its own element).
 * Launch with any 1-D grid covering n threads; extra threads are masked off.
 */
__global__ void SSS_alpha_y(int n, double alpha, real *x, real *y)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    // Bounds guard: callers (e.g. alpha_spmv_amb) pass buffers sized exactly n,
    // so the original unguarded write corrupted memory past the end of y.
    if (tid < n)
        y[tid] = alpha * x[tid];
}




/*
 * csr2dense_kernel: scatter a CSR matrix (m x n, nnz entries) into a
 * column-major dense array A_dense (element (r,c) at index r + c*m).
 *
 * d_rowPtr : CSR row-pointer array (length m+1).
 * d_colInd : CSR column indices (length nnz).
 * d_val    : CSR values (length nnz).
 * d_help   : scratch of length >= nnz; receives the row index of each entry
 *            (kept for compatibility with callers that allocate/inspect it).
 *
 * A_dense must be zero-initialized by the caller; this kernel writes only
 * the nonzero positions.
 */
__global__ void csr2dense_kernel(int* d_rowPtr, int* d_colInd, double* d_val,
                                 double* A_dense, double* d_help,
                                 int m, int n, int nnz)
{
    // Grid-stride loop: the call site launches a fixed <<<512,256>>> grid,
    // so without striding every entry with index >= 131072 was silently
    // skipped. Striding makes correctness independent of launch config.
    int stride = gridDim.x * blockDim.x;
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < nnz; tid += stride)
    {
        // Linear search for the row owning entry `tid`: the first l with
        // d_rowPtr[l] > tid, minus one.
        int l = 0;
        while (tid >= d_rowPtr[l])
            l++;
        int row = l - 1;

        // Preserve the original side effect of recording the row index,
        // but keep the value in a register instead of re-reading it from
        // global memory (and losing precision through the double cast).
        d_help[tid] = row;
        A_dense[row + d_colInd[tid] * m] = d_val[tid];
    }
}

/*
 * csr2dense_device: convert a device CSR matrix (m x n) to a column-major
 * dense array d_dense with leading dimension ld, via the cuSPARSE legacy API.
 *
 * NOTE(review): cusparseDcsr2dense is the legacy (pre-CUDA 11) interface and
 * was removed in CUDA 11+; the generic cusparseSparseToDense API is the
 * replacement. Currently this function is only referenced from a
 * commented-out call site.
 */
void  csr2dense_device(int* d_csr_offsets, int* d_csr_columns, double* d_csr_values,
                       double* d_dense, int m, int n, int ld)
{
    cusparseHandle_t    handle = NULL;
    cusparseMatDescr_t  descrA = 0;
    cusparseStatus_t    status;

    status = cusparseCreate(&handle);
    if (status != CUSPARSE_STATUS_SUCCESS)
    {
        printf("csr2dense_device: cusparseCreate failed\n");
        return;
    }
    cusparseCreateMatDescr(&descrA);
    cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO);

    // Scatter the CSR triples into the dense array.
    status = cusparseDcsr2dense(handle, m, n, descrA,
                                d_csr_values, d_csr_offsets, d_csr_columns,
                                d_dense, ld);
    if (status != CUSPARSE_STATUS_SUCCESS)
    {
        printf("csr2dense_device: cusparseDcsr2dense failed\n");
    }

    // Release the descriptor before the handle that owns it.
    cusparseDestroyMatDescr(descrA);
    cusparseDestroy(handle);
}

    /*   
        SSS_amg_coarest_solve_device():
        
        step1 : csr2dense
        step2 : cusolver_LU
    */
/*
 * SSS_amg_coarest_solve_device: direct solve A x = b on the coarsest AMG
 * level, entirely on the device.
 *
 *   step 1: scatter the CSR matrix into a dense column-major buffer;
 *   step 2: LU-factorize with cusolverDnDgetrf and back-substitute with
 *           cusolverDnDgetrs (solution overwrites b->d_d, then is copied
 *           into x->d_d).
 *
 * ctol is currently unused (kept for interface compatibility).
 *
 * Fixes relative to the original:
 *  - A_dense is zeroed after allocation: cudaMalloc does not clear memory,
 *    so positions not covered by a CSR nonzero previously held garbage and
 *    corrupted the factorization.
 *  - The conversion grid is sized from nnz instead of a fixed 512 blocks,
 *    so all nonzeros are scattered even for large matrices.
 *  - A_dense/A_help are freed, and the never-used d_LU allocation is gone
 *    (all three leaked on every call before).
 */
void SSS_amg_coarest_solve_device(SSS_MAT *A, SSS_VEC *b, SSS_VEC *x, const double ctol)
{
    /*
        step_1 : csr2dense
    */
    int dense_size = A->num_rows * A->num_cols;
    double *A_dense = NULL, *A_help = NULL;

    double cuda_api1 = SSS_get_time();
    cudaMalloc((void**) &A_dense, dense_size * sizeof(double));
    cudaMalloc((void**) &A_help,  dense_size * sizeof(double));
    // Zero-fill: the scatter kernel writes only nonzero positions.
    cudaMemset(A_dense, 0, dense_size * sizeof(double));
    double cuda_api2 = SSS_get_time();
    cuda_malloc_memcpy_time += cuda_api2 - cuda_api1;

    double c2d1 = SSS_get_time();

    // A_dense now receives the coarsest-level matrix in dense form.
    // One thread per nonzero (kernel bounds-checks against nnz).
    int c2d_grid = (A->num_nnzs + blocksize - 1) / blocksize;
    csr2dense_kernel<<<c2d_grid, blocksize>>>(A->d_row_ptr, A->d_col_idx, A->d_val,
                                              A_dense, A_help,
                                              A->num_rows, A->num_cols, A->num_nnzs);
    double c2d2 = SSS_get_time();
    csr2dense_time += c2d2 - c2d1;

    /*
        step_2 : cusolver_lu
    */
    cusolverDnHandle_t cusolverH = NULL;
    cudaStream_t stream = NULL;

    /* create cusolver handle, bind a non-blocking stream */
    cusolverDnCreate(&cusolverH);
    cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
    cusolverDnSetStream(cusolverH, stream);

    // A: A_dense (dense, column-major)   B: b->d_d   X: x->d_d
    int m   = A->num_rows;
    int lda = A->num_rows;
    int ldb = A->num_cols;  // square system, so ldb == m here

    int    *d_info = NULL;  /* device error info from getrf/getrs */
    int     lwork  = 0;     /* size of getrf workspace */
    double *d_work = NULL;  /* device workspace for getrf */

    double cuda_api5 = SSS_get_time();
    cudaMalloc((void**)&d_info, sizeof(int));
    double cuda_api6 = SSS_get_time();
    cuda_malloc_memcpy_time += cuda_api6 - cuda_api5;

    /* query working space of getrf */
    cusolverDnDgetrf_bufferSize(cusolverH, m, m, A_dense, lda, &lwork);

    double cuda_api7 = SSS_get_time();
    cudaMalloc((void**)&d_work, sizeof(double) * lwork);
    double cuda_api8 = SSS_get_time();
    cuda_malloc_memcpy_time += cuda_api8 - cuda_api7;

    /* LU factorization.
       devIpiv == NULL means NO pivoting — numerically risky for general
       matrices; kept as in the original (the matching getrs also passes
       NULL). A_dense is overwritten by the L/U factors. */
    cusolverDnDgetrf(cusolverH, m, m, A_dense, lda, d_work, NULL, d_info);

    /* solve A * X = B; the solution overwrites b->d_d */
    cusolverDnDgetrs(cusolverH, CUBLAS_OP_N, m, 1 /* nrhs */,
                     A_dense, lda, NULL, b->d_d, ldb, d_info);

    cudaDeviceSynchronize();

    /* copy the solution from b->d_d back into x->d_d */
    double cuda_api9 = SSS_get_time();
    cudaMemcpy(x->d_d, b->d_d, sizeof(double) * m, cudaMemcpyDeviceToDevice);
    double cuda_api10 = SSS_get_time();
    cuda_malloc_memcpy_time += cuda_api10 - cuda_api9;

    /* release all device buffers and library resources */
    if (d_info) cudaFree(d_info);
    if (d_work) cudaFree(d_work);
    cudaFree(A_dense);
    cudaFree(A_help);
    if (cusolverH) cusolverDnDestroy(cusolverH);
    if (stream)    cudaStreamDestroy(stream);
}




/*
 * scr2sfCSR: deep-copy a device-resident SSS_MAT (CSR) into a freshly
 * allocated nsparse sfCSR. The caller owns the result and must release it
 * with release_csr().
 */
void scr2sfCSR(sfCSR *csr_mat, SSS_MAT *mat)
{
    const int rows = mat->num_rows;
    const int nnz  = mat->num_nnzs;

    csr_mat->M   = rows;
    csr_mat->N   = mat->num_cols;
    csr_mat->nnz = nnz;

    const size_t rpt_bytes = sizeof(int)  * (rows + 1);
    const size_t col_bytes = sizeof(int)  * nnz;
    const size_t val_bytes = sizeof(real) * nnz;

    checkCudaErrors(cudaMalloc((void **)&(csr_mat->d_rpt), rpt_bytes));
    checkCudaErrors(cudaMalloc((void **)&(csr_mat->d_col), col_bytes));
    checkCudaErrors(cudaMalloc((void **)&(csr_mat->d_val), val_bytes));

    // Both sides live on the device, so these are device-to-device copies.
    checkCudaErrors(cudaMemcpy(csr_mat->d_rpt, mat->d_row_ptr, rpt_bytes, cudaMemcpyDeviceToDevice));
    checkCudaErrors(cudaMemcpy(csr_mat->d_col, mat->d_col_idx, col_bytes, cudaMemcpyDeviceToDevice));
    checkCudaErrors(cudaMemcpy(csr_mat->d_val, mat->d_val,     val_bytes, cudaMemcpyDeviceToDevice));
}


/*
 * spmv_amb: compute y = A * x with the nsparse AMB SpMV.
 *
 * csr_mat : input matrix in CSR form (device arrays).
 * x, y    : device vectors of length csr_mat->N / csr_mat->M.
 * plan    : nsparse execution plan (tuned by sf_csr2amb as needed).
 *
 * x and y are staged through padded scratch buffers (N + MAX_BLOCK_SIZE and
 * M + WARP elements, matching what sf_csr2amb/sf_spmv_amb expect).
 */
void spmv_amb(sfCSR *csr_mat, real *x, real *y, sfPlan *plan)
{
    sfAMB amb_mat;
    real *x_pad = NULL;
    real *y_pad = NULL;

    const int nrows = csr_mat->M;
    const int ncols = csr_mat->N;

    checkCudaErrors(cudaMalloc((void **)&x_pad, sizeof(real) * (ncols + MAX_BLOCK_SIZE)));
    checkCudaErrors(cudaMalloc((void **)&y_pad, sizeof(real) * (nrows + WARP)));
    checkCudaErrors(cudaMemcpy(x_pad, x, sizeof(real) * ncols, cudaMemcpyDeviceToDevice));

    /* Convert CSR to the AMB format used by the kernel. */
    sf_csr2amb(&amb_mat, csr_mat, x_pad, plan);

    /* Execute the SpMV on the device. */
    sf_spmv_amb(y_pad, &amb_mat, x_pad, plan);
    cudaDeviceSynchronize();

    checkCudaErrors(cudaMemcpy(y, y_pad, sizeof(real) * nrows, cudaMemcpyDeviceToDevice));

    cudaDeviceSynchronize();

    /* Release scratch buffers and the AMB matrix. */
    cudaFree(x_pad);
    cudaFree(y_pad);
    release_amb(amb_mat);
    //release_csr(*csr_mat);
}

/*
 * alpha_spmv_amb: compute y = alpha * (A * x) with the nsparse AMB SpMV
 * followed by an in-place scaling kernel.
 *
 * alpha   : scalar applied to the SpMV result.
 * csr_mat : input matrix in CSR form (device arrays).
 * x, y    : device vectors of length csr_mat->N / csr_mat->M.
 * plan    : nsparse execution plan.
 */
void alpha_spmv_amb(double alpha, sfCSR *csr_mat, real *x, real *y, sfPlan *plan)
{
    sfAMB amb_mat;
    real *x_pad = NULL;
    real *y_pad = NULL;

    const int nrows = csr_mat->M;
    const int ncols = csr_mat->N;

    /* Padded scratch copies, sized as sf_csr2amb/sf_spmv_amb expect. */
    checkCudaErrors(cudaMalloc((void **)&x_pad, sizeof(real) * (ncols + MAX_BLOCK_SIZE)));
    checkCudaErrors(cudaMalloc((void **)&y_pad, sizeof(real) * (nrows + WARP)));
    checkCudaErrors(cudaMemcpy(x_pad, x, sizeof(real) * ncols, cudaMemcpyDeviceToDevice));

    /* Convert CSR to AMB, then run the SpMV on the device. */
    sf_csr2amb(&amb_mat, csr_mat, x_pad, plan);
    sf_spmv_amb(y_pad, &amb_mat, x_pad, plan);
    cudaDeviceSynchronize();

    checkCudaErrors(cudaMemcpy(y, y_pad, sizeof(real) * nrows, cudaMemcpyDeviceToDevice));

    /* Scale the result in place: y = alpha * y. */
    int nblocks = ceil((double)nrows / (double)blocksize);
    SSS_alpha_y<<<nblocks, blocksize>>>(nrows, alpha, y, y);

    cudaDeviceSynchronize();

    /* Release scratch buffers and the AMB matrix. */
    cudaFree(x_pad);
    cudaFree(y_pad);
    release_amb(amb_mat);
    //release_csr(*csr_mat);
}


/*
 * SSS_amg_cycle_cuda: run one AMG cycle (V-cycle when cycle_type == 1;
 * larger cycle_type re-enters coarser levels via the ForwardSweep goto)
 * entirely on the GPU.
 *
 * Per level, the forward sweep does: pre-smoothing, residual r = b - A*x,
 * restriction b_{l+1} = R * r, then zeroes the next level's x. The coarsest
 * level is solved directly (LU). The backward sweep does: prolongation
 * x_l += P * x_{l+1}, then post-smoothing.
 *
 * `method` selects the SpMV backend: 0 = cusparse generic SpMV,
 * 1 = csr5 (not wired up here), 2 = nsparse AMB.
 */
void SSS_amg_cycle_cuda(SSS_AMG *mg)
{
    cudaError_t EE;   

    //cusparse 
    cusparseHandle_t cusparseHandle = NULL;
    cusparseCreate(&cusparseHandle);
    
    // cusparseSpMatDescr_t matA;
    // cusparseSpMatDescr_t matR;
    // cusparseSpMatDescr_t matP;

    cusparseDnVecDescr_t vecX, vecY;
    void*                dBuffer    = NULL;
    size_t               bufferSize = 0;

    cusparseStatus_t cusparse_stat;
    // NOTE(review): cusparseCreate was already called above; this second call
    // overwrites cusparseHandle and leaks the first handle — remove one.
    cusparse_stat  = cusparseCreate(&cusparseHandle);
    
    //cublas
    // NOTE(review): cublas_handle is created but never destroyed before
    // returning (resource leak).
    cublasHandle_t cublas_handle;  // declare cuBLAS handle
    cublasStatus_t cublas_status;
    cublasCreate_v2(&cublas_handle); // create cuBLAS handle    

    double spmv_time = 0;
    int cycle_type = mg->pars.cycle_type;
    int nl = mg->num_levels;
    double tol = mg->pars.ctol;

    double alpha = 1.0;
    double n_alpha = -1.0;
    double beta = 0.0;
    double beta_1 = 1.0;
    // num_lvl[l] counts visits to level l within this cycle (drives W-cycles).
    int num_lvl[max_AMG_LVL] = {0}, l = 0;

    if (tol > mg->pars.tol)  tol = mg->pars.tol * 0.1;
    if (cycle_type <= 0) cycle_type = 1;// V-cycle


    //select spmv 
    int method = 0;   //0 = cusparse, 1 = csr5, 2 = nsparse


    sfPlan plan;
    init_plan(&plan);
    

    ForwardSweep:

    // Descend from the finest level (l = 0) to the second-coarsest (nl - 2).
    while (l < nl - 1) 
    {
        SSS_SMTR s;
        num_lvl[l]++;
        //nsparse init 
        
        // pre-smoothing
        s.smoother = mg->pars.smoother;
        s.A = &mg->cg[l].A;
        s.b = &mg->cg[l].b;
        s.x = &mg->cg[l].x;
        s.nsweeps = mg->pars.pre_iter;
        s.istart = 0;
        s.iend = mg->cg[l].A.num_rows - 1;
        s.istep = 1;

        //s.nsweeps = 3;

        SSS_amg_smoother_pre_cuda(&s);

        double cuda_api11 = SSS_get_time();
        
        // Seed the work vector with b so the SpMV below can accumulate
        // wp = b - A*x (beta_1 == 1 keeps the existing contents).
        EE = cudaMemcpy(mg->cg[l].wp.d_d,mg->cg[l].b.d_d, mg->cg[l].A.num_rows * sizeof(double), cudaMemcpyDeviceToDevice);
        cudaDeviceSynchronize();
        double cuda_api12 = SSS_get_time();
        cuda_malloc_memcpy_time += cuda_api12 - cuda_api11;

        //form residual r = b - A x
        double residual_1 = SSS_get_time();
        
        // compute A*x first with the cusparse generic SpMV
        // //Create sparse matrix A in CSR format
        if(method ==0)
        {
            // cusparseCreateCsr(&mg->cg[l].A.descrMAT, mg->cg[l].A.num_rows, mg->cg[l].A.num_cols, mg->cg[l].A.num_nnzs,
            //                 mg->cg[l].A.d_row_ptr, mg->cg[l].A.d_col_idx, mg->cg[l].A.d_val,
            //                 CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
            //                 CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F);
            
            // // Create dense vector X
            cusparseCreateDnVec(&vecX, mg->cg[l].A.num_cols, mg->cg[l].x.d_d, CUDA_R_64F);
            
            // // Create dense vector y
            cusparseCreateDnVec(&vecY, mg->cg[l].A.num_rows, mg->cg[l].wp.d_d, CUDA_R_64F);
            
            // // allocate an external buffer if needed
            cusparseSpMV_bufferSize(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                            &n_alpha, mg->cg[l].A.descrMAT, vecX, &beta_1, vecY, CUDA_R_64F,
                            CUSPARSE_MV_ALG_DEFAULT, &bufferSize);
            
            // NOTE(review): dBuffer is cudaMalloc'd on every level/visit and
            // never freed, and vecX/vecY are recreated without
            // cusparseDestroyDnVec — both leak device memory each iteration.
            cudaMalloc(&dBuffer, bufferSize);
            
            // // execute SpMV: wp = -A*x + wp  (i.e. wp = b - A*x)
            cusparse_stat = cusparseSpMV(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        &n_alpha, mg->cg[l].A.descrMAT, vecX, &beta_1, vecY, CUDA_R_64F,
                        CUSPARSE_MV_ALG_DEFAULT, dBuffer);
        }
        

 
        //nsparse spmv
        //init sfCSR
        else if (method ==2)
        {
            sfCSR mat_A;
            scr2sfCSR(&mat_A,&mg->cg[l].A);
            double *dtmp;
            
            // NOTE(review): dtmp is never cudaFree'd — leaks each visit.
            cudaMalloc((void **)&dtmp,(mg->cg[l].A.num_rows) * sizeof(double));
            // //execute SpMV
            spmv_amb(&mat_A, mg->cg[l].x.d_d, dtmp, &plan); 
            // // r = -Ax + b
            cublas_status = cublasDaxpy_v2(cublas_handle, mg->cg[l].A.num_rows, &n_alpha, dtmp, 1, mg->cg[l].wp.d_d, 1); // vector axpy
            //spmv_cuda(mg->cg[l].R.num_rows, mg->cg[l].R.d_row_ptr, mg->cg[l].R.d_col_idx,
            // mg->cg[l].R.d_val,mg->cg[l].wp.d_d,mg->cg[l+1].b.d_d);
            cudaDeviceSynchronize();
            release_csr(mat_A);
        }
        SpMV_count++;

        //alpha_spmv_cuda(-1.0, mg->cg[l].A.num_rows, mg->cg[l].A.d_row_ptr, mg->cg[l].A.d_col_idx, mg->cg[l].A.d_val,mg->cg[l].x.d_d,mg->cg[l].wp.d_d);
        //cuda10.2 spmv
        // cusparse_stat = cusparseDcsrmv(cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE,
        //     mg->cg[l].A.num_rows,mg->cg[l].A.num_cols,mg->cg[l].A.num_nnzs,&n_alpha,descr,
        //     mg->cg[l].A.d_val,mg->cg[l].A.d_row_ptr,mg->cg[l].A.d_col_idx,mg->cg[l].x.d_d,&beta,mg->cg[l].wp.d_d);
        // NOTE(review): when method != 0, cusparse_stat here is stale (set at
        // the cusparseCreate above), so this check does not reflect the SpMV.
        if(cusparse_stat!=CUSPARSE_STATUS_SUCCESS)
        {
            printf("cusparseDcsrmv !!!\n");
        }

        // then b - A*x via an axpy (y = alpha * x + y) — kept disabled
        // cudaDeviceSynchronize();

        // int grid_size = ceil ((double)mg->cg[l].A.num_rows / (double)blocksize);
        // SSS_axpy<<<grid_size,blocksize>>>(mg->cg[l].A.num_rows,alpha,mg->cg[l].b.d_d,mg->cg[l].wp.d_d);
        // cudaDeviceSynchronize();

        // cublas_status = cublasDaxpy_v2(cublas_handle, mg->cg[l].A.num_rows, &alpha, mg->cg[l].b.d_d, 1, mg->cg[l].wp.d_d, 1); // vector axpy
        // cublas_status = cublasDaxpy_v2(cublas_handle, mg->cg[l].A.num_rows, &alpha, mg->cg[l].wp.d_d, 1, mg->cg[l].b.d_d, 1); // vector axpy
        // if(cublas_status !=CUBLAS_STATUS_SUCCESS)
        // {
        //     printf("cublasDaxpy_v2 !!!\n");
        // }



        // alpha_spmv_cuda(-1.0, mg->cg[l].A.num_rows, mg->cg[l].A.d_row_ptr, mg->cg[l].A.d_col_idx, mg->cg[l].A.d_val,mg->cg[l].x.d_d,mg->cg[l].wp.d_d);
        // cudaDeviceSynchronize();
        double residual_2 = SSS_get_time();
        residual_time += residual_2 - residual_1;
        
        // restriction r1 = R*r0
        double restriction_1 = SSS_get_time();
        // (1) cusparse SpMV
        // // Create sparse matrix R in CSR format
        if(method ==0)
        {
            cusparseCreateCsr(&mg->cg[l].R.descrMAT, mg->cg[l].R.num_rows, mg->cg[l].R.num_cols, mg->cg[l].R.num_nnzs,
                            mg->cg[l].R.d_row_ptr, mg->cg[l].R.d_col_idx, mg->cg[l].R.d_val,
                            CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
                            CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F);
            // // Create dense vector X
            cusparseCreateDnVec(&vecX, mg->cg[l].R.num_cols, mg->cg[l].wp.d_d, CUDA_R_64F);
            // // Create dense vector y
            cusparseCreateDnVec(&vecY, mg->cg[l].R.num_rows, mg->cg[l+1].b.d_d, CUDA_R_64F);
            // // allocate an external buffer if needed
            cusparseSpMV_bufferSize(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                            &alpha, mg->cg[l].R.descrMAT, vecX, &beta, vecY, CUDA_R_64F,
                            CUSPARSE_MV_ALG_DEFAULT, &bufferSize);
            // NOTE(review): another unfreed dBuffer allocation (see above).
            cudaMalloc(&dBuffer, bufferSize);
            // // execute SpMV: b_{l+1} = R * wp
            cusparse_stat = cusparseSpMV(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        &alpha, mg->cg[l].R.descrMAT, vecX, &beta, vecY, CUDA_R_64F,
                        CUSPARSE_MV_ALG_DEFAULT, dBuffer);
        }
        else if (method ==2 )
        {
            // //(2) nsparse SpMV
            //init sfCSR mat_R
            sfCSR mat_R;
            scr2sfCSR(&mat_R,&mg->cg[l].R);
            cudaDeviceSynchronize();
            // //execute SpMV
            spmv_amb(&mat_R, mg->cg[l].wp.d_d, mg->cg[l+1].b.d_d, &plan); 
            cudaDeviceSynchronize();
            // spmv_amb(&mat_R, mg->cg[l].b.d_d, mg->cg[l+1].b.d_d, &plan); 
            release_csr(mat_R);
        }
        // (3) hand-written SpMV kernel
        //spmv_cuda(mg->cg[l].R.num_rows, mg->cg[l].R.d_row_ptr, mg->cg[l].R.d_col_idx,mg->cg[l].R.d_val,mg->cg[l].wp.d_d,mg->cg[l+1].b.d_d);

        SpMV_count++;

        
        // cuda 10.2
        // cusparse_stat = cusparseDcsrmv(cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE,
        //     mg->cg[l].R.num_rows,mg->cg[l].R.num_cols,mg->cg[l].R.num_nnzs,&alpha,descr,
        //     mg->cg[l].R.d_val,mg->cg[l].R.d_row_ptr,mg->cg[l].R.d_col_idx,mg->cg[l].wp.d_d,&beta,mg->cg[l+1].b.d_d);

        if(cusparse_stat!=CUSPARSE_STATUS_SUCCESS)
        {
            printf("restriction !!!\n");
        }
        //printf("(restriction)nnzr = %d\n",mg->cg[l].R.num_nnzs / mg->cg[l].R.num_rows);
        //spmv_cuda(mg->cg[l].R.num_rows, mg->cg[l].R.d_row_ptr, mg->cg[l].R.d_col_idx, mg->cg[l].R.d_val,mg->cg[l].wp.d_d,mg->cg[l+1].b.d_d);
        cudaDeviceSynchronize();
        double restriction_2 = SSS_get_time();
        restriction_time += restriction_2 - restriction_1;

        // prepare for the next level
        l++;
        // zero the coarser level's initial guess (could probably use cudaMemset)
        SSS_device_array_zero(mg->cg[l].x.n,mg->cg[l].x.d_d);
    }

    // call the coarse space solver:

    double direct_LU_time1 = SSS_get_time();
    SSS_amg_coarest_solve_device(&mg->cg[nl - 1].A, &mg->cg[nl - 1].b, &mg->cg[nl - 1].x, tol);
    //jacobi_device1(&mg->cg[nl - 1].x, i_1, i_end, step, &mg->cg[nl - 1].A, &mg->cg[nl - 1].b, L);   

    double direct_LU_time2 = SSS_get_time();
    direct_LU_time += direct_LU_time2 - direct_LU_time1;
    
    // Backward sweep: ascend from the coarsest level back to the finest.
    while (l > 0) 
    {
        SSS_SMTR s;
        l--;
        // prolongation u = u + alpha*P*e1 
        double prolongation1 = SSS_get_time();

        // cusparse SpMV
        
        //Create sparse matrix A in CSR format
        //printf("post\n");
        if(method == 0 )
        {
            cusparseCreateCsr(&mg->cg[l].P.descrMAT, mg->cg[l].P.num_rows, mg->cg[l].P.num_cols, mg->cg[l].P.num_nnzs,
                            mg->cg[l].P.d_row_ptr, mg->cg[l].P.d_col_idx, mg->cg[l].P.d_val,
                            CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
                            CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F);
            
            // Create dense vector X
            cusparseCreateDnVec(&vecX, mg->cg[l+1].x.n, mg->cg[l+1].x.d_d, CUDA_R_64F);
            // Create dense vector y            
            cusparseCreateDnVec(&vecY, mg->cg[l].x.n, mg->cg[l].x.d_d, CUDA_R_64F);
            // allocate an external buffer if needed
            cusparseSpMV_bufferSize(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                            &alpha, mg->cg[l].P.descrMAT, vecX, &beta_1, vecY, CUDA_R_64F,
                            CUSPARSE_MV_ALG_DEFAULT, &bufferSize);
            // NOTE(review): another unfreed dBuffer allocation (see above).
            cudaMalloc(&dBuffer, bufferSize);
            // execute SpMV: x_l += P * x_{l+1}  (beta_1 == 1 accumulates)
            cusparse_stat = cusparseSpMV(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        &alpha, mg->cg[l].P.descrMAT, vecX, &beta_1, vecY, CUDA_R_64F,
                        CUSPARSE_MV_ALG_DEFAULT, dBuffer);
        }
        else if (method == 2)
        {
            //init sfCSR
            sfCSR mat_P;
            scr2sfCSR(&mat_P,&mg->cg[l].P);
            cudaDeviceSynchronize();
            double *dtmp11;
            // NOTE(review): dtmp11 is never cudaFree'd — leaks each visit.
            cudaMalloc((void **)&dtmp11,(mg->cg[l].P.num_rows) * sizeof(double));
            // //execute SpMV
            spmv_amb(&mat_P, mg->cg[l+1].x.d_d, dtmp11, &plan); 
            cudaDeviceSynchronize();
            cublas_status = cublasDaxpy_v2(cublas_handle, mg->cg[l].x.n, &alpha, dtmp11, 1, mg->cg[l].x.d_d, 1); // vector axpy
            cudaDeviceSynchronize();
            release_csr(mat_P);
            cudaDeviceSynchronize();
        }
       
        SpMV_count++;


        // spmv_cuda(mg->cg[l].P.num_rows, mg->cg[l].P.d_row_ptr, mg->cg[l].P.d_col_idx, mg->cg[l].P.d_val,mg->cg[l+1].x.d_d,mg->cg[l+1].b.d_d);


        // cusparse_stat = cusparseDcsrmv(cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE,
        //     mg->cg[l].P.num_rows,mg->cg[l].P.num_cols,mg->cg[l].P.num_nnzs,&alpha,descr,
        //     mg->cg[l].P.d_val,mg->cg[l].P.d_row_ptr,mg->cg[l].P.d_col_idx,  mg->cg[l+1].x.d_d ,&beta,mg->cg[l].wp.d_d);
        //printf("post end \n");
        if(cusparse_stat!=CUSPARSE_STATUS_SUCCESS)
        {
            printf("cusparseDcsrmv !!!\n");
        }


        //  cublas_status = cublasDaxpy_v2(cublas_handle, mg->cg[l].x.n, &alpha, mg->cg[l].wp.d_d, 1, mg->cg[l].x.d_d, 1); // vector axpy
        
        // if(cublas_status !=CUBLAS_STATUS_SUCCESS)
        // {
        //     printf("cublasDaxpy_v2 !!!\n");
        // }
        
        //alpha_spmv_cuda(alpha, mg->cg[l].P.num_rows, mg->cg[l].P.d_row_ptr, mg->cg[l].P.d_col_idx, mg->cg[l].P.d_val,mg->cg[l+1].x.d_d,mg->cg[l].x.d_d);
        

        cudaDeviceSynchronize();
        double prolongation2 = SSS_get_time();
        prolongation_time += prolongation2 - prolongation1;

        // post-smoothing
        s.smoother = mg->pars.smoother;
        s.A = &mg->cg[l].A;
        s.b = &mg->cg[l].b;
        s.x = &mg->cg[l].x;
        s.nsweeps = mg->pars.post_iter;
        s.istart = 0;
        s.iend = mg->cg[l].A.num_rows - 1;
        s.istep = -1;

        SSS_amg_smoother_post_cuda(&s);

        // For W-type cycles: stop ascending and re-descend from this level
        // until it has been visited cycle_type times.
        if (num_lvl[l] < cycle_type)
            break;
        else
            num_lvl[l] = 0;
    }

    // cusparseDestroySpMat(matA);   
    // cusparseDestroySpMat(matR);   
    // cusparseDestroySpMat(matP);   

    cusparseDestroyDnVec(vecX);
    cusparseDestroyDnVec(vecY);
    cusparseDestroy(cusparseHandle);



    // NOTE(review): for cycle_type > 1 this goto re-enters ForwardSweep AFTER
    // vecX/vecY and cusparseHandle were destroyed above, so subsequent
    // cusparseSpMV calls would use a destroyed handle — verify W-cycle path.
    if (l > 0) goto ForwardSweep;
}

