#include "gpu_solver_types.cuh"
#include "gpu_GMRES.cuh"

extern float * dc_f32;
extern double* dc_f64;

/*
__global__ static
void device_inherit_ipiv(const int* last_ip, const int last_nrows, int* this_ip, const int this_nrows) {
    uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x;
    int hf = this_nrows >> 1;
    int last_hf = last_nrows >> 1;
    if (tid < this_nrows) {
        if  (tid < last_hf) this_ip[tid] = last_ip[tid];
        else if (tid >= hf) this_ip[tid] = last_ip[tid - hf + last_hf];
    }
}*/


// Solve (L*U)*x = b in place, where M holds a packed LU factorization
// (unit lower triangle + upper triangle in one nrows*nrows column-major array).
// Only the fp32 ('S') cuBLAS variants are called, so data_t must be a 32-bit
// type; this is now enforced at compile time instead of the old runtime assert.
template<typename data_t>
static void block_trsv(cublasHandle_t handle, int nrows, const data_t* M, data_t* x) {
    static_assert(sizeof(data_t) == 4, "block_trsv only supports 32-bit (float) data");
    // L*U*x = b
    CUBLAS_CHECK(cublasStrsv(handle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT, nrows, M, nrows, x, 1));// U*x =        L^{-1}*b
    CUBLAS_CHECK(cublasStrsv(handle, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, nrows, M, nrows, x, 1));//   x = U^{-1}*L^{-1}*b
}

// Preconditioner application x = M^{-1} * b with a full (unpivoted) LU held in M.
// When calc_t is wider than data_t (mixed precision) the rhs is demoted into
// `buf`, solved there, and the result promoted back into x; otherwise it is
// solved directly in x.  data_t must be fp32 (only 'S' routines are used).
template<typename data_t, typename calc_t>
static void apply_LU(cublasHandle_t handle, int nrows, const data_t* M, const calc_t* b, calc_t* x, data_t* buf) {
    static_assert(sizeof(data_t) == 4, "apply_LU only supports 32-bit (float) LU data");
    data_t* sol = nullptr;
    if constexpr (sizeof(data_t) != sizeof(calc_t)) {
        dim3 grdSize((nrows - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec<<<grdSize, blkSize_vec>>>(nrows, b, buf);// demote b -> buf
        sol = buf;
    }
    else {
        // same precision: copy b into x and solve in place
        // (this call's status used to go unchecked, unlike every sibling)
        CUBLAS_CHECK(cublasScopy(handle, nrows, b, 1, x, 1));
        sol = x;
    }

    block_trsv(handle, nrows, M, sol);

    if constexpr (sizeof(data_t) != sizeof(calc_t)) {
        dim3 grdSize((nrows - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec<<<grdSize, blkSize_vec>>>(nrows, sol, x);// promote result -> x
    }
}

// Block-diagonal LU preconditioner: M_both packs two independent half-size LU
// factorizations back to back; each diagonal block is solved on its own, with
// no coupling terms.  Mixed-precision inputs are demoted into `buf` first and
// the solution promoted back into x afterwards.
template<typename data_t, typename calc_t>
static void apply_block_LU(cublasHandle_t handle, int nrows, const data_t* M_both, const calc_t* b, calc_t* x, data_t* buf) {
    const int half = nrows >> 1;
    data_t* work = nullptr;
    if constexpr (sizeof(data_t) == sizeof(calc_t)) {
        // same precision: copy b into x and solve in place
        CUBLAS_CHECK(cublasScopy(handle, nrows, b, 1, x, 1));
        work = x;
    }
    else {
        const dim3 grid((nrows - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec<<<grid, blkSize_vec>>>(nrows, b, buf);// demote rhs into the low-precision buffer
        work = buf;
    }
    block_trsv(handle, half, M_both, work);                       // top diagonal block
    block_trsv(handle, half, M_both + half * half, work + half);  // bottom diagonal block
    if constexpr (sizeof(data_t) != sizeof(calc_t)) {
        const dim3 grid((nrows - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec<<<grid, blkSize_vec>>>(nrows, work, x);// promote solution back
    }
}

// File-scope state shared with the *_pivot preconditioner callbacks below;
// gpu_iterat_solver sets these before the solve and clears them afterwards
// (not thread-safe: one solve at a time).
static cusolverDnHandle_t* cusolver_handle = nullptr;// handle used by cusolverDnSgetrs in the pivot variants
static int* ipiv = nullptr;// device pivot indices: [0,hf) for block 0, [hf,2*hf) for block 1
// Block-diagonal LU preconditioner with partial pivoting: x = M^{-1} * b.
// M_both packs two hf*hf getrf factorizations back to back; the file-static
// `cusolver_handle` and `ipiv` must have been set by the caller.  When calc_t
// is wider than data_t, the rhs is demoted into `buf`, solved there, and
// promoted back into x.
template<typename data_t, typename calc_t>
static void apply_block_LU_pivot(cublasHandle_t handle, int nrows, const data_t* M_both, const calc_t* b, calc_t* x, data_t* buf) {
    int hf = nrows >> 1;// half
    data_t* sol = nullptr;
    if constexpr (sizeof(data_t) != sizeof(calc_t)) {
        dim3 grdSize((nrows - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec << <grdSize, blkSize_vec >> > (nrows, b, buf);// demote rhs to data_t
        sol = buf;
    }
    else {
        CUBLAS_CHECK(cublasScopy(handle, nrows, b, 1, x, 1));
        sol = x;
    }
    // dim3 grdSize_scalar(1,1,1), blkSize_scalar(1,1,1); device_scalar_print<<<grdSize_scalar, blkSize_scalar>>>(sol, hf, 1);
    // solve each diagonal half-block independently, each with its own pivot vector
    CUSOLVER_CHECK(cusolverDnSgetrs(*cusolver_handle, CUBLAS_OP_N, hf, 1, M_both        , hf, ipiv     , sol     , hf, device_info()));// block_trsv(handle, hf, M_both, sol);
    // device_scalar_print<<<grdSize_scalar, blkSize_scalar>>>(sol, hf, 1);
    CUSOLVER_CHECK(cusolverDnSgetrs(*cusolver_handle, CUBLAS_OP_N, hf, 1, M_both + hf*hf, hf, ipiv + hf, sol + hf, hf, device_info()));// block_trsv(handle, hf, M_both + hf * hf, sol + hf);
    if constexpr (sizeof(data_t) != sizeof(calc_t)) {
        dim3 grdSize((nrows - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec << <grdSize, blkSize_vec >> > (nrows, sol, x);// promote solution back to calc_t
    }
}

static float* A10 = nullptr;
// Block LU preconditioner with one coupling correction (no pivoting):
// solve the top diagonal block, update the bottom rhs through the
// off-diagonal block A10, then solve the bottom diagonal block.
// Requires the file-static A10 pointer to be set by gpu_iterat_solver.
template<typename data_t, typename calc_t>
static void apply_block_LU_1C(cublasHandle_t handle, int nrows, const data_t* M_both, const calc_t* b, calc_t* x, data_t* buf) {
    assert(A10);
    const int half = nrows >> 1;
    data_t* work = nullptr;
    if constexpr (sizeof(data_t) == sizeof(calc_t)) {
        // same precision: copy b into x and solve in place
        CUBLAS_CHECK(cublasScopy(handle, nrows, b, 1, x, 1));
        work = x;
    }
    else {
        const dim3 grid((nrows - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec<<<grid, blkSize_vec>>>(nrows, b, buf);// demote rhs into the low-precision buffer
        work = buf;
    }

    block_trsv(handle, half, M_both, work);
    // bottom rhs correction: work[half:] = dc_f32[1]*work[half:] + dc_f32[2]*A10*work[:half]
    // (device-side scalars; presumably beta=1 / alpha=-1 -- confirm where dc_f32 is filled)
    CUBLAS_CHECK(cublasSgemv(handle, CUBLAS_OP_N, half, half, &dc_f32[2], A10, nrows, work, 1, &dc_f32[1], work + half, 1));
    block_trsv(handle, half, M_both + half * half, work + half);

    if constexpr (sizeof(data_t) != sizeof(calc_t)) {
        const dim3 grid((nrows - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec<<<grid, blkSize_vec>>>(nrows, work, x);// promote solution back
    }
}

// Block LU preconditioner with one coupling correction, partially pivoted:
// solve the top block via getrs, update the bottom rhs through A10, then
// solve the bottom block.  Requires the file-statics A10, cusolver_handle
// and ipiv to be set by gpu_iterat_solver.
template<typename data_t, typename calc_t>
static void apply_block_LU_1C_pivot(cublasHandle_t handle, int nrows, const data_t* M_both, const calc_t* b, calc_t* x, data_t* buf) {
    assert(A10);
    int hf = nrows >> 1;
    data_t* sol = nullptr;
    if constexpr (sizeof(data_t) != sizeof(calc_t)) {
        dim3 grdSize((nrows - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec << <grdSize, blkSize_vec >> > (nrows, b, buf);// demote rhs to data_t
        sol = buf;
    }
    else {
        CUBLAS_CHECK(cublasScopy(handle, nrows, b, 1, x, 1));
        sol = x;
    }
    CUSOLVER_CHECK(cusolverDnSgetrs(*cusolver_handle, CUBLAS_OP_N, hf, 1, M_both        , hf, ipiv     , sol     , hf, device_info()));// block_trsv(handle, hf, M_both, sol);
    // bottom rhs correction: sol[hf:] = dc_f32[1]*sol[hf:] + dc_f32[2]*A10*sol[:hf]
    // (device-side scalars; presumably 1 and -1 -- confirm where dc_f32 is filled)
    CUBLAS_CHECK(cublasSgemv(handle, CUBLAS_OP_N, hf, hf, &dc_f32[2], A10, nrows, sol, 1, &dc_f32[1], sol + hf, 1));
    CUSOLVER_CHECK(cusolverDnSgetrs(*cusolver_handle, CUBLAS_OP_N, hf, 1, M_both + hf*hf, hf, ipiv + hf, sol + hf, hf, device_info()));// block_trsv(handle, hf, M_both + hf * hf, sol + hf);
    if constexpr (sizeof(data_t) != sizeof(calc_t)) {
        dim3 grdSize((nrows - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec << <grdSize, blkSize_vec >> > (nrows, sol, x);// promote solution back to calc_t
    }
}

static float* A01 = nullptr;
// Block LU preconditioner with two coupling corrections (no pivoting).
// Solve order: top block; bottom block with its rhs corrected through A10;
// then the top half of the rhs is reset to b, corrected through A01 using
// the fresh bottom solution, and the top block is solved again.  Requires
// the file-static A10/A01 pointers to be set by gpu_iterat_solver.
template<typename data_t, typename calc_t>
static void apply_block_LU_2C(cublasHandle_t handle, int nrows, const data_t* M_both, const calc_t* b, calc_t* x, data_t* buf) {
    assert(A10);
    assert(A01);
    int hf = nrows >> 1;
    data_t* sol = nullptr;
    if constexpr (sizeof(data_t) != sizeof(calc_t)) {
        dim3 grdSize((nrows - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec << <grdSize, blkSize_vec >> > (nrows, b, buf);// demote rhs to data_t
        sol = buf;
    }
    else {
        CUBLAS_CHECK(cublasScopy(handle, nrows, b, 1, x, 1));
        sol = x;
    }

    block_trsv(handle, hf, M_both, sol);
    // bottom rhs correction: sol[hf:] = dc_f32[1]*sol[hf:] + dc_f32[2]*A10*sol[:hf]
    // (device-side scalars; presumably 1 and -1 -- confirm where dc_f32 is filled)
    CUBLAS_CHECK(cublasSgemv(handle, CUBLAS_OP_N, hf, hf, &dc_f32[2], A10, nrows, sol, 1, &dc_f32[1], sol + hf, 1));
    block_trsv(handle, hf, M_both + hf * hf, sol + hf);
    // reset the top half of sol back to b before the second correction
    if constexpr (sizeof(data_t) != sizeof(calc_t)) {
        dim3 grdSize((hf - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec << <grdSize, blkSize_vec >> > (hf, b, sol);
    }
    else {
        CUBLAS_CHECK(cublasScopy(handle, hf, b, 1, sol, 1));
    }
    // top rhs correction through A01 using the bottom solution, then re-solve the top block
    CUBLAS_CHECK(cublasSgemv(handle, CUBLAS_OP_N, hf, hf, &dc_f32[2], A01, nrows, sol + hf, 1, &dc_f32[1], sol, 1));
    block_trsv(handle, hf, M_both, sol);

    if constexpr (sizeof(data_t) != sizeof(calc_t)) {
        dim3 grdSize((nrows - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec << <grdSize, blkSize_vec >> > (nrows, sol, x);// promote solution back to calc_t
    }
}

// Block LU preconditioner with two coupling corrections, partially pivoted.
// Same scheme as apply_block_LU_2C but the triangular solves go through
// cusolverDnSgetrs with the pivot vectors in the file-static `ipiv`.
// Requires A10, A01, cusolver_handle and ipiv to be set by gpu_iterat_solver.
template<typename data_t, typename calc_t>
static void apply_block_LU_2C_pivot(cublasHandle_t handle, int nrows, const data_t* M_both, const calc_t* b, calc_t* x, data_t* buf) {
    assert(A10);
    assert(A01);
    int hf = nrows >> 1;
    data_t* sol = nullptr;
    if constexpr (sizeof(data_t) != sizeof(calc_t)) {
        dim3 grdSize((nrows - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec << <grdSize, blkSize_vec >> > (nrows, b, buf);// demote rhs to data_t
        sol = buf;
    }
    else {
        CUBLAS_CHECK(cublasScopy(handle, nrows, b, 1, x, 1));
        sol = x;
    }
    CUSOLVER_CHECK(cusolverDnSgetrs(*cusolver_handle, CUBLAS_OP_N, hf, 1, M_both        , hf, ipiv     , sol     , hf, device_info()));// block_trsv(handle, hf, M_both, sol);
    // bottom rhs correction: sol[hf:] = dc_f32[1]*sol[hf:] + dc_f32[2]*A10*sol[:hf]
    CUBLAS_CHECK(cublasSgemv(handle, CUBLAS_OP_N, hf, hf, &dc_f32[2], A10, nrows, sol, 1, &dc_f32[1], sol + hf, 1));
    CUSOLVER_CHECK(cusolverDnSgetrs(*cusolver_handle, CUBLAS_OP_N, hf, 1, M_both + hf*hf, hf, ipiv + hf, sol + hf, hf, device_info()));// block_trsv(handle, hf, M_both + hf * hf, sol + hf);
    // reset the top half of sol back to b before the second correction
    if constexpr (sizeof(data_t) != sizeof(calc_t)) {
        dim3 grdSize((hf - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec << <grdSize, blkSize_vec >> > (hf, b, sol);
    }
    else {
        CUBLAS_CHECK(cublasScopy(handle, hf, b, 1, sol, 1));
    }
    // top rhs correction through A01 using the bottom solution, then re-solve the top block
    CUBLAS_CHECK(cublasSgemv(handle, CUBLAS_OP_N, hf, hf, &dc_f32[2], A01, nrows, sol + hf, 1, &dc_f32[1], sol, 1));
    CUSOLVER_CHECK(cusolverDnSgetrs(*cusolver_handle, CUBLAS_OP_N, hf, 1, M_both        , hf, ipiv     , sol     , hf, device_info()));// block_trsv(handle, hf, M_both, sol);
    if constexpr (sizeof(data_t) != sizeof(calc_t)) {
        dim3 grdSize((nrows - 1 + blkSize_vec.x) / blkSize_vec.x, 1, 1);
        device_copy_vec << <grdSize, blkSize_vec >> > (nrows, sol, x);// promote solution back to calc_t
    }
}

// Three-stage mixed-precision solve driving the GMRES template:
//   1) fp32 GMRES from a zero initial guess,
//   2) fp64 GMRES warm-started from the promoted fp32 solution,
//   3) iterative refinement: solve A*dx = b - A*x in fp64 and add dx back.
// Iteration counts are returned through iter_32 / iter_64 / iter_rf.
// Fix: the cuBLAS calls below previously went unchecked, unlike every other
// cuBLAS call in this file; they are now wrapped in CUBLAS_CHECK.
static void inline invoke_iter(int nrows, const dim3 grdSize_vec, cublasHandle_t handle,
    const double* d_A_64, const double* d_rhs_64, double* d_sol_64, void (*pc_ptr_64)(cublasHandle_t, int, const float*, const double*, double*, float*),
    const float* d_A_32, const float* d_rhs_32, float* d_sol_32, void (*pc_ptr_32)(cublasHandle_t, int, const float*, const float*, float*, float*),
    float* pc_mem, double* delta_b, double* tmp_sol, int max_iter, int restart_len,
    double atol_32, double atol_64, double atol_rf,
    double rtol_32, double rtol_64, double rtol_rf,
    int& iter_32, int& iter_64, int& iter_rf)
{
    device_zero_vec <<<grdSize_vec, blkSize_vec >>> (nrows, d_sol_32);
    iter_32 = GMRES<float, float>(handle, nrows, d_A_32, d_rhs_32, d_sol_32, atol_32, rtol_32, max_iter, restart_len, true, pc_ptr_32, pc_mem);
    device_copy_vec <<<grdSize_vec, blkSize_vec >>> (nrows, d_sol_32, d_sol_64);// promote fp32 result as fp64 initial guess
    iter_64 = GMRES<float, double>(handle, nrows, d_A_64, d_rhs_64, d_sol_64, atol_64, rtol_64, max_iter, restart_len, false, pc_ptr_64, pc_mem);
    // residual for refinement: delta_b = dc_f64[1]*b + dc_f64[2]*A*x
    // (device-side scalars; presumably 1 and -1 -- confirm where dc_f64 is filled)
    CUBLAS_CHECK(cublasDcopy(handle, nrows, d_rhs_64, 1, delta_b, 1));
    CUBLAS_CHECK(cublasDcopy(handle, nrows, d_sol_64, 1, tmp_sol, 1));
    CUBLAS_CHECK(cublasDgemv(handle, CUBLAS_OP_N, nrows, nrows, &dc_f64[2], d_A_64, nrows, d_sol_64, 1, &dc_f64[1], delta_b, 1));
    device_zero_vec <<<grdSize_vec, blkSize_vec >>> (nrows, d_sol_64);
    iter_rf = GMRES<float, double>(handle, nrows, d_A_64, delta_b, d_sol_64, atol_rf, rtol_rf, max_iter, restart_len, true, pc_ptr_64, pc_mem);
    CUBLAS_CHECK(cublasDaxpy(handle, nrows, &dc_f64[1], tmp_sol, 1, d_sol_64, 1));// x = tmp_sol + dx
}

#define MERGE_ITER_ALLOC
extern cudaStream_t* streams;
// Mixed-precision iterative solve: fp32 GMRES -> fp64 GMRES (warm start) ->
// fp64 iterative refinement (see invoke_iter).  pc_type selects the
// preconditioner setup:
//   0 / 91              full LU of the fp32 matrix (91 may extend last_mem)
//   1,2,3 / 11,21,31    block-diagonal LU with 0/1/2 coupling corrections
//                       (11/21/31 may extend last_mem)
//   111, 211, 311       block-diagonal LU with partial pivoting
//   any other value     no preconditioner
// On return d_rhs_64 is overwritten with the solution; iteration counts and
// setup/solve timings are written into `record`.  Returns the preconditioner
// memory (d_M) when it is kept alive for reuse as `last_mem` by a later call,
// or nullptr when it was freed inside this call.
// Fix: the pc_type==91 setup path used to cudaMalloc over the workspace
// pointer returned by check_device_workspace_to_enlarge, leaking that
// allocation on every call; the redundant cudaMalloc is removed.
float*  gpu_iterat_solver(const int nrows, const double* d_A_64, double* d_rhs_64, int pc_type, TEST_TIME & record,
			const int last_nrows, const float * last_mem, const bool reorder)
{
    cudaEvent_t start, stop;
    float tms;// elapsed time, in milliseconds
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));
    CUDA_CHECK(cudaEventRecord(start));

    int iter_32 = 0, iter_64 = 0, iter_rf = 0;
    double* d_sol_64 = nullptr;
    float* d_A_32 = nullptr, * d_rhs_32 = nullptr, * d_sol_32 = nullptr;
    double* d_delta_b = nullptr, * d_tmp_sol = nullptr;
    float* d_M = nullptr;

    // auto alloc_beg = std::chrono::high_resolution_clock::now();
#ifndef MERGE_ITER_ALLOC
    CUDA_CHECK(cudaMalloc((void**)&d_A_32, sizeof(*d_A_32) * nrows * nrows));
    CUDA_CHECK(cudaMalloc((void**)&d_rhs_32, sizeof(*d_rhs_32) * nrows));
    CUDA_CHECK(cudaMalloc((void**)&d_sol_32, sizeof(*d_sol_32) * nrows));
    CUDA_CHECK(cudaMalloc((void**)&d_sol_64, sizeof(*d_sol_64) * nrows));
    CUDA_CHECK(cudaMalloc((void**)&d_delta_b, sizeof(*d_delta_b) * nrows));
    CUDA_CHECK(cudaMalloc((void**)&d_tmp_sol, sizeof(*d_tmp_sol) * nrows));
#else
    // one merged allocation, carved into sub-buffers below; each vector gets
    // 2*nrows slots (presumably headroom for the GMRES internals -- confirm)
    void* workspace = nullptr;
    uint64_t buf_len= sizeof(*d_A_32) * nrows * nrows
                    + sizeof(*d_rhs_32) * (nrows << 1)
                    + sizeof(*d_sol_32) * (nrows << 1)
                    + sizeof(*d_sol_64) * (nrows << 1)
                    + sizeof(*d_delta_b) * (nrows << 1)
                    + sizeof(*d_tmp_sol) * (nrows << 1);
    CUDA_CHECK(cudaMalloc((void**)&workspace, buf_len));
    d_A_32 = (float*) workspace;
    d_rhs_32 = (float*)( ((uint64_t)d_A_32   ) + sizeof(*d_A_32) * nrows * nrows  );
    d_sol_32 = (float*)( ((uint64_t)d_rhs_32 ) + sizeof(*d_rhs_32) * (nrows << 1) );
    d_sol_64 =(double*)( ((uint64_t)d_sol_32 ) + sizeof(*d_sol_32) * (nrows << 1) );
    d_delta_b=(double*)( ((uint64_t)d_sol_64 ) + sizeof(*d_sol_64) * (nrows << 1) );
    d_tmp_sol=(double*)( ((uint64_t)d_delta_b) +sizeof(*d_delta_b) * (nrows << 1) );
#endif
    // auto alloc_end = std::chrono::high_resolution_clock::now();
    // printf("alloc %.6f s\n", (std::chrono::duration_cast<std::chrono::nanoseconds>(alloc_end - alloc_beg)).count() * 1e-9);

    // demote the fp64 system (matrix and rhs) to fp32
    dim3 grdSize_mat(1, 1, 1), grdSize_vec(1, 1, 1);
    grdSize_mat.x = (nrows - 1 + blkSize_mat.x) / blkSize_mat.x;
    grdSize_mat.y = (nrows - 1 + blkSize_mat.y) / blkSize_mat.y;
    device_copy_mat<<<grdSize_mat, blkSize_mat>>>(d_A_64, nrows, d_A_32, nrows, nrows, nrows);
    grdSize_vec.x = (nrows - 1 + blkSize_vec.x) / blkSize_vec.x;
    grdSize_vec.y = 1;
    device_copy_vec<<<grdSize_vec, blkSize_vec>>>(nrows, d_rhs_64, d_rhs_32);

    const int max_iter = 8;
    // double atol_32 = 1e2, atol_64 = 1e-5, atol_rf = 5e-8;
    // double rtol_32 = 0.0, rtol_64 = 0.0, rtol_rf = 0.0;
    double atol_32 = 0.0, atol_64 = 0.0, atol_rf = 0.0;
    double rtol_32 = 1e-4, rtol_64 = 1e-10, rtol_rf = 1e-2;
    int restart_len = 3;

    cublasHandle_t blas_handle;
    CUBLAS_CHECK(cublasCreate(&blas_handle));
    CUBLAS_CHECK(cublasSetPointerMode(blas_handle, CUBLAS_POINTER_MODE_DEVICE));
    cusolverDnHandle_t sol_handle;
    CUSOLVER_CHECK(cusolverDnCreate(&sol_handle));

    // pick the preconditioner-apply callbacks for the fp32 and fp64 GMRES stages
    void (*pc_ptr_32)(cublasHandle_t, int, const float*, const float*, float*, float*) = nullptr;
    void (*pc_ptr_64)(cublasHandle_t, int, const float*, const double*, double*, float*) = nullptr;
    switch (pc_type)
    {
    case 0:
    case 91:    pc_ptr_32 = apply_LU         <float, float>; pc_ptr_64 = apply_LU         <float, double>; break;
    case 1:
    case 11:    pc_ptr_32 = apply_block_LU   <float, float>; pc_ptr_64 = apply_block_LU   <float, double>; break;
    case 2:
    case 21:    pc_ptr_32 = apply_block_LU_1C<float, float>; pc_ptr_64 = apply_block_LU_1C<float, double>; break;
    case 3:
    case 31:    pc_ptr_32 = apply_block_LU_2C<float, float>; pc_ptr_64 = apply_block_LU_2C<float, double>; break;
    case 111:   pc_ptr_32 = apply_block_LU_pivot<float, float>; pc_ptr_64 = apply_block_LU_pivot<float, double>; break;
    case 211:   pc_ptr_32 = apply_block_LU_1C_pivot<float, float>; pc_ptr_64 = apply_block_LU_1C_pivot<float, double>; break;
    case 311:   pc_ptr_32 = apply_block_LU_2C_pivot<float, float>; pc_ptr_64 = apply_block_LU_2C_pivot<float, double>; break;
    }

    if (pc_type == 0) {// single-precision direct LU, factored fresh each call
        // setup
        int lwork = 0;// size of workspace, in number of elements (not in BYTE!!)
        CUDA_CHECK(cudaMalloc((void**)&d_M, sizeof(*d_M) * nrows * nrows));
        grdSize_mat.x = (nrows - 1 + blkSize_mat.x) / blkSize_mat.x;
        grdSize_mat.y = (nrows - 1 + blkSize_mat.y) / blkSize_mat.y;
        device_copy_mat<<<grdSize_mat, blkSize_mat>>>(d_A_32, nrows, d_M, nrows, nrows, nrows);
        // query working space of getrf
        CUSOLVER_CHECK(cusolverDnSgetrf_bufferSize(sol_handle, nrows, nrows, d_M, nrows, &lwork));
        float* d_work = (float*) check_device_workspace_to_enlarge(lwork * sizeof(*d_work));
        CUSOLVER_CHECK(cusolverDnSgetrf(sol_handle, nrows, nrows, d_M, nrows, d_work, nullptr, device_info()));
        CUDA_CHECK(cudaEventRecord(stop)); CUDA_CHECK(cudaEventSynchronize(stop));
        CUDA_CHECK(cudaEventElapsedTime(&tms, start, stop)); record.t_setup = tms / 1000;
        // solve
        CUDA_CHECK(cudaEventRecord(start));
        invoke_iter(nrows, grdSize_vec, blas_handle, d_A_64, d_rhs_64, d_sol_64, pc_ptr_64, d_A_32, d_rhs_32, d_sol_32, pc_ptr_32,
            d_M, d_delta_b, d_tmp_sol, max_iter, restart_len, atol_32, atol_64, atol_rf, rtol_32, rtol_64, rtol_rf, iter_32, iter_64, iter_rf);
        record.iter = abs(iter_32) + abs(iter_64) + abs(iter_rf);
        CUDA_CHECK(cudaFree(d_M)); d_M = nullptr;
    }
    else if (pc_type == 91) {// full LU, kept alive / extendable across calls
        CUDA_CHECK(cudaMalloc((void**)&d_M, sizeof(*d_M)* nrows * nrows));
        grdSize_mat.x = (nrows - 1 + blkSize_mat.x) / blkSize_mat.x;
        grdSize_mat.y = (nrows - 1 + blkSize_mat.y) / blkSize_mat.y;
        device_copy_mat <<<grdSize_mat, blkSize_mat >>> (d_A_32, nrows, d_M, nrows, nrows, nrows);

        if (last_mem == nullptr) {
            // setup: factor from scratch
            int lwork = 0;
            CUSOLVER_CHECK(cusolverDnSgetrf_bufferSize(sol_handle, nrows, nrows, d_M, nrows, &lwork));
            float* d_work = (float*) check_device_workspace_to_enlarge(lwork * sizeof(*d_work));
            // NOTE: a second cudaMalloc into d_work used to leak the workspace here; removed.
            CUSOLVER_CHECK(cusolverDnSgetrf(sol_handle, nrows, nrows, d_M, nrows, d_work, nullptr, device_info()));
            CUDA_CHECK(cudaEventRecord(stop)); CUDA_CHECK(cudaEventSynchronize(stop));
            CUDA_CHECK(cudaEventElapsedTime(&tms, start, stop)); record.t_setup = tms / 1000;
            // solve
            CUDA_CHECK(cudaEventRecord(start));
            invoke_iter(nrows, grdSize_vec, blas_handle, d_A_64, d_rhs_64, d_sol_64, pc_ptr_64, d_A_32, d_rhs_32, d_sol_32, pc_ptr_32,
                d_M, d_delta_b, d_tmp_sol, max_iter, restart_len, atol_32, atol_64, atol_rf, rtol_32, rtol_64, rtol_rf, iter_32, iter_64, iter_rf);
            record.iter = abs(iter_32) + abs(iter_64) + abs(iter_rf);
        }
        else {
            // setup: extend the previous factorization in last_mem
            if (reorder) gpu_extend_block(blas_handle, sol_handle, last_mem, last_nrows, d_M, nrows, device_info());
            else         gpu_extend_full (blas_handle, sol_handle, last_mem, last_nrows, d_M, nrows, nullptr, device_info());
            CUDA_CHECK(cudaEventRecord(stop)); CUDA_CHECK(cudaEventSynchronize(stop));
            CUDA_CHECK(cudaEventElapsedTime(&tms, start, stop)); record.t_setup = tms / 1000;
            // solve
            CUDA_CHECK(cudaEventRecord(start));
            invoke_iter(nrows, grdSize_vec, blas_handle, d_A_64, d_rhs_64, d_sol_64, pc_ptr_64, d_A_32, d_rhs_32, d_sol_32, pc_ptr_32,
                d_M, d_delta_b, d_tmp_sol, max_iter, restart_len, atol_32, atol_64, atol_rf, rtol_32, rtol_64, rtol_rf, iter_32, iter_64, iter_rf);
            record.iter = abs(iter_32) + abs(iter_64) + abs(iter_rf);
        }
    }
    else if (pc_type == 1 || pc_type == 2 || pc_type == 3) {// block LU, factored fresh
        // setup: factor the two diagonal half-blocks independently
        int hf = nrows >> 1;
        int lwork_0 = 0, lwork_1 = 0;// size of workspace, in number of elements (not in BYTE!!)
        CUDA_CHECK(cudaMalloc((void**)&d_M, sizeof(*d_M) * hf * hf * 2));
        float* d_M_0 = d_M;
        float* d_M_1 = d_M + hf * hf;
        grdSize_mat.x = (hf - 1 + blkSize_mat.x) / blkSize_mat.x;
        grdSize_mat.y = (hf - 1 + blkSize_mat.y) / blkSize_mat.y;
        device_copy_mat <<<grdSize_mat, blkSize_mat >>> (d_A_32                  , nrows, d_M_0, hf, hf, hf);
        device_copy_mat <<<grdSize_mat, blkSize_mat >>> (d_A_32 + hf * nrows + hf, nrows, d_M_1, hf, hf, hf);
        // query working space of getrf (shared buffer sized for the larger of the two)
        CUSOLVER_CHECK(cusolverDnSgetrf_bufferSize(sol_handle, hf, hf, d_M_0, hf, &lwork_0));
        CUSOLVER_CHECK(cusolverDnSgetrf_bufferSize(sol_handle, hf, hf, d_M_1, hf, &lwork_1));
        lwork_0 = std::max(lwork_0, lwork_1);
        float* d_work = (float*) check_device_workspace_to_enlarge(lwork_0 * sizeof(*d_work));
        // expose the off-diagonal blocks to the *_1C/_2C preconditioners
        A10 = d_A_32 + hf;
        A01 = d_A_32 + hf * nrows;
        CUSOLVER_CHECK(cusolverDnSgetrf(sol_handle, hf, hf, d_M_0, hf, d_work, nullptr, device_info()));
        CUSOLVER_CHECK(cusolverDnSgetrf(sol_handle, hf, hf, d_M_1, hf, d_work, nullptr, device_info()));
        CUDA_CHECK(cudaEventRecord(stop)); CUDA_CHECK(cudaEventSynchronize(stop));
        CUDA_CHECK(cudaEventElapsedTime(&tms, start, stop)); record.t_setup = tms / 1000;
        // solve
        CUDA_CHECK(cudaEventRecord(start));
        invoke_iter(nrows, grdSize_vec, blas_handle, d_A_64, d_rhs_64, d_sol_64, pc_ptr_64, d_A_32, d_rhs_32, d_sol_32, pc_ptr_32,
            d_M, d_delta_b, d_tmp_sol, max_iter, restart_len, atol_32, atol_64, atol_rf, rtol_32, rtol_64, rtol_rf, iter_32, iter_64, iter_rf);
        record.iter = abs(iter_32) + abs(iter_64) + abs(iter_rf);
        CUDA_CHECK(cudaFree(d_M)); d_M = nullptr;
    }
    else if (pc_type == 11 || pc_type == 21 || pc_type == 31) {// block LU, kept alive / extendable
        int hf = nrows >> 1;
        int lwork_0 = 0, lwork_1 = 0;// size of workspace, in number of elements (not in BYTE!!)
        CUDA_CHECK(cudaMalloc((void**)&d_M, sizeof(*d_M) * hf * hf * 2));
        float* d_M_0 = d_M;
        float* d_M_1 = d_M + hf * hf;
        grdSize_mat.x = (hf - 1 + blkSize_mat.x) / blkSize_mat.x;
        grdSize_mat.y = (hf - 1 + blkSize_mat.y) / blkSize_mat.y;
        device_copy_mat << <grdSize_mat, blkSize_mat >> > (d_A_32                  , nrows, d_M_0, hf, hf, hf);
        device_copy_mat << <grdSize_mat, blkSize_mat >> > (d_A_32 + hf * nrows + hf, nrows, d_M_1, hf, hf, hf);
        A10 = d_A_32 + hf;
        A01 = d_A_32 + hf * nrows;
        if (last_mem == nullptr) {
            // setup: factor both half-blocks from scratch
            CUSOLVER_CHECK(cusolverDnSgetrf_bufferSize(sol_handle, hf, hf, d_M_0, hf, &lwork_0));
            CUSOLVER_CHECK(cusolverDnSgetrf_bufferSize(sol_handle, hf, hf, d_M_1, hf, &lwork_1));
            lwork_0 = std::max(lwork_0, lwork_1);
            float* d_work = (float*) check_device_workspace_to_enlarge(lwork_0 * sizeof(*d_work));
            CUSOLVER_CHECK(cusolverDnSgetrf(sol_handle, hf, hf, d_M_0, hf, d_work, nullptr, device_info()));
            CUSOLVER_CHECK(cusolverDnSgetrf(sol_handle, hf, hf, d_M_1, hf, d_work, nullptr, device_info()));
            CUDA_CHECK(cudaEventRecord(stop)); CUDA_CHECK(cudaEventSynchronize(stop));
            CUDA_CHECK(cudaEventElapsedTime(&tms, start, stop)); record.t_setup = tms / 1000;
            // solve
            CUDA_CHECK(cudaEventRecord(start));
            invoke_iter(nrows, grdSize_vec, blas_handle, d_A_64, d_rhs_64, d_sol_64, pc_ptr_64, d_A_32, d_rhs_32, d_sol_32, pc_ptr_32,
                d_M, d_delta_b, d_tmp_sol, max_iter, restart_len, atol_32, atol_64, atol_rf, rtol_32, rtol_64, rtol_rf, iter_32, iter_64, iter_rf);
            record.iter = abs(iter_32) + abs(iter_64) + abs(iter_rf);
        }
        else {
            int last_hf = last_nrows >> 1;
            // setup: extend each half-block factorization from last_mem
            gpu_extend_block(blas_handle, sol_handle, last_mem                    , last_hf, d_M_0, hf, device_info());
            gpu_extend_block(blas_handle, sol_handle, last_mem + last_hf * last_hf, last_hf, d_M_1, hf, device_info());
            // extend_two(blas_handle, sol_handle, d_last_M_mem + last_hf * last_hf, last_hf, d_M_1, hf, d_work, d_info);
            CUDA_CHECK(cudaEventRecord(stop)); CUDA_CHECK(cudaEventSynchronize(stop));
            CUDA_CHECK(cudaEventElapsedTime(&tms, start, stop)); record.t_setup = tms / 1000;
            // solve
            CUDA_CHECK(cudaEventRecord(start));
            invoke_iter(nrows, grdSize_vec, blas_handle, d_A_64, d_rhs_64, d_sol_64, pc_ptr_64, d_A_32, d_rhs_32, d_sol_32, pc_ptr_32,
                d_M, d_delta_b, d_tmp_sol, max_iter, restart_len, atol_32, atol_64, atol_rf, rtol_32, rtol_64, rtol_rf, iter_32, iter_64, iter_rf);
            record.iter = abs(iter_32) + abs(iter_64) + abs(iter_rf);
        }
        A01 = nullptr;
        A10 = nullptr;
    }
    else if (pc_type == 111 || pc_type == 211 || pc_type == 311) {// block LU with partial pivoting
        int hf = nrows >> 1;
        int lwork_0 = 0, lwork_1 = 0;// size of workspace, in number of elements (not in BYTE!!)
        // d_M holds both half-block factorizations plus the pivot arrays at the tail
        CUDA_CHECK(cudaMalloc((void**)&d_M, sizeof(*d_M) * hf * hf * 2 + sizeof(int) * nrows));
        float* d_M_0 = d_M, * d_M_1 = d_M + hf * hf;
        int* d_ip_0 = (int*) (((uint64_t)d_M) + sizeof(*d_M) * hf * hf * 2);
        int* d_ip_1 = d_ip_0 + hf;
        grdSize_mat.x = (hf - 1 + blkSize_mat.x) / blkSize_mat.x;
        grdSize_mat.y = (hf - 1 + blkSize_mat.y) / blkSize_mat.y;
        device_copy_mat << <grdSize_mat, blkSize_mat >> > (d_A_32                  , nrows, d_M_0, hf, hf, hf);
        device_copy_mat << <grdSize_mat, blkSize_mat >> > (d_A_32 + hf * nrows + hf, nrows, d_M_1, hf, hf, hf);
        // publish the shared state consumed by the *_pivot preconditioners
        A10 = d_A_32 + hf;
        A01 = d_A_32 + hf * nrows;
        ipiv = d_ip_0;
        cusolver_handle = & sol_handle;
        if (last_mem == nullptr) {
            // setup: pivoted factorization of both half-blocks
            CUSOLVER_CHECK(cusolverDnSgetrf_bufferSize(sol_handle, hf, hf, d_M_0, hf, &lwork_0));
            CUSOLVER_CHECK(cusolverDnSgetrf_bufferSize(sol_handle, hf, hf, d_M_1, hf, &lwork_1));
            lwork_0 = std::max(lwork_0, lwork_1);
            float* d_work = (float*) check_device_workspace_to_enlarge(lwork_0 * sizeof(*d_work));
            CUSOLVER_CHECK(cusolverDnSgetrf(sol_handle, hf, hf, d_M_0, hf, d_work, d_ip_0, device_info()));
            CUSOLVER_CHECK(cusolverDnSgetrf(sol_handle, hf, hf, d_M_1, hf, d_work, d_ip_1, device_info()));
            CUDA_CHECK(cudaEventRecord(stop)); CUDA_CHECK(cudaEventSynchronize(stop));
            CUDA_CHECK(cudaEventElapsedTime(&tms, start, stop)); record.t_setup = tms / 1000;
            // solve
            CUDA_CHECK(cudaEventRecord(start));
            invoke_iter(nrows, grdSize_vec, blas_handle, d_A_64, d_rhs_64, d_sol_64, pc_ptr_64, d_A_32, d_rhs_32, d_sol_32, pc_ptr_32,
                d_M, d_delta_b, d_tmp_sol, max_iter, restart_len, atol_32, atol_64, atol_rf, rtol_32, rtol_64, rtol_rf, iter_32, iter_64, iter_rf);
            record.iter = abs(iter_32) + abs(iter_64) + abs(iter_rf);
        }
        else {
            int last_hf = last_nrows >> 1;
            // inherit the previous pivot arrays (stored at the tail of last_mem)
            const int* last_d_ip_0 = (const int*) (((uint64_t)last_mem) + sizeof(*last_mem) * last_hf * last_hf * 2);
            const int* last_d_ip_1 = last_d_ip_0 + last_hf;
            CUDA_CHECK(cudaMemcpy(d_ip_0, last_d_ip_0, sizeof(*last_d_ip_0) * last_hf, cudaMemcpyDeviceToDevice));
            CUDA_CHECK(cudaMemcpy(d_ip_1, last_d_ip_1, sizeof(*last_d_ip_1) * last_hf, cudaMemcpyDeviceToDevice));
            // setup: extend each half-block factorization together with its pivots
            gpu_extend_block(blas_handle, sol_handle, last_mem                    , last_hf, d_M_0, hf, device_info(), d_ip_0);
            gpu_extend_block(blas_handle, sol_handle, last_mem + last_hf * last_hf, last_hf, d_M_1, hf, device_info(), d_ip_1);
            CUDA_CHECK(cudaEventRecord(stop)); CUDA_CHECK(cudaEventSynchronize(stop));
            CUDA_CHECK(cudaEventElapsedTime(&tms, start, stop)); record.t_setup = tms / 1000;
            CUDA_CHECK(cudaEventRecord(start));
            invoke_iter(nrows, grdSize_vec, blas_handle, d_A_64, d_rhs_64, d_sol_64, pc_ptr_64, d_A_32, d_rhs_32, d_sol_32, pc_ptr_32,
                d_M, d_delta_b, d_tmp_sol, max_iter, restart_len, atol_32, atol_64, atol_rf, rtol_32, rtol_64, rtol_rf, iter_32, iter_64, iter_rf);
            record.iter = abs(iter_32) + abs(iter_64) + abs(iter_rf);
        }
        A01 = nullptr;
        A10 = nullptr;
        ipiv = nullptr;
        cusolver_handle = nullptr;
    }
    else {// no preconditioner
        CUDA_CHECK(cudaEventRecord(stop)); CUDA_CHECK(cudaEventSynchronize(stop));
        CUDA_CHECK(cudaEventElapsedTime(&tms, start, stop)); record.t_setup = tms / 1000;
        // solve
        CUDA_CHECK(cudaEventRecord(start));
        invoke_iter(nrows, grdSize_vec, blas_handle, d_A_64, d_rhs_64, d_sol_64, pc_ptr_64, d_A_32, d_rhs_32, d_sol_32, pc_ptr_32,
            nullptr, d_delta_b, d_tmp_sol, max_iter, restart_len, atol_32, atol_64, atol_rf, rtol_32, rtol_64, rtol_rf, iter_32, iter_64, iter_rf);
        record.iter = abs(iter_32) + abs(iter_64) + abs(iter_rf);
    }

    // copy the solution back into d_rhs_64
    device_copy_vec <<<grdSize_vec, blkSize_vec >>> (nrows, d_sol_64, d_rhs_64);
#ifndef MERGE_ITER_ALLOC
    // free the device-side buffers
    CUDA_CHECK(cudaFree(d_A_32));
    CUDA_CHECK(cudaFree(d_rhs_32));
    CUDA_CHECK(cudaFree(d_sol_32));
    CUDA_CHECK(cudaFree(d_sol_64));
    CUDA_CHECK(cudaFree(d_delta_b));
    CUDA_CHECK(cudaFree(d_tmp_sol));
#else
    CUDA_CHECK(cudaFree(workspace));
#endif
    CUBLAS_CHECK(cublasDestroy(blas_handle));
    CUSOLVER_CHECK(cusolverDnDestroy(sol_handle));
    // stop timing
    CUDA_CHECK(cudaEventRecord(stop));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&tms, start, stop));
    record.t_solve = tms / 1000;
    // destroy the GPU timing events
    CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));
    return d_M;
}
