#include "FracSolver.hpp"


// #define __SYNC
// #define TICK_TOCK

// Construct a fractional-step solver.
//   _sol_type : solver variant (0/22/33 run on the CPU, 1000/1022/1033 on the GPU).
//   _pc_type  : preconditioner type (only reported here; used by solve/setup).
//   _capa     : initial capacity in rows for the fluid-sized blocks; the
//               solid system is (2*capacity) x (2*capacity).
// Allocates all persistent matrix/vector storage for the chosen path and,
// for GPU solvers, creates the cuBLAS/cuSOLVER handles and pinned staging
// buffers used for asynchronous host-to-device updates.
FracSolver::FracSolver(int _sol_type, int _pc_type, size_t _capa): solver_type(_sol_type), prec_type(_pc_type), capacity(_capa)
{
    // Reject unknown solver types up front, before the value is used to
    // choose between the GPU and CPU initialization paths.
    assert( solver_type ==    0 || solver_type ==   22 || solver_type ==   33 ||
            solver_type == 1000 || solver_type == 1022 || solver_type == 1033);
    setProcessRealTime();
    if (solver_type >= 1000) {// GPU solver: report the current device and set up device-side constants
        init_GPU_query();
        init_GPU_coeff();
    }
    printf("Solver  type: %d\n", solver_type);
    printf("Precond type: %d\n", prec_type);
    prev_hfs.clear();
    prev_hfs.push_back(0);

    size_t _lenPerBlk = capacity * capacity;
    if (solver_type >= 1000) {
        // Device storage: five fluid-sized coupling blocks, the assembled
        // solid system, and the solution / work vectors.
        CUDA_CHECK(cudaMalloc((void**)&d_A13, sizeof(*d_A13) * _lenPerBlk));
        CUDA_CHECK(cudaMalloc((void**)&d_A23, sizeof(*d_A23) * _lenPerBlk));
        CUDA_CHECK(cudaMalloc((void**)&d_A31, sizeof(*d_A31) * _lenPerBlk));
        CUDA_CHECK(cudaMalloc((void**)&d_A32, sizeof(*d_A32) * _lenPerBlk));
        CUDA_CHECK(cudaMalloc((void**)&d_A33, sizeof(*d_A33) * _lenPerBlk));
        CUDA_CHECK(cudaMalloc((void**)&d_As , sizeof(*d_As ) * (capacity << 1) * (capacity << 1) ));
        CUDA_CHECK(cudaMalloc((void**)&d_x1_2,sizeof(*d_x1_2)* (capacity << 1) ));
        CUDA_CHECK(cudaMalloc((void**)&d_x3 , sizeof(*d_x3 ) * (capacity) ));
        CUDA_CHECK(cudaMalloc((void**)&d_p1 , sizeof(*d_p1 ) * (capacity) ));
        CUDA_CHECK(cudaMalloc((void**)&d_p2 , sizeof(*d_p2 ) * (capacity) ));
        CUDA_CHECK(cudaMalloc((void**)&d_p3 , sizeof(*d_p3 ) * (capacity) ));
        CUBLAS_CHECK(cublasCreate(&blas_handle));
        CUSOLVER_CHECK(cusolverDnCreate(&sol_handle));
        CUBLAS_CHECK(cublasSetPointerMode(blas_handle, CUBLAS_POINTER_MODE_DEVICE));
        // Pinned (page-locked) host staging buffers for async H2D copies.
        // Element count is sizeof(hbuf)/sizeof(hbuf[0]); dividing by
        // sizeof(double) — as the original did — only matches by accident on
        // platforms where pointers and doubles happen to have the same size.
        for (size_t i = 0; i < sizeof(hbuf) / sizeof(hbuf[0]); i++) {
            hbuf_len[i] = 256;
            assert(hbuf[i] == nullptr);
            CUDA_CHECK(cudaHostAlloc((void**)&hbuf[i], sizeof(double) * hbuf_len[i], cudaHostAllocWriteCombined));
        }
        if (solver_type == 1022) {// incremental inverse via extended LU: needs both inv buffers
            CUDA_CHECK(cudaMalloc((void**)&invAs, sizeof(*invAs) * (capacity << 1) * (capacity << 1)));
            CUDA_CHECK(cudaMalloc((void**)&invLU, sizeof(*invLU) * (capacity << 1) * (capacity << 1)));
        } else if (solver_type == 1033) {// direct incremental inverse: invAs only
            CUDA_CHECK(cudaMalloc((void**)&invAs, sizeof(*invAs) * (capacity << 1) * (capacity << 1)));
        }
        CUDA_CHECK(cudaMalloc((void**)&last_mem, sizeof(*last_mem) * (capacity << 1) * (capacity << 1)));
#ifdef __SYNC
        printf("GPU __SYNC\n");
#endif
    } else {
        // Host storage mirrors the device layout for the CPU solvers.
        h_A13 = new double [_lenPerBlk];
        h_A23 = new double [_lenPerBlk];
        h_A31 = new double [_lenPerBlk];
        h_A32 = new double [_lenPerBlk];
        h_A33 = new double [_lenPerBlk];
        if (solver_type == 22) {
            invAs    = new double [(capacity << 1) * (capacity << 1)];
            invLU    = new double [(capacity << 1) * (capacity << 1)];
            last_mem = new double [(capacity << 1) * (capacity << 1)];
        } else if (solver_type == 33) {
            invAs    = new double [(capacity << 1) * (capacity << 1)];
        }
    }
}

// Release every resource the constructor (and solve/setup) may have
// allocated.  All pointers are null-guarded, so partially-initialized
// instances (CPU-only path, solver types without invAs/invLU) are safe.
FracSolver::~FracSolver()
{
    // Host-side block matrices (CPU solver path).
    if (h_A13) { delete[] h_A13; h_A13 = nullptr; }
    if (h_A23) { delete[] h_A23; h_A23 = nullptr; }
    if (h_A31) { delete[] h_A31; h_A31 = nullptr; }
    if (h_A32) { delete[] h_A32; h_A32 = nullptr; }
    if (h_A33) { delete[] h_A33; h_A33 = nullptr; }
    // Device-side block matrices and work vectors (GPU solver path).
    if (d_A13) { CUDA_CHECK(cudaFree(d_A13)); d_A13 = nullptr; }
    if (d_A23) { CUDA_CHECK(cudaFree(d_A23)); d_A23 = nullptr; }
    if (d_A31) { CUDA_CHECK(cudaFree(d_A31)); d_A31 = nullptr; }
    if (d_A32) { CUDA_CHECK(cudaFree(d_A32)); d_A32 = nullptr; }
    if (d_A33) { CUDA_CHECK(cudaFree(d_A33)); d_A33 = nullptr; }
    if (d_As ) { CUDA_CHECK(cudaFree(d_As )); d_As  = nullptr; }// was leaked before
    if (d_x1_2){ CUDA_CHECK(cudaFree(d_x1_2));d_x1_2= nullptr; }
    if (d_x3 ) { CUDA_CHECK(cudaFree(d_x3 )); d_x3  = nullptr; }
    if (d_p1 ) { CUDA_CHECK(cudaFree(d_p1 )); d_p1  = nullptr; }
    if (d_p2 ) { CUDA_CHECK(cudaFree(d_p2 )); d_p2  = nullptr; }
    if (d_p3 ) { CUDA_CHECK(cudaFree(d_p3 )); d_p3  = nullptr; }
    // Factorization / inverse storage lives on the device for GPU solver
    // types and on the heap for CPU solver types; release with the matching
    // deallocator.  (These were previously never freed — a leak.)
    if (solver_type >= 1000) {
        if (invAs   ) { CUDA_CHECK(cudaFree(invAs   )); invAs    = nullptr; }
        if (invLU   ) { CUDA_CHECK(cudaFree(invLU   )); invLU    = nullptr; }
        if (last_mem) { CUDA_CHECK(cudaFree(last_mem)); last_mem = nullptr; }
    } else {
        if (invAs   ) { delete[] invAs   ; invAs    = nullptr; }
        if (invLU   ) { delete[] invLU   ; invLU    = nullptr; }
        if (last_mem) { delete[] last_mem; last_mem = nullptr; }
    }
    if (blas_handle) { CUBLAS_CHECK(cublasDestroy(blas_handle)); }
    if (sol_handle ) { CUSOLVER_CHECK(cusolverDnDestroy(sol_handle)); }
    // Pinned staging buffers; element count via sizeof(hbuf)/sizeof(hbuf[0])
    // (the original divided by sizeof(double), correct only where pointers
    // and doubles have the same size).
    for (size_t i = 0; i < sizeof(hbuf) / sizeof(hbuf[0]); i++) {
        if (hbuf[i] != nullptr) { CUDA_CHECK(cudaFreeHost(hbuf[i])); hbuf[i] = nullptr; }
    }
    // Reordering maps (host and device copies).
    if (map_n2o) { delete[] map_n2o; map_n2o = nullptr; }
    if (map_o2n) { delete[] map_o2n; map_o2n = nullptr; }
    if (d_map_n2o) { CUDA_CHECK(cudaFree(d_map_n2o)); d_map_n2o = nullptr; }
    if (d_map_o2n) { CUDA_CHECK(cudaFree(d_map_o2n)); d_map_o2n = nullptr; }
}

extern cudaStream_t streams[];
extern double* dc_f64;

// Grow a column-major host matrix to a larger leading dimension.
// Allocates a new_lda x new_lda buffer, migrates the leading
// last_nrows x last_nrows sub-matrix into it, frees the old buffer, and
// repoints old_h_A at the new storage.  Cells outside the migrated corner
// are left uninitialized; callers fill them via host_update_mat.
static void host_expand_mat(double* & old_h_A, const size_t old_lda, const int last_nrows,
    const size_t new_lda)
{
    double* grown = new double [new_lda * new_lda];
    // Column-by-column migration; columns are contiguous in column-major order.
    #pragma omp parallel for schedule(static) num_threads(gnt)
    for (int col = 0; col < last_nrows; col++) {
        const double* src_col = old_h_A + col * old_lda;
        double*       dst_col = grown   + col * new_lda;
        for (int row = 0; row < last_nrows; row++)
            dst_col[row] = src_col[row];
    }
    delete[] old_h_A;// release the old storage
    old_h_A = grown;
}

// Copy only the newly-appended part of a column-major matrix from src_A
// (leading dimension src_lda, now src_nrows x src_nrows) into h_A (leading
// dimension dst_lda): the trailing rows of the old columns plus the entire
// new columns.  Rows/columns below last_nrows in the old columns are assumed
// already present in h_A.
static void host_update_mat(double* & h_A, const size_t dst_lda, const double* src_A, const size_t src_lda, 
    const int last_nrows, const int src_nrows)
{
    assert(src_nrows <= dst_lda);
    const int grown_by = src_nrows - last_nrows;
    #pragma omp parallel num_threads(gnt)
    {
        // New trailing rows of the previously-existing columns.
        #pragma omp for schedule(static) nowait
        for (int col = 0; col < last_nrows; col++) {
            double*       dst = h_A   + col * dst_lda + last_nrows;
            const double* src = src_A + col * src_lda + last_nrows;
            for (int k = 0; k < grown_by; k++)
                dst[k] = src[k];
        }
        // Entire newly-appended columns.
        #pragma omp for schedule(static)
        for (int col = last_nrows; col < src_nrows; col++) {
            double*       dst = h_A   + col * dst_lda;
            const double* src = src_A + col * src_lda;
            for (int k = 0; k < src_nrows; k++)
                dst[k] = src[k];
        }
    }
}

// Incrementally extend the inverse of an LU factorization — and the inverse
// of the factored matrix — when the matrix grows by a trailing block.
// Everything is column-major.
//
//   inv_LU      : on entry, top-left last_nrows x last_nrows holds the packed
//                 inverses of the previous L (unit lower) / U (upper) factors;
//                 on exit, holds the packed inverses of the grown factors.
//   inv_A       : on entry, top-left holds the previous matrix inverse;
//                 on exit, holds the inverse of the grown matrix.
//   last_nrows  : previous system size (0 on the first call).
//   inv_lda     : leading dimension of inv_LU and inv_A (>= new_nrows).
//   new_LU      : packed L/U factors of the grown new_nrows x new_nrows
//                 matrix, leading dimension LU_lda.
//
// Block naming used below (t = last_nrows, d = new_nrows - last_nrows):
//   new_LU = [ Lt\Ut   B ]      inv_LU = [ inv   H  ]
//            [   C   E_F ]               [  W   K\P ]
// where K = inv(lower(E_F)) (unit lower) and P = inv(upper(E_F)),
// stored packed in the same d x d corner (K strictly below the diagonal,
// P on and above it — the helpers inverse_of_lower_unit /
// inverse_of_upper_nonunit are presumed to write into that packed layout;
// confirm against their definitions).
void extend_invLU_and_invA(
    double* & inv_LU, double* & inv_A, const int last_nrows, const int inv_lda,
    const double* new_LU, const int new_nrows, const int LU_lda)
{
    assert(inv_lda >= new_nrows);
    const int delta = new_nrows - last_nrows;
    // Pointers to the trailing d x d corner of the factor and its inverse.
    const double* E_and_F = new_LU + last_nrows *  LU_lda + last_nrows;
    double* K_and_P       = inv_LU + last_nrows * inv_lda + last_nrows;

    // printf("new_LU: %p\n", new_LU);
    // for (int i = 0; i < new_nrows; i++) {
    //     for (int j = 0; j < new_nrows; j++) 
    //         printf("%10.6e ", new_LU[j * LU_lda + i]);
    //     printf("\n");
    // } printf("\n");

    if (last_nrows == 0) {// first call: no previous factorization to extend
        // printf("init extend_invLU_and_invA\n");

        inverse_of_lower_unit   (new_LU, LU_lda, inv_LU, inv_lda, new_nrows);
        inverse_of_upper_nonunit(new_LU, LU_lda, inv_LU, inv_lda, new_nrows);

        // printf("inv LU:\n");
        // for (int i = 0; i < new_nrows; i++) {
        //     for (int j = 0; j < new_nrows; j++)
        //         printf("%10.6e ", inv_LU[j * inv_lda + i]);
        //     printf("\n");
        // } printf("\n");

        // At this point inv_LU has been computed directly.
        // Now compute inv_A directly as A^{-1} = U^{-1} * L^{-1} = P * K.
        double* P = new double [delta * delta], * K = new double [delta * delta];
        for (int i = 0; i < delta*delta; i++) {// zero both scratch blocks first
            P[i] = 0.0; K[i] = 0.0;
        }
        // Unpack the packed K\P corner into separate dense triangles.
        for (int j = 0; j < delta; j++)
        for (int i = 0; i < delta; i++) {
            if (j < i) K[j * delta + i] = K_and_P[j * inv_lda + i];
            else       P[j * delta + i] = K_and_P[j * inv_lda + i];
        }
        for (int i = 0; i < delta; i++) K[i * delta + i] = 1.0;// K is unit lower: set diagonal to 1
        cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, delta, delta, delta, 1.0, P, delta, K, delta, 0.0, inv_A, inv_lda);

        // printf("inv As:\n");
        // for (int i = 0; i < new_nrows; i++) {
        //     for (int j = 0; j < new_nrows; j++)
        //         printf("%10.6e ", inv_A[j * inv_lda + i]);
        //     printf("\n");
        // } printf("\n");

        delete[] P; delete[] K;
        return ;
    }

    // printf("last_inv_LU:\n");
    // for (int i = 0; i < last_nrows; i++) {
    //     for (int j = 0; j < last_nrows; j++)
    //         printf("%10.6e ", last_inv_LU[j * last_nrows + i]);
    //     printf("\n");
    // } printf("\n");

    // printf("last_inv_A:\n");
    // for (int i = 0; i < last_nrows; i++) {
    //     for (int j = 0; j < last_nrows; j++)
    //         printf("%10.6e ", last_inv_A[j * last_nrows + i]);
    //     printf("\n");
    // } printf("\n");

    
    // The top-left (Lt)^{-1} and (Ut)^{-1} are already in place from the
    // previous call, so no copy is needed:
    // copy_mat(last_inv_LU, last_nrows, new_inv_LU, new_nrows, last_nrows, last_nrows);
    // Invert the lower part before the upper part so the packed corner is
    // not overwritten mid-computation.
    inverse_of_lower_unit   (E_and_F, LU_lda, K_and_P, inv_lda, delta);// lower-triangular K
    inverse_of_upper_nonunit(E_and_F, LU_lda, K_and_P, inv_lda, delta);// upper-triangular P

    // printf("K_and_P:\n");
    // for (int i = 0; i < delta; i++) {
    //     for (int j = 0; j < delta; j++)
    //         printf("%10.6e ", K_and_P[j * inv_lda + i]);
    //     printf("\n");
    // } printf("\n");

    double* buf = new double [last_nrows * delta];
    // Copy C (the new bottom-left factor block) into buf.
    copy_mat(new_LU + last_nrows, LU_lda, buf, delta, last_nrows, delta);
    // buf <- K * C
    cblas_dtrmm(CblasColMajor, CblasLeft, CblasLower, CblasNoTrans, CblasUnit, delta, last_nrows,  1.0, K_and_P, inv_lda, buf, delta);
    // Solve W * Lt = - K * C for W (bottom-left block of the new L inverse).
    cblas_dtrsm(CblasColMajor, CblasRight, CblasLower, CblasNoTrans, CblasUnit, delta, last_nrows, -1.0, new_LU,  LU_lda, buf, delta);
    // Copy W back into inv_LU.
    copy_mat(buf, delta, inv_LU + last_nrows, inv_lda, last_nrows, delta);

    // printf("W:\n");
    // for (int i = 0; i < delta; i++) {
    //     for (int j = 0; j < last_nrows; j++)
    //         printf("%10.6e ", (inv_LU + last_nrows)[j * inv_lda + i]);
    //     printf("\n");
    // } printf("\n");
    
    // Copy B (the new top-right factor block) into buf.
    copy_mat(new_LU + last_nrows * LU_lda, LU_lda, buf, last_nrows, delta, last_nrows);
    // buf <- B * P
    cblas_dtrmm(CblasColMajor, CblasRight, CblasUpper, CblasNoTrans, CblasNonUnit, last_nrows, delta, 1.0, K_and_P, inv_lda, buf, last_nrows);
    // Solve Ut * H = - B * P for H (top-right block of the new U inverse).
    cblas_dtrsm(CblasColMajor, CblasLeft, CblasUpper, CblasNoTrans, CblasNonUnit, last_nrows, delta, -1.0, new_LU, LU_lda, buf, last_nrows);
    // Copy H back into inv_LU.
    copy_mat(buf, last_nrows, inv_LU + last_nrows * inv_lda, inv_lda, delta, last_nrows);

    // printf("H:\n");
    // for (int i = 0; i < last_nrows; i++) {
    //     for (int j = 0; j < delta; j++)
    //         printf("%10.6e ", (inv_LU + last_nrows * inv_lda)[j * inv_lda + i]);
    //     printf("\n");
    // } printf("\n");

    // printf("new_inv_LU:\n");
    // for (int j = 0; j < new_nrows; j++) {
    //     for (int i = 0; i < new_nrows; i++)    
    //         printf("%10.6e ", (new_inv_LU)[j * new_nrows + i]);
    //     printf("\n");
    // } printf("\n");
    
    // The whole new inv_LU is now complete.
    // Assemble the new inv_A block by block from A^{-1} = U^{-1} * L^{-1}.
    const double* H = inv_LU + last_nrows * inv_lda,     * W = inv_LU + last_nrows;
    double  * new_H = inv_A  + last_nrows * inv_lda, * new_W = inv_A  + last_nrows;
    // Top-left: (At)^{-1} + H*W  (the (At)^{-1} part is already in place).
    // copy_mat(last_inv_A, last_nrows, new_inv_A, new_nrows, last_nrows, last_nrows);
    // cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, last_nrows, last_nrows, delta, 1.0, H, new_nrows, W, new_nrows, 1.0, new_inv_A, new_nrows);
    cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, last_nrows, last_nrows, delta, 1.0, H, inv_lda, W, inv_lda, 1.0, inv_A, inv_lda);
    // Top-right: H * K
    copy_mat(H, inv_lda, new_H, inv_lda, delta, last_nrows);    
    cblas_dtrmm(CblasColMajor, CblasRight, CblasLower, CblasNoTrans, CblasUnit   , last_nrows, delta, 1.0, K_and_P, inv_lda, new_H, inv_lda);
    // Bottom-left: P * W
    copy_mat(W, inv_lda, new_W, inv_lda, last_nrows, delta);
    cblas_dtrmm(CblasColMajor, CblasLeft , CblasUpper, CblasNoTrans, CblasNonUnit, delta, last_nrows, 1.0, K_and_P, inv_lda, new_W, inv_lda);
    // Bottom-right: P * K
    // multiply_upper_lower(K_and_P, new_nrows, K_and_P, new_nrows, new_H + last_nrows, new_nrows, delta);
    double* P = new double [delta * delta], * K = new double [delta * delta];
    for (int i = 0; i < delta*delta; i++) {// zero both scratch blocks first
        P[i] = 0.0; K[i] = 0.0;
    }
    // Unpack the packed K\P corner into separate dense triangles.
    for (int j = 0; j < delta; j++)
    for (int i = 0; i < delta; i++) {
        if (j < i) K[j * delta + i] = K_and_P[j * inv_lda + i];
        else       P[j * delta + i] = K_and_P[j * inv_lda + i]; 
    }
    for (int i = 0; i < delta; i++) K[i * delta + i] = 1.0;// K is unit lower: set diagonal to 1
    cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, delta, delta, delta, 1.0, P, delta, K, delta, 0.0, new_H + last_nrows, inv_lda);

    // printf("invA:\n");
    // for (int i = 0; i < new_nrows; i++) {
    //     for (int j = 0; j < new_nrows; j++)
    //         printf("%10.6e ", inv_A[j * inv_lda + i]);
    //     printf("\n");
    // } printf("\n");

    // The whole new inv_A is now complete.
    delete[] buf; delete[] K; delete[] P;

    // exit(-1);
}

// Incrementally extend a matrix inverse in place when the matrix grows by a
// trailing block, using the standard 2x2 block-inverse (Schur complement)
// formulas.  Column-major, leading dimension lda.
//
// On entry, inv_A holds:
//   top-left  (last_nrows x last_nrows) : A11^{-1}, the previous inverse;
//   top-right / bottom-left / bottom-right : the NEW blocks A12, A21, A22 of
//   the grown matrix (written there by the caller before this call).
// On exit, inv_A holds the full inverse of the grown new_nrows x new_nrows
// matrix.  matinv_row is presumed to invert a block in place (row-pivoted);
// confirm against its definition.
void extend_invA(double* & inv_A, const int lda, const int last_nrows, const int new_nrows)
{
    assert(lda >= new_nrows);
    const int delta = new_nrows - last_nrows;
    // K_and_P initially holds the new diagonal block A22; it will become
    // S^{-1}, the inverse of the Schur complement.
    double* K_and_P = inv_A + last_nrows * lda + last_nrows;
    // printf("extend_invA: last %d new %d\n", last_nrows, new_nrows);
    if (last_nrows == 0) {// first call: invert the whole block in place and return
        matinv_row<int, double>(new_nrows, K_and_P, lda);

        // printf("inv As:\n");
        // for (int i = 0; i < new_nrows; i++) {
        //     for (int j = 0; j < new_nrows; j++)
        //         printf("%10.6e ", inv_A[j * lda + i]);
        //     printf("\n");
        // } printf("\n");

        return ;
    }

    double* A12 = inv_A + last_nrows * lda, * A21 = inv_A + last_nrows;
    // At this point the top-left of inv_A is the previous step's inverse A11^{-1}.
    double* buf_A12 = new double [last_nrows * delta];
    // buf <- A11^{-1}*A12
    cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, last_nrows, delta, last_nrows, 1.0, inv_A, lda, A12, lda, 0.0, buf_A12, last_nrows);

    // K_and_P <- A22 - A21 * A11^{-1} * A12  (the Schur complement S)
    cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, delta, delta, last_nrows, -1.0, A21, lda, buf_A12, last_nrows, 1.0, K_and_P, lda);

    // K_and_P <- S^{-1}
    matinv_row<int, double>(delta, K_and_P, lda);

    double * buf_A21 = new double [last_nrows * delta];
    // buf <- A21*A11^{-1}
    cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, delta, last_nrows, last_nrows, 1.0, A21, lda, inv_A, lda, 0.0, buf_A21, delta);

    // Top-right block: A12 <- -(A11^{-1}*A12) * S^{-1}
    cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, last_nrows, delta, delta, -1.0, buf_A12, last_nrows, K_and_P, lda, 0.0, A12, lda);
    // Bottom-left block: A21 <- -S^{-1} * (A21*A11^{-1})
    cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, delta, last_nrows, delta, -1.0, K_and_P, lda, buf_A21, delta, 0.0, A21, lda);
    // Top-left block: A11^{-1} + (A11^{-1}*A12) * S^{-1} * (A21*A11^{-1})
    // (note A21 now holds the negated bottom-left block, hence the -1.0).
    cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, last_nrows, last_nrows, delta, -1.0, buf_A12, last_nrows, A21, lda, 1.0, inv_A, lda);

    // printf("invA:\n");
    // for (int i = 0; i < new_nrows; i++) {
    //     for (int j = 0; j < new_nrows; j++)
    //         printf("%10.6e ", inv_A[j * lda + i]);
    //     printf("\n");
    // } printf("\n");

    // exit(-1);
    delete[] buf_A12; delete[] buf_A21;
}

// Propagate a single thread-count setting to every threading layer this
// file uses: the file-global `gnt` (consumed by the omp pragmas here),
// the OpenMP runtime, and MKL.
static void inline set_num_threads(int nt) {
    omp_set_num_threads(nt);
    MKL_Set_Num_Threads(nt);
    gnt = nt;
}

void FracSolver::solve(const int fluid_nrows, const int fluid_lda,
                const double* solid_A,
                const double* A13, const double* A23, const double* A31, const double* A32, const double* A33,
                const double* b1, const double* b2, const double* x3,
                double* x1_2, double* b3
            )
{
    set_num_threads(8);

#ifdef TICK_TOCK
    auto tickneg_1 = std::chrono::high_resolution_clock::now();
#endif
    const int solid_nrows = (fluid_nrows << 1);
    if (solver_type == 1022 && wait_cnt == 0) {
        CUDA_CHECK(cudaStreamSynchronize(streams[8]));
        CUDA_CHECK(cudaStreamSynchronize(streams[9]));
        CUDA_CHECK(cudaStreamSynchronize(streams[10]));
        CUDA_CHECK(cudaStreamSynchronize(streams[11]));
    }

    // double * p1 = new double [fluid_nrows], * p2 = new double [fluid_nrows], * p3 = new double [fluid_nrows];
    // printf("fluid %d solid %d cap %d\n", fluid_nrows, solid_nrows, capacity);

    if (fluid_nrows > capacity) {// 需要对d_A**和h_A**进行扩容
        size_t new_capa = capacity;
        while (new_capa < fluid_nrows) new_capa <<= 1;// 扩一倍内存
        printf("Capacity %d => %d double\n", capacity, new_capa);
        if (solver_type >= 1000) {// 在GPU上需要扩容
            device_expand_mat(d_A13, capacity, last_nrows, new_capa, 0);
            device_expand_mat(d_A23, capacity, last_nrows, new_capa, 1);
            device_expand_mat(d_A31, capacity, last_nrows, new_capa, 2);
            device_expand_mat(d_A32, capacity, last_nrows, new_capa, 3);
            device_expand_mat(d_A33, capacity, last_nrows, new_capa, 4);
            device_expand_vec(d_x3  , new_capa                     , 5);
            // device_expand_vec(d_b3  , new_capa                     , 5);
            device_expand_vec(d_p1  , new_capa                     , 5);
            device_expand_vec(d_p2  , new_capa                     , 5);
            device_expand_vec(d_p3  , new_capa                     , 5);
            device_expand_vec(d_x1_2, new_capa << 1                , 6);
            if (solver_type == 1022) {// 需要重排时才expand
                device_expand_mat(last_mem, capacity << 1, last_nrows << 1, new_capa << 1, 6);
                device_expand_mat(invLU   , capacity << 1, last_nrows << 1, new_capa << 1, 6);
                device_expand_mat(invAs   , capacity << 1, last_nrows << 1, new_capa << 1, 6);
            } else if (solver_type == 1033) {
                device_expand_mat(invAs   , capacity << 1, last_nrows << 1, new_capa << 1, 6);
            }
            // 求解器类型为1000时不需要自动扩容，会在create_copyPart_to_device中扩容到当前的solid_nrows大小
            // else {
            //     device_expand_mat(d_As    , capacity << 1, last_nrows << 1, new_capa << 1, 6);
            // }
        } else {// 在CPU端需要扩容
            host_expand_mat(h_A13, capacity, last_nrows, new_capa);
            host_expand_mat(h_A23, capacity, last_nrows, new_capa);
            host_expand_mat(h_A31, capacity, last_nrows, new_capa);
            host_expand_mat(h_A32, capacity, last_nrows, new_capa);
            host_expand_mat(h_A33, capacity, last_nrows, new_capa);
            if (solver_type == 22) {
                host_expand_mat(last_mem, capacity << 1, last_nrows << 1, new_capa << 1);
                host_expand_mat(invLU   , capacity << 1, last_nrows << 1, new_capa << 1);
                host_expand_mat(invAs   , capacity << 1, last_nrows << 1, new_capa << 1);
            } else if (solver_type == 33) {
                host_expand_mat(invAs   , capacity << 1, last_nrows << 1, new_capa << 1);
            }
        }
        capacity = new_capa;
    }

    #pragma omp parallel for schedule(static) num_threads(gnt)
    for (int i = 0; i < fluid_nrows; i++) {
        x1_2[i              ] = b1[i];
        x1_2[i + fluid_nrows] = b2[i];
    }
    if (solver_type >= 1000) {
        CUDA_CHECK(cudaMemcpyAsync(d_x3  , x3  , sizeof(*d_x3  ) * fluid_nrows, cudaMemcpyHostToDevice, streams[5]));
        CUDA_CHECK(cudaMemcpyAsync(d_x1_2, x1_2, sizeof(*d_x1_2) * solid_nrows, cudaMemcpyHostToDevice, streams[6]));
#ifdef __SYNC
        CUDA_CHECK(cudaStreamSynchronize(streams[5]));
        CUDA_CHECK(cudaStreamSynchronize(streams[6]));
#endif
    }

    if (fluid_nrows > last_nrows) {// 本步需要更新（追加）矩阵元素：同样也适合于最开始last_nrows == 0的情形
        if (solver_type == 1022 || solver_type == 22 ||
            solver_type == 1033 || solver_type == 33) {// 需要重排的算法先构造map
            prev_hfs.push_back(fluid_nrows);
            const int prev_num = prev_hfs.size() - 1;
            const int hf = fluid_nrows;
            const int last_hf = last_nrows;
            delete[] map_n2o; map_n2o = new int [solid_nrows];
            delete[] map_o2n; map_o2n = new int [solid_nrows];
            #pragma omp parallel num_threads(gnt)
            {
                #pragma omp for schedule(static)
                for (int g = 0; g < prev_num; g++) {
                    int ibeg = prev_hfs[g],
                        iend = prev_hfs[g + 1];
                    // printf("[%d, %d)\n", ibeg, iend);
                    for (int i = ibeg; i < iend; i++) {
                        map_o2n[i] = (ibeg << 1) + i - ibeg;
                        map_o2n[i + hf] = i + iend;
                    }
                }
                #pragma omp for schedule(static)
                for (int i = 0; i < solid_nrows; i++)
                    map_n2o[map_o2n[i]] = i;
            }
        }
        
        if (solver_type >= 1000) {// 在GPU上需要更新增量部分的数据
            const size_t required_buf_len = fluid_nrows * fluid_nrows - last_nrows * last_nrows;// 需要的缓冲区大小
            for (int i = 0; i < sizeof(hbuf)/sizeof(double); i++) {// 逐个检查缓冲区是否足够容量
                if (hbuf_len[i] <= required_buf_len) {
                    CUDA_CHECK(cudaFreeHost(hbuf[i]));
                    while (hbuf_len[i] <= required_buf_len) hbuf_len[i] <<= 1;// 扩一倍内存
                    CUDA_CHECK(cudaHostAlloc((void**)&hbuf[i], sizeof(double) * hbuf_len[i], cudaHostAllocWriteCombined));
                }
            }
            device_update_mat(d_A13, capacity, A13, fluid_lda, last_nrows, fluid_nrows, 0, hbuf[0]);
            device_update_mat(d_A23, capacity, A23, fluid_lda, last_nrows, fluid_nrows, 1, hbuf[1]);
            device_update_mat(d_A31, capacity, A31, fluid_lda, last_nrows, fluid_nrows, 2, hbuf[2]);
            device_update_mat(d_A32, capacity, A32, fluid_lda, last_nrows, fluid_nrows, 3, hbuf[3]);
            device_update_mat(d_A33, capacity, A33, fluid_lda, last_nrows, fluid_nrows, 4, hbuf[4]);
            if (solver_type == 1000) {
                create_copyPart_to_device(solid_A, solid_nrows, d_As, last_nrows << 1, 6);
            } else {
                CUDA_CHECK(cudaFree(d_map_n2o)); CUDA_CHECK(cudaMalloc((void**)&d_map_n2o, sizeof(int)*solid_nrows));
                CUDA_CHECK(cudaFree(d_map_o2n)); CUDA_CHECK(cudaMalloc((void**)&d_map_o2n, sizeof(int)*solid_nrows));
                CUDA_CHECK(cudaMemcpyAsync(d_map_n2o, map_n2o, sizeof(int)*solid_nrows, cudaMemcpyHostToDevice, streams[7]));
                CUDA_CHECK(cudaMemcpyAsync(d_map_o2n, map_o2n, sizeof(int)*solid_nrows, cudaMemcpyHostToDevice, streams[7]));
                // 注意此时是将数据追加到上次分解完的结果中
                if (solver_type == 1022)
                    create_reorder_copyPart_to_device(solid_A, solid_nrows, last_mem, last_nrows << 1, capacity << 1, map_n2o, 6);
                else if (solver_type == 1033)
                    create_reorder_copyPart_to_device(solid_A, solid_nrows, invAs   , last_nrows << 1, capacity << 1, map_n2o, 6);
            }
#ifdef __SYNC
            CUDA_CHECK(cudaStreamSynchronize(streams[0]));
            CUDA_CHECK(cudaStreamSynchronize(streams[1]));
            CUDA_CHECK(cudaStreamSynchronize(streams[2]));
            CUDA_CHECK(cudaStreamSynchronize(streams[3]));
            CUDA_CHECK(cudaStreamSynchronize(streams[4]));
            CUDA_CHECK(cudaStreamSynchronize(streams[6]));
#endif
        } else {// 在CPU上更新增量部分的数据
            host_update_mat(h_A13, capacity, A13, fluid_lda, last_nrows, fluid_nrows);
            host_update_mat(h_A23, capacity, A23, fluid_lda, last_nrows, fluid_nrows);
            host_update_mat(h_A31, capacity, A31, fluid_lda, last_nrows, fluid_nrows);
            host_update_mat(h_A32, capacity, A32, fluid_lda, last_nrows, fluid_nrows);
            host_update_mat(h_A33, capacity, A33, fluid_lda, last_nrows, fluid_nrows);
            // if (solver_type == 22) host_update_mat(h_As , capacity<<1, solid_A, solid_nrows, last_nrows<<1 ,solid_nrows);
        }
    }
#ifdef TICK_TOCK
    auto tick0 = std::chrono::high_resolution_clock::now();
#endif

    // 计算第一批GEMV：组装右端向量
    if (solver_type >= 1000) {
        CUDA_CHECK(cudaStreamSynchronize(streams[5]));// 等待流5完成拷贝到d_x3的操作

        CUBLAS_CHECK(cublasSetStream(blas_handle, streams[4]));// 然后流4计算A33*x3
        CUBLAS_CHECK(cublasDgemv(blas_handle, CUBLAS_OP_N, fluid_nrows, fluid_nrows, &dc_f64[1], d_A33, capacity, d_x3, 1, &dc_f64[0], d_p3, 1));// d_x3 <- A33*x3
        CUBLAS_CHECK(cublasSetStream(blas_handle, streams[0]));// 然后流0计算A13*x3
        CUBLAS_CHECK(cublasDgemv(blas_handle, CUBLAS_OP_N, fluid_nrows, fluid_nrows, &dc_f64[1], d_A13, capacity, d_x3, 1, &dc_f64[0], d_p1, 1));
        CUBLAS_CHECK(cublasSetStream(blas_handle, streams[1]));// 以及流1计算A23*x3
        CUBLAS_CHECK(cublasDgemv(blas_handle, CUBLAS_OP_N, fluid_nrows, fluid_nrows, &dc_f64[1], d_A23, capacity, d_x3, 1, &dc_f64[0], d_p2, 1));
        CUBLAS_CHECK(cublasSetStream(blas_handle, NULL));// 恢复默认流
#ifdef __SYNC
        CUDA_CHECK(cudaStreamSynchronize(streams[4]));
        CUDA_CHECK(cudaStreamSynchronize(streams[0]));
        CUDA_CHECK(cudaStreamSynchronize(streams[1]));
#endif
    } else {
        cblas_dgemv(CblasColMajor, CblasNoTrans, fluid_nrows, fluid_nrows,  1.0, h_A33, capacity, x3, 1, 0.0, b3, 1);// b3 <- A33*x3
        cblas_dgemv(CblasColMajor, CblasNoTrans, fluid_nrows, fluid_nrows, -1.0, h_A13, capacity, x3, 1, 1.0, x1_2              , 1);// r1 <- b1 - A13*x3
        cblas_dgemv(CblasColMajor, CblasNoTrans, fluid_nrows, fluid_nrows, -1.0, h_A23, capacity, x3, 1, 1.0, x1_2 + fluid_nrows, 1);// r2 <- b2 - A23*x3
        // cblas_dgemv(CblasColMajor, CblasNoTrans, fluid_nrows, fluid_nrows,  1.0, h_A33, capacity, x3, 1, 0.0, p3, 1);
        // cblas_dgemv(CblasColMajor, CblasNoTrans, fluid_nrows, fluid_nrows,  1.0, h_A13, capacity, x3, 1, 0.0, p1, 1);
        // cblas_dgemv(CblasColMajor, CblasNoTrans, fluid_nrows, fluid_nrows,  1.0, h_A23, capacity, x3, 1, 0.0, p2, 1);
        // cblas_daxpy(fluid_nrows, -1.0, p1, 1, x1_2              , 1);
        // cblas_daxpy(fluid_nrows, -1.0, p2, 1, x1_2 + fluid_nrows, 1);
        // if (solid_nrows > 95) {
        //     printf("solid_nrows %d\n", solid_nrows);
        //     printf("x3   p3   p1   p2\n");
        //     for (int i = 0; i < fluid_nrows; i++) printf("%.15e %.15e %.15e %.15e %.15e %.15e\n", x3[i], p3[i], p1[i], p2[i], x1_2[i], x1_2[i + fluid_nrows]);
        // }
    }

#ifdef TICK_TOCK
    auto tick1 = std::chrono::high_resolution_clock::now();
#endif
    if (fluid_nrows > last_nrows) {// 需要setup
        // 上一步的分解结果存放在last_mem中
        if (solver_type == 0) {
            const size_t tot_elems = (size_t) solid_nrows * (size_t) solid_nrows;
            double* new_mem = new double [tot_elems];
            // 直接拷贝
            copy_vec(tot_elems, solid_A, new_mem);
            int info = LAPACKE_mkl_dgetrfnp(LAPACK_COL_MAJOR, solid_nrows, solid_nrows, new_mem, solid_nrows); assert(info == 0);// no pivot
            delete[] last_mem; last_mem = new_mem;// 释放之前的内存
        } else if (solver_type == 22 || solver_type == 33) {// TODO: 可以动态扩容，而不需要每次都释放原来的内存！
            const int LU_lda = capacity << 1;
            double* dst = (solver_type == 22) ? last_mem : invAs;
            // 追加新的数据到last_mem中，拷贝的同时还需要重排
            #pragma omp parallel num_threads(gnt)
            {
                #pragma omp for schedule(static)
                for (int nj = 0; nj < (last_nrows << 1); nj++) {
                    const int oj = map_n2o[nj];
                    for (int ni = (last_nrows << 1); ni < solid_nrows; ni++) {
                        const int oi = map_n2o[ni];
                        dst[nj * LU_lda + ni] = solid_A[oj * solid_nrows + oi];
                    }
                }
                #pragma omp for schedule(static)
                for (int nj = (last_nrows << 1); nj < solid_nrows; nj++) {
                    const int oj = map_n2o[nj];
                    for (int ni = 0; ni < solid_nrows; ni++) {
                        const int oi = map_n2o[ni];
                        dst[nj * LU_lda + ni] = solid_A[oj * solid_nrows + oi];
                    }
                }
            }

            if (solver_type == 22) {// 先求LU再求逆
                extend_block_lda(last_mem, last_nrows << 1, solid_nrows, LU_lda);
                // extend_invLU_and_invA(invLU, invAs, last_nrows << 1, (double*) last_mem, solid_nrows);
                extend_invLU_and_invA(invLU, invAs, last_nrows << 1, capacity << 1, last_mem, solid_nrows, LU_lda);
            } else {// 直接求逆
                extend_invA(invAs, capacity << 1, last_nrows << 1, solid_nrows);
            }
        } else if (solver_type >= 1000) {
            device_setup(6, solver_type, prec_type, blas_handle, sol_handle,
                solid_nrows, d_As, last_nrows << 1, last_mem, capacity << 1, 
                invLU, invAs);
            wait_cnt = 2;
            // // // 这里将invA重排回去？？？
            // if (reo_invA) CUDA_CHECK(cudaFree(reo_invA));
            // CUDA_CHECK(cudaStreamSynchronize(streams[7]));// 等待流7拷贝完GPU上的map
            // CUDA_CHECK(cudaMalloc((void**)&reo_invA, sizeof(*reo_invA) * solid_nrows * solid_nrows));
            // device_reorder_mat(solid_nrows, invAs, capacity << 1, reo_invA, solid_nrows, d_map_n2o, 6);
#ifdef __SYNC
            CUDA_CHECK(cudaStreamSynchronize(streams[6]));
#endif
        } else assert(false);
    }

#ifdef TICK_TOCK
    auto tick2 = std::chrono::high_resolution_clock::now();
#endif
#ifdef CHECK_RESI
    // 检查残差（注意这是重排前的）
    double resi[solid_nrows];
    for (int i = 0; i < solid_nrows; i++) resi[i] = x1_2[i];// resi <- b
    double b_norm = cblas_dnrm2(solid_nrows, resi, 1);
#endif
    // solve
    if (solver_type == 0 || solver_type == 2) {
        cblas_dtrsv(CblasColMajor, CblasLower, CblasNoTrans, CblasUnit   , solid_nrows, (double*) last_mem, solid_nrows, x1_2, 1);
        cblas_dtrsv(CblasColMajor, CblasUpper, CblasNoTrans, CblasNonUnit, solid_nrows, (double*) last_mem, solid_nrows, x1_2, 1);
    } else if (solver_type == 22 || solver_type == 33) {// 还需要重排
        double* tmp_rhs = new double [solid_nrows];
        #pragma omp parallel for schedule(static) num_threads(gnt)
        for (int i = 0; i < solid_nrows; i++)
            tmp_rhs[i] = x1_2[map_n2o[i]];
        // 针对重排后的线性系统进行求解
        if (invAs) {// 有现成的逆矩阵
            double* inv_sol = new double [solid_nrows];
            // cblas_dgemv(CblasColMajor, CblasNoTrans, solid_nrows, solid_nrows, 1.0, invAs, solid_nrows, tmp_rhs, 1, 0.0, inv_sol, 1);
            cblas_dgemv(CblasColMajor, CblasNoTrans, solid_nrows, solid_nrows, 1.0, invAs, capacity << 1, tmp_rhs, 1, 0.0, inv_sol, 1);
            #pragma omp parallel for schedule(static) num_threads(gnt)
            for (int i = 0; i < solid_nrows; i++) 
                x1_2[i] = inv_sol[map_o2n[i]];
            delete[] inv_sol;
        } else {// 普通的前代回代
            // cblas_dtrsv(CblasColMajor, CblasLower, CblasNoTrans, CblasUnit   , solid_nrows, (double*) last_mem, solid_nrows, tmp_rhs, 1);
            // cblas_dtrsv(CblasColMajor, CblasUpper, CblasNoTrans, CblasNonUnit, solid_nrows, (double*) last_mem, solid_nrows, tmp_rhs, 1);
            cblas_dtrsv(CblasColMajor, CblasLower, CblasNoTrans, CblasUnit   , solid_nrows, (double*) last_mem, capacity << 1, tmp_rhs, 1);
            cblas_dtrsv(CblasColMajor, CblasUpper, CblasNoTrans, CblasNonUnit, solid_nrows, (double*) last_mem, capacity << 1, tmp_rhs, 1);
            #pragma omp parallel for schedule(static) num_threads(gnt)
            for (int i = 0; i < solid_nrows; i++) 
                x1_2[i] = tmp_rhs[map_o2n[i]];
        }
        delete[] tmp_rhs;
    } else if (solver_type >= 1000) {
        CUDA_CHECK(cudaStreamSynchronize(streams[0]));
        CUDA_CHECK(cudaStreamSynchronize(streams[1]));// 等待流0和1完成A13*x3和A23*x3的操作
        CUBLAS_CHECK(cublasSetStream(blas_handle, streams[6]));// 之前拷贝到d_x1_2的操作也是由流6负责的
        CUBLAS_CHECK(cublasDaxpy(blas_handle, fluid_nrows, &dc_f64[2], d_p1, 1, d_x1_2              , 1));// rhs1 <- b1 - A13*x3
        CUBLAS_CHECK(cublasDaxpy(blas_handle, fluid_nrows, &dc_f64[2], d_p2, 1, d_x1_2 + fluid_nrows, 1));// rhs2 <- b2 - A23*x3 组装右端项
        if (solver_type == 1022) {// 还需要重排
            CUDA_CHECK(cudaStreamSynchronize(streams[7]));// 等待流7拷贝完GPU上的map
            double* tmp_rhs = (double*) check_device_workspace_to_enlarge(sizeof(*tmp_rhs) * solid_nrows * 4);
            device_reorder_vec(solid_nrows, d_x1_2, tmp_rhs, d_map_o2n, 6);
            if (prec_type == 0 && wait_cnt <= 0) {// 有现成的逆矩阵
            // if (prec_type == 0) {
                assert(invAs);
                double* inv_sol = tmp_rhs + 2 * solid_nrows;
                // lda == solid_nrows
                // CUBLAS_CHECK(cublasDgemv(blas_handle, CUBLAS_OP_N, solid_nrows, solid_nrows, &dc_f64[1], invAs, solid_nrows, tmp_rhs, 1, &dc_f64[0], inv_sol, 1));
                
                // lda == (capacity << 1)
                CUBLAS_CHECK(cublasDgemv(blas_handle, CUBLAS_OP_N, solid_nrows, solid_nrows, &dc_f64[1], invAs, capacity << 1, tmp_rhs, 1, &dc_f64[0], inv_sol, 1));                
                device_reorder_vec(solid_nrows, inv_sol, d_x1_2, d_map_n2o, 6);

            } else {// 普通的前代回代
                CUBLAS_CHECK(cublasDtrsv(blas_handle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT    , solid_nrows, last_mem, capacity << 1, tmp_rhs, 1));
                CUBLAS_CHECK(cublasDtrsv(blas_handle, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, solid_nrows, last_mem, capacity << 1, tmp_rhs, 1));
                device_reorder_vec(solid_nrows, tmp_rhs, d_x1_2, d_map_n2o, 6);
            }
        } else if (solver_type == 1033) {
            CUDA_CHECK(cudaStreamSynchronize(streams[7]));// 等待流7拷贝完GPU上的map
            double* tmp_rhs = (double*) check_device_workspace_to_enlarge(sizeof(*tmp_rhs) * solid_nrows * 4);
            device_reorder_vec(solid_nrows, d_x1_2, tmp_rhs, d_map_o2n, 6);
            assert(invAs);
            double* inv_sol = tmp_rhs + 2 * solid_nrows;
            CUBLAS_CHECK(cublasDgemv(blas_handle, CUBLAS_OP_N, solid_nrows, solid_nrows, &dc_f64[1], invAs, capacity << 1, tmp_rhs, 1, &dc_f64[0], inv_sol, 1));                
            device_reorder_vec(solid_nrows, inv_sol, d_x1_2, d_map_n2o, 6);
        } else if (solver_type == 1000) {
            CUBLAS_CHECK(cublasDtrsv(blas_handle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT    , solid_nrows, last_mem, solid_nrows, d_x1_2, 1));
            CUBLAS_CHECK(cublasDtrsv(blas_handle, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, solid_nrows, last_mem, solid_nrows, d_x1_2, 1));
        } else assert(false);

        CUBLAS_CHECK(cublasSetStream(blas_handle, NULL));// 恢复默认流
#ifdef __SYNC
        CUDA_CHECK(cudaStreamSynchronize(streams[6]));
#endif
    }
    else assert(false);

#ifdef CHECK_RESI
    cblas_dgemv(CblasColMajor, CblasNoTrans, solid_nrows, solid_nrows, -1.0, solid_A, solid_nrows, x1_2, 1, 1.0, resi, 1);// resi <- b-A*x
    double r_norm = cblas_dnrm2(solid_nrows, resi, 1);
    if (r_norm / b_norm >= 1e-12) {
        printf("Error too large!!! ||r||_2/||b||_2 = %.15e  (solid_nrows %d ||r||_2: %.15e,  ||b||_2: %.15e)\n",
            r_norm / b_norm, solid_nrows, r_norm, b_norm);
        exit(-1);
    }
#endif
#ifdef TICK_TOCK
    auto tick3 = std::chrono::high_resolution_clock::now();
#endif
    // 计算第二批GEMV
    if (solver_type >= 1000) {
        CUDA_CHECK(cudaStreamSynchronize(streams[6]));// 等待流6完成TRSV操作，得到有效的d_x1_2结果
        CUDA_CHECK(cudaMemcpyAsync(x1_2, d_x1_2, sizeof(*d_x1_2) * solid_nrows, cudaMemcpyDeviceToHost, streams[6]));// 将x1和x2拷回去

        CUBLAS_CHECK(cublasSetStream(blas_handle, streams[2]));
        CUBLAS_CHECK(cublasDgemv(blas_handle, CUBLAS_OP_N, fluid_nrows, fluid_nrows, &dc_f64[1], d_A31, capacity, d_x1_2              , 1, &dc_f64[0], d_p1, 1));
        CUBLAS_CHECK(cublasSetStream(blas_handle, streams[3]));
        CUBLAS_CHECK(cublasDgemv(blas_handle, CUBLAS_OP_N, fluid_nrows, fluid_nrows, &dc_f64[1], d_A32, capacity, d_x1_2 + fluid_nrows, 1, &dc_f64[0], d_p2, 1));

        CUDA_CHECK(cudaStreamSynchronize(streams[4]));// 等待流4完成GEMV: A33*x3 (存于d_p3)
        CUBLAS_CHECK(cublasDaxpy(blas_handle, fluid_nrows, &dc_f64[1], d_p2, 1, d_p3, 1));// 将d_p1, d_p2, d_p3累加起来
        CUDA_CHECK(cudaStreamSynchronize(streams[2]));// 等待流2完成GEMV: A31*x1
        CUBLAS_CHECK(cublasDaxpy(blas_handle, fluid_nrows, &dc_f64[1], d_p1, 1, d_p3, 1));
        CUBLAS_CHECK(cublasSetStream(blas_handle, NULL));// 恢复默认流
#ifdef __SYNC
        CUDA_CHECK(cudaStreamSynchronize(streams[3]));
#endif
    } else {
        cblas_dgemv(CblasColMajor, CblasNoTrans, fluid_nrows, fluid_nrows, 1.0, h_A31, capacity, x1_2              , 1, 1.0, b3, 1);// b3 <- b3 + A31*x1
        cblas_dgemv(CblasColMajor, CblasNoTrans, fluid_nrows, fluid_nrows, 1.0, h_A32, capacity, x1_2 + fluid_nrows, 1, 1.0, b3, 1);// b3 <- b3 + A32*x2
    
        // cblas_dgemv(CblasColMajor, CblasNoTrans, fluid_nrows, fluid_nrows, 1.0, h_A31, capacity, x1_2              , 1, 0.0, p1, 1);
        // cblas_dgemv(CblasColMajor, CblasNoTrans, fluid_nrows, fluid_nrows, 1.0, h_A32, capacity, x1_2 + fluid_nrows, 1, 0.0, p2, 1);
        // #pragma omp parallel for schedule(static) num_threads(gnt)
        // for (int i = 0; i < fluid_nrows; i++) b3[i] = p1[i] + p2[i] + p3[i];
    }
#ifdef TICK_TOCK
    auto tick4 = std::chrono::high_resolution_clock::now();
#endif
    if (solver_type >= 1000) {
        CUDA_CHECK(cudaMemcpyAsync(b3, d_p3, sizeof(*d_p3) * fluid_nrows, cudaMemcpyDeviceToHost, streams[3]));
        CUDA_CHECK(cudaStreamSynchronize(streams[6]));// 等待流6拷贝完x1, x2
        CUDA_CHECK(cudaStreamSynchronize(streams[3]));// 等待流3拷贝完b3
    }
#ifdef TICK_TOCK
    auto tick5 = std::chrono::high_resolution_clock::now();
    t_trans += ((double)(std::chrono::duration_cast<std::chrono::nanoseconds>(tick0 - tickneg_1)).count() * 1e-9);
    t_gemv  += ((double)(std::chrono::duration_cast<std::chrono::nanoseconds>(tick1 - tick0)).count() * 1e-9);
    t_setup += ((double)(std::chrono::duration_cast<std::chrono::nanoseconds>(tick2 - tick1)).count() * 1e-9);
    t_trsv  += ((double)(std::chrono::duration_cast<std::chrono::nanoseconds>(tick3 - tick2)).count() * 1e-9);
    t_gemv  += ((double)(std::chrono::duration_cast<std::chrono::nanoseconds>(tick4 - tick3)).count() * 1e-9);
    t_trans += ((double)(std::chrono::duration_cast<std::chrono::nanoseconds>(tick5 - tick4)).count() * 1e-9);
    if (fluid_nrows > last_nrows) {
        printf("curr fluid_nrows %d call %d fluid_lda %d T_setup %.5f T_trsv %.5f T_gemv %.5f T_trans %.5f\n",
            fluid_nrows, invoked_cnt, fluid_lda, t_setup, t_trsv, t_gemv, t_trans);
    }
#endif
    if (fluid_nrows > last_nrows && solver_type == 1022 && prec_type == 0) {
        device_extend_invLU_and_invA(invLU, invAs, last_nrows << 1, capacity << 1, last_mem, solid_nrows, capacity << 1,
				streams[8], streams[9], streams[10], streams[11], blas_handle);
    }

    last_nrows = fluid_nrows;// 更新
    invoked_cnt ++;
    wait_cnt --;
    // delete[] p1; delete[] p2; delete[] p3;

    set_num_threads(1);// 返回时修改线程数
}

// NOTE(review): currently a no-op stub. The entire body is experimental code
// that has been commented out (a per-matrix cache keyed by gemv_cnt % 5 with
// lazy capacity growth, plus GPU/CPU dispatch on solver_type). Callers get no
// side effects from this function today; `y` is NOT written. The parameters
// are kept so the external interface stays stable if/when the cached-GEMV
// path is re-enabled. The trailing cublasDgemv in the retained sketch is
// visibly incomplete (missing output arguments) — do not resurrect as-is.
void FracSolver::gemv(const int fluid_nrows, const int fluid_lda,
                const double* A, const double* x, double* y, int* x_start, int* x_end)
{
    // int id = gemv_cnt % 5;
    // if (fluid_nrows > mat_capa[id]) {
    //     if (solver_type >= 1000) {
    //         device_expand_mat(mat_arrs[id], mat_capa[id], mat_size[id], mat_capa[id] << 1, id);
    //     } else {
    //         host_expand_mat(mat_arrs[id], mat_capa[id], mat_size[id], mat_capa[id] << 1);
    //     }
    //     mat_capa[id] <<= 1;
    // }
    // if (fluid_nrows > mat_size[id]) {
    //     if (solver_type >= 1000) {
    //         device_update_mat(mat_arrs[id], mat_capa[id], A, fluid_lda, mat_size[id], fluid_nrows, id, hbuf[id]);
    //     } else {
    //         host_update_mat(mat_arrs[id], mat_capa[id], A, fluid_lda, mat_size[id], fluid_nrows);
    //     }
    //     mat_size[id] = fluid_nrows;
    // }
    // if (solver_type >= 1000) {
    //     // double* rhs[3] = {d_x}
    //     cudaMemcpyAsync(d_p1, x + *x_start - 1, fluid_nrows * sizeof(double), cudaMemcpyHostToDevice, streams[id]);
    //     cublasDgemv(blas_handle, CUBLAS_OP_N, fluid_nrows, fluid_nrows, &dc_f64[1], mat_arrs[id], mat_capa[id], d_p1, )
    // } else {
    //     cblas_dgemv(CblasColMajor, CblasNoTrans, fluid_nrows, fluid_nrows,  1.0, mat_arrs[id], mat_capa[id],
    //         x + *x_start - 1, 1, 0.0, y, 1);
    // }

    // gemv_cnt ++;
}

// Migrates solver state from a CPU-based solver into a GPU-based one
// (host-to-device deep copy). Preconditions (asserted): `old_solver` is a CPU
// solver (solver_type < 1000) and `new_solver` is a GPU solver
// (solver_type >= 1000). Copies the five coupling blocks A13/A23/A33/A31/A32,
// plus — depending on the destination solver type — the factorization buffer
// (last_mem), the inverse (invAs) and inverse-LU (invLU) matrices, and mirrors
// the scalar bookkeeping (last_nrows, prev_hfs).
// Bug fixed vs. original: `capacity` is a size_t member; storing it in an
// `int` and forming `capa * capa` in int arithmetic could truncate/overflow
// for large capacities. All byte counts are now computed in size_t.
void deep_copy(const FracSolver * old_solver, FracSolver * new_solver)
{
    assert(new_solver->solver_type >= 1000 && old_solver->solver_type < 1000);
    // Byte sizes computed in size_t to avoid int overflow on large capacities.
    const size_t capa       = new_solver->capacity;
    const size_t blk_bytes  = sizeof(double) * capa * capa;                  // one NxN coupling block
    const size_t full_bytes = sizeof(double) * (capa << 1) * (capa << 1);    // 2Nx2N solid system

    // Copy the coupling matrices from host to device.
    CUDA_CHECK(cudaMemcpy(new_solver->d_A13, old_solver->h_A13, blk_bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(new_solver->d_A23, old_solver->h_A23, blk_bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(new_solver->d_A33, old_solver->h_A33, blk_bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(new_solver->d_A31, old_solver->h_A31, blk_bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(new_solver->d_A32, old_solver->h_A32, blk_bytes, cudaMemcpyHostToDevice));

    if (new_solver->solver_type == 1022) {
        // LU-then-invert path: needs the factorization, the inverse, and invLU.
        CUDA_CHECK(cudaMemcpy(new_solver->last_mem, old_solver->last_mem, full_bytes, cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(new_solver->invAs, old_solver->invAs, full_bytes, cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(new_solver->invLU, old_solver->invLU, full_bytes, cudaMemcpyHostToDevice));
    } else if (new_solver->solver_type == 1033) {
        // Direct-inverse path: only the inverse matrix is needed.
        CUDA_CHECK(cudaMemcpy(new_solver->invAs, old_solver->invAs, full_bytes, cudaMemcpyHostToDevice));
    }
    new_solver->last_nrows = old_solver->last_nrows;
    new_solver->prev_hfs = old_solver->prev_hfs;

    // NOTE: d_As has no host-side counterpart on the CPU solver, so it is
    // intentionally not copied here; the GPU solver rebuilds it on demand.
}