#include "box_ILU.hpp"
#include "scal_kernels.hpp"
#include "csr_matop.hpp"

#include <vector>

// Construct a block-ILU preconditioner/smoother.
// _type selects the factorization variant used later by SetOperator():
//   0 => ILU(0) without fill-in (factorize_lev0)
//   1 => threshold-based incomplete factorization (factorize_threshold)
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
BoxILU<idx_t, data_t, setup_t, calc_t, dof>::BoxILU(idx_t _type) : Solver<idx_t, setup_t, calc_t, dof>(), ilu_type(_type)
{
}

// Release everything allocated by SetOperator()/build_TDG().
// NOTE(review): assumes the pointer members are initialized to nullptr in the
// class definition (delete/delete[] on nullptr is a no-op) — confirm in header.
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
BoxILU<idx_t, data_t, setup_t, calc_t, dof>::~BoxILU()
{
    // The operator is owned only when SetOperator() made a truncated copy.
    if (own_A) {
        delete A_calc;
        A_calc = nullptr;
    }
    // Factorization data and solve-time workspace.
    delete [] D_inv;
    delete [] aux_array;
    // Per-thread task-dependency graphs (built only when nthreads > 1).
    delete [] TDG_Ls;
    delete [] TDG_Us;
}

// Intentional no-op: BoxILU performs its precision truncation eagerly inside
// SetOperator()/factorize_*, so there is nothing to truncate afterwards.
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void BoxILU<idx_t, data_t, setup_t, calc_t, dof>::truncate() {}

// Bind the operator, factorize the local diagonal block with the selected BILU
// variant, and (when multi-threaded) build the per-thread task-dependency
// graphs used by the level-scheduled triangular solves.
// May only be called once per object (asserted below).
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void BoxILU<idx_t, data_t, setup_t, calc_t, dof>::SetOperator(const Operator<idx_t, setup_t, dof> & op)
{
    const par_CSRMatrix<idx_t, setup_t, setup_t, dof> & par_A = (const par_CSRMatrix<idx_t, setup_t, setup_t, dof> &) op;
    int my_pid; MPI_Comm_rank(par_A.comm, & my_pid);
    // if (my_pid == 0) printf("Setup BILU type %d #dof %d wgt %.2f maxfill %d droptol %.3e\n", ilu_type, dof, this->weight, maxfil, droptol);
    assert(this->setup_called == false);
    this->setup_time = - wall_time();

    if constexpr (sizeof(setup_t) == sizeof(calc_t) && sizeof(setup_t) == sizeof(data_t)) {
        // setup/calc/storage precisions agree: borrow the caller's matrix
        own_A = false;
        A_calc = & par_A;
    }
    else {
        // mixed precision: build a private, truncated copy of A
        own_A = true;
        par_CSRMatrix<idx_t, data_t, calc_t, dof> * A_copy = new par_CSRMatrix<idx_t, data_t, calc_t, dof>
            (par_A.comm, par_A.rows_partition, par_A.cols_partition);
        // copy the data, but share the communication package instead of copying it
        A_copy->commpkg           = par_A.commpkg;
        A_copy->commpkg_transpose = par_A.commpkg_transpose;
        A_copy->col_map_offd      = par_A.col_map_offd;
        auto copy_trunc = [](const CSRMatrix<idx_t, setup_t, setup_t, dof> & src, CSRMatrix<idx_t, data_t, calc_t, dof> & dst) {
            dst.nrows = src.nrows;
            dst.ncols = src.ncols;
            dst.nnz   = src.nnz;
            dst.alloc_mem(false);

            #pragma omp parallel
            {
                #pragma omp for schedule(static) nowait
                for (idx_t i = 0; i <= dst.nrows; i++) {
                    dst.row_ptr[i] = src.row_ptr[i];
                }
                #pragma omp for schedule(static) nowait
                for (idx_t j = 0; j < dst.nnz; j++) {
                    dst.col_idx[j] = src.col_idx[j];
                }
                // 64-bit loop counter: nnz * dof*dof can overflow a narrow idx_t
                #pragma omp for schedule(static) nowait
                for (long long j = 0; j < dst.nnz * (long long) dof*dof; j++) {
                    dst.vals   [j] = src.vals   [j];
                }
            }
        };
        copy_trunc(par_A.diag, A_copy->diag);
        copy_trunc(par_A.offd, A_copy->offd);

        A_calc = A_copy;
    }
    
    droptol = 1.0e-3;
    // factorize the local diagonal block (always in setup precision)
    if (ilu_type == 0) {
        factorize_lev0(par_A.diag);
    } else if (ilu_type == 1) {
        factorize_threshold(par_A.diag);

    } else assert(false);

    const idx_t A_nnz = par_A.diag.nnz,
                ILU_nnz = L.nnz + U.nnz + par_A.diag.nrows;
#if 1
    long long loc_nnzs[2] = {A_nnz, ILU_nnz};
    long long glb_nnzs[2] = { 0, 0 };
    MPI_Reduce(loc_nnzs, glb_nnzs, 2, MPI_LONG_LONG, MPI_SUM, 0, par_A.comm);
    const double cplx = (double)glb_nnzs[1]/(double)glb_nnzs[0];
    // (debug) measure the load imbalance across processes
    // idx_t loc_nnz = ILU_nnz, min_nnz, max_nnz;
    // MPI_Reduce(& loc_nnz, & min_nnz, 1, MPI_INT, MPI_MIN, 0, par_A.comm);
    // MPI_Reduce(& loc_nnz, & max_nnz, 1, MPI_INT, MPI_MAX, 0, par_A.comm);
    // if (my_pid == 0) printf("BILU min %d max %d min/max %.4f\n", min_nnz, max_nnz, (double)min_nnz/(double)max_nnz);
#else
    const double cplx = (double)ILU_nnz / (double)A_nnz;// FIX: was integer division, truncating the complexity
#endif
    if (my_pid == 0) printf("Setup BILU type %d wgt %.2f mf %d drpt %.3e cmplx %.4e\n", ilu_type, this->weight, maxfil, droptol, cplx);
    // if (my_pid == 0) printf("Proc %d ILU complexity %.4e\n", my_pid, (double)ILU_nnz/(double)A_nnz);

    SetExecNumThreads(omp_get_max_threads());
    if (nthreads > 1) {// single-threaded solves need no dependency graphs
        TDG_Ls = build_TDG(L, 'L');
        // printf("Proc %d Done L analysis\n", my_pid);
        TDG_Us = build_TDG(U, 'U');
        // printf("Proc %d Done U analysis\n", my_pid);
    }
    aux_array = new calc_t [L.nrows * dof];

    this->setup_time += wall_time();
    this->setup_called = true;
}

#include <queue>
// Build one task-dependency graph (TDG) per thread for the triangular solve
// with tri_mat ('L' lower / 'U' upper). Rows are grouped into levels (standard
// level scheduling), each level is split statically across threads into
// super-tasks, redundant inter-thread dependencies are pruned, and the matrix
// rows are reordered in place so that each thread's rows are contiguous.
// Returns a new[]-allocated array of nthreads TaskDepGraph objects.
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
TaskDepGraph<idx_t> * BoxILU<idx_t, data_t, setup_t, calc_t, dof>::build_TDG(CSRMatrix<idx_t, data_t, calc_t, dof> & tri_mat,
    const char tri_type)
{
    constexpr int e_size = dof*dof;
    CSRMatrix<idx_t, data_t, calc_t, 1> A;// each nonzero of the original CSR matrix is an incoming edge
    A.nrows = tri_mat.nrows;
    A.ncols = tri_mat.ncols;
    A.nnz   = tri_mat.nnz;
    A.row_ptr = tri_mat.row_ptr;
    A.col_idx = tri_mat.col_idx;
    A.vals    = new data_t [A.nnz];// values are never read: placeholder so the transpose routine can be called

    // std::vector<double> tics;
    // tics.push_back(wall_time());

    CSRMatrix<idx_t, data_t, calc_t, 1> AT;// each nonzero of the transposed CSR matrix is an outgoing edge
    CSRMatrixTranspose(A, AT);
    assert(A.nrows == AT.nrows);
    const idx_t nrows = A.nrows;

    int my_pid; MPI_Comm_rank(A_calc->comm, & my_pid);
    // if (my_pid == 0 && tri_type == 'L') {
    //     for (idx_t i = 0; i < nrows; i ++) {
    //         printf("%d : ", i);
    //         for (idx_t p = A.row_ptr[i]; p < A.row_ptr[i + 1]; p ++)
    //             printf("%d ", A.col_idx[p]);
    //         printf("\n");
    //     }
    // }

    // tics.push_back(wall_time());

    struct TASK {
        idx_t id = -1;// row index of this task
        idx_t lev = -1;// level this task belongs to
        bool operator < (const TASK & b) const {
            if (lev == b.lev) return id < b.id;
            else return lev < b.lev;
        }
    };
    TASK * tasks = new TASK [nrows];// records which level each row (task) sits on
#if 1 // exploit the triangular structure to assign levels quickly
    auto find_levels = [& AT, & tasks] (const idx_t ibeg, const idx_t iend, const idx_t istep) {
        for (idx_t i = ibeg; i != iend; i += istep) {
            tasks[i].id = i;
            if (tasks[i].lev == -1) // never reached from a predecessor => no incoming edge
                tasks[i].lev = 0;
            const idx_t my_lev = tasks[i].lev;
            assert(my_lev >= 0);
            for (idx_t p = AT.row_ptr[i]; p < AT.row_ptr[i + 1]; p ++) {// walk every outgoing edge
                const idx_t j = AT.col_idx[p];
                if (j == i) continue;// skip self
                const idx_t prev_lev = tasks[j].lev;// previously recorded level, may already be larger
                tasks[j].lev = std::max(my_lev + 1, prev_lev);
            }
        }
    };
    if      (tri_type == 'L') {// lower-triangular matrix (L)
        assert ((A.row_ptr[0] == A.row_ptr[1]) ||
                (A.row_ptr[0] == A.row_ptr[1] - 1 && A.col_idx[A.row_ptr[0]] == 0));
        find_levels(0, nrows, 1);// sweep top-down
    } 
    else if (tri_type == 'U') {// upper-triangular matrix (U)
        assert ((A.row_ptr[nrows - 1] == A.row_ptr[nrows]) ||
                (A.row_ptr[nrows - 1] == A.row_ptr[nrows] - 1 && A.col_idx[A.row_ptr[nrows - 1]] == nrows - 1));
        find_levels(nrows - 1, -1, -1);// sweep bottom-up
    } else assert(false);
#else // determine levels by topological sort (much slower)
    std::queue<int> Q;
    for (int i = 0; i < nrows; i++) {
        tasks[i].id = i;
        if ( A.row_ptr[i] == A.row_ptr[i + 1] ||// this row has no incoming edge
            (A.row_ptr[i] + 1 == A.row_ptr[i + 1] && A.col_idx[A.row_ptr[i]] == i)) {// or its only incoming edge is itself
            tasks[i].lev = 0;
            Q.push(i);
        }
    }
    // Q now holds all vertices with in-degree zero
    assert(Q.empty() == false);
    // determine the level of each row (task)
    while (Q.empty() == false) {
        const int i = Q.front();
        const int i_level = tasks[i].lev; assert(i == tasks[i].id && i_level >= 0);
        Q.pop();

        for (int p = AT.row_ptr[i]; p < AT.row_ptr[i + 1]; p ++) {// walk every outgoing edge
            int j = AT.col_idx[p];
            if (j != i && // not the vertex itself
                tasks[j].lev <= i_level) {// the previously assigned level was too small
                tasks[j].lev  = i_level + 1;// raise it
                Q.push(j);
            }
        }
    }
#endif
    // if (my_pid == 0 && tri_type == 'L') {
    //     for (idx_t i = 0; i < nrows; i++) {
    //         printf("%d %d\n", tasks[i].id, tasks[i].lev);
    //     }
    // }
    // MPI_Barrier(A_calc->comm);
    // char filename [1024];
    // sprintf(filename, "Proc%d.%c", my_pid, tri_type);
    // FILE * fp = fopen(filename, "w+");
    // for (idx_t i = 0; i < nrows; i++) {
    //     fprintf(fp, "%d %d\n", tasks[i].id, tasks[i].lev);
    // }
    // fclose(fp);
    // tics.push_back(wall_time());

    // sort the tasks: lower level first; within a level, smaller row index first
    std::sort(tasks, tasks + nrows);
    // tics.push_back(wall_time());
    assert(tasks[0].lev == 0);
    const int num_levels = tasks[nrows - 1].lev + 1;// total number of levels
    idx_t * task_rpt = new idx_t [num_levels + 1];
    task_rpt[0] = 0;
    for (idx_t ilev = 0; ilev < num_levels; ilev ++) {
        idx_t ptr = task_rpt[ilev];
        assert(tasks[ptr].lev == ilev);
        while (ptr < nrows && tasks[ptr].lev == ilev) ptr ++;
        if (ptr < nrows) assert(tasks[ptr].lev == ilev + 1);
        task_rpt[ilev + 1] = ptr;
    } assert(task_rpt[num_levels] == nrows);
    // tics.push_back(wall_time());

    // printf("num lev %d nthreads %d\n", num_levels, nthreads);

    // coarsen: statically balance the tasks of each level across threads, forming super-tasks
    // per level, per thread: prefix counts of the tasks contained in each super-task
    std::vector<std::vector<idx_t> > SPcnts_lev_thread(num_levels, std::vector<idx_t>(nthreads + 1));
    #pragma omp parallel for schedule(static)
    for (idx_t ilev = 0; ilev < num_levels; ilev ++) {// naively parallel over levels
        const idx_t tot_ntasks = task_rpt[ilev + 1] - task_rpt[ilev];
        if (false) {
// #if 0 // balance by nonzero count
            std::vector<idx_t> nnz_offset(tot_ntasks + 1);// std::vector replaces a non-standard VLA
            nnz_offset[0] = 0;
            for (idx_t tkp = 0; tkp < tot_ntasks; tkp ++) {// tkp: task ptr
                const idx_t i = tasks[task_rpt[ilev] + tkp].id;// task ID
                nnz_offset[tkp + 1] = nnz_offset[tkp] + A.row_ptr[i + 1] - A.row_ptr[i];
            }
            const idx_t tot_nnz = nnz_offset[tot_ntasks];
            idx_t avg_nnz = tot_nnz / nthreads;
            idx_t rem_nnz = tot_nnz - nthreads * avg_nnz;
            SPcnts_lev_thread[ilev][0] = 0;
            idx_t cnt = 0;
            for (int t = 0; t < nthreads; t ++) {
                const idx_t last_cnt = cnt;
                const idx_t thread_nnz = (t < rem_nnz) ? (avg_nnz + 1) : avg_nnz;
                while (cnt < tot_ntasks && nnz_offset[cnt] - nnz_offset[last_cnt] < thread_nnz) cnt ++;
                SPcnts_lev_thread[ilev][t + 1] = cnt;
            }
            // printf("ilev %d tot_ntasks %d tot_nnz %d SPcnts %d %d %d\n", ilev, tot_ntasks, tot_nnz,
            //     SPcnts_lev_thread[ilev][0], SPcnts_lev_thread[ilev][1], SPcnts_lev_thread[ilev][2]);
            assert(SPcnts_lev_thread[ilev][nthreads] == tot_ntasks);
        }
        else {
// #else // balance by row count
            idx_t avg = tot_ntasks / nthreads;// even share per thread
            idx_t rem = tot_ntasks - nthreads * avg;// remainder after the even split
            SPcnts_lev_thread[ilev][0] = 0;
            for (int t = 0; t < nthreads; t ++) {
                const idx_t cnt = (t < rem) ? (avg + 1) : avg;
                SPcnts_lev_thread[ilev][t + 1] = SPcnts_lev_thread[ilev][t] + cnt;
            } assert(SPcnts_lev_thread[ilev][nthreads] == tot_ntasks);
// #endif
        }
    }
    struct LevelThread {
        idx_t lev;
        int thread;
    };
    LevelThread * map_r2lt = new LevelThread [nrows];// map row ID to level ID and thread ID
    #pragma omp parallel for schedule(static)
    for (idx_t ilev = 0; ilev < num_levels; ilev ++) {// naively parallel over levels
        int tid = 0;
        for (idx_t p = task_rpt[ilev]; p < task_rpt[ilev + 1]; p ++) {
            if (SPcnts_lev_thread[ilev][tid + 1] <= p - task_rpt[ilev]) tid ++;// crossed into the next thread's range?
            const TASK & tk = tasks[p]; assert(tk.lev == ilev);
            LevelThread dst;
            dst.lev = ilev;
            dst.thread = tid;
            map_r2lt[tk.id] = dst;
        }
    }
    // tics.push_back(wall_time());

    // sanity sweep only: the loop body has no effect beyond the assert
    for (idx_t ilev = 0; ilev < num_levels; ilev ++) {
        for (idx_t p = task_rpt[ilev]; p < task_rpt[ilev + 1]; p ++) {
            const TASK & tk = tasks[p]; assert(tk.lev == ilev);
        }
    }
    // allocate the per-thread TDG metadata
    TaskDepGraph<idx_t> * TDG_rets = new TaskDepGraph<idx_t> [nthreads];
    // outer dim: level; inner dim: thread; key defined as level*nthreads + thread
    std::unordered_set< idx_t > * records = new std::unordered_set<idx_t> [num_levels * nthreads];
    // shared scratch: per thread {row count, nnz count} (std::vector replaces a non-standard VLA)
    std::vector<idx_t> twspace(nthreads * 2);
#define __MT_REORDER
#ifdef __MT_REORDER
    idx_t * reor_rpt = new idx_t [nrows + 1]; reor_rpt[0] = 0;
    idx_t * reor_cid = new idx_t [A.nnz];// column indices after reordering
    data_t* reor_val = new data_t[A.nnz * e_size];
    data_t* reor_D_inv = (tri_type == 'U' && D_inv) ? // upper-triangular (U) with a separated main diagonal
        (new data_t[nrows * e_size]) : nullptr;
#endif
    #pragma omp parallel
    {
        int nt  = omp_get_num_threads(); assert(nt == nthreads);
        int tid = omp_get_thread_num();
        TaskDepGraph<idx_t> & TDG = TDG_rets[tid];
        TDG.num_levs = num_levels;
        TDG.dep_rpt = new idx_t [num_levels + 1]; TDG.dep_rpt[0] = 0;
        TDG.rid_rpt = new idx_t [num_levels + 1]; TDG.rid_rpt[0] = 0;
        // count how many tasks this thread owns on each level
        for (idx_t ilev = 0; ilev < num_levels; ilev ++) {
            TDG.rid_rpt[ilev + 1] = TDG.rid_rpt[ilev] +
                SPcnts_lev_thread[ilev][tid + 1] - SPcnts_lev_thread[ilev][tid];
        }
        TDG.num_rids = TDG.rid_rpt[num_levels];
        TDG.rids = new idx_t [TDG.num_rids];
        idx_t my_tot_nnz = 0;
        // record the rows this thread owns and the super-task dependencies;
        // intra-thread and shared-ancestor dependencies are already dropped here
        for (idx_t ilev = 0; ilev < num_levels; ilev ++) {
            const idx_t lev_off = task_rpt[ilev];
            const idx_t tid_off = SPcnts_lev_thread[ilev][tid];
            const idx_t cnt = SPcnts_lev_thread[ilev][tid + 1] - tid_off;
            std::unordered_set<idx_t> & dep_record = records[ilev * nthreads + tid];
            for (idx_t j = 0; j < cnt; j ++) {
                const idx_t rid = tasks[lev_off + tid_off + j].id;
                TDG.rids[TDG.rid_rpt[ilev] + j] = rid;
                my_tot_nnz +=  A.row_ptr[rid + 1] - A.row_ptr[rid];
                for (idx_t p = A.row_ptr[rid]; p < A.row_ptr[rid + 1]; p ++) {
                    const idx_t cid = A.col_idx[p];// column index = the upstream task we depend on
                    if (cid == rid) continue;
                    SuperTaskDep<idx_t> dep;
                    dep.dep_lev = map_r2lt[cid].lev; assert(dep.dep_lev < ilev);
                    dep.dep_tid = map_r2lt[cid].thread;
                    // drop intra-thread dependencies
                    if (dep.dep_tid == tid) continue;
                    // drop shared-ancestor dependencies (another task already recorded this one)
                    const idx_t key = dep.dep_lev * nthreads + dep.dep_tid;// hash key: dep_lev*nthreads + dep_tid
                    if (dep_record.find(key) == dep_record.end())
                        dep_record.emplace(key);
                }
            }
        }
        // reorder the matrix so that the rows each thread accesses are contiguous
        twspace[tid*2  ] = TDG.num_rids;// publish my row count to the other threads
        twspace[tid*2+1] = my_tot_nnz;// publish the nonzero count of my rows to the other threads
        #pragma omp barrier
        TDG.reor_off = 0;
        idx_t nnz_off = 0;
        for (int pt = 0; pt < tid; pt ++) {
            TDG.reor_off += twspace[pt*2  ];// accumulate the row counts of the preceding threads
            nnz_off      += twspace[pt*2+1];// accumulate the nonzero counts of the preceding threads
        }

#ifdef __MT_REORDER
        // fill in the reordered matrix data
        const idx_t rec_nnz_off = nnz_off;
        for (idx_t new_i = TDG.reor_off; new_i < TDG.reor_off + TDG.num_rids; new_i ++) {
            const idx_t old_i = TDG.rids[new_i - TDG.reor_off];// old row index for this new position
            const idx_t old_i_nnz = tri_mat.row_ptr[old_i + 1] - tri_mat.row_ptr[old_i];
            reor_rpt[new_i + 1] = nnz_off + old_i_nnz;
            for (idx_t new_p  = nnz_off       ; new_p  < reor_rpt[new_i + 1]       ; new_p  ++)
                reor_cid[new_p ] = tri_mat.col_idx[tri_mat.row_ptr[old_i]        + new_p  - nnz_off       ];
            for (idx_t new_pf = nnz_off*e_size; new_pf < reor_rpt[new_i + 1]*e_size; new_pf ++)
                reor_val[new_pf] = tri_mat.vals   [tri_mat.row_ptr[old_i]*e_size + new_pf - nnz_off*e_size];
            nnz_off += old_i_nnz;
            if (tri_type == 'U' && D_inv) {
                for (int f = 0; f < e_size; f ++) reor_D_inv[new_i*e_size + f] = D_inv[old_i*e_size + f];
            }
        }
        #pragma omp barrier
        assert(rec_nnz_off == reor_rpt[TDG.reor_off]);
        if (tid == 0) {
            delete [] tri_mat.row_ptr; tri_mat.row_ptr = reor_rpt;
            delete [] tri_mat.col_idx; tri_mat.col_idx = reor_cid;
            delete [] tri_mat.vals   ; tri_mat.vals    = reor_val;
            if (tri_type == 'U' && D_inv) { delete [] D_inv; D_inv = reor_D_inv; }
        }
#endif
        // Each thread independently removes some unnecessary dependencies (pass 1)
        // k <-- j    If i and j are tasks of the SAME thread on different levels then — since
        // ^          intra-thread and shared-ancestor deps were dropped when forming super-tasks —
        // |-----i    the j<--i edge was not kept, so k would depend on both; it only needs to
        //            depend on whichever of i and j has the larger level number.
        // Drop deps targeting a given level of mine whose sources are the same thread on different levels
        //           myself    other                myself    other
        // L_{i-3}                .   because of the hidden            .
        //                        |  intra-thread dependency              
        // L_{i-2}               .|   only the latest one             .
        //                       ||   must be kept =====>                  
        // L_{i-1}              .||                           .
        //                      |||                           |
        // L_{i  }     . <---------                  . <-------
        for (idx_t ilev = num_levels - 1; ilev >= 0; ilev --) {
            std::unordered_set<idx_t> & k_record = records[ilev * nthreads + tid];
            std::unordered_map<int, idx_t> map_t2l;// map thread ID to level ID
            for (auto kp = k_record.begin(); kp != k_record.end(); kp ++) {// inspect each dependency kp of this level's super-task
                const idx_t dep_lev = *kp / nthreads;
                const int   dep_tid = *kp - nthreads * dep_lev;
                if (map_t2l.find(dep_tid) == map_t2l.end()) map_t2l.emplace(dep_tid, dep_lev);
                else {
                    const idx_t old_lev = map_t2l[dep_tid];
                    map_t2l[dep_tid] = std::max(old_lev, dep_lev);
                }
            }
            k_record.clear();
            for (auto rp = map_t2l.begin(); rp != map_t2l.end(); rp ++) {
                const int   dep_tid = rp->first;
                const idx_t dep_lev = rp->second;
                k_record.emplace(dep_lev * nthreads + dep_tid);
            }
        }
        // Each thread independently removes some unnecessary dependencies (pass 2)
        // Drop deps whose source is the same thread and level, but whose targets sit on my different levels
        //           myself    other                myself    other
        // L_{i-3}               .   because of the hidden       .
        //                       |  intra-thread dependency      |
        // L_{i-2}     . <-------|   only the earliest  . <------- 
        //                       |   must be kept =====>            
        // L_{i-1}     . <-------|                    .  
        //                       |                       
        // L_{i  }     . <--------                    .  
        std::unordered_map<idx_t, idx_t> map_rec2l;// map record to level ID
        for (idx_t ilev = 0; ilev < num_levels; ilev ++) {
            std::unordered_set<idx_t> & k_record = records[ilev * nthreads + tid];
            for (auto kp = k_record.begin(); kp != k_record.end(); ) {// inspect each dependency kp of this level's super-task
                if (map_rec2l.find(*kp) == map_rec2l.end()) {
                    map_rec2l.emplace(*kp, ilev);
                    kp ++;
                } else { assert(map_rec2l[*kp] < ilev);
                    k_record.erase(kp ++);// erase invalidates the erased iterator, so advance it first
                }
            }
        }
#if 1
        // Drop 2-hop dependencies: a dependency of my dependency need not also be my own dependency
        // m <-- k <-- j <-- i        m <-- k <-- j <-- i
        // ^     ^           |    =>
        // |     |-----------|   best case
        // |-----------------|
        // If k is checked first, its dependency j has a dependency i that is also k's own dependency,
        // so the k<--i edge is removed; when m is checked later it can no longer see that its
        // dependency k also depended on i, because that edge is gone.
        // Hence levels must be examined from the largest level number backwards.
#if 1 // each thread inspects its own dependencies
        for (idx_t ilev = num_levels - 1; ilev >= 0; ilev --) {// delete from back to front
            std::vector<idx_t> dele_buf;// buf to erase
            #pragma omp barrier // synchronize so other threads' updates to records are visible
            std::unordered_set<idx_t> & k_record = records[ilev * nthreads + tid];
            for (auto kp = k_record.begin(); kp != k_record.end(); kp ++) {// inspect each dependency kp of k, call it j
                assert(*kp < ilev * nthreads);// the key guarantees the dependency's level is below the current level
                const std::unordered_set<idx_t> & j_record = records[*kp];// j's dependencies
                for (auto jp = j_record.begin(); jp != j_record.end(); jp ++) {// inspect each dependency jp of j, call it i
                    if (k_record.find(*jp) != k_record.end()) {// i is indeed also among k's deps (a dependency's dependency)
                        dele_buf.push_back(*jp);
                        break;
                    }
                }
                for (size_t ki = 0; ki < dele_buf.size(); ki ++) {// drop the k<--i edges from k
                    // printf("Proc %d tid %d lev %d erase dep from tid %d lev %d\n", my_pid, tid, ilev,
                    //     dele_buf[ki]/nthreads, dele_buf[ki]-(dele_buf[ki]/nthreads));
                    k_record.erase(dele_buf[ki]);
                }
            }
        }
        
#else // thread 0 serially inspects the dependencies of all threads
        #pragma omp barrier
        if (tid == 0) {
            std::vector<idx_t> dele_buf;// buf to erase
            for (idx_t ilev = num_levels - 1; ilev >= 0; ilev --) {// delete from back to front
                for (int t = 0; t < nthreads; t ++) {
                    std::unordered_set<idx_t> & k_record = records[ilev * nthreads + t];
                    dele_buf.clear();
                    for (auto kp = k_record.begin(); kp != k_record.end(); kp ++) {// inspect each dependency kp of k, call it j
                        const std::unordered_set<idx_t> & j_record = records[*kp];// j's dependencies
                        for (auto jp = j_record.begin(); jp != j_record.end(); jp ++) {// inspect each dependency jp of j, call it i
                            if (k_record.find(*jp) != k_record.end())// i is indeed also among k's deps
                                dele_buf.push_back(*jp);
                        }
                    }
                    for (size_t ki = 0; ki < dele_buf.size(); ki ++) // drop the k<--i edges from k
                        k_record.erase(dele_buf[ki]);
                }
            }
        }
        #pragma omp barrier
#endif
#endif
        for (idx_t ilev = 0; ilev < num_levels; ilev ++) {
            const std::unordered_set<idx_t> & dep_record = records[ilev * nthreads + tid];
            TDG.dep_rpt[ilev + 1] = TDG.dep_rpt[ilev] + dep_record.size();
        }
        TDG.num_deps = TDG.dep_rpt[num_levels];
        TDG.deps = new SuperTaskDep<idx_t> [TDG.num_deps];
        for (idx_t ilev = 0; ilev < num_levels; ilev ++) {
            const idx_t ndeps = TDG.dep_rpt[ilev + 1] - TDG.dep_rpt[ilev];
            const std::unordered_set<idx_t> & dep_record = records[ilev * nthreads + tid]; 
            assert(ndeps == (idx_t) dep_record.size());
            idx_t cnt = 0;
            for (auto it = dep_record.begin(); it != dep_record.end(); it ++) {
                SuperTaskDep<idx_t> dep;
                dep.dep_lev = *it / nthreads;
                dep.dep_tid = *it - dep.dep_lev * nthreads;
                TDG.deps[TDG.dep_rpt[ilev] + cnt] = dep;
                cnt ++;
            } assert(cnt == ndeps);
            std::sort(TDG.deps + TDG.dep_rpt[ilev], TDG.deps + TDG.dep_rpt[ilev] + ndeps);
        }

    }// omp region
    // tics.push_back(wall_time());
    {// check how well-balanced the thread workloads are
        idx_t min_nnz = twspace[1], max_nnz = twspace[1];
        for (int t = 1; t < nthreads; t++) {
            min_nnz = std::min(min_nnz, twspace[t*2+1]);
            max_nnz = std::max(max_nnz, twspace[t*2+1]);// FIX: was std::min, so max_nnz never grew and the warning below could not fire
        }
        double ratio = (double)max_nnz/(double)min_nnz;
        int my_pid; MPI_Comm_rank(A_calc->comm, & my_pid);
        if (ratio > 1.1)
            printf("Warning !! Proc %d tw ratio %.4f\n", my_pid, ratio);
        // idx_t num_deps = 0;
        // for (int t = 0; t < nthreads; t++) num_deps += TDG_rets[t].num_deps;
        // printf("Proc %d #Dep %d #Super-task %d\n", my_pid, num_deps, nthreads * num_levels);
    }
    delete [] records;
    delete [] map_r2lt;
    delete [] task_rpt;
    delete [] tasks;

    // printf("A nrows %d nnz %d\n", nrows, tri_mat.nnz);
    // for (idx_t i = 0; i < (idx_t) tics.size() - 1; i++) {
    //     printf("phase %d %.5f\n", i, tics[i + 1] - tics[i]);
    // }
    
    A.row_ptr = nullptr;
    A.col_idx = nullptr;// borrowed memory must not be freed here
#ifdef DEBUG
    printf("Num levels %d\n", TDG_rets[0].num_levs);
    for (int t = 0; t < nthreads; t++) {
        printf("Thread %d\n", t);
        const TaskDepGraph<idx_t> & TDG = TDG_rets[t];
        for (idx_t ilev = 0; ilev < TDG.num_levs; ilev ++) {
            printf("    lev#%d ntasks %d ndeps %d\n", ilev,
                TDG.rid_rpt[ilev + 1] - TDG.rid_rpt[ilev],TDG.dep_rpt[ilev + 1] - TDG.dep_rpt[ilev]);
            printf("        Task : ");
            for (idx_t p = TDG.rid_rpt[ilev]; p < TDG.rid_rpt[ilev + 1]; p ++)
                printf("%d ", TDG.rids[p]);
            printf("\n");
            printf("        Dep  : ");
            for (idx_t p = TDG.dep_rpt[ilev]; p < TDG.dep_rpt[ilev + 1]; p ++)
                printf("(L%d,T%d) ", TDG.deps[p].dep_lev, TDG.deps[p].dep_tid);
            printf("\n");
        }
    }
    // MPI_Barrier(MPI_COMM_WORLD);
    // MPI_Abort(MPI_COMM_WORLD, -209);
#endif
    return TDG_rets;
}

// Set the thread count used by the parallel triangular solves.
// NOTE(review): SetOperator() calls this with omp_get_max_threads() before
// building the TDGs, and build_TDG() asserts the OpenMP team size equals
// nthreads — changing it after setup would invalidate the graphs; confirm.
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void BoxILU<idx_t, data_t, setup_t, calc_t, dof>::SetExecNumThreads(int num_threads)
{
    nthreads = num_threads;
}

// Block-ILU(0) factorization of A (no fill-in: L and U reuse A's sparsity
// pattern). Produces the strictly-lower factor L, the strictly-upper factor U,
// and D_inv = the inverted (block) diagonal. Requires strictly ascending
// column indices within each row of A (checked below).
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void BoxILU<idx_t, data_t, setup_t, calc_t, dof>::factorize_lev0(const CSRMatrix<idx_t, setup_t, setup_t, dof> & A)
{
    const idx_t nrows = A.nrows;
    L.nrows = nrows; L.ncols = A.ncols; L.row_ptr = new idx_t [nrows + 1]; L.row_ptr[0] = 0;
    U.nrows = nrows; U.ncols = A.ncols; U.row_ptr = new idx_t [nrows + 1]; U.row_ptr[0] = 0;

    // count the nonzeros on each side of the diagonal
    for (idx_t i = 0; i < nrows; i++) {
        for (auto p = A.row_ptr[i]; p < A.row_ptr[i + 1] - 1; p++) {// verify ascending column order
            if (A.col_idx[p] >= A.col_idx[p + 1]) printf("Error !! i %d : %d %d\n", i, A.col_idx[p], A.col_idx[p + 1]);
            assert(A.col_idx[p] < A.col_idx[p + 1]);
        }
        idx_t   lnz = 0,// strictly-lower nonzeros
                rnz = 0;// strictly-upper nonzeros (the diagonal is stored separately in D_inv)
        for (auto p = A.row_ptr[i]; p < A.row_ptr[i + 1]; p++) {
            if      (A.col_idx[p] < i) lnz ++;
            else if (A.col_idx[p] > i) rnz ++;
        }
        assert(lnz + rnz + 1 == A.row_ptr[i + 1] - A.row_ptr[i]);// exactly one diagonal entry per row
        L.row_ptr[i + 1] = L.row_ptr[i] + lnz;
        U.row_ptr[i + 1] = U.row_ptr[i] + rnz;
    }
    L.nnz = L.row_ptr[nrows];
    U.nnz = U.row_ptr[nrows];

    constexpr int e_size = dof*dof;
    L.col_idx = new idx_t [L.row_ptr[nrows]];
    U.col_idx = new idx_t [U.row_ptr[nrows]];

    setup_t * L_vals = new setup_t [L.row_ptr[nrows] * e_size];
    setup_t * U_vals = new setup_t [U.row_ptr[nrows] * e_size];
    setup_t * D_vals = new setup_t [          nrows  * e_size];// inv D

    // Row buffer reused across rows (resize keeps capacity, so no per-row
    // allocation). Replaces the previous runtime-sized array, which is a
    // non-standard VLA in C++.
    std::vector<setup_t> rbuf;

    // the factorization sweep
    for (idx_t i = 0; i < nrows; i++) {
        const idx_t nnz = A.row_ptr[i + 1] - A.row_ptr[i];// number of nonzeros in row i
        const idx_t offset = A.row_ptr[i];// offset of row i inside A

        rbuf.resize((size_t)nnz * e_size);// working copy of the current row
        const idx_t lnz = L.row_ptr[i + 1] - L.row_ptr[i];// entries left of the diagonal
        // const idx_t rnz = U.row_ptr[i + 1] - U.row_ptr[i];// entries right of the diagonal (excluding it)

        for (idx_t p = 0; p < nnz * e_size; p++) {// copy the row into the buffer
            rbuf[p] = A.vals[offset * e_size + p];
        }

        // eliminate the entries left of the diagonal one by one
        for (idx_t lp = 0; lp < lnz; lp++) {
            const idx_t lj = A.col_idx[lp + offset];// column of the entry being eliminated
            if constexpr (dof == 1) {
                // rbuf[lp] /= D_vals[lj];
                rbuf[lp] *= D_vals[lj];// D_vals holds the inverted diagonal (see matinv_row below)
            } else { static_assert(dof > 1);
                setup_t tmp_buf [e_size];
                for (int f = 0; f < e_size; f++) tmp_buf[f] = rbuf[lp * e_size + f];
                // note the order of the small-matrix product
                matmat_mla<setup_t, dof>(1.0, tmp_buf, D_vals + lj * e_size, 0.0, rbuf.data() + lp * e_size);
            }
            const setup_t* u_vals = & U_vals   [ U.row_ptr[lj] * e_size];// values of the already-factored row lj of U
            const idx_t  * u_cids = & U.col_idx[ U.row_ptr[lj]         ];// column indices of that row
            const idx_t u_len = U.row_ptr[lj + 1] - U.row_ptr[lj];// hoisted loop bound
            // eliminating this entry updates the entries to its right
            for (idx_t rp = lp + 1; rp < nnz; rp++) {// accumulate the update into each right-hand entry
                const idx_t rj = A.col_idx[rp + offset];
                // does row lj of U have a nonzero in column rj?
                for (idx_t k = 0; k < u_len; k++) {
                    if (u_cids[k] == rj) {// hit: accumulate
                        if constexpr (dof == 1) {
                            rbuf[rp] -= rbuf[lp] * u_vals[k];
                        } else { static_assert(dof > 1);
                            setup_t u_buf [e_size];
                            for (int f = 0; f < e_size; f++) u_buf[f] = u_vals[k * e_size + f];
                            // note the order of the small-matrix product
                            matmat_mla<setup_t, dof>(-1.0, rbuf.data() + lp * e_size, u_buf,
                                1.0, rbuf.data() + rp * e_size);
                        }
                        break;// columns are strictly ascending, so at most one match exists
                    }
                }// k loop
            }// rp loop
        }// lp loop
        // copy the buffered row back into L and U
        for (idx_t lp = 0; lp < lnz; lp++) {
            L.col_idx[L.row_ptr[i] + lp] = A.col_idx[offset + lp];
            for (int f = 0; f < e_size; f++)
                L_vals[(L.row_ptr[i] + lp) * e_size + f] = rbuf[lp * e_size + f];
        }

        // invert the (block) diagonal entry and store it into D_vals
        matinv_row<setup_t, dof>(rbuf.data() + lnz * e_size, D_vals + i * e_size);

        for (idx_t up = lnz + 1; up < nnz; up++) {
            U.col_idx[U.row_ptr[i] + up - lnz - 1] = A.col_idx[offset + up];
            for (int f = 0; f < e_size; f++)
                U_vals[(U.row_ptr[i] + up - lnz - 1) * e_size + f] = rbuf[up * e_size + f];
        }
    }

    if constexpr (sizeof(setup_t) == sizeof(data_t)) {
        // same precision: hand the buffers over directly
        L.vals = L_vals;
        U.vals = U_vals;
        D_inv  = D_vals;
    } else {
        // mixed precision: truncate into the storage type
        // (64-bit counters so nnz * e_size cannot overflow a narrow idx_t)
        L.vals = new data_t [L.row_ptr[nrows] * e_size];
        U.vals = new data_t [U.row_ptr[nrows] * e_size];
        D_inv  = new data_t [          nrows  * e_size];
        for (long long j = 0; j < (long long)L.row_ptr[nrows] * e_size; j++) {
            L.vals[j] = L_vals[j];
        }
        for (long long j = 0; j < (long long)U.row_ptr[nrows] * e_size; j++) {
            U.vals[j] = U_vals[j];
        }
        for (long long i = 0; i < (long long)nrows * e_size; i++) {
            D_inv [i] = D_vals[i]; 
        }
        delete [] L_vals; L_vals = nullptr;
        delete [] U_vals; U_vals = nullptr;
        delete [] D_vals; D_vals = nullptr;
    }
}

template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void BoxILU<idx_t, data_t, setup_t, calc_t, dof>::factorize_threshold(const CSRMatrix<idx_t, setup_t, setup_t, dof> & A)
{
    // ILUT-style block incomplete factorization of A with dual dropping:
    //  - a fill-in block is kept only if its inf-norm exceeds droptol * (inf-norm of A's row);
    //  - at most maxfil fill-in blocks are kept per row on each side of the diagonal.
    // Results: L (strictly lower part, unit block diagonal implied), U (strictly upper part),
    // and D_inv (inverses of the diagonal blocks), converted to data_t when it differs
    // from setup_t.
    constexpr int e_size = dof*dof;
    const idx_t nrows = A.nrows; assert(nrows == A.ncols);

    if (maxfil > nrows) maxfil = nrows;// clamp the fill limit (note: mutates the member)

    setup_t * D_vals = new setup_t [nrows * e_size];// inverses of the diagonal blocks

    idx_t * flag = new idx_t [nrows];// flag[c] == i  <=>  rbuf slot c holds data of row i
    for (idx_t i = 0; i < nrows; i ++) flag[i] = -1;

    setup_t * rbuf = new setup_t [nrows * e_size];// dense scratch row of blocks (row buf)
    setup_t * vbuf = new setup_t [nrows];// norms of fill-in candidates, used for truncation

    idx_t orig_lnz = 0, orig_rnz = 0;// nnz of the original pattern left/right of the diagonal
    for (idx_t i = 0; i < nrows; i ++) {
        for (idx_t p = A.row_ptr[i]; p < A.row_ptr[i + 1]; p ++) {
            if      (A.col_idx[p] < i) orig_lnz ++;
            else if (A.col_idx[p] > i) orig_rnz ++;
        }
    }

    // Worst-case storage: original pattern + maxfil fill-in per row (+ nrows slack).
    idx_t * L_rpt = new idx_t [nrows + 1]; L_rpt[0] = 0;
    idx_t * U_rpt = new idx_t [nrows + 1]; U_rpt[0] = 0;
    idx_t * L_cid = new idx_t [orig_lnz + nrows * maxfil + nrows];
    idx_t * U_cid = new idx_t [orig_rnz + nrows * maxfil + nrows];
    setup_t * L_val = new setup_t [(orig_lnz + nrows * maxfil + nrows) * e_size];
    setup_t * U_val = new setup_t [(orig_rnz + nrows * maxfil + nrows) * e_size];

    for (idx_t i = 0; i < nrows; i ++) {
        const idx_t ptrL0 = L_rpt[i], ptrU0 = U_rpt[i];// start offsets of this row
        idx_t ptrL1 = ptrL0, ptrU1 = ptrU0;// end (exclusive) of part 1: entries of A's original pattern

        flag[i] = i;
        mat_zero<setup_t, dof>(rbuf + i * e_size);

        setup_t row_norm = 0.0;// largest block inf-norm within this row of A
        for (idx_t p = A.row_ptr[i]; p < A.row_ptr[i + 1]; p ++) {
            idx_t j = A.col_idx[p];
            setup_t a_norm = matNorm_inf<setup_t, dof>(A.vals + p * e_size);// block norm
            row_norm = std::max(row_norm, a_norm);

            flag[j] = i;// mark this slot as holding data of the row being factorized
            mat_copy<setup_t, dof>(A.vals + p * e_size, rbuf + j * e_size);
            // record the column indices of the factorized row
            if      (j < i) L_cid[ptrL1 ++] = j;
            else if (j > i) U_cid[ptrU1 ++] = j;
        }
        idx_t ptrL2 = ptrL1, ptrU2 = ptrU1;// end (exclusive) of part 2: fill-in created during elimination
        idx_t ptrL  = ptrL1, ptrU  = ptrU1;
        
        my_qsort0(L_cid, ptrL0, ptrL1 - 1);// sort the L column indices in ascending order
        // printf("i %d ptrL0 %d ptrL1 %d ptrL2 %d ptrU0 %d ptrU1 %d ptrU2 %d\n", i, ptrL0, ptrL1, ptrL2, ptrU0, ptrU1, ptrU2);

        idx_t k1 = ptrL0, k2 = ptrL1;
        // k1 < ptrL1: entries of the original pattern remain to be eliminated
        // k2 < ptrL2: fill-in entries (created while eliminating the former) remain to be eliminated
        while (k1 < ptrL1 || k2 < ptrL2) {
            const idx_t k = (
                // k2==ptrL2 means no fill-in remains; when k2<ptrL2 and original entries also
                // remain, pick whichever cursor points at the smaller column index.
                // (the ?: yields an lvalue, so the chosen cursor is post-incremented)
                k2 == ptrL2 || (k1 < ptrL1 && L_cid[k1] < L_cid[k2]) ?
                    k1 : k2
            ) ++;
            const idx_t j = L_cid[k];
            //  k < ptrL1 means the entry is in A's original pattern and is always kept;
            //  otherwise it is fill-in and must exceed the drop threshold to survive
            // printf("i %d Check k %d norm %.4e anorm %.4e\n", i, k, matNorm_inf<setup_t, dof>(rbuf + j * e_size), row_norm);
            if (k < ptrL1 || matNorm_inf<setup_t, dof>(rbuf + j * e_size) > droptol * row_norm) {
                setup_t tmp_buf [e_size];
                mat_copy<setup_t, dof>(rbuf + j * e_size, tmp_buf);
                matmat_mul<setup_t, setup_t, setup_t, dof>(1.0, tmp_buf, D_vals + j * e_size, rbuf + j * e_size);

                if (k >= ptrL1)// newly created fill-in: append it to L
                    L_cid[ptrL ++] = j;
                // eliminating this left entry subtracts a multiple of row j
                for (idx_t p = U_rpt[j]; p < U_rpt[j + 1]; p ++) {
                    idx_t elim_c = U_cid[p];
                    if (flag[elim_c] == i) {// slot already holds current-row data: update in place
                        matmat_mla<setup_t, dof>(-1.0, rbuf + j * e_size, U_val + p * e_size, 1.0, rbuf + elim_c * e_size);
                    } else {// slot holds stale data from an earlier row: reset, then update
                        flag[elim_c] = i;
                        mat_zero<setup_t, dof>(rbuf + elim_c * e_size);
                        matmat_mla<setup_t, dof>(-1.0, rbuf + j * e_size, U_val + p * e_size, 1.0, rbuf + elim_c * e_size);
                        if (elim_c < i) {// elimination created new fill-in on the left of this row
                            // keep L's column indices ascending: shift larger columns right
                            // until the correct slot for the new entry (column elim_c) is found
                            for (idx_t new_p = ptrL2; ; new_p --) {
                                if (new_p == k2 || L_cid[new_p - 1] < elim_c) {
                                    L_cid[new_p] = elim_c;
                                    break;
                                }
                                L_cid[new_p] = L_cid[new_p - 1];
                            }
                            ptrL2 ++;
                        } else {// elimination created new fill-in on the right of this row
                            assert(elim_c > i);
                            U_cid[ptrU2 ++] = elim_c;
                        }
                    }
                }// U_rpt[j] ~ U_rpt[j + 1]
            }// elimination
        }
        //  ptrL holds the total number of left entries produced so far
        if (ptrL > ptrL1 + maxfil) {// left fill-in exceeds the allowed amount
            for (idx_t p = ptrL1; p < ptrL; p ++)
                vbuf  [p - ptrL1] = matNorm_inf<setup_t, dof>(rbuf + L_cid[p] * e_size);
            // my_qsort1<idx_t, setup_t, -1>(L_cid + ptrL1, vbuf, 0, ptrL - ptrL1 - 1);// old signature
            my_qsort1<idx_t, setup_t, idx_t, -1>(vbuf, L_cid + ptrL1, 0, ptrL - ptrL1 - 1);// keep only the maxfil entries with the largest norms
            ptrL = ptrL1 + maxfil;
        }
        my_qsort0(L_cid, ptrL0, ptrL - 1);// sort the L column indices in ascending order
        for (idx_t p = ptrL0; p < ptrL; p ++)// copy the factored left entries into L
            mat_copy<setup_t, dof>(rbuf + L_cid[p] * e_size, L_val + p * e_size);// L_cid[p] holds the column index
        
        // visit all newly created right-side fill-in entries
        for (idx_t p = ptrU1; p < ptrU2; p ++) {
            const idx_t j = U_cid[p];
            setup_t jnorm = matNorm_inf<setup_t, dof>(rbuf + j * e_size);
            if (jnorm > droptol * row_norm) {// above the drop threshold: keep it
                vbuf [ptrU - ptrU1] = jnorm;
                U_cid[ptrU ++     ] = j;
            }
        }
        if (ptrU > ptrU1 + maxfil) {// right fill-in exceeds the allowed amount
            // my_qsort1<idx_t, setup_t, idx_t, -1>(U_cid + ptrU1, vbuf, 0, ptrU - ptrU1 - 1);// old argument order
            my_qsort1<idx_t, setup_t, idx_t, -1>(vbuf, U_cid + ptrU1, 0, ptrU - ptrU1 - 1);// keep only the maxfil entries with the largest norms
            ptrU = ptrU1 + maxfil;
        }
        my_qsort0(U_cid, ptrU0, ptrU - 1);// sort the U column indices in ascending order
        for (idx_t p = ptrU0; p < ptrU; p ++)// copy the factored right entries into U
            mat_copy<setup_t, dof>(rbuf + U_cid[p] * e_size, U_val + p * e_size);// U_cid[p] holds the column index

        // invert the diagonal block of this row (copy first: matinv_row may not allow aliasing)
        setup_t inv_buf [e_size];
        for (int f = 0; f < e_size; f ++) inv_buf[f] = (rbuf + i * e_size)[f];
        matinv_row<setup_t, dof>(inv_buf, D_vals + i * e_size);

        L_rpt[i + 1] = ptrL;// record the row offsets of the factorized matrix
        U_rpt[i + 1] = ptrU;
    }

    // Hand the assembled arrays over to the L/U members (they take ownership).
    L.nrows   = nrows;
    L.ncols   = nrows;
    L.nnz     = L_rpt[nrows];
    L.row_ptr = L_rpt;
    L.col_idx = L_cid;
    U.nrows   = nrows;
    U.ncols   = nrows;
    U.nnz     = U_rpt[nrows];
    U.row_ptr = U_rpt;
    U.col_idx = U_cid;
    if constexpr (sizeof(setup_t) == sizeof(data_t)) {
        L.vals    = L_val;// same precision: hand the value buffers over directly
        U.vals    = U_val;
    } else {// narrower storage precision: convert, then free the setup-precision buffers
        L.vals = new data_t [L.nnz * e_size];
        U.vals = new data_t [U.nnz * e_size];
        #pragma omp parallel
        {
            #pragma omp for schedule(static) nowait
            for (idx_t i = 0; i < L.nnz * e_size; i ++)
                L.vals[i] = L_val[i];
            #pragma omp for schedule(static) nowait
            for (idx_t i = 0; i < U.nnz * e_size; i ++)
                U.vals[i] = U_val[i];
        }
        delete [] L_val; L_val = nullptr;
        delete [] U_val; U_val = nullptr;
    }
    if constexpr (sizeof(setup_t) == sizeof(data_t)) {
        D_inv = D_vals;
    } else {
        D_inv = new data_t [nrows * e_size];
        #pragma omp parallel for schedule(static)
        for (idx_t i = 0; i < nrows * e_size; i ++)
            D_inv[i] = D_vals[i];
        delete [] D_vals; D_vals = nullptr;
    }
    delete [] flag; delete [] rbuf; delete[] vbuf;
}

// #define PROFILE
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void BoxILU<idx_t, data_t, setup_t, calc_t, dof>::Mult_ipl(const par_Vector<idx_t, calc_t, dof> & input,
    par_Vector<idx_t, calc_t, dof> & output) const
{
    // Apply the ILU preconditioner.
    //  - zero_guess: output <- U \ (L \ input) via forward + backward substitution.
    //  - otherwise : one defect-correction step, output += U \ (L \ (input - A*output)).
    // When PROFILE is defined, the zero-guess path instead times the two triangular
    // solves and reports their effective memory bandwidth on rank 0.
#ifdef PROFILE
    // Estimated bytes streamed per triangular solve (vectors + CSR metadata + values).
    double L_mem_size = (
        (double)   L.nrows      * 2 * dof * sizeof(calc_t) // vectors
        +(double) (L.nrows + 1) *           sizeof(idx_t) // row pointers
        +(double) (L.nnz      ) *           sizeof(idx_t)
        +(double) (L.nnz      ) * dof*dof * sizeof(data_t)
    ) / (1024*1024*1024);// GB
    double U_mem_size = (
        (double)   U.nrows      * 2 * dof * sizeof(calc_t) // vectors
        +(double) (U.nrows + 1) *           sizeof(idx_t) // row pointers
        +(double) (U.nnz      ) *           sizeof(idx_t)
        +(double) (U.nnz + U.nrows) * dof*dof * sizeof(data_t)// + nrows for the main diagonal
    ) / (1024*1024*1024);// GB
    double t_L = 0.0, t_U = 0.0;
    // renamed from __warm_cnt/__perf_cnt: identifiers with double underscores are reserved
    int warm_cnt = 3, perf_cnt = 10;
#endif
    if (this->zero_guess) {
#ifdef PROFILE
        for (int rep = 0; rep < warm_cnt; rep ++)
        forward ( input.local_vector->data, aux_array);
        t_L -= wall_time();
        for (int rep = 0; rep < perf_cnt; rep ++)
        forward ( input.local_vector->data, aux_array);
        t_L += wall_time();
        
        for (int rep = 0; rep < warm_cnt; rep ++)
        backward(aux_array, output.local_vector->data);
        t_U -= wall_time();
        for (int rep = 0; rep < perf_cnt; rep ++)
        backward(aux_array, output.local_vector->data);
        t_U += wall_time();
        int my_pid; MPI_Comm_rank(input.comm, & my_pid);
        if (my_pid == 0) {
            double t_single = t_L / perf_cnt;
            double effectBW = L_mem_size / t_single;
            printf("perf_sptrsv(F) %.5f s per count (num %.2f) mem size %.2f GB %.2f GB/s\n", t_single,
                (float)L.nnz/L.nrows, L_mem_size, effectBW);
            t_single = t_U / perf_cnt;
            effectBW = U_mem_size / t_single;
            printf("perf_sptrsv(B) %.5f s per count (num %.2f) mem size %.2f GB %.2f GB/s\n", t_single,
                (float)U.nnz/U.nrows, U_mem_size, effectBW);
        } 
#else
        // zero initial guess: run the forward and backward substitutions directly
        forward ( input.local_vector->data, output.local_vector->data);
        backward(output.local_vector->data, output.local_vector->data);
#endif
    }
    else {
        par_Vector<idx_t, calc_t, dof> glb_resi(input);
        // resi <- A*x
        this->A_calc->Mult(output, glb_resi, false);
        vec_add(input, (calc_t) -1.0, glb_resi, glb_resi);// resi <- input - A*x
        // run forward/backward substitution on the residual, in place
        forward (glb_resi.local_vector->data, glb_resi.local_vector->data);
        backward(glb_resi.local_vector->data, glb_resi.local_vector->data);
        vec_add<idx_t, calc_t, calc_t, dof>(output, (calc_t) 1.0, glb_resi, output);
    }
}

template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void BoxILU<idx_t, data_t, setup_t, calc_t, dof>::forward(const calc_t * rhs, calc_t * sol) const
{
    // Forward substitution sol <- L \ rhs, where L is the stored strictly-lower factor
    // with an implicit unit block diagonal (no division/D_inv here; backward applies it).
    // Row i reads sol only at columns < i (see the serial-path asserts), so in-place
    // calls with rhs == sol are safe.
    if (nthreads > 1) { assert(TDG_Ls);
        // One completion flag per (level, thread) pair, for point-to-point synchronization
        // between the per-thread task graphs.
        // NOTE(review): sized with TDG_Ls[0].num_levs — assumes every thread's graph has
        // the same number of levels; confirm where TDG_Ls is constructed.
        bool * flags = new bool [nthreads * TDG_Ls[0].num_levs];// flags: outer dim = level, inner dim = thread
        #pragma omp parallel
        {
            int nt  = omp_get_num_threads(); assert(nt == nthreads);
            int tid = omp_get_thread_num();
            const TaskDepGraph<idx_t> & TDG = TDG_Ls[tid];
            const idx_t nlevs = TDG.num_levs;
            // Clear this thread's share of the flags. The indexing here is transposed
            // (thread-major) relative to the level-major accesses below; with equal nlevs
            // on every thread it still zeroes each element of the array exactly once.
            for (idx_t ilev = 0; ilev < nlevs; ilev ++)// initialize the flags
                flags[tid * nlevs + ilev] = false;
            #pragma omp barrier // wait until every thread finished initializing its flags
            for (idx_t ilev = 0; ilev < nlevs; ilev ++) {
                for (idx_t _dep  = TDG.dep_rpt[ilev]; _dep  < TDG.dep_rpt[ilev + 1]; _dep ++) {// wait for dependencies to complete
                    const SuperTaskDep<idx_t> dep = TDG.deps[_dep];
                    const idx_t key = dep.dep_lev * nthreads + dep.dep_tid;
                    // spin until the producer publishes (acquire pairs with the release store below)
                    while ( __atomic_load_n(& flags[key], __ATOMIC_ACQUIRE) == false ) {  }// wait for dependence
                }
                for (idx_t _task = TDG.rid_rpt[ilev]; _task < TDG.rid_rpt[ilev + 1]; _task ++) {
#ifdef __MT_REORDER
                    // Rows were permuted at setup: new_i indexes L's (reordered) storage,
                    // old_i indexes the unpermuted vectors.
                    const idx_t new_i = TDG.reor_off + _task;
                    const idx_t old_i = TDG.rids[_task];
                    if constexpr (dof == 1) {// single degree of freedom
                        calc_t tmp = rhs[old_i];
                        for (idx_t p = L.row_ptr[new_i]; p < L.row_ptr[new_i + 1]; p ++) {
                            tmp -= L.vals[p] * sol[L.col_idx[p]];
                        }
                        sol[old_i] = tmp;
                    } else { static_assert(dof > 1);
                        constexpr int e_size = dof*dof;
                        calc_t tmp[dof];
                        for (int f = 0; f < dof; f++) tmp[f] = rhs[old_i * dof + f];
                        for (idx_t p = L.row_ptr[new_i]; p < L.row_ptr[new_i + 1]; p ++) {
                            // tmp -= L_block * sol_block
                            matvec_mls<idx_t, data_t, calc_t, dof>(L.vals + p * e_size, sol + L.col_idx[p] * dof, tmp);
                        }
                        for (int f = 0; f < dof; f++) sol[old_i * dof + f] = tmp[f];
                    }
#else // no reorder
                    const idx_t i = TDG.rids[_task];// row ID of task
                    if constexpr (dof == 1) {// single degree of freedom
                        calc_t tmp = rhs[i];
                        for (idx_t p = L.row_ptr[i]; p < L.row_ptr[i + 1]; p ++) {
                            tmp -= L.vals[p] * sol[L.col_idx[p]];
                        }
                        sol[i] = tmp;
                    } else { static_assert(dof > 1);
                        constexpr int e_size = dof*dof;
                        calc_t tmp[dof];
                        for (int f = 0; f < dof; f++) tmp[f] = rhs[i * dof + f];
                        for (idx_t p = L.row_ptr[i]; p < L.row_ptr[i + 1]; p ++) {
                            // tmp -= L_block * sol_block
                            matvec_mls<idx_t, data_t, calc_t, dof>(L.vals + p * e_size, sol + L.col_idx[p] * dof, tmp);
                        }
                        for (int f = 0; f < dof; f++) sol[i * dof + f] = tmp[f];
                    }
#endif
                }// _task loop
                // publish completion of this level (release pairs with the acquire load above)
                __atomic_store_n(& flags[ilev * nthreads + tid], true, __ATOMIC_RELEASE);
            }// level loop
        }// omp region
        delete [] flags;
    }
    else { assert(nthreads == 1);
        // Serial path: plain row-by-row forward substitution.
        if constexpr (dof == 1) {
            for (idx_t i = 0; i < L.nrows; i++) {
                calc_t tmp = rhs[i];
                for (idx_t p = L.row_ptr[i]; p < L.row_ptr[i + 1]; p++) { assert(L.col_idx[p] < i);
                    tmp -= L.vals[p] * sol[L.col_idx[p]];
                }
                sol[i] = tmp;
            }
        } else { static_assert(dof > 1);
            constexpr int e_size = dof*dof;
            for (idx_t i = 0; i < L.nrows; i++) {
                calc_t tmp[dof];
                for (int f = 0; f < dof; f++) tmp[f] = rhs[i * dof + f];
                for (idx_t p = L.row_ptr[i]; p < L.row_ptr[i + 1]; p++) { assert(L.col_idx[p] < i);
                    matvec_mls<idx_t, data_t, calc_t, dof>(L.vals + p * e_size, sol + L.col_idx[p] * dof, tmp);
                }
                for (int f = 0; f < dof; f++) sol[i * dof + f] = tmp[f];
            }
        }
    }// serial path
}

template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void BoxILU<idx_t, data_t, setup_t, calc_t, dof>::backward(const calc_t * rhs, calc_t * sol) const
{
    // Backward substitution sol <- U \ rhs: for each row, subtract the stored strictly-upper
    // contributions, then multiply by the precomputed diagonal-block inverse D_inv.
    // Row i reads sol only at columns > i (see the asserts), so rhs == sol is safe.
    if (nthreads > 1) { assert(TDG_Us);
        // One completion flag per (level, thread) pair, for point-to-point synchronization
        // between the per-thread task graphs.
        // NOTE(review): sized with TDG_Us[0].num_levs — assumes every thread's graph has
        // the same number of levels; confirm where TDG_Us is constructed.
        bool * flags = new bool [nthreads * TDG_Us[0].num_levs];// flags: outer dim = level, inner dim = thread
        #pragma omp parallel
        {
            int nt  = omp_get_num_threads(); assert(nt == nthreads);
            int tid = omp_get_thread_num();
            const TaskDepGraph<idx_t> & TDG = TDG_Us[tid];
            const idx_t nlevs = TDG.num_levs;
            // Clear this thread's share of the flags (transposed thread-major indexing;
            // with equal nlevs per thread this covers the whole array exactly once).
            for (idx_t ilev = 0; ilev < nlevs; ilev ++)// initialize the flags
                flags[tid * nlevs + ilev] = false;
            #pragma omp barrier // wait until every thread finished initializing its flags
            for (idx_t ilev = 0; ilev < nlevs; ilev ++) {
                for (idx_t _dep  = TDG.dep_rpt[ilev]; _dep  < TDG.dep_rpt[ilev + 1]; _dep ++) {// wait for dependencies to complete
                    const SuperTaskDep<idx_t> dep = TDG.deps[_dep];
                    const idx_t key = dep.dep_lev * nthreads + dep.dep_tid;
                    // spin until the producer publishes (acquire pairs with the release store below)
                    while ( __atomic_load_n(& flags[key], __ATOMIC_ACQUIRE) == false ) {  }// wait for dependence
                }
                for (idx_t _task = TDG.rid_rpt[ilev]; _task < TDG.rid_rpt[ilev + 1]; _task ++) {
#ifdef __MT_REORDER
                    // Rows were permuted at setup: new_i indexes U's (reordered) storage,
                    // old_i indexes the unpermuted vectors.
                    const idx_t new_i = TDG.reor_off + _task;
                    const idx_t old_i = TDG.rids[_task];
                    if constexpr (dof == 1) {// single degree of freedom
                        calc_t tmp = rhs[old_i];
                        const idx_t offset = U.row_ptr[new_i]; 
                        const idx_t * u_cids = & U.col_idx[offset];
                        const data_t* u_vals = & U.vals   [offset];
                        const idx_t num = U.row_ptr[new_i + 1] - offset;
                        for (idx_t p = 0; p < num; p++) { assert(u_cids[p] > old_i);
                            tmp -= u_vals[p] * sol[u_cids[p]];
                        }
                        sol[old_i] = tmp * D_inv[new_i];
                    } else { static_assert(dof > 1);
                        constexpr int e_size = dof*dof;
                        calc_t tmp[dof];
                        for (int f = 0; f < dof; f++) tmp[f] = rhs[old_i * dof + f];
                        const idx_t offset = U.row_ptr[new_i]; 
                        const idx_t * u_cids = & U.col_idx[offset           ];
                        const data_t* u_vals = & U.vals   [offset * e_size];
                        const idx_t num = U.row_ptr[new_i + 1] - offset;
                        for (idx_t p = 0; p < num; p++) { assert(u_cids[p] > old_i);
                            // tmp -= U_block * sol_block
                            matvec_mls<idx_t, data_t, calc_t, dof>(u_vals + p * e_size, sol + u_cids[p] * dof, tmp);
                        }
                        // sol_block <- D_inv_block * tmp
                        matvec_mul<idx_t, data_t, calc_t, dof>(D_inv + new_i * e_size, tmp, sol + old_i * dof, 1.0);
                    }
#else // no reorder
                    const idx_t i = TDG.rids[_task];// row ID of task
                    if constexpr (dof == 1) {// single degree of freedom
                        calc_t tmp = rhs[i];
                        const idx_t offset = U.row_ptr[i]; 
                        const idx_t * u_cids = & U.col_idx[offset];
                        const data_t* u_vals = & U.vals   [offset];
                        const idx_t num = U.row_ptr[i + 1] - offset;
                        for (idx_t p = 0; p < num; p++) { assert(u_cids[p] > i);
                            tmp -= u_vals[p] * sol[u_cids[p]];
                        }
                        sol[i] = tmp * D_inv[i];
                    } else { static_assert(dof > 1);
                        constexpr int e_size = dof*dof;
                        calc_t tmp[dof];
                        for (int f = 0; f < dof; f++) tmp[f] = rhs[i * dof + f];
                        const idx_t offset = U.row_ptr[i]; 
                        const idx_t * u_cids = & U.col_idx[offset         ];
                        const data_t* u_vals = & U.vals   [offset * e_size];
                        const idx_t num = U.row_ptr[i + 1] - offset;
                        for (idx_t p = 0; p < num; p++) { assert(u_cids[p] > i);
                            // tmp -= U_block * sol_block
                            matvec_mls<idx_t, data_t, calc_t, dof>(u_vals + p * e_size, sol + u_cids[p] * dof, tmp);
                        }
                        // sol_block <- D_inv_block * tmp
                        matvec_mul<idx_t, data_t, calc_t, dof>(D_inv + i * e_size, tmp, sol + i * dof, 1.0);
                    }
#endif
                }// _task loop
                // publish completion of this level (release pairs with the acquire load above)
                __atomic_store_n(& flags[ilev * nthreads + tid], true, __ATOMIC_RELEASE);
            }// level loop
        }// omp region
        delete [] flags;
    }
    else { assert(nthreads == 1);
        // Serial path: row-by-row backward substitution from the last row up.
        if constexpr (dof == 1) {
            for (idx_t i = U.nrows - 1; i >= 0; i--) {
                calc_t tmp = rhs[i];
                const idx_t offset = U.row_ptr[i]; 
                const idx_t * u_cids = & U.col_idx[offset];
                const data_t* u_vals = & U.vals   [offset];
                const idx_t num = U.row_ptr[i + 1] - offset;
                for (idx_t p = 0; p < num; p++) { assert(u_cids[p] > i);
                    tmp -= u_vals[p] * sol[u_cids[p]];
                }
                sol[i] = tmp * D_inv[i];
            }
        } else { static_assert(dof > 1);
            constexpr int e_size = dof*dof;
            for (idx_t i = U.nrows - 1; i >= 0; i--) {
                calc_t tmp[dof];
                for (int f = 0; f < dof; f++) tmp[f] = rhs[i * dof + f];
                const idx_t offset = U.row_ptr[i]; 
                const idx_t * u_cids = & U.col_idx[offset         ];
                const data_t* u_vals = & U.vals   [offset * e_size];
                const idx_t num = U.row_ptr[i + 1] - offset;
                for (idx_t p = 0; p < num; p++) { assert(u_cids[p] > i);
                    matvec_mls<idx_t, data_t, calc_t, dof>(u_vals + p * e_size, sol + u_cids[p] * dof, tmp);
                }
                matvec_mul<idx_t, data_t, calc_t, dof>(D_inv + i * e_size, tmp, sol + i * dof, 1.0);
            }
        }
    }// serial path
}

// Explicit instantiations: BoxILU<idx_t, data_t, setup_t, calc_t, dof>.
// data_t = storage precision of the factors, setup_t = factorization precision,
// calc_t = application (triangular solve) precision; dof = block size 1..4.

// full double precision
template class BoxILU<int, double, double, double, 1>;
template class BoxILU<int, double, double, double, 2>;
template class BoxILU<int, double, double, double, 3>;
template class BoxILU<int, double, double, double, 4>;

// double setup/calc, single-precision storage
template class BoxILU<int, float , double, double, 1>;
template class BoxILU<int, float , double, double, 2>;
template class BoxILU<int, float , double, double, 3>;
template class BoxILU<int, float , double, double, 4>;

// double setup, single-precision storage and application
template class BoxILU<int, float , double, float , 1>;
template class BoxILU<int, float , double, float , 2>;
template class BoxILU<int, float , double, float , 3>;
template class BoxILU<int, float , double, float , 4>;

// full single precision
template class BoxILU<int, float , float , float , 1>;
template class BoxILU<int, float , float , float , 2>;
template class BoxILU<int, float , float , float , 3>;
template class BoxILU<int, float , float , float , 4>;
#ifdef USE_FP16
// half-precision storage variants (require __fp16 support)
template class BoxILU<int, __fp16, double, float , 1>;
template class BoxILU<int, __fp16, double, float , 2>;
template class BoxILU<int, __fp16, double, float , 3>;
template class BoxILU<int, __fp16, double, float , 4>;

template class BoxILU<int, __fp16, float , float , 1>;
template class BoxILU<int, __fp16, float , float , 2>;
template class BoxILU<int, __fp16, float , float , 3>;
template class BoxILU<int, __fp16, float , float , 4>;
#endif