#include "MG.hpp"
#include "point_Jacobi.hpp"
#include "point_GS.hpp"
#include "box_ILU.hpp"
#include "global_LU.hpp"

#include "coarsen.hpp"
#include "prolong.hpp"
#include "parcsr_matop.hpp"

template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
UnstructMG<idx_t, data_t, setup_t, calc_t, dof>::UnstructMG(const std::string _config_path)
{
    // Read every multigrid control parameter from the JSON file and allocate
    // the per-level pointer arrays (all done inside ParseConfig).
    ParseConfig(_config_path);
}

template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
UnstructMG<idx_t, data_t, setup_t, calc_t, dof>::~UnstructMG()
{
    // Per-level work vectors and smoothers (entries may be nullptr; delete nullptr is a no-op).
    for (idx_t i = 0; i < num_levs; i++) {
        delete U_array[i]; delete F_array[i]; delete aux_arr[i]; delete smoother[i];
    }
    delete [] U_array; delete [] F_array; delete [] aux_arr; delete [] smoother;
    // Inter-level transfer operators: only slots [0, num_levs-1) are ever filled,
    // the rest stay nullptr, so the full-range loop is safe.
    for (idx_t i = 0; i < num_levs; i++) {
        delete restrictor[i]; delete prolngator[i];
        delete rstr_smth [i]; delete prlg_smth [i];
    }
    delete [] restrictor; delete [] prolngator;
    delete [] rstr_smth ; delete [] prlg_smth ;

    // The finest-level operator may be borrowed from the caller; only free it when owned.
    if (own_A0) { delete A_array_high[0]; delete A_array_low[0]; }
    for (idx_t i = 1; i < num_levs; i++) {
        delete A_array_high[i]; delete A_array_low[i];
    }
    delete [] A_array_high; delete [] A_array_low;

    // Configuration arrays allocated in ParseConfig().
    delete [] relax_types;
    delete [] relax_wgts;
    delete [] apply_RP;      // fix: was leaked (allocated in ParseConfig)
    delete [] coarsen_types;
    delete [] coarsen_passes;
    delete [] coarsen_thetas;
    delete [] coarsen_smwgts;
    delete [] coarsen_rstrs; // fix: was leaked (allocated in ParseConfig)
}

template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void UnstructMG<idx_t, data_t, setup_t, calc_t, dof>::SetCycleType(const idx_t _cycle_type)
{
    // Select the cycling strategy; Mult_ipl dispatches 1 => V-cycle, 2 => W-cycle.
    cycle_type = _cycle_type;
}

// Build the entire multigrid hierarchy from the finest-level operator `op`:
//   1) coarsen level by level to produce coarse operators A, restrictions R and
//      prolongations P (in setup precision);
//   2) squeeze out the levels marked as "skipped" by folding their R/P into
//      the neighbouring ones;
//   3) optionally apply smoothed aggregation (P <- (I - w D^{-1} A) * Pbar and
//      re-form the coarse operator);
//   4) create per-level smoothers and work vectors;
//   5) convert/transfer each level's matrix into the working precisions
//      (data_t/calc_t) used at solve time.
// Must be called exactly once, after ParseConfig and before any solve.
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void UnstructMG<idx_t, data_t, setup_t, calc_t, dof>::SetOperator(const Operator<idx_t, setup_t, dof> & op)
{
    assert(this->setup_called == false);
    this->setup_time = wall_time();

    const par_CSRMatrix<idx_t, setup_t, setup_t, dof> & par_A = (const par_CSRMatrix<idx_t, setup_t, setup_t, dof> &) op;
    base_A_ptr = & par_A;

    int my_pid; MPI_Comm_rank(par_A.comm, & my_pid);
    if (my_pid == 0) {
        printf("AggMG Setup C %d R %d data_t %d calc_t %d\n", coarse_nlevs, num_levs, (int)sizeof(data_t), (int)sizeof(calc_t));
        printf("lev#%d nrows %d\n", 0, par_A.glb_nrows);
        if (num_levs == 1) printf("  Warning: only 1 layer exists.\n");
    }

    const bool need_stat = true;
    // Scratch arrays recording the matrices of the coarse levels generated during
    // setup (A_merged would record matrices after process shrinking).
    // NOTE(review): variable-length arrays are a GCC/Clang extension, not standard C++.
    par_CSRMatrix<idx_t, setup_t, setup_t, dof> * A_setup[coarse_nlevs];//, * A_merged[coarse_nlevs];
    // Restriction matrices of each coarse level generated during setup
    par_CSRMatrix   <idx_t, setup_t, setup_t, 1> * R_setup [coarse_nlevs];
    // Prolongation (interpolation) matrices of each coarse level generated during setup
    par_CSRMatrix   <idx_t, setup_t, setup_t, 1> * P_setup [coarse_nlevs];
    
    for (idx_t i = 0; i < coarse_nlevs; i++) {// pre-null every level's pointers
        R_setup[i] = nullptr; A_setup[i] = nullptr; P_setup[i] = nullptr;// A_merged[i] = nullptr;
    }
    // The starting (finest) level is supplied by the outer caller
    A_setup[0] = (par_CSRMatrix<idx_t, setup_t, setup_t, dof> *) & par_A;// cast away constness
    double t_coarsen = wall_time();
    // Walk down level by level until the requested coarsest level is generated
    for (idx_t fi = 0; fi < coarse_nlevs - 1; fi ++) {
        CoarseConfig config;
        if (coarsen_passes[fi] == 0) config.iter = 4;// take at least four sweeps
        else                         config.iter = coarsen_passes[fi];
        config.theta = coarsen_thetas[fi];
        config.smoothed_weight = coarsen_smwgts[fi];
        double t = wall_time();

        par_CSRMatrix<idx_t, setup_t, setup_t, dof> * CA = nullptr;
        Coarsen(coarsen_types[fi], A_setup[fi], R_setup[fi], CA, P_setup[fi], config);

        t = wall_time() - t;
        // if (my_pid == 0) printf("lev#%d nrows %d type %d theta %.3f smwgt %.3f time %.6f\n", fi + 1, CA->glb_nrows,
        //     coarsen_types[fi], config.theta, config.smoothed_weight, t);

        A_setup[fi + 1] = CA;        
    }
    t_coarsen = wall_time() - t_coarsen;
    double t_merge = wall_time();
    // Squeeze out the skipped levels
    if (num_levs < coarse_nlevs) {
        assert(skipped[0] == 0);// the first (finest) level must not be skipped
        assert(skipped[coarse_nlevs - 1] == 0);// the coarsest level should not be skipped
        for (idx_t i = 1, ptr_rem = 1; i < coarse_nlevs; i ++) {
            if (skipped[i] == 1) {// this level is skipped: fold its R/P into the previous ones
                // if (my_pid == 0) printf("skipped %d merge RP\n", i);
                // Note: R and P do not own the row/column partitions; A owns them
                par_CSRMatrix<idx_t, setup_t, setup_t, 1> * tmp_R = new par_CSRMatrix<idx_t, setup_t, setup_t, 1>(
                    R_setup[i - 1]->comm, R_setup[i]->rows_partition, R_setup[i - 1]->cols_partition);// R[i] <- R[i] * R[i-1]
                par_CSRMatrixMultiply(*R_setup[i], *R_setup[i - 1], *tmp_R);
                par_CSRMatrix<idx_t, setup_t, setup_t, 1> * tmp_P = new par_CSRMatrix<idx_t, setup_t, setup_t, 1>(
                    P_setup[i - 1]->comm, P_setup[i - 1]->rows_partition, P_setup[i]->cols_partition);// P[i] <- P[i-1] * P[i]
                par_CSRMatrixMultiply(*P_setup[i - 1], *P_setup[i], *tmp_P);

                // printf("Proc %d R row [%d, %d) col [%d, %d) P row [%d, %d) col [%d, %d)\n", my_pid,
                //     tmp_R->beg_row, tmp_R->end_row, tmp_R->beg_col, tmp_R->end_col,
                //     tmp_P->beg_row, tmp_P->end_row, tmp_P->beg_col, tmp_P->end_col );

                delete R_setup[i];
                delete P_setup[i];
                R_setup[i] = tmp_R;
                P_setup[i] = tmp_P;
            } else {// this level is kept: compact it forward into slot ptr_rem
                // if (my_pid == 0) printf("NOT skipped %d move RAP\n", i);
                if (ptr_rem != i) { assert(ptr_rem < i);
                    delete A_setup[ptr_rem    ];
                    delete R_setup[ptr_rem - 1];
                    delete P_setup[ptr_rem - 1];
                    A_setup[ptr_rem    ] = A_setup[i    ]; A_setup[i    ] = nullptr;
                    R_setup[ptr_rem - 1] = R_setup[i - 1]; R_setup[i - 1] = nullptr;
                    P_setup[ptr_rem - 1] = P_setup[i - 1]; P_setup[i - 1] = nullptr;
                } else assert(ptr_rem == i);// otherwise nothing needs to move
                ptr_rem ++;
            }
        }
    }
    t_merge = wall_time() - t_merge;

    double t_smagg = wall_time();
    // Smoothed aggregation is done here
    par_CSRMatrix<idx_t, setup_t, setup_t, dof> * A_smth [num_levs], * P_smth [num_levs], * R_smth [num_levs];
    A_smth[0] = R_smth[0] = P_smth[0] = nullptr;
    for (idx_t i = 0; i < num_levs - 1; i ++) {
        A_smth[i+1] = nullptr;
        R_smth[i] = P_smth[i] = nullptr;
        if (coarsen_smwgts[i] > 0.0) {
            if (my_pid == 0) printf("Smth Agg %d-lev wgt %.2f\n", i + 1, coarsen_smwgts[i]);
            par_CSRMatrix<idx_t, setup_t, setup_t, dof> * jac_mat = Jacobi_A((setup_t) coarsen_smwgts[i], * A_setup[i]);
            P_smth[i] = new par_CSRMatrix<idx_t, setup_t, setup_t, dof>(A_setup[i]->comm, P_setup[i]->rows_partition, P_setup[i]->cols_partition);
            par_CSRMatrixMultiply(* jac_mat, * P_setup[i], * P_smth[i]);// P <- (I - w*D^{-1}*A^F)*Pbar
            delete jac_mat;

            A_smth[i+1] = new par_CSRMatrix<idx_t, setup_t, setup_t, dof>(A_setup[i+1]->comm,
                A_setup[i+1]->rows_partition, A_setup[i+1]->rows_partition);
            assert(A_setup[i+1]->owns_row_partits == true);
            A_setup[i+1]->owns_row_partits = false;
            A_smth [i+1]->owns_row_partits = true;// transfer partition ownership
            // Intermediate product A*P for the Galerkin triple product R*(A*P)
            par_CSRMatrix<idx_t, setup_t, setup_t, dof> tmp_1(A_setup[i]->comm, A_setup[i]->rows_partition, P_setup[i]->cols_partition);// A*P
            par_CSRMatrixMultiply(* A_setup[i], * P_smth[i], tmp_1);

            if (coarsen_rstrs[i]) {
                R_smth[i] = new par_CSRMatrix<idx_t, setup_t, setup_t, dof>(A_setup[i]->comm);
                par_CSRMatrixTranspose(* P_smth[i], * R_smth[i], true);
                {// Rescale the restriction matrix so each row's sum of block norms equals 1.0
                    constexpr int e_size = dof*dof;
                    // Frobenius norm of one dof x dof block
                    auto calc_blk_norm = [] (const setup_t * val) {
                        setup_t ret = 0.0;
                        for (int f = 0; f < e_size; f++) ret += val[f] * val[f];
                        return sqrt(ret);
                    };
                    for (idx_t j = 0; j < R_smth[i]->diag.nrows; j ++) {
                        setup_t row_sum = 0.0;
                        for (idx_t p = R_smth[i]->diag.row_ptr[j]; p < R_smth[i]->diag.row_ptr[j + 1]; p ++)
                            row_sum += calc_blk_norm(R_smth[i]->diag.vals + p * e_size);
                        for (idx_t p = R_smth[i]->offd.row_ptr[j]; p < R_smth[i]->offd.row_ptr[j + 1]; p ++)
                            row_sum += calc_blk_norm(R_smth[i]->offd.vals + p * e_size);
                        // normalize
                        for (idx_t p = R_smth[i]->diag.row_ptr[j]; p < R_smth[i]->diag.row_ptr[j + 1]; p ++)
                        for (int f = 0; f < e_size; f ++)
                            R_smth[i]->diag.vals[p * e_size + f] /= row_sum;
                        for (idx_t p = R_smth[i]->offd.row_ptr[j]; p < R_smth[i]->offd.row_ptr[j + 1]; p ++)
                        for (int f = 0; f < e_size; f ++)
                            R_smth[i]->offd.vals[p * e_size + f] /= row_sum;
                    }
                }
                par_CSRMatrixMultiply(* R_smth [i], tmp_1, * A_smth[i+1]);
            }
            else {
                par_CSRMatrixMultiply(* R_setup[i], tmp_1, * A_smth[i+1]);
            }
        }
    }
    // Replace the coarse operators with their smoothed-aggregation versions
    for (idx_t i = 1; i < num_levs; i ++) {
        if (A_smth[i]) {
            delete A_setup[i]; A_setup[i] = A_smth[i];
            A_smth[i] = nullptr;
        }
    }
    t_smagg = wall_time() - t_smagg;

    double t_smoother = wall_time();

    // Per-level (local nrows, local nnz) pairs, later reduced to rank 0 for stats
    long long nrow_nnz_levs [num_levs * 2];
    for (idx_t i = 0; i < num_levs; i++) {
        nrow_nnz_levs[2*i  ] = A_setup[i]->diag.nrows;
        nrow_nnz_levs[2*i+1] = A_setup[i]->diag.nnz + A_setup[i]->offd.nnz;
        SetupSmoother(i, * A_setup[i]);
        // Create this level's vectors according to the matrix dimensions
        U_array[i] = new par_Vector<idx_t, calc_t, dof>(A_setup[i]->comm, A_setup[i]->glb_nrows,
            A_setup[i]->beg_row, A_setup[i]->end_row);
        F_array[i] = new par_Vector<idx_t, calc_t, dof>(*U_array[i]);
        aux_arr[i] = new par_Vector<idx_t, calc_t, dof>(*U_array[i]);

        U_array[i]->set_val(0.0);
        F_array[i]->set_val(0.0);
        aux_arr[i]->set_val(0.0);
    }
    t_smoother = wall_time() - t_smoother;
    
    // Truncate (precision-convert) each level's matrix into the working types
    for (idx_t i = 0; i < num_levs; i++) {
        if constexpr (sizeof(data_t) == sizeof(setup_t) && sizeof(calc_t) == sizeof(setup_t)) {
            // All precisions identical: just steal the pointers, no conversion needed
            A_array_high[i] = A_setup[i]; A_setup[i] = nullptr;
            if (i > 0) {// inter-level transfer operators exist only for i > 0
                rstr_smth   [i-1] = R_smth [i-1]; R_smth [i-1] = nullptr;
                prlg_smth   [i-1] = P_smth [i-1]; P_smth [i-1] = nullptr;
                restrictor  [i-1] = R_setup[i-1]; R_setup[i-1] = nullptr;
                prolngator  [i-1] = P_setup[i-1]; P_setup[i-1] = nullptr;
            }
        } else { static_assert(sizeof(data_t) <= sizeof(calc_t) && sizeof(calc_t) <= sizeof(setup_t));
            if (i == 0) {// copy memory: the finest-level matrix belongs to the caller
                if constexpr (sizeof(data_t) == sizeof(calc_t)) {// K64P32D32
                    par_CSRMatrix<idx_t, calc_t, calc_t, dof> * trans_A = new par_CSRMatrix<idx_t, calc_t, calc_t, dof>(A_setup[i]->comm, A_setup[i]->rows_partition, A_setup[i]->cols_partition);
                    trans_A->col_map_offd      = A_setup[i]->col_map_offd;
                    trans_A->commpkg           = A_setup[i]->commpkg;
                    trans_A->commpkg_transpose = A_setup[i]->commpkg_transpose;
                    CSRMatrixCopy<idx_t, setup_t, setup_t, calc_t, calc_t, dof>(A_setup[i]->diag, trans_A->diag);
                    CSRMatrixCopy<idx_t, setup_t, setup_t, calc_t, calc_t, dof>(A_setup[i]->offd, trans_A->offd);
                    A_array_high[i] = trans_A;
                }
                else {// K64P64D32
                    par_CSRMatrix<idx_t, data_t, calc_t, dof> * trans_A = new par_CSRMatrix<idx_t, data_t, calc_t, dof>(A_setup[i]->comm, A_setup[i]->rows_partition, A_setup[i]->cols_partition);
                    trans_A->col_map_offd      = A_setup[i]->col_map_offd;
                    trans_A->commpkg           = A_setup[i]->commpkg;
                    trans_A->commpkg_transpose = A_setup[i]->commpkg_transpose;
                    CSRMatrixCopy<idx_t, setup_t, setup_t, data_t, calc_t, dof>(A_setup[i]->diag, trans_A->diag);
                    CSRMatrixCopy<idx_t, setup_t, setup_t, data_t, calc_t, dof>(A_setup[i]->offd, trans_A->offd);
                    A_array_low [i] = trans_A;
                }
            } else {// transfer (steal) memory: coarse matrices are owned here
                if constexpr (sizeof(data_t) == sizeof(calc_t)) {// K64P32D32
                    par_CSRMatrix<idx_t, calc_t, calc_t, dof> * trans_A = nullptr;
                    par_CSRMatrixTransfer<idx_t, setup_t, setup_t, calc_t, calc_t, dof>(A_setup[i], trans_A);
                    A_array_high[i] = trans_A;
                }
                else {// K64P64D32
                    par_CSRMatrix<idx_t, data_t, calc_t, dof> * trans_A = nullptr;
                    par_CSRMatrixTransfer<idx_t, setup_t, setup_t, data_t, calc_t, dof>(A_setup[i], trans_A);
                    A_array_low [i] = trans_A;
                }
            }
            if (i > 0) {// inter-level transfer operators exist only for i > 0
                par_CSRMatrixTransfer<idx_t, setup_t, setup_t, data_t, calc_t, dof>(R_smth[i-1], rstr_smth[i-1]);
                par_CSRMatrixTransfer<idx_t, setup_t, setup_t, data_t, calc_t, dof>(P_smth[i-1], prlg_smth[i-1]);
                par_CSRMatrixTransfer<idx_t, setup_t, setup_t, data_t, calc_t, 1>(R_setup[i-1], restrictor[i-1]);
                par_CSRMatrixTransfer<idx_t, setup_t, setup_t, data_t, calc_t, 1>(P_setup[i-1], prolngator[i-1]);
            }
        }
    }
    // Release whatever setup-precision scratch matrices are still owned here;
    // level 0 is the caller's matrix and must never be deleted.
    for (idx_t i = 0; i < coarse_nlevs; i++) {
        if (i == 0 && A_setup[i] != nullptr) ;// do nothing
        else   delete A_setup[i];
        delete R_setup[i];
        delete P_setup[i];
    }

    if (my_pid == 0) printf("finish setup coarsen %.5f merge %.5f smoother %.5f\n", t_coarsen, t_merge, t_smoother);
    this->setup_called = true;
    this->setup_time = wall_time() - this->setup_time;

#ifndef NDEBUG // sanity-check restriction and prolongation correctness (kept disabled)
    // double res;
    // // 检验限制算子的正确性
    // aux_arr[0]->set_val(1.0); 
    // res = vec_dot<idx_t, calc_t, double, dof>(*(aux_arr[0]), *(aux_arr[0]));
    // if (my_pid == 0) printf("(U0, U0): %.18e\n", res);
    // for (idx_t i = 0; i < num_levs - 1; i++) {
    //     rstrResi(i);
    //     res = vec_dot<idx_t, calc_t, double, dof>(*F_array[i+1], *F_array[i+1]);
    //     if (my_pid == 0) printf("(U%d, U%d): %.18e\n", i+1, i+1, res);
    //     vec_copy(*F_array[i+1], *aux_arr[i+1]);
    // }
    // vec_copy(*F_array[num_levs - 1], *U_array[num_levs - 1]);
    // // 检验插值算子的正确性
    // for (idx_t i = num_levs - 1; i >= 1; i--) {
    //     prlgErr(i);
    //     res = vec_dot<idx_t, calc_t, double, dof>(*U_array[i-1], *U_array[i-1]);
    //     if (my_pid == 0) printf("(U%d, U%d): %.18e\n", i-1, i-1, res);
    // }
    // for (idx_t i = 0; i < num_levs; i++) {
    //     U_array[i]->set_val(0.0);
    //     F_array[i]->set_val(0.0);
    //     aux_arr[i]->set_val(0.0);
    // }
#endif
    // Reduce the per-level sizes to rank 0 and print grid/operator complexities
    if (need_stat) {
        long long nrow_nnz_glbs [num_levs * 2];
        MPI_Reduce(nrow_nnz_levs, nrow_nnz_glbs, num_levs * 2, MPI_LONG_LONG, MPI_SUM, 0, par_A.comm);
        if (my_pid == 0) {
            printf("Operator Matrix Information:\n");
            printf("lev      nrows        nnz     \n");
            printf("==============================\n");
            long long tot_nrows = 0, tot_nnz = 0;
            for (idx_t i = 0; i < num_levs; i++) {
                double rate_nrows = (i == 0) ? 0 : ((double)nrow_nnz_glbs[2*i  ]/nrow_nnz_glbs[2*i-2]);
                double rate_nnz   = (i == 0) ? 0 : ((double)nrow_nnz_glbs[2*i+1]/nrow_nnz_glbs[2*i-1]);
                printf(" %2d %10lld %12lld   %.3f  %.3f\n", i, nrow_nnz_glbs[2*i], nrow_nnz_glbs[2*i+1], rate_nrows, rate_nnz);
                tot_nrows += nrow_nnz_glbs[2*i  ];
                tot_nnz   += nrow_nnz_glbs[2*i+1];
            }
            const float complx_grid = tot_nrows / (float) nrow_nnz_glbs[0],
                        complx_oper = tot_nnz   / (float) nrow_nnz_glbs[1];
            printf("    Complexity:    grid = %.6f\n", complx_grid);
            printf("               operator = %.6f\n", complx_oper);
            fflush(stdout);
        }
    }
    // MPI_Barrier(base_A_ptr->comm);
    // MPI_Abort(base_A_ptr->comm, -1909);
}

// Create the smoother for level `ilev` according to the parsed configuration
// (relax_types / relax_wgts), hand it the level operator A, and set its
// relaxation weight. Aborts the whole MPI job on an unknown smoother name.
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void UnstructMG<idx_t, data_t, setup_t, calc_t, dof>::SetupSmoother(const idx_t ilev, const par_CSRMatrix<idx_t, setup_t, setup_t, dof> & A)
{
    assert(ilev < num_levs);
    // Select this level's smoother type from the configuration

    // Point-wise Symmetric Gauss-Seidel
    if (relax_types[ilev] == "PGS") {
        smoother[ilev] = new PointGS<idx_t, data_t, setup_t, calc_t, dof>();
    }
    // Point-wise weighted Jacobi
    else if (relax_types[ilev] == "PJ") {
        smoother[ilev] = new PointJacobi<idx_t, data_t, setup_t, calc_t, dof>(relax_wgts[ilev]);
    }
    // Box incomplete LU factorization
    else if (relax_types[ilev] == "ILU") {
        smoother[ilev] = new BoxILU<idx_t, data_t, setup_t, calc_t, dof>();
    }
    // Box ILU with thresholding (ctor arg 1 selects the variant)
    else if (relax_types[ilev] == "ILUT") {
        smoother[ilev] = new BoxILU<idx_t, data_t, setup_t, calc_t, dof>(1);
    }
    // Global LU factorization (typically only used on the coarsest level)
    else if (relax_types[ilev] == "GlbLU") {
        smoother[ilev] = new GlobalLU<idx_t, data_t, setup_t, calc_t, dof>();
    }
    else MPI_Abort(A.comm, -789);// unrecognized smoother name
    // Hand the smoother the matrix it needs for its own setup
    smoother[ilev]->SetOperator(A);
    // Set this level's relaxation weight
    smoother[ilev]->SetRelaxWeight(relax_wgts[ilev]);
    // (Removed an unused `my_pid`/MPI_Comm_rank whose only consumer was a
    // commented-out confirmation printf.)
}



#define FUSSION_OPT // fuse b/x into the level-0 arrays to avoid one vector copy
// Apply one multigrid cycle to solve A*x = b (in-place on x).
// With FUSSION_OPT the level-0 rhs/solution pointers are temporarily aliased to
// the caller's b and x (no copies); the original pointers are restored before
// returning. Without it, b/x are copied in and the result copied back out.
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void UnstructMG<idx_t, data_t, setup_t, calc_t, dof>::Mult_ipl(const par_Vector<idx_t, calc_t, dof> & b,
    par_Vector<idx_t, calc_t, dof> & x) const
{
    // Prepare the finest-level data
#ifdef FUSSION_OPT
    par_Vector<idx_t, calc_t, dof> * F0_tmp = F_array[0], * U0_tmp = U_array[0];
    F_array[0] = (par_Vector<idx_t, calc_t, dof> *) & b;// cast away constness
    U_array[0] = & x;
    if (this->zero_guess) U_array[0]->set_val(0.0);
#else
    // Finest-level preparation: the residual is just the right-hand side b
    vec_copy(b, *F_array[0]);
    if (!this->zero_guess) {
        vec_copy(x, *U_array[0]);
    }// otherwise the level-0 smoother is responsible for treating the initial guess as zero
    // Caution: if the initial guess is not zeroed but the smoother takes its
    // zero_guess==false path, stale array contents would be used and corrupt the result
#endif
    // Dispatch according to the configured cycle type
    switch (cycle_type)
    {
    case 1: V_Cycle(); break;
    case 2: W_Cycle(); break;
    default: MPI_Abort(b.comm, -9011);
    }

#ifdef FUSSION_OPT
    F_array[0] = F0_tmp;
    U_array[0] = U0_tmp;
#else
    vec_copy(*U_array[0], x);// finally copy the result out
#endif
}

template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void UnstructMG<idx_t, data_t, setup_t, calc_t, dof>::V_Cycle() const
{
    // Downward sweep: pre-smooth each level, then (except on the coarsest)
    // form the residual and restrict it to the next coarser level.
    for (idx_t lev = 0; lev < num_levs; ++lev) {
        preSmooth(lev);
        const bool has_coarser = (lev + 1 < num_levs);
        if (has_coarser) {
            calcResi(lev);  // residual into aux_arr[lev]
            // Restrict aux_arr[lev] into the coarser rhs F_array[lev+1];
            // any process-shrinking gather happens inside this step.
            rstrResi(lev);
        }
    }

    // Upward sweep: post-smooth each level, then (except on the finest)
    // prolongate the correction up to the next finer level.
    for (idx_t lev = num_levs; lev-- > 0; ) {
        postSmooth(lev);
        if (lev > 0) {
            // Interpolate U_array[lev] into aux_arr[lev-1] and add it to the
            // finer solution; any scatter happens inside this step.
            prlgErr(lev);
        }
    }
}

// mu-cycle driver in the style of hypre's BoomerAMG cycling: each level carries
// a visit counter initialized to cycle_type (2 for a W-cycle); the walk descends
// while a level still has visits left and climbs back otherwise, terminating
// when level 0 runs out of visits.
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void UnstructMG<idx_t, data_t, setup_t, calc_t, dof>::W_Cycle() const 
{
    // NOTE(review): VLA is a compiler extension; also `zero_guess` is assigned
    // below inside a const method, so it is presumably a mutable member — confirm.
    idx_t lev_counter[num_levs];
    lev_counter[0] = 1;
    for (idx_t i = 1; i < num_levs; i++) lev_counter[i] = cycle_type;

    bool Not_Finished = true;
    idx_t i = 0;

    while (Not_Finished) {
        preSmooth(i);// within a W-cycle there is no pre/post smoothing distinction

        // Decrement the control counter 
        lev_counter[i] -- ;
        // and determine which grid to visit next
        if (lev_counter[i] >= 0 && i != num_levs - 1) {// Visit coarser level next.
            calcResi(i);// Compute residual
            rstrResi(i);// Perform restriction            
            // Reset counters and cycling parameters for coarse level
            i ++;
            lev_counter[i] = std::max(lev_counter[i], cycle_type);
            this->zero_guess = true;// the coarser level solves the residual equation
        }
        else if (i != 0) {// Visit finer level next.
            // Interpolate and add correction using hypre_ParCSRMatrixMatvec.
            prlgErr(i);
            // Reset counters and cycling parameters for finer level.
            i --;
            this->zero_guess = false;
        }
        else Not_Finished = false;
    }
}

template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void UnstructMG<idx_t, data_t, setup_t, calc_t, dof>::preSmooth(const idx_t i) const 
{
    assert(i < num_levs);
    // Nothing to do if this process has no smoother on this level.
    if (smoother[i] == nullptr) return;
    // Run the configured number of pre-smoothing sweeps; only the very first
    // sweep is allowed to exploit a zero initial guess.
    for (idx_t sweep = 0; sweep < num_grid_sweeps[0]; ++sweep) {
        const bool first_with_zero_guess = this->zero_guess && sweep == 0;
        smoother[i]->Mult(*F_array[i], *U_array[i], first_with_zero_guess);
    }
}

template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void UnstructMG<idx_t, data_t, setup_t, calc_t, dof>::postSmooth(const idx_t i) const 
{
    assert(i < num_levs);
    // Nothing to do if this process has no smoother on this level.
    if (smoother[i] == nullptr) return;
    // Post-smoothing sweeps on this level's Au = f; the current U_array[i]
    // always serves as the initial guess (never zero-guess here).
    for (idx_t sweep = 0; sweep < num_grid_sweeps[1]; ++sweep)
        smoother[i]->Mult(*F_array[i], *U_array[i], false);
}

template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void UnstructMG<idx_t, data_t, setup_t, calc_t, dof>::calcResi(const idx_t i) const
{
    assert(i < num_levs);
    // Compute the post-smoothing residual into aux_arr[i] via a (-1)*A*u + 1*f
    // matvec. Exactly one of the high/low precision operators is present on a
    // participating process; a process with neither simply does nothing.
    if (A_array_high[i] != nullptr) {
        assert(A_array_low[i] == nullptr);
        A_array_high[i]->Mult(-1.0, *U_array[i], 1.0, *F_array[i], *aux_arr[i]);
    }
    else if (A_array_low[i] != nullptr) {
        A_array_low [i]->Mult(-1.0, *U_array[i], 1.0, *F_array[i], *aux_arr[i]);
    }
}

// Parallel SpMV for a scalar (1x1-block) matrix applied to a dof-vector:
// out_vec <- par_mat * in_vec, with each scalar entry multiplying all `dof`
// components of the corresponding vector block.
// Ordering is deliberate and must be preserved: halo exchange is initiated,
// the local (diag) product overlaps the communication, then the off-diagonal
// contribution is added after the halo completes.
template<typename idx_t, typename data_t, typename calc_t, int dof>
void calc_spmv(const par_CSRMatrix<idx_t, data_t, calc_t, 1> & par_mat, const par_Vector<idx_t, calc_t, dof> & in_vec,
    par_Vector<idx_t, calc_t, dof> & out_vec)
{
    int my_pid; MPI_Comm_rank(par_mat.comm, & my_pid);
    // The matrix's row/column ranges must match the output/input vector ranges.
    assert(par_mat.beg_row == out_vec.beg_row && par_mat.end_row == out_vec.end_row);
    assert(par_mat.beg_col ==  in_vec.beg_row && par_mat.end_col ==  in_vec.end_row);

    calc_t * offd_x = nullptr, * send_buf = nullptr;
    if (par_mat.commpkg) {
        // Start the (asynchronous) halo update for remote vector entries.
        offd_x = new calc_t [par_mat.offd.ncols * dof];
        send_buf = new calc_t [par_mat.commpkg->send_map_starts[par_mat.commpkg->num_sends] * dof];
        par_mat.commpkg->init_update_halo(dof, in_vec.local_vector->data, send_buf, offd_x);
    }

    // Local (diagonal-block) part — overlaps the halo communication above
    #pragma omp parallel for schedule(static)
    for (idx_t i = 0; i < par_mat.end_row - par_mat.beg_row; i ++) {
        calc_t tmp [dof];
        for (int f = 0; f < dof; f++) tmp[f] = 0.0;

        for (idx_t p = par_mat.diag.row_ptr[i]; p < par_mat.diag.row_ptr[i + 1]; p++) {
            const calc_t v = par_mat.diag.vals[p];
            const  idx_t j = par_mat.diag.col_idx[p];
            for (int f = 0; f < dof; f++)
                tmp[f] += v * in_vec.local_vector->data[j * dof + f];
        }

        // Plain assignment here (not +=) also zero-initializes the output row.
        for (int f = 0; f < dof; f++)
            out_vec.local_vector->data[i * dof + f] = tmp[f];
    }

    if (par_mat.commpkg) {
        par_mat.commpkg->wait_update_halo();
        // Off-diagonal part, accumulated on top of the diag result
        for (idx_t i = 0; i < par_mat.end_row - par_mat.beg_row; i ++) {
            for (idx_t p = par_mat.offd.row_ptr[i]; p < par_mat.offd.row_ptr[i + 1]; p++) {
                const calc_t v = par_mat.offd.vals[p];
                const  idx_t j = par_mat.offd.col_idx[p];
                for (int f = 0; f < dof; f++)
                    out_vec.local_vector->data[i * dof + f] += v * offd_x[j * dof + f];
            }
        }
        delete [] offd_x;
        delete [] send_buf;
    }
}

template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void UnstructMG<idx_t, data_t, setup_t, calc_t, dof>::rstrResi(const idx_t i) const 
{
    // Restrict the residual held in aux_arr[i] down to the coarser rhs
    // F_array[i+1] (equivalent to R_ops[i]->apply(aux_arr[i], F_array[i+1])).
    assert(i < num_levs);
    const bool use_smoothed = apply_RP[i] && rstr_smth[i] != nullptr;
    if (use_smoothed) {
        // Smoothed-aggregation (block) restriction operator
        rstr_smth[i]->Mult(*aux_arr[i], *F_array[i + 1], false);
    } else {
        assert(restrictor[i]);
        // Plain scalar restriction matrix applied via SpMV
        calc_spmv<idx_t, data_t, calc_t, dof>(*restrictor[i], *aux_arr[i], *F_array[i + 1]);
    }
}

template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void UnstructMG<idx_t, data_t, setup_t, calc_t, dof>::prlgErr(const idx_t i) const
{
    assert(i >= 1);
    // Interpolate the coarse correction U_array[i] up into aux_arr[i-1] ...
    par_Vector<idx_t, calc_t, dof> & corr = *aux_arr[i - 1];
    if (apply_RP[i] && prlg_smth[i - 1])
        prlg_smth[i - 1]->Mult(*U_array[i], corr, false);
    else
        calc_spmv<idx_t, data_t, calc_t, dof>(*prolngator[i - 1], *U_array[i], corr);
    // ... then add the correction onto the finer-level solution in place.
    vec_add(*U_array[i-1], (calc_t) 1.0, corr, *U_array[i-1]);
}

template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void UnstructMG<idx_t, data_t, setup_t, calc_t, dof>::truncate()
{
    // Intentionally empty: per-level precision conversion already happens inside
    // SetOperator(); presumably this hook is reserved for future truncation logic.
}

#include "json/json.h"
#include <fstream>

// Parse the JSON configuration file: level counts (and which coarsening levels
// are skipped), cycle type and sweep counts, per-level smoother settings,
// isolated wells, and per-coarsening parameters; then allocate and null out all
// per-level pointer arrays used by SetOperator().
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void UnstructMG<idx_t, data_t, setup_t, calc_t, dof>::ParseConfig(const std::string config_file)
{
    int my_pid; MPI_Comm_rank(MPI_COMM_WORLD, & my_pid);
    int num_procs; MPI_Comm_size(MPI_COMM_WORLD, & num_procs);

    if (my_pid == 0) printf("reading from %s\n", config_file.c_str());
    std::ifstream f(config_file);
    Json::Reader reader;
    Json::Value data;
    bool ret = reader.parse(f, data); assert(ret == true);

    // Read the level-count information
    coarse_nlevs = data["C_nlevs"].asInt();
    num_levs = data["num_levs"].asInt(); assert(num_levs <= coarse_nlevs);
    if (coarse_nlevs != num_levs) {
        // resize() value-initializes, so the last entry (never written by the
        // loop below) stays 0 — the coarsest level is implicitly not skipped.
        skipped.resize(coarse_nlevs);
        idx_t skip_cnt = 0;
        for (idx_t i = 0; i < coarse_nlevs - 1; i++) {
            skipped[i] = data["skipped"][i].asInt();
            skip_cnt += skipped[i];
        }
        assert(num_levs + skip_cnt == coarse_nlevs);
    }

    // Read the cycle information
    // NOTE(review): cycle_type is left unset if "cycle" is neither "V" nor "W";
    // Mult_ipl would then abort unless SetCycleType is called — confirm intended.
    std::string cyc = data["cycle"].asString();
    if      (cyc == "V") cycle_type = 1;
    else if (cyc == "W") cycle_type = 2;
    num_grid_sweeps[0] = data["sweep"][0].asInt();
    num_grid_sweeps[1] = data["sweep"][1].asInt();

    if (my_pid == 0) {
        printf("Pre %d Post %d\n", num_grid_sweeps[0], num_grid_sweeps[1]);
        printf("%s-Cycle num_levs %d\n", cyc.c_str(), num_levs);
    }

    // Read the per-level smoothing information
    relax_types = new std::string [num_levs];
    relax_wgts  = new calc_t      [num_levs];
    apply_RP    = new idx_t       [num_levs];
    for (idx_t i = 0; i < num_levs; i++) {
        relax_types[i] = data["smoother"][i].asString();
        relax_wgts [i] = data["weight"][i].asDouble();
    }
    for (idx_t i = 0; i < num_levs; i++) {
        apply_RP[i] = data["apply_RP"][i].asInt();
    }
    

    // Isolated wells (re-read on every parse)
    if (iso_wells.empty() == false) iso_wells.clear();
    const idx_t num_isos = data["num_isos"].asInt();
    for (idx_t iw = 0; iw < num_isos; iw ++) iso_wells.insert(data["iso_wells"][iw].asInt());
    if (my_pid == 0) {
        printf("Isolated Wells: ");
        for (auto it = iso_wells.begin(); it != iso_wells.end(); it ++) printf("%d ", *it);
        printf("\n");
    }

    // Per-coarsening (between-level) parameters
    coarsen_types = new idx_t [coarse_nlevs - 1];
    coarsen_passes= new idx_t  [coarse_nlevs - 1];
    coarsen_thetas= new setup_t[coarse_nlevs - 1];
    coarsen_smwgts= new setup_t[coarse_nlevs - 1];
    coarsen_rstrs = new idx_t  [coarse_nlevs - 1];
    for (idx_t i = 0; i < coarse_nlevs - 1; i++) {
        coarsen_types[i] = data["coarsen"][i].asInt();
        // NOTE(review): pass counts are read from the "theta" key (as int),
        // the same key read as double on the next line — this looks like a
        // copy-paste slip; confirm the intended JSON key for coarsen_passes.
        coarsen_passes[i]= data["theta"][i].asInt();
        coarsen_thetas[i]= data["theta"][i].asDouble();
        coarsen_smwgts[i]= data["smwgt"][i].asDouble();
        coarsen_rstrs [i]= data["RAP_rstr"][i].asInt();
    }

    // After reading the control parameters, allocate the per-level arrays
    // (only num_levs-1 transfer-operator slots are ever used, but the arrays
    // are sized num_levs and null-filled, so cleanup can loop the full range).
    A_array_high = new par_CSRMatrix<idx_t, calc_t, calc_t, dof> * [num_levs];
    A_array_low  = new par_CSRMatrix<idx_t, data_t, calc_t, dof> * [num_levs];

    U_array = new par_Vector<idx_t, calc_t, dof> * [num_levs];
    F_array = new par_Vector<idx_t, calc_t, dof> * [num_levs];
    aux_arr = new par_Vector<idx_t, calc_t, dof> * [num_levs];
    smoother= new Solver<idx_t, setup_t, calc_t, dof> * [num_levs];
    restrictor = new par_CSRMatrix<idx_t, data_t, calc_t, 1> * [num_levs];
    prolngator = new par_CSRMatrix<idx_t, data_t, calc_t, 1> * [num_levs];
    rstr_smth  = new par_CSRMatrix<idx_t, data_t, calc_t, dof>*[num_levs];
    prlg_smth  = new par_CSRMatrix<idx_t, data_t, calc_t, dof>*[num_levs];

    // Pre-null every second-level pointer
    for (idx_t i = 0; i < num_levs; i++) {
        A_array_high[i] = nullptr;
        A_array_low [i] = nullptr;
        smoother[i] = nullptr;
        U_array[i] = F_array[i] = aux_arr[i] = nullptr;
        restrictor[i] = nullptr; rstr_smth[i] = nullptr;
        prolngator[i] = nullptr; prlg_smth[i] = nullptr;
    }

    MPI_Barrier(MPI_COMM_WORLD);
    if (my_pid == 0) printf("Proc %d done parsing\n", my_pid);
}

template<typename idx_t, typename data_t, typename setup_t1, typename setup_t2,
    typename calc_t1, typename calc_t2, int dof>
void BorrowMG(const UnstructMG<idx_t, data_t, setup_t1, calc_t1, dof> * src,
                    UnstructMG<idx_t, data_t, setup_t2, calc_t2, dof> * & dst)
{
    /*
    // 要求dst已经按照相同的类型的config过
    assert(dst->coarse_nlevs == src->coarse_nlevs);
    assert(dst->num_levs == src->num_levs);
    const idx_t num_levs = dst->num_levs;

    dst->base_A_ptr = new par_CSRMatrix<idx_t, setup_t2, setup_t2, dof>(src->base_A_ptr->comm,
        src->base_A_ptr->rows_partition, src->base_A_ptr->cols_partition);
    dst->skipped = src->skipped;
    int my_pid; MPI_Comm_rank(dst->base_A_ptr->comm, & my_pid);
    if (my_pid == 0) printf("borrow MG %d levs\n", num_levs);
    for (idx_t ilev = 0; ilev < num_levs; ilev ++) {
        {// 借用矩阵
            assert(src->A_array_low[ilev] != nullptr);
            const par_CSRMatrix<idx_t, data_t, calc_t1, dof> * src_A = src->A_array_low[ilev];
            par_CSRMatrix<idx_t, data_t, calc_t2, dof> * & dst_A = dst->A_array_low[ilev];
            dst_A = new par_CSRMatrix<idx_t, data_t, calc_t2, dof>(src_A->comm,
                    src_A->rows_partition, src_A->cols_partition);
            dst_A->col_map_offd = src_A->col_map_offd;
            dst_A->commpkg      = src_A->commpkg;
            dst_A->diag.nrows   = src_A->diag.nrows  ;      dst_A->offd.nrows   = src_A->offd.nrows  ;
            dst_A->diag.ncols   = src_A->diag.ncols  ;      dst_A->offd.ncols   = src_A->offd.ncols  ;
            dst_A->diag.nnz     = src_A->diag.nnz    ;      dst_A->offd.nnz     = src_A->offd.nnz    ;
            dst_A->diag.row_ptr = src_A->diag.row_ptr;      dst_A->offd.row_ptr = src_A->offd.row_ptr;
            dst_A->diag.col_idx = src_A->diag.col_idx;      dst_A->offd.col_idx = src_A->offd.col_idx;
            dst_A->diag.vals    = src_A->diag.vals   ;      dst_A->offd.vals    = src_A->offd.vals   ;
        }
        if (ilev < num_levs - 1) {// 借用插值和限制矩阵
            assert(src->restrictor[ilev] != nullptr);
            assert(src->prolngator[ilev] != nullptr);
            const par_CSRMatrix<idx_t, data_t, calc_t1, 1> * src_R = src->restrictor[ilev];
            const par_CSRMatrix<idx_t, data_t, calc_t1, 1> * src_P = src->prolngator[ilev];
            par_CSRMatrix<idx_t, data_t, calc_t2, 1> * & dst_R = dst->restrictor[ilev];
            par_CSRMatrix<idx_t, data_t, calc_t2, 1> * & dst_P = dst->prolngator[ilev];
            dst_R = new par_CSRMatrix<idx_t, data_t, calc_t2, 1>(src_R->comm,
                    src_R->rows_partition, src_R->cols_partition);
            dst_P = new par_CSRMatrix<idx_t, data_t, calc_t2, 1>(src_P->comm,
                    src_P->rows_partition, src_P->cols_partition);
            dst_R->col_map_offd = src_R->col_map_offd;      dst_P->col_map_offd = src_P->col_map_offd;
            dst_R->commpkg      = src_R->commpkg;           dst_P->commpkg      = src_P->commpkg;
            dst_R->diag.nrows   = src_R->diag.nrows;        dst_P->diag.nrows   = src_P->diag.nrows;
            dst_R->diag.ncols   = src_R->diag.ncols;        dst_P->diag.ncols   = src_P->diag.ncols;
            dst_R->diag.nnz     = src_R->diag.nnz  ;        dst_P->diag.nnz     = src_P->diag.nnz  ;
            dst_R->diag.row_ptr = src_R->diag.row_ptr;      dst_P->diag.row_ptr = src_P->diag.row_ptr;
            dst_R->diag.col_idx = src_R->diag.col_idx;      dst_P->diag.col_idx = src_P->diag.col_idx;
            dst_R->diag.vals    = src_R->diag.vals;         dst_P->diag.vals    = src_P->diag.vals;
            dst_R->offd.nrows   = src_R->offd.nrows;        dst_P->offd.nrows   = src_P->offd.nrows;
            dst_R->offd.ncols   = src_R->offd.ncols;        dst_P->offd.ncols   = src_P->offd.ncols;
            dst_R->offd.nnz     = src_R->offd.nnz  ;        dst_P->offd.nnz     = src_P->offd.nnz  ;
            dst_R->offd.row_ptr = src_R->offd.row_ptr;      dst_P->offd.row_ptr = src_P->offd.row_ptr;
            dst_R->offd.col_idx = src_R->offd.col_idx;      dst_P->offd.col_idx = src_P->offd.col_idx;
            dst_R->offd.vals    = src_R->offd.vals;         dst_P->offd.vals    = src_P->offd.vals;
        }
        // 借用smoother
        assert(dst->relax_types[ilev] == src->relax_types[ilev]);
        if (dst->relax_types[ilev] == "PGS") {
            // smoother[ilev] = new PointGS<idx_t, data_t, setup_t, calc_t2, dof>();
            assert(false);
        }
        else if (dst->relax_types[ilev] == "PJ") {
            // smoother[ilev] = new PointJacobi<idx_t, data_t, setup_t, calc_t2, dof>();
            assert(false);
        }
        else if (dst->relax_types[ilev] == "ILU" || dst->relax_types[ilev] == "ILUT") {
            dst->smoother[ilev] = new BoxILU<idx_t, data_t, setup_t2, calc_t2, dof>();
            BoxILU<idx_t, data_t, setup_t2, calc_t2, dof>* dst_ilu =
                (BoxILU<idx_t, data_t, setup_t2, calc_t2, dof>*) dst->smoother[ilev];
            const BoxILU<idx_t, data_t, setup_t1, calc_t1, dof>* src_ilu =
                (const BoxILU<idx_t, data_t, setup_t1, calc_t1, dof>*) src->smoother[ilev];
            dst_ilu->weight   = src_ilu->weight;
            dst_ilu->ilu_type = src_ilu->ilu_type;
            dst_ilu->maxfil   = src_ilu->maxfil;
            dst_ilu->droptol  = src_ilu->droptol;
            dst_ilu->A_calc   = dst->A_array_low[ilev];// 借用
            dst_ilu->nthreads = src_ilu->nthreads;
            dst_ilu->TDG_Ls   = src_ilu->TDG_Ls;
            dst_ilu->TDG_Us   = src_ilu->TDG_Us;
            dst_ilu->L.nrows  = src_ilu->L.nrows;       dst_ilu->U.nrows  = src_ilu->U.nrows;
            dst_ilu->L.ncols  = src_ilu->L.ncols;       dst_ilu->U.ncols  = src_ilu->U.ncols;
            dst_ilu->L.nnz    = src_ilu->L.nnz  ;       dst_ilu->U.nnz    = src_ilu->U.nnz  ;
            dst_ilu->L.row_ptr= src_ilu->L.row_ptr;     dst_ilu->U.row_ptr= src_ilu->U.row_ptr;
            dst_ilu->L.col_idx= src_ilu->L.col_idx;     dst_ilu->U.col_idx= src_ilu->U.col_idx;
            dst_ilu->L.vals   = src_ilu->L.vals   ;     dst_ilu->U.vals   = src_ilu->U.vals   ;
            dst_ilu->D_inv    = src_ilu->D_inv;
        }
        // Boxwise LU Factorization
        else if (dst->relax_types[ilev] == "BoxLU") {
            // smoother[ilev] = new BoxLU <idx_t, data_t, setup_t, calc_t2, dof>();
            assert(false);
        }
        // Global LU Factorization (一般只用在最粗层)
        else if (dst->relax_types[ilev] == "GlbLU") {// 这个没法拷贝，只能重新setup
            // MPI_Barrier(dst->base_A_ptr->comm);
            // if (my_pid == 0) printf("bp 000\n");

            dst->smoother[ilev] = new GlobalLU<idx_t, data_t, setup_t2, calc_t2, dof>();
            GlobalLU<idx_t, data_t, setup_t2, calc_t2, dof>* dst_lu =
                (GlobalLU<idx_t, data_t, setup_t2, calc_t2, dof>*) dst->smoother[ilev];
            const GlobalLU<idx_t, data_t, setup_t1, calc_t1, dof>* src_lu =
                (const GlobalLU<idx_t, data_t, setup_t1, calc_t1, dof>*) src->smoother[ilev];
            dst_lu->root_pid = src_lu->root_pid;
            dst_lu->my_nrows = src_lu->my_nrows;
            dst_lu->glb_nrows= src_lu->glb_nrows;

            // MPI_Barrier(dst->base_A_ptr->comm);
            // if (my_pid == 0) printf("bp 111 %d %d\n", dst_lu->glb_nrows, src_lu->glb_nrows);
            
            dst_lu->displs   = src_lu->displs;
            dst_lu->recvcnts = src_lu->recvcnts;
            dst_lu->vec_buf  = new calc_t2 [dst_lu->my_nrows];
            
            int glb_pid; MPI_Comm_rank(dst_lu->A_calc->comm, & glb_pid);
            // MPI_Barrier(dst->base_A_ptr->comm);
            // if (my_pid == 0) printf("bp 222\n");
            
            if (glb_pid == 0) {
                dst_lu->global_b = new calc_t2 [dst_lu->glb_nrows];
                dst_lu->global_x = new calc_t2 [dst_lu->glb_nrows];
                dst_lu->proc_map = src_lu->proc_map;// 借用
                dst_lu->proc_row_ids = src_lu->proc_row_ids;// 借用
                // if (my_pid == 0) printf("bp 333 %d %d\n", dst_lu->glb_nrows, src_lu->glb_nrows);
                idx_t * glb_rpt = new idx_t [dst_lu->glb_nrows + 1];
                for (idx_t i = 0; i < dst_lu->glb_nrows + 1; i ++) glb_rpt[i] = src_lu->glbA_csr->row_ptr[i];
                // if (my_pid == 0) printf("bp 360\n");
                idx_t  * glb_cid = new idx_t  [glb_rpt[dst_lu->glb_nrows]];
                calc_t2* glb_val = new calc_t2[glb_rpt[dst_lu->glb_nrows]];
                for (idx_t j = 0; j < glb_rpt[dst_lu->glb_nrows]; j ++) {
                    glb_cid[j] = src_lu->glbA_csr->col_idx[j];
                    glb_val[j] = src_lu->glbA_csr->vals   [j];
                }
                // if (my_pid == 0) printf("bp 365\n");
                dst_lu->glbA_csr = new CSR_sparseMat<idx_t, calc_t2, calc_t2>(dst_lu->root_pid, dst_lu->glb_nrows,
                    glb_rpt, glb_cid, glb_val);
                bool succ;
                succ = dst_lu->glbA_csr->initA(0, CSR_sparseMat<idx_t, calc_t2, calc_t2>::PATTERN::NONSYMMETRIC); assert(succ);
                // if (my_pid == 0) printf("bp 444\n");
                succ = dst_lu->glbA_csr->initB(1, 0); assert(succ);
                succ = dst_lu->glbA_csr->initX(1, 0); assert(succ);
                // if (my_pid == 0) printf("bp 555\n");
                succ = dst_lu->glbA_csr->initSolver(omp_get_max_threads(), 1, 0); assert(succ);
                // if (my_pid == 0) printf("bp 666\n");
                dst_lu->glbA_csr->analyse(1, 0, 1, omp_get_max_threads(), 1);
                // if (my_pid == 0) printf("bp 777\n");
                dst_lu->glbA_csr->factorize(1e-4);
                // if (my_pid == 0) printf("bp 888\n");
            }
        }
        else MPI_Abort(src->base_A_ptr->comm, -789);
        // MPI_Barrier(dst->base_A_ptr->comm);
        // if (my_pid == 0) printf("done borrow lev %d\n", ilev);
        // 向量不能借，只能自己开辟
        dst->U_array[ilev] = new par_Vector<idx_t, calc_t2, dof>(src->U_array[ilev]->comm,
            src->U_array[ilev]->glb_nrows, src->U_array[ilev]->beg_row, src->U_array[ilev]->end_row);
        dst->F_array[ilev] = new par_Vector<idx_t, calc_t2, dof>(*(dst->U_array[ilev]));
        dst->aux_arr[ilev] = new par_Vector<idx_t, calc_t2, dof>(*(dst->U_array[ilev]));
    }
    */
    assert(false);
}
// Explicit instantiations of BorrowMG for the supported precision pairs, dof = 1..4.
// The typedef names are declared elsewhere (presumably MG.hpp) and appear to encode
// the template arguments (e.g. I32 = 32-bit indices) -- confirm against the header.
template void BorrowMG(const UnstructMG_I32K64P64D32_DOF1 *, UnstructMG_I32K64P32D32_DOF1 * &);
template void BorrowMG(const UnstructMG_I32K64P64D32_DOF2 *, UnstructMG_I32K64P32D32_DOF2 * &);
template void BorrowMG(const UnstructMG_I32K64P64D32_DOF3 *, UnstructMG_I32K64P32D32_DOF3 * &);
template void BorrowMG(const UnstructMG_I32K64P64D32_DOF4 *, UnstructMG_I32K64P32D32_DOF4 * &);

template void BorrowMG(const UnstructMG_I32K64P64D32_DOF1 *, UnstructMG_I32All32_DOF1 * &);
template void BorrowMG(const UnstructMG_I32K64P64D32_DOF2 *, UnstructMG_I32All32_DOF2 * &);
template void BorrowMG(const UnstructMG_I32K64P64D32_DOF3 *, UnstructMG_I32All32_DOF3 * &);
template void BorrowMG(const UnstructMG_I32K64P64D32_DOF4 *, UnstructMG_I32All32_DOF4 * &);


/**
 * @brief Detach ("return") the data structures a hierarchy previously acquired
 *        via BorrowMG(): per the disabled body below, shared pointers are
 *        nulled out so the destructor does not free memory owned by the
 *        source hierarchy (avoiding a double free).
 *
 * @param dst  hierarchy holding borrowed data; modified in place.
 *
 * NOTE(review): the implementation is currently disabled (fully commented out)
 * and the function unconditionally asserts. Under NDEBUG the assert compiles
 * away and a call silently does nothing -- confirm no caller reaches this in
 * release builds.
 */
template<typename idx_t, typename data_t, typename setup_t, typename calc_t2, int dof>
void ReturnMG(UnstructMG<idx_t, data_t, setup_t, calc_t2, dof> * & dst)
{
    /*
    int my_pid; MPI_Comm_rank(dst->base_A_ptr->comm, & my_pid);
    // MPI_Barrier(dst->base_A_ptr->comm);
    if (my_pid == 0) printf("return MG\n");
    for (idx_t ilev = 0; ilev < dst->num_levs; ilev ++) {
        // MPI_Barrier(dst->base_A_ptr->comm);
        // if (my_pid == 0) printf("bp begin ilev %d\n", ilev);
        // Return the smoother
        if (dst->relax_types[ilev] == "PGS") {
            assert(false);
        }
        else if (dst->relax_types[ilev] == "PJ") {
            assert(false);
        }
        else if (dst->relax_types[ilev] == "ILU" || dst->relax_types[ilev] == "ILUT") {
            BoxILU<idx_t, data_t, setup_t, calc_t2, dof>* dst_ilu =
                (BoxILU<idx_t, data_t, setup_t, calc_t2, dof>*) dst->smoother[ilev];
            dst_ilu->A_calc   = nullptr;
            dst_ilu->TDG_Ls   = nullptr;
            dst_ilu->TDG_Us   = nullptr;
            dst_ilu->L.row_ptr= nullptr;    dst_ilu->U.row_ptr= nullptr;
            dst_ilu->L.col_idx= nullptr;    dst_ilu->U.col_idx= nullptr;
            dst_ilu->L.vals   = nullptr;    dst_ilu->U.vals   = nullptr;
            dst_ilu->D_inv    = nullptr;
        }
        // Boxwise LU Factorization
        else if (dst->relax_types[ilev] == "BoxLU") {
            // smoother[ilev] = new BoxLU <idx_t, data_t, setup_t, calc_t2, dof>();
            assert(false);
        }
        // Global LU Factorization (usually only used on the coarsest level)
        else if (dst->relax_types[ilev] == "GlbLU") {// cannot be copied; must be set up anew
            GlobalLU<idx_t, data_t, setup_t, calc_t2, dof>* dst_lu =
                (GlobalLU<idx_t, data_t, setup_t, calc_t2, dof>*) dst->smoother[ilev];
            int glb_pid; MPI_Comm_rank( dst_lu->A_calc->comm, & glb_pid);
            dst_lu->A_calc   = nullptr;
            dst_lu->displs   = nullptr;
            dst_lu->recvcnts = nullptr;
            // dst_lu->vec_buf  = new calc_t2 [dst_lu->my_nrows];
            if (glb_pid == 0) {
                // dst_lu->global_b = new calc_t2 [dst_lu->glb_nrows];
                // dst_lu->global_x = new calc_t2 [dst_lu->glb_nrows];
                dst_lu->proc_map = nullptr;
                dst_lu->proc_row_ids = nullptr;
            }
        }
        else MPI_Abort(dst->base_A_ptr->comm, -789);

        // MPI_Barrier(dst->base_A_ptr->comm);
        // if (my_pid == 0) printf("bp 000\n");
        if (ilev < dst->num_levs - 1) {// Detach the borrowed restriction and prolongation matrices
            par_CSRMatrix<idx_t, data_t, calc_t2, 1> * dst_R = dst->restrictor[ilev]; assert(dst_R != nullptr);
            par_CSRMatrix<idx_t, data_t, calc_t2, 1> * dst_P = dst->prolngator[ilev]; assert(dst_P != nullptr);
            dst_R->col_map_offd = nullptr;      dst_P->col_map_offd = nullptr;
            dst_R->commpkg      = nullptr;      dst_P->commpkg      = nullptr;
            dst_R->diag.row_ptr = nullptr;      dst_P->diag.row_ptr = nullptr;
            dst_R->diag.col_idx = nullptr;      dst_P->diag.col_idx = nullptr;
            dst_R->diag.vals    = nullptr;      dst_P->diag.vals    = nullptr;
            dst_R->offd.row_ptr = nullptr;      dst_P->offd.row_ptr = nullptr;
            dst_R->offd.col_idx = nullptr;      dst_P->offd.col_idx = nullptr;
            dst_R->offd.vals    = nullptr;      dst_P->offd.vals    = nullptr;
        }
        // MPI_Barrier(dst->base_A_ptr->comm);
        // if (my_pid == 0) printf("bp 111\n");

        {// Detach the borrowed level matrix
            par_CSRMatrix<idx_t, data_t, calc_t2, dof> * dst_A = dst->A_array_low[ilev]; assert(dst_A != nullptr);
            dst_A->col_map_offd = nullptr;
            dst_A->commpkg      = nullptr;
            dst_A->diag.row_ptr = nullptr;
            dst_A->diag.col_idx = nullptr;
            dst_A->diag.vals    = nullptr;
            dst_A->offd.row_ptr = nullptr;
            dst_A->offd.col_idx = nullptr;
            dst_A->offd.vals    = nullptr;
        }
        // MPI_Barrier(dst->base_A_ptr->comm);
        // if (my_pid == 0) printf("done return lev %d\n", ilev);
    }
    delete dst->base_A_ptr;
    dst->base_A_ptr = nullptr;
    */
    (void) dst;    // silence -Wunused-parameter while the body is disabled
    assert(false); // not implemented; compiles away under NDEBUG
}
// Explicit instantiations of ReturnMG, dof = 1..4, for the same destination
// types that the BorrowMG instantiations above produce (typedefs declared
// elsewhere, presumably MG.hpp).
template void ReturnMG(UnstructMG_I32K64P32D32_DOF1 * &);
template void ReturnMG(UnstructMG_I32K64P32D32_DOF2 * &);
template void ReturnMG(UnstructMG_I32K64P32D32_DOF3 * &);
template void ReturnMG(UnstructMG_I32K64P32D32_DOF4 * &);

// Explicit instantiations of the UnstructMG class template for all supported
// precision combinations; parameter order is <idx_t, data_t, setup_t, calc_t, dof>
// (see the template declaration above), with dof = 1..4 in each group.

// All-double precision.
template class UnstructMG<int, double, double, double, 1>;
template class UnstructMG<int, double, double, double, 2>;
template class UnstructMG<int, double, double, double, 3>;
template class UnstructMG<int, double, double, double, 4>;

// data_t = float; setup and compute in double.
template class UnstructMG<int, float , double, double, 1>;
template class UnstructMG<int, float , double, double, 2>;
template class UnstructMG<int, float , double, double, 3>;
template class UnstructMG<int, float , double, double, 4>;

// data_t = float; setup in double, compute in float.
template class UnstructMG<int, float , double, float , 1>;
template class UnstructMG<int, float , double, float , 2>;
template class UnstructMG<int, float , double, float , 3>;
template class UnstructMG<int, float , double, float , 4>;

// All-float precision.
template class UnstructMG<int, float , float , float , 1>;
template class UnstructMG<int, float , float , float , 2>;
template class UnstructMG<int, float , float , float , 3>;
template class UnstructMG<int, float , float , float , 4>;
#ifdef USE_FP16
// Half-precision (__fp16) matrix storage, only when USE_FP16 is defined:
// setup in double, compute in float.
template class UnstructMG<int, __fp16, double, float , 1>;
template class UnstructMG<int, __fp16, double, float , 2>;
template class UnstructMG<int, __fp16, double, float , 3>;
template class UnstructMG<int, __fp16, double, float , 4>;

// Setup and compute both in float.
template class UnstructMG<int, __fp16, float , float , 1>;
template class UnstructMG<int, __fp16, float , float , 2>;
template class UnstructMG<int, __fp16, float , float , 3>;
template class UnstructMG<int, __fp16, float , float , 4>;
#endif
