#include "AggMG.hpp"
#include "json.h"
// #include "json/json.h"
#include "coarsen.hpp"
#include "denseLU.hpp"
#include "ilu.hpp"
#include <algorithm>
#include <fstream>
#include <iostream>
#include <vector>

// Construct the multigrid driver by parsing the JSON configuration file.
// Only control parameters and empty per-level arrays are created here;
// the actual hierarchy (matrices, smoothers, operators) is built in Setup().
template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
AggMG<idx_t, data_t, calc_t, setup_t>::AggMG(const std::string _config_file) {
    ParseConfig(_config_file);
}

// Read the JSON configuration file and initialize all control parameters:
// level counts, cycle type, per-level smoother choices/weights, and the
// per-coarsening-level controls (skip flags, aggregation strength alpha,
// process-shrink strides XPSS, coordinate usage). Also allocates the
// per-level pointer arrays (A_array, U/F/aux, smoother, restrictor, ...)
// and pre-nulls every slot so the destructor is safe even before Setup().
template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
void AggMG<idx_t, data_t, calc_t, setup_t>::ParseConfig(std::string config_file)
{
    int my_pid; MPI_Comm_rank(MPI_COMM_WORLD, & my_pid);
    int num_procs; MPI_Comm_size(MPI_COMM_WORLD, & num_procs);

    if (my_pid == 0) printf("reading from %s\n", config_file.c_str());
    std::ifstream f(config_file);
    Json::Reader reader;
    Json::Value data;
    bool ret = reader.parse(f, data);
    // NOTE(review): assert() is compiled out under NDEBUG, so a malformed
    // config would go undetected in release builds — confirm this is intended.
    assert(ret == true);

    // Level counts: coarse_nlevs coarsening steps are generated during Setup,
    // of which num_levs are actually kept (the rest are "skipped").
    coarse_nlevs = data["C_nlevs"].asInt();
    num_levs = data["num_levs"].asInt();
    assert(num_levs <= coarse_nlevs);

    // Cycle type and pre/post smoothing sweep counts
    std::string cyc = data["cycle"].asString();
    if      (cyc == "V") cycle_type = 1;
    else if (cyc == "W") cycle_type = 2;
    num_grid_sweeps[0] = data["sweep"][0].asInt();
    num_grid_sweeps[1] = data["sweep"][1].asInt();

    if (my_pid == 0) {
        printf("Pre %d Post %d\n", num_grid_sweeps[0], num_grid_sweeps[1]);
        printf("%s-Cycle num_levs %d\n", cyc.c_str(), num_levs);
    }

    // Per-level smoother type and relaxation weight
    relax_types = new std::string [num_levs];
    relax_wgts  = new calc_t      [num_levs];
    for (idx_t i = 0; i < num_levs; i++) {
        relax_types[i] = data["smoother"][i].asString();
        relax_wgts [i] = data["weight"][i].asDouble();
    }
    
    // Controls for generating the coarsening hierarchy
    skipped = new idx_t [coarse_nlevs];
    alphas  = new calc_t[coarse_nlevs];
    XPSS    = new idx_t [coarse_nlevs];
    shd_setup = new SHADOW * [coarse_nlevs];
    use_coord = new idx_t [coarse_nlevs];
    idx_t zero_cnt = 0;
    for (idx_t i = 0; i < coarse_nlevs; i++) {
        skipped[i] = data["skipped"][i].asInt(); if (skipped[i] == 0) zero_cnt ++;
        alphas [i] = data["alpha"  ][i].asDouble();
        XPSS   [i] = data["XPSS"   ][i].asInt();
        shd_setup[i] = nullptr;
        use_coord[i] = data["coord"][i].asInt();
    }
    // Exactly num_levs levels must be kept (skipped == 0)
    assert(zero_cnt == num_levs);

    smth_nt = data["smth_nt"].asInt();
    // NOTE(review): integer division — setup_nt becomes 0 when num_procs > 128;
    // confirm downstream code tolerates a zero thread count.
    setup_nt = 128 / num_procs;

    // With all control parameters known, allocate the per-level pointer arrays
    A_array = new MyParCSRMatrix<idx_t, data_t, calc_t> * [num_levs];
    U_array = new MyParCSRVector<idx_t, calc_t> * [num_levs];
    F_array = new MyParCSRVector<idx_t, calc_t> * [num_levs];
    aux_arr = new MyParCSRVector<idx_t, calc_t> * [num_levs];
    smoother= new Precond<idx_t, calc_t, setup_t> * [num_levs];
    restrictor = new          MyCSRMatrix<idx_t, data_t, calc_t> * [num_levs];
    plain_prlg = new idx_t * [num_levs];
    shd_dict   = new SHADOW *[num_levs];
    // Pre-null every slot so cleanup is safe before Setup() fills them
    for (idx_t i = 0; i < num_levs; i++) {
        A_array[i] = nullptr;
        smoother[i] = nullptr;
        U_array[i] = F_array[i] = aux_arr[i] = nullptr;
        restrictor[i] = nullptr;
        plain_prlg[i] = nullptr;
        shd_dict[i]= nullptr;
    }
}

// Destructor: release everything allocated by ParseConfig()/Setup().
// Every pointer array was pre-nulled on allocation, so deleting unset
// slots is safe even if Setup() was never called or exited early.
template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
AggMG<idx_t, data_t, calc_t, setup_t>::~AggMG() {
    if (A_array) {
        // Skip the finest level when its matrix is owned by the caller
        // (own_A0 is cleared in Setup() when A_array[0] aliases base_A).
        for (idx_t i = (own_A0 ? 0 : 1); i < num_levs; i++) { delete A_array[i]; A_array[i] = nullptr; }
        delete [] A_array; A_array = nullptr;
    }
    if (U_array) { assert(F_array); assert(aux_arr);
        // U/F/aux are allocated together in Setup(), so free them together
        for (idx_t i = 0; i < num_levs; i++) {
            delete U_array[i]; U_array[i] = nullptr;
            delete F_array[i]; F_array[i] = nullptr;
            delete aux_arr[i]; aux_arr[i] = nullptr;
        }
        delete [] U_array; U_array = nullptr;
        delete [] F_array; F_array = nullptr;
        delete [] aux_arr; aux_arr = nullptr;
    }
    if (smoother) {
        for (idx_t i = 0; i < num_levs; i++) { delete smoother[i]; smoother[i] = nullptr; }
        delete [] smoother; smoother = nullptr;
    }
    if (plain_prlg) { assert(restrictor);
        // Prolongation maps and restriction matrices are created in pairs
        for (idx_t i = 0; i < num_levs; i++) {
            delete [] plain_prlg[i]; plain_prlg[i] = nullptr;
            delete restrictor[i]; restrictor[i] = nullptr; 
        }
        delete [] plain_prlg; plain_prlg = nullptr;
        delete [] restrictor; restrictor = nullptr;
    }
    if (shd_dict) {
        for (idx_t i = 0; i < num_levs; i++) delete shd_dict[i];
        delete [] shd_dict; shd_dict = nullptr;
    }
    if (shd_setup) {
        for (idx_t i = 0; i < coarse_nlevs; i++) delete shd_setup[i];
        delete [] shd_setup; shd_setup = nullptr;
    }
    // Release the plain control-parameter arrays from ParseConfig()
    delete [] skipped; skipped = nullptr;
    delete [] alphas ; alphas  = nullptr;
    delete [] XPSS   ; XPSS    = nullptr;
    delete [] use_coord; use_coord = nullptr;
    delete [] relax_types; relax_types = nullptr;
    delete [] relax_wgts; relax_wgts = nullptr;
}

// Instantiate and set up the smoother configured for level ilev.
// @param ilev  level index, must be < num_levs
// @param A     the level matrix handed to the smoother's Setup()
// Aborts the run (MPI_Abort) on an unrecognized smoother name.
template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
void AggMG<idx_t, data_t, calc_t, setup_t>::SetupSmoother(const idx_t ilev, MyParCSRMatrix<idx_t, setup_t, setup_t> & A)
{
    assert(ilev < num_levs);
    const std::string & kind = relax_types[ilev];

    // Dispatch on the configured smoother name
    Precond<idx_t, calc_t, setup_t> * sm = nullptr;
    if      (kind == "PGS")   sm = new PGS    <idx_t, data_t, calc_t, setup_t>(); // point-wise symmetric Gauss-Seidel
    else if (kind == "PJ")    sm = new Jacobi <idx_t, data_t, calc_t, setup_t>(); // point-wise Jacobi
    else if (kind == "ILU")   sm = new ILU    <idx_t, data_t, calc_t, setup_t>(); // incomplete LU factorization
    else if (kind == "GlbLU") sm = new DenseLU<idx_t, data_t, calc_t, setup_t>(); // global LU (typically coarsest level only)
    else MPI_Abort(A.comm, -789);                                                 // unknown name: abort hard

    smoother[ilev] = sm;
    // Hand the level matrix to the smoother and set its relaxation weight
    smoother[ilev]->Setup    (A);
    smoother[ilev]->SetWeight(relax_wgts[ilev]);

    // Print once per communicator for confirmation
    int my_pid; MPI_Comm_rank(A.comm, & my_pid);
    if (my_pid == 0) printf("lev #%d Smoother %s Wgt %.3f\n", ilev, relax_types[ilev].c_str(), relax_wgts[ilev]);
}


// Build the full multigrid hierarchy starting from the caller-supplied
// finest-level matrix base_A. First generate coarse_nlevs-1 coarsening
// steps (optionally shrinking the set of active processes via reducePx),
// then — honoring the per-level "skipped" flags — keep num_levs real
// levels and wire up their vectors, smoothers, restriction matrices,
// prolongation maps, and the SHADOW gather/scatter metadata needed when
// a level transition spans a process shrink.
template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
void AggMG<idx_t, data_t, calc_t, setup_t>::Setup(MyParCSRMatrix<idx_t, setup_t, setup_t> & base_A)
{
    base_A_ptr = & base_A;

    int my_pid; MPI_Comm_rank(base_A.comm, &my_pid);
    if (my_pid == 0) {
        printf("AggMG Setup %d Smth %d\n", setup_nt, smth_nt);
        if (num_levs == 1) printf("  Warning: only 1 layer exists.\n");
    }
    // Matrices generated per coarsening step; A_merged records the ones that
    // came out of a process shrink so they can be freed afterwards.
    // NOTE(review): these are runtime-sized arrays (VLAs) — a compiler
    // extension, not standard C++.
    MyParCSRMatrix<idx_t, setup_t, setup_t> * A_setup[coarse_nlevs], * A_merged[coarse_nlevs];
    // Restriction matrices generated per coarsening step
    MyCSRMatrix   <idx_t, data_t, calc_t>   * R_setup[coarse_nlevs];
    // Prolongation operators (local fine-to-coarse index maps) per step
    idx_t * P_setup  [coarse_nlevs];
    
    for (idx_t i = 0; i < coarse_nlevs; i++) {// pre-null all working pointers
        R_setup[i] = nullptr; A_setup[i] = nullptr; P_setup[i] = nullptr; A_merged[i] = nullptr;
        shd_setup[i] = nullptr;
    }
    // The finest level is supplied by the outer caller
    A_setup[0] = & base_A;
    // Walk down level by level until the requested coarsest level
    for (idx_t fi = 0; fi < coarse_nlevs - 1; fi ++) {
        MyParCSRMatrix<idx_t, setup_t, setup_t> * fine_mat = A_setup[fi];// matrix just produced on this level (may be null)
        if (fine_mat && XPSS[fi] > 1) {// this level requires a process merge
            fine_mat = reducePx(XPSS[fi], shd_setup[fi], A_setup[fi]);// non-master processes get back a null pointer
            A_merged[fi] = fine_mat;// remember it so it can be freed later
        }
        // Only coarsen if this process still owns part of the fine-level matrix!
        if (fine_mat != nullptr) {
            // Rank/size of this process within the level's communicator
            int lev_pid; MPI_Comm_rank(fine_mat->comm, & lev_pid);
            int lev_nproc; MPI_Comm_size(fine_mat->comm, & lev_nproc);

            if (lev_pid == 0) printf("lev #%2d NRows %9d alpha %.4f #Procs %3d do coarsening\n",
                    fi, fine_mat->glb_nrows, alphas[fi], lev_nproc);
            // One pairwise-aggregation coarsening step, which also forms the coarse matrix
            CoarsenWrapper<idx_t, setup_t, calc_t> * wrapper =
                coarsen<idx_t, setup_t, calc_t>(*fine_mat, use_coord[fi], 0.25, 1, alphas[fi]);// aggregate once only
            // Transfer ownership out of the wrapper
            A_setup [fi + 1] = wrapper->coar_mat   ; wrapper->coar_mat    = nullptr;
            P_setup [fi    ] = wrapper->loc_map_f2c; wrapper->loc_map_f2c = nullptr;
            R_setup [fi    ] = new MyCSRMatrix<idx_t, data_t, calc_t>(
                A_setup[fi + 1]->end_row - A_setup[fi + 1]->beg_row,
                fine_mat->end_row - fine_mat->beg_row);
            // Replace the freshly allocated row_ptr with the one from the wrapper
            delete [] R_setup[fi]->row_ptr;
            R_setup [fi]->row_ptr = wrapper->R_rp; wrapper->R_rp = nullptr;
            R_setup [fi]->col_idx = wrapper->R_ci; wrapper->R_ci = nullptr;
            delete wrapper; wrapper = nullptr;
        }
    }// ilev loop

    // All coarse matrices generated: use the "skipped" flags to select the
    // real levels into A_array and establish the true inter-level mappings
    std::vector<idx_t> real_cs;
    for (idx_t i = 0; i < coarse_nlevs; i++) {
        if (skipped[i] == 0) real_cs.emplace_back(i);// collect the levels that are kept
    } assert(real_cs[0] == 0);// the finest level can never be skipped
    assert((idx_t) real_cs.size() == num_levs);// must match the configured level count

    if (my_pid == 0) {
        printf("\n ============ Summary of Levels ============\n");
    }

    for (idx_t i = 0; i < num_levs; i++) {
        const idx_t cid = real_cs[i];// real level i corresponds to coarsening step cid
        if (A_setup[cid]) {// this process participates in that coarsening level
            if constexpr (sizeof(setup_t) == sizeof(calc_t) && sizeof(setup_t) == sizeof(data_t)) {
                A_array[i] = A_setup[cid];// transfer matrix ownership
                A_setup[cid] = nullptr;// null the source so cleanup below skips it
            } else assert(false);// mixed-precision transfer not implemented

            int flev_pid; MPI_Comm_rank(A_array[i]->comm, & flev_pid);
            int flev_nproc; MPI_Comm_size(A_array[i]->comm, & flev_nproc);
            if (flev_pid == 0) { 
                idx_t avg_wl = A_array[i]->glb_nrows / flev_nproc;
                printf("lev #%2d NRows %9d #Proc %3d Avg load %9d\n", i, A_array[i]->glb_nrows, flev_nproc, avg_wl);
            }
        }
    }

    for (idx_t i = 0; i < num_levs; i++) {
        const idx_t cid = real_cs[i];// real level i corresponds to coarsening step cid
        if (A_array[i]) {// this process has work on level i
            // Rank/size of this process within level i's communicator
            int flev_pid; MPI_Comm_rank(A_array[i]->comm, & flev_pid);
            int flev_nproc; MPI_Comm_size(A_array[i]->comm, & flev_nproc);
            // Create the level vectors sized after the level matrix
            U_array[i] = new MyParCSRVector<idx_t, calc_t>(A_array[i]->comm, A_array[i]->glb_nrows,
                    A_array[i]->beg_row, A_array[i]->end_row, nullptr);
            F_array[i] = new MyParCSRVector<idx_t, calc_t>(*U_array[i]);
            aux_arr[i] = new MyParCSRVector<idx_t, calc_t>(*U_array[i]);
            // Create the level smoother from the level matrix
            SetupSmoother(i, *A_array[i]);
            // If this is not the coarsest level, build the inter-level
            // restriction and prolongation operators
            if (i + 1 < num_levs) {
                const idx_t next_cid = real_cs[i + 1];// real level i+1 corresponds to coarsening step next_cid
                // Cumulative product of the shrink strides over [cid, next_cid):
                // e.g. three consecutive strides 1, 2, 1 give 1*2*1 = 2
                idx_t accum_stride = 1;
                for (idx_t _str = cid; _str < next_cid; _str ++) accum_stride *= XPSS[_str];// cumulative stride

                if (next_cid == cid + 1) {// two consecutive coarsening steps are both kept
                    // When accum_stride==1, shd_setup[cid] must actually be null
                    if (accum_stride == 1) assert(shd_setup[cid] == nullptr);
                    // When the cumulative stride exceeds 1 the shadow can be reused directly
                    shd_dict  [i] = shd_setup[cid]; shd_setup[cid] = nullptr;
                    // Restriction/prolongation: just transfer ownership, no rebuild needed
                    restrictor[i] =   R_setup[cid];   R_setup[cid] = nullptr;
                    plain_prlg[i] =   P_setup[cid];   P_setup[cid] = nullptr;
                }
                else if (accum_stride == 1) {// intermediate steps were skipped, but no shrink happened
                    const idx_t num_fine = A_array[i]->end_row - A_array[i]->beg_row;
                    // Each process can compose the prolongation/restriction maps locally
                    idx_t * merged_map_f2c = new idx_t [num_fine];
                    #pragma omp parallel
                    {
                        int tid = omp_get_thread_num();
                        int nt  = omp_get_num_threads();
                        int tbeg = 0, tend = 0;
                        thread_load_balance(num_fine, nt, tid, tbeg, tend);
                        {// first copy the nearest step's map
                            const idx_t * loc_map = P_setup[cid]; assert(loc_map);
                            for (idx_t j = tbeg; j < tend; j++)
                            merged_map_f2c[j] = loc_map[j];
                        }
                        // then fold in each subsequent step: repeated table-lookup hops
                        for (idx_t clv = cid + 1; clv < next_cid; clv ++) {
                            const idx_t * loc_map = P_setup[clv]; assert(loc_map);
                            for (idx_t j = tbeg; j < tend; j++)
                                merged_map_f2c[j] = loc_map[merged_map_f2c[j]];
                        }
                    }
                    plain_prlg[i] = merged_map_f2c;// becomes the prolongation operator
                    // The restriction matrix is the transpose of the prolongation
                    assert(R_setup[next_cid - 1]);
                    restrictor[i] = new MyCSRMatrix<idx_t, data_t, calc_t>(
                        R_setup[next_cid - 1]->nrows, num_fine, plain_prlg[i]);
                }
                else {// intermediate steps were skipped AND a shrink happened
                    // First build the shadow metadata
                    shd_dict[i] = new SHADOW;
                    if (A_array[i + 1]) {// this process still smooths on the coarse grid after the shrink (i.e. it is a master)
                        // Record the shrink metadata
                        shd_dict[i]->num_recvs = accum_stride - 1;
                        shd_dict[i]->recv_pids = new int [accum_stride - 1];
                        shd_dict[i]->recv_offsets = new int [accum_stride];
                        idx_t accum_offset = A_array[i]->end_row - A_array[i]->beg_row;
                        const idx_t * fparti = A_array[i]->rows_partition;
                        // Record each slave's rank and the offset where its message lands
                        for (int slv = 0; slv < shd_dict[i]->num_recvs; slv++) {
                            const idx_t ngb_pid = flev_pid + slv + 1;
                            shd_dict[i]->recv_pids   [slv] = ngb_pid;
                            shd_dict[i]->recv_offsets[slv] = accum_offset;
                            accum_offset += fparti[ngb_pid + 1] - fparti[ngb_pid];
                        }
                        shd_dict[i]->recv_offsets[accum_stride - 1] = accum_offset;
                    } else {// a slave records the message length and the master's rank
                        shd_dict[i]->send_len = A_array[i]->end_row - A_array[i]->beg_row;
                        shd_dict[i]->send_pid = flev_pid - (flev_pid % accum_stride);
                    }

                    // Then build the prolongation and restriction operators.
                    // Prolongation: propagate the map step by step from cid to next_cid
                    // (like the table-lookup hops in the previous branch).
                    // Restriction: transpose of the prolongation.

                    idx_t nfi = A_array[i]->end_row - A_array[i]->beg_row;// rows owned on level i (may change as we propagate)
                    // Merged map: for each of this process's [0,nfi) elements on
                    // level i, the corresponding element index on level i+1.
                    // "merged_" because several coarsening steps sit between levels i and i+1.
                    idx_t * merged_map_f2c = new idx_t [nfi];
                    // Initialize with the identity map
                    #pragma omp parallel for schedule(static)
                    for (idx_t j = 0; j < nfi; j++)
                        merged_map_f2c[j] = j;
                    
                    for (idx_t clv = cid; clv < next_cid; clv++) {// advance one coarsening step at a time
                        if (shd_setup[clv] == nullptr) {// no shrink on this step: look up this step's fine-to-coarse table
                            assert(XPSS[clv] == 1);
                            const idx_t * loc_map = P_setup[clv]; assert(loc_map);
                            #pragma omp parallel for schedule(static)
                            for (idx_t j = 0; j < nfi; j++)
                                merged_map_f2c[j] = loc_map[merged_map_f2c[j]];// advance one step
                        }
                        else {// this step shrinks: slaves send their maps to the master
                            const MyParCSRMatrix<idx_t, setup_t, setup_t> * clv_mat = nullptr;
                            if (A_setup[clv] == nullptr) clv_mat = A_array[i];
                            else clv_mat = A_setup[clv];
                            assert(clv_mat);
                            
                            int clv_pid; MPI_Comm_rank(clv_mat->comm, & clv_pid);
                            const SHADOW * shd = shd_setup[clv];
                            if (shd->send_pid == MPI_PROC_NULL) {// master process
                                idx_t recv_pos   [shd->num_recvs + 1]; recv_pos[0] = nfi;
                                MPI_Request reqs [shd->num_recvs];
                                // First learn from each slave how many elements it will send
                                for (idx_t r = 0; r < shd->num_recvs; r++) {
                                    MPI_Irecv( recv_pos + 1 + r, 1 * sizeof(idx_t), MPI_BYTE,
                                        r + 1, 501, shd->subComm, & reqs[r]);
                                }
                                MPI_Waitall(shd->num_recvs, reqs, MPI_STATUSES_IGNORE);
                                // Prefix-sum the counts into placement offsets per slave
                                for (idx_t r = 0; r < shd->num_recvs; r++)
                                    recv_pos[r + 1] += recv_pos[r];
                                const idx_t tot_num = recv_pos[shd->num_recvs];

                                // New map: the length changed after the shrink, so rebuild
                                idx_t * shd_map_f2c = new idx_t [tot_num];
                                const idx_t * master_map = P_setup[clv]; assert(master_map);

                                for (idx_t r = 0; r < shd->num_recvs; r++) {
                                    MPI_Irecv( shd_map_f2c + recv_pos[r],
                                        (recv_pos[r + 1] - recv_pos[r]) * sizeof(idx_t), MPI_BYTE,
                                        r + 1, 505, shd->subComm, & reqs[r]);
                                }
                                // Copy (and advance) the master's own portion
                                #pragma omp parallel for schedule(static)
                                for (idx_t j = 0; j < recv_pos[0]; j++)// own portion is always first
                                    shd_map_f2c[j] = master_map[merged_map_f2c[j]];// advance one step
                                MPI_Waitall(shd->num_recvs, reqs, MPI_STATUSES_IGNORE);
                                // Advance the slaves' portions through the master's table
                                for (idx_t r = 0; r < shd->num_recvs; r++) {
                                    const idx_t coar_offset = clv_mat->rows_partition[clv_pid + r + 1]
                                                            - clv_mat->rows_partition[clv_pid];// coarse points produced before this slave
                                    #pragma omp parallel for schedule(static)
                                    for (idx_t j = recv_pos[r]; j < recv_pos[r + 1]; j++)
                                        shd_map_f2c[j] = master_map[coar_offset + shd_map_f2c[j]];// advance one step, note the offset
                                }
                                // Swap in the new map
                                delete [] merged_map_f2c; merged_map_f2c = shd_map_f2c;
                                // and update the local fine count
                                nfi = tot_num;
                                // Continue to the next step to check for further shrinks / mastership
                            } else {// slaves
                                // Tell the master how many elements will follow
                                MPI_Send( & nfi, 1 * sizeof(idx_t), MPI_BYTE,
                                    0, 501, shd->subComm);// announce the payload length first
                                // Send the actual map data
                                MPI_Send(merged_map_f2c, nfi * sizeof(idx_t), MPI_BYTE,
                                    0, 505, shd->subComm);
                                delete [] merged_map_f2c; merged_map_f2c = nullptr;
                                break;// leave the clv loop: from this step on this process has no work
                            }
                        }
                    }// clv loop
                    // After the loop only surviving masters hold a non-null map
                    if (A_array[i + 1]) {// a master builds its prolongation operator
                        plain_prlg[i] = merged_map_f2c;// becomes the prolongation operator
                        // The restriction matrix is the transpose of the prolongation
                        assert(R_setup[next_cid - 1]);
                        restrictor[i] = new MyCSRMatrix<idx_t, data_t, calc_t>(
                            R_setup[next_cid - 1]->nrows, nfi, plain_prlg[i]);
                    } else assert(merged_map_f2c == nullptr);
                }
            }// i + 1 still inside
        }// I am doing A_array[i]
    }// i loop
    // The finest matrix stays owned by the caller; the destructor must not free it
    if (A_array[0] == & base_A) own_A0 = false;

    // Clean up intermediate results (steps not needed during the solve phase)
    for (idx_t i = 0; i < coarse_nlevs; i++) {
        if (i == 0) assert(A_setup[i] == nullptr);
        delete    A_setup[i];
        delete    R_setup[i];
        delete [] P_setup[i];
        delete    A_merged[i];
    }
}

#define FUSSION_OPT // 是否通过融合减少一次向量拷贝
// Run one multigrid cycle (V or W, per the configured cycle_type) to solve
// A*x = b on the finest level.
// @param b    finest-level right-hand side; never written through.
// @param x    finest-level solution vector; used as the initial guess unless
//             _zg is set, and receives the result.
// @param _zg  when true, treat the initial guess as zero (skips reading x).
template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
void AggMG<idx_t, data_t, calc_t, setup_t>::Solve(const MyParCSRVector<idx_t, calc_t> & b,
    MyParCSRVector<idx_t, calc_t> & x, bool _zg)
{
    // Record whether the zero-initial-guess optimization applies
    this->zero_guess = _zg;
    // Prepare the finest-level data
#ifdef FUSSION_OPT
    // Temporarily alias the finest-level slots to the caller's b and x,
    // saving one copy in and one copy out around the cycle.
    MyParCSRVector<idx_t, calc_t> * F0_tmp = F_array[0], * U0_tmp = U_array[0];
    // const_cast (instead of a C-style cast) makes the constness drop explicit;
    // it is safe because the cycle only reads F_array[0].
    F_array[0] = const_cast<MyParCSRVector<idx_t, calc_t> *>(& b);
    U_array[0] = & x;
    if (this->zero_guess) U_array[0]->set_val(0.0);
#else
    // Finest-level preparation: the residual is just the right-hand side b
    vec_copy(b, *F_array[0]);
    if (!this->zero_guess) {
        vec_copy(x, *U_array[0]);
    }// otherwise the level-0 smoother is responsible for zero-initializing the solution
    // Caution: if the initial solution was not zeroed while the smoother takes the
    // zero_guess==false branch, stale values in the array would corrupt the result
#endif
    // Branch according to the configured cycle type
    switch (cycle_type)
    {
    case 1: V_Cycle(); break;
    case 2: W_Cycle(); break;
    default: MPI_Abort(b.comm, -9011);
    }

#ifdef FUSSION_OPT
    // Restore the original finest-level vectors
    F_array[0] = F0_tmp;
    U_array[0] = U0_tmp;
#else
    vec_copy(*U_array[0], x);// finally copy the result out
#endif
}

// One V-cycle: a single downward sweep (smooth + restrict) followed by a
// single upward sweep (smooth + prolongate-and-correct).
template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
void AggMG<idx_t, data_t, calc_t, setup_t>::V_Cycle() const
{
    // Downward leg: pre-smooth each level, then push the residual down
    for (idx_t lev = 0; lev < num_levs; lev++) {
        preSmooth(lev);
        const bool has_coarser = (lev + 1 < num_levs);
        if (!has_coarser) continue;
        // Residual of the smoothed iterate, stored in aux_arr[lev]
        calcResi(lev);
        // Restrict aux_arr[lev] into the next level's RHS F_array[lev+1];
        // when a process shrink exists, the gather happens inside this step
        rstrResi(lev);
    }
    // Upward leg: post-smooth each level, then pull the correction up
    for (idx_t lev = num_levs - 1; lev >= 0; lev--) {
        postSmooth(lev);
        if (lev > 0) {
            // Interpolate U_array[lev] into aux_arr[lev-1] and add;
            // when a process shrink exists, the scatter happens inside this step
            prlgErr(lev);
        }
    }
}

// One W-cycle, expressed iteratively with per-level visit counters
// (hypre-style): each coarse level is revisited cycle_type times before
// control returns to the finer level. There is no pre/post smoothing
// distinction inside the W-cycle.
template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
void AggMG<idx_t, data_t, calc_t, setup_t>::W_Cycle() const 
{
    // Per-level visit counters. std::vector replaces the original
    // runtime-sized array (a VLA), which is a compiler extension rather
    // than standard C++.
    std::vector<idx_t> lev_counter(num_levs);
    lev_counter[0] = 1;
    for (idx_t i = 1; i < num_levs; i++) lev_counter[i] = cycle_type;

    bool Not_Finished = true;
    idx_t i = 0;

    while (Not_Finished) {
        preSmooth(i);// the W-cycle draws no distinction between pre- and post-smoothing

        // Decrement the control counter 
        lev_counter[i] -- ;
        // and determine which grid to visit next
        if (lev_counter[i] >= 0 && i != num_levs - 1) {// Visit coarser level next.
            calcResi(i);// Compute residual
            rstrResi(i);// Perform restriction            
            // Reset counters and cycling parameters for coarse level
            i ++;
            lev_counter[i] = std::max(lev_counter[i], cycle_type);
            this->zero_guess = true;// the coarser level solves the residual equation of the level above
        }
        else if (i != 0) {// Visit finer level next.
            prlgErr(i);
            // Reset counters and cycling parameters for finer level.
            i --;
            this->zero_guess = false;
        }
        else Not_Finished = false;
    }
}

// Pre-smoothing on level i: apply the level smoother num_grid_sweeps[0] times.
// Levels this process does not participate in (null smoother) are skipped.
template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
void AggMG<idx_t, data_t, calc_t, setup_t>::preSmooth(const idx_t i) const 
{
    assert(i < num_levs);
    if (smoother[i] == nullptr) return;// no work on this level for this process
    const idx_t nsweeps = num_grid_sweeps[0];
    for (idx_t s = 0; s < nsweeps; s++) {
        // Only the very first sweep may exploit the zero-initial-guess shortcut
        const bool zg = this->zero_guess && (s == 0);
        smoother[i]->Solve(*F_array[i], *U_array[i], zg);
    }
}

// Post-smoothing on level i: apply the level smoother num_grid_sweeps[1]
// times, always with the current iterate as the starting guess — each sweep
// is one application of the smoother to A u = f on this level.
template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
void AggMG<idx_t, data_t, calc_t, setup_t>::postSmooth(const idx_t i) const 
{
    assert(i < num_levs);
    if (smoother[i] == nullptr) return;// this process holds no work on level i
    for (idx_t remaining = num_grid_sweeps[1]; remaining > 0; remaining--)
        smoother[i]->Solve(*F_array[i], *U_array[i], false);
}

// Compute the residual r = f - A*u on level i after smoothing; the result
// is stored in aux_arr[i]. Processes with no rows on this level do nothing.
template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
void AggMG<idx_t, data_t, calc_t, setup_t>::calcResi(const idx_t i) const
{
    assert(i < num_levs);
    if (A_array[i] == nullptr) return;// this process has no part of level i
    if constexpr (sizeof(calc_t) == sizeof(data_t)) {
        // All precisions agree: aux = (-1)*A*u + 1*f
        A_array[i]->Mult(-1.0, *U_array[i], 1.0, *F_array[i], *aux_arr[i]);
    } else {
        assert(false);// mixed-precision residual path not implemented
    }
}

// Restrict the level-i residual (aux_arr[i]) into the next level's RHS:
// F_array[i+1] = R * aux_arr[i], where each coarse row averages the fine
// entries it aggregates. When a process shrink separates levels i and i+1,
// slave processes send their residual slices to their master (which holds
// F_array[i+1]) before the master performs the restriction.
template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
void AggMG<idx_t, data_t, calc_t, setup_t>::rstrResi(const idx_t i) const 
{
    // Conceptually: R_ops[i]->apply(aux_arr[i], F_array[i+1]);
    assert(i < num_levs);
    if (U_array[i]) {// this process participates on the fine level
        // Kernel performing the restriction: averaged row sums of src
        auto rstr_kernel = [](const idx_t num_coar, const idx_t num_fine,
            const idx_t * row_ptr, const idx_t * col_idx, const calc_t * src, calc_t * dst) {
            #pragma omp parallel for schedule(static)
            for (idx_t ci = 0; ci < num_coar; ci++) {
                calc_t sum = 0.0;
                // Restriction weight: 1 / (number of fine points in this aggregate)
                calc_t wgt = 1.0 / (row_ptr[ci + 1] - row_ptr[ci]);
                for (idx_t p = row_ptr[ci]; p < row_ptr[ci + 1]; p++) {
                    const idx_t src_id = col_idx[p]; assert(src_id < num_fine);
                    sum += src[src_id];
                }
                dst[ci] = sum * wgt;
            }
        };
        
        const SHADOW * shd = shd_dict[i];
        if (shd != nullptr) {// a shrink occurs here; F_array[i+1] is likely null on this process
            if (F_array[i + 1] != nullptr) {// this process computes on the next level: a master
                assert(shd->send_pid == MPI_PROC_NULL);
                // Gather buffer covering own rows plus every slave's rows
                calc_t * recv_buf = new calc_t [shd->recv_offsets[shd->num_recvs]];
                MPI_Request reqs[shd->num_recvs];
                for (idx_t r = 0; r < shd->num_recvs; r++) {
                    MPI_Irecv(recv_buf + shd->recv_offsets[r], 
                        (shd->recv_offsets[r + 1] - shd->recv_offsets[r]) * sizeof(calc_t), MPI_BYTE,
                        shd->recv_pids[r], 99, U_array[i]->comm, & reqs[r]);
                }
                // Copy this process's own portion while the receives are in flight
                #pragma omp parallel for schedule(static)
                for (idx_t j = 0; j < shd->recv_offsets[0]; j++)
                    recv_buf[j] = aux_arr[i]->data[j];

                MPI_Waitall(shd->num_recvs, reqs, MPI_STATUSES_IGNORE);
                // Then perform the restriction over the gathered residual
                assert(F_array[i + 1] != nullptr); assert(restrictor[i] != nullptr);
                const idx_t num_coar = restrictor[i]->nrows;
                assert(num_coar == F_array[i + 1]->end_row - F_array[i + 1]->beg_row);
                rstr_kernel(num_coar, shd->recv_offsets[shd->num_recvs],
                    restrictor[i]->row_ptr, restrictor[i]->col_idx,
                    recv_buf, F_array[i + 1]->data);

                delete [] recv_buf;
            } else {// this process has no work on the next level: a slave
                assert(shd->send_pid != MPI_PROC_NULL); 
                assert(aux_arr[i]->end_row - aux_arr[i]->beg_row == shd->send_len);
                // Send this process's residual slice to its master
                MPI_Send(aux_arr[i]->data, shd->send_len * sizeof(calc_t), MPI_BYTE,
                    shd->send_pid, 99, U_array[i]->comm);
                // No restriction work to do locally
            }
        } else {// no shrink: restrict locally as usual
            assert(F_array[i + 1] != nullptr); assert(restrictor[i] != nullptr);
            const idx_t num_coar = restrictor[i]->nrows;
            assert(num_coar == F_array[i + 1]->end_row - F_array[i + 1]->beg_row);
            rstr_kernel(num_coar, aux_arr[i]->end_row - aux_arr[i]->beg_row,
                restrictor[i]->row_ptr, restrictor[i]->col_idx,
                aux_arr[i]->data, F_array[i + 1]->data);
        }
    }
}

// Prolongate the level-i correction back to level i-1 and accumulate it:
// aux_arr[i-1] = P * U_array[i], then U_array[i-1] += aux_arr[i-1].
// When a process shrink separates the two levels, the master interpolates
// for its whole group and scatters each slave's slice back to it.
template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
void AggMG<idx_t, data_t, calc_t, setup_t>::prlgErr(const idx_t i) const
{
    // Conceptually: P_ops[i-1]->apply(U_array[i], aux_arr[i-1]);
    if (U_array[i - 1]) {// this process participates on the fine level
        // Kernel performing the (piecewise-constant) interpolation
        auto prlg_kernel = [](const idx_t num_fine, const idx_t * f2c_map,
            const calc_t * src, calc_t * dst) {
            #pragma omp parallel for schedule(static)
            for (idx_t fi = 0; fi < num_fine; fi++) {
                const idx_t src_id = f2c_map[fi];
                dst[fi] = src[src_id];
            }
        };

        const SHADOW * shd = shd_dict[i - 1];
        if (shd != nullptr) {// a shrink occurs here; U_array[i] is likely null on this process
            if (U_array[i] != nullptr) {// master process
                // First interpolate c2f locally for the whole group
                // (effectively doing the slaves' computation for them)
                assert(shd->send_pid == MPI_PROC_NULL);
                const idx_t num_fine = shd->recv_offsets[shd->num_recvs];
                calc_t * send_buf = new calc_t [num_fine];
                prlg_kernel(num_fine, plain_prlg[i - 1], U_array[i]->data, send_buf);
                MPI_Request reqs[shd->num_recvs];
                // Then scatter each slave's slice back to it
                for (idx_t s = 0; s < shd->num_recvs; s++) {
                    MPI_Isend(send_buf + shd->recv_offsets[s], 
                        (shd->recv_offsets[s + 1] - shd->recv_offsets[s]) * sizeof(calc_t), MPI_BYTE,
                        shd->recv_pids[s], 88, aux_arr[i - 1]->comm, & reqs[s]);
                }
                // Copy this process's own portion directly
                #pragma omp parallel for schedule(static)
                for (idx_t j = 0; j < shd->recv_offsets[0]; j++)
                    aux_arr[i - 1]->data[j] = send_buf[j];
                MPI_Waitall(shd->num_recvs, reqs, MPI_STATUSES_IGNORE);
                delete [] send_buf;
            } else {// a slave only needs to receive its slice
                assert(shd->send_pid != MPI_PROC_NULL); 
                assert(aux_arr[i - 1]->end_row - aux_arr[i - 1]->beg_row == shd->send_len);
                MPI_Recv(aux_arr[i - 1]->data, shd->send_len * sizeof(calc_t), MPI_BYTE,
                    shd->send_pid, 88, aux_arr[i - 1]->comm, MPI_STATUS_IGNORE);
            }
        } else { assert(U_array[i] != nullptr); assert(plain_prlg[i - 1] != nullptr);
            // No shrink: interpolate locally into aux_arr[i-1]
            const idx_t num_fine = aux_arr[i - 1]->end_row - aux_arr[i - 1]->beg_row;
            prlg_kernel(num_fine, plain_prlg[i - 1], U_array[i]->data, aux_arr[i - 1]->data);
        }

        // All participating processes accumulate the correction locally
        vec_add(*U_array[i-1], (calc_t) 1.0, *aux_arr[i-1], *U_array[i-1]);
    }
}

// -----------------------------------------------------------------------------
// reducePx: shrink the set of processes that own a parallel CSR matrix.
// Every group of `stride` consecutive ranks in old_A->comm is collapsed onto
// the first rank of the group (the "master"). Each slave re-groups its rows
// into the shrunken diag/offd partition locally (for load balance) and ships
// them to its master, which assembles the shrunken matrix.
//
// Parameters:
//   stride - number of consecutive old ranks merged into one master;
//            old_nprocs must be divisible by stride (asserted).
//   shd    - [in/out] must come in as nullptr (asserted); on return holds the
//            SHADOW bookkeeping (master/sub communicators, send/recv partners
//            and row offsets) used by the later gather/scatter phases.
//   old_A  - the pre-shrink matrix (read-only, not owned).
// Returns: the assembled shrunken matrix on master ranks; nullptr on slaves.
// -----------------------------------------------------------------------------
template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
MyParCSRMatrix<idx_t, setup_t, setup_t>* AggMG<idx_t, data_t, calc_t, setup_t>::reducePx(const idx_t stride,
    SHADOW * & shd, const MyParCSRMatrix<idx_t, setup_t, setup_t> * old_A)
{
    assert(old_A);
    const MPI_Comm old_comm = old_A->comm;// communicator of the pre-shrink matrix
    int old_pid, old_nprocs;// this process's rank and the process count in the pre-shrink communicator
    MPI_Comm_rank(old_comm, & old_pid);
    MPI_Comm_size(old_comm, & old_nprocs);
    if (old_pid == 0) printf("X Process Shrink Stride: %d\n", stride);

    assert(old_nprocs % stride == 0);// current implementation requires the old process count to be divisible by the stride

    const int master_old_pid = old_pid - (old_pid % stride);// rank of this group's master process
    int group_pids[stride];// ranks in old_comm (NOTE(review): VLA is a GCC/Clang extension, not standard C++)
    int group_ranges[stride][2];// row-number range owned by each process of this group
    for (idx_t gm = 0; gm < stride; gm ++) {// determine the ranks inside this group (master included)
        group_pids[gm] = master_old_pid + gm;
        group_ranges[gm][0] = old_A->rows_partition[ master_old_pid + gm     ];
        group_ranges[gm][1] = old_A->rows_partition[ master_old_pid + gm + 1 ];
    }
    assert(shd == nullptr);

    shd = new SHADOW;
    MPI_Comm & new_comm = shd->masterComm;// communicator of all master processes after shrinking
    MPI_Comm & sub_comm = shd->subComm;// group-local communicator
    // split the new communicators out of the old pre-shrink communicator;
    // non-masters pass MPI_UNDEFINED and end up with new_comm == MPI_COMM_NULL
    MPI_Comm_split(old_comm, (old_pid == master_old_pid) ? 1 : MPI_UNDEFINED, old_pid, &new_comm);
    MPI_Comm_split(old_comm, master_old_pid, old_pid, & sub_comm);
    // the matrix after shrinking (stays nullptr on slave processes)
    MyParCSRMatrix<idx_t, setup_t, setup_t>* shadow_A = nullptr;
    
    // NOTE(review): MPI_INT / MPI_LONG_LONG are not constant expressions in
    // every MPI implementation (e.g. Open MPI defines them as pointers);
    // `constexpr` here assumes an MPICH-style integer MPI_Datatype — confirm.
    constexpr MPI_Datatype mpi_idx_type = (sizeof(idx_t) == 4) ? MPI_INT : MPI_LONG_LONG;

    const idx_t new_beg_row = group_ranges[0][0],// first row of this group's master after shrinking (inclusive)
                new_end_row = group_ranges[stride - 1][1];// one-past-last row of this group's master after shrinking (exclusive)
    const idx_t new_loc_nrows = new_end_row - new_beg_row;// number of rows the master will own after shrinking
    // rank and size inside the communicator of the shrunken matrix
    int new_pid = -1, new_nprocs = -1;
    if (new_comm != MPI_COMM_NULL) {// this process is a master in the shrink
        MPI_Comm_rank(new_comm, &new_pid);
        MPI_Comm_size(new_comm, &new_nprocs);

        shd->num_recvs = stride - 1;
        shd->recv_pids = new int [stride - 1];
        shd->recv_offsets = new int [shd->num_recvs + 1];
    }

    // diagonal and off-diagonal parts of the shrunken matrix
    MyCSRMatrix<idx_t, setup_t, setup_t> * new_diag = nullptr, * new_offd = nullptr;
    {
        const MyCSRMatrix<idx_t, setup_t, setup_t> * old_offd = old_A->offd;// off-diagonal part of the old matrix
        const MyCSRMatrix<idx_t, setup_t, setup_t> * old_diag = old_A->diag;// diagonal part of the old matrix
        
        // After merging `stride` old processes, the local row range widens:
        // the diagonal block grows and the off-diagonal block shrinks, so each
        // process must "re-group" its old diag/offd entries into the new
        // partition. For load balance the re-grouping is done by every process
        // locally; the results are then sent to the master.

        idx_t * merged_diag_rpt = new idx_t [old_diag->nrows + 1]; merged_diag_rpt[0] = 0;// row offsets of the re-grouped diagonal part
        idx_t * merged_diag_cids= new idx_t [old_diag->nnz + old_offd->nnz];// column indices of the re-grouped diagonal part, worst-case sized
        setup_t* merged_diag_vals = new setup_t [old_diag->nnz + old_offd->nnz];// values of the re-grouped diagonal part, worst-case sized

        idx_t * merged_offd_rpt = new idx_t [old_offd->nrows + 1]; merged_offd_rpt[0] = 0;// row offsets of the re-grouped off-diagonal part
        idx_t * merged_offd_cids= new idx_t [old_offd->nnz];// column indices of the re-grouped off-diagonal part, worst-case sized
        setup_t* merged_offd_vals = new setup_t [old_offd->nnz];// values of the re-grouped off-diagonal part, worst-case sized

        idx_t tot_nnzs[2] = {0, 0};
        for (idx_t i = 0; i < old_A->end_row - old_A->beg_row; i++) {// walk every local row of the old matrix
            idx_t offd_cnt = 0, diag_cnt = 0;
            const idx_t diag_offset = tot_nnzs[0], offd_offset = tot_nnzs[1];
            // walk all nonzeros of this row in the off-diagonal part
            for (idx_t p = old_offd->row_ptr[i]; p < old_offd->row_ptr[i + 1]; p++) {
                const idx_t glb_id = old_A->col_map_offd[ old_offd->col_idx[p] ];
                if (glb_id < new_beg_row || glb_id >= new_end_row ) {// this nonzero stays off-diagonal after shrinking
                    merged_offd_cids[offd_offset + offd_cnt] = glb_id;// note: global column index here
                    merged_offd_vals[offd_offset + offd_cnt] = old_offd->vals[p];
                    offd_cnt ++;
                } else {// this nonzero becomes diagonal after shrinking
                    merged_diag_cids[diag_offset + diag_cnt] = glb_id - new_beg_row;// note: local column index here
                    merged_diag_vals[diag_offset + diag_cnt] = old_offd->vals[p];
                    diag_cnt ++;
                }
            }// offd loop
            // walk all nonzeros of this row in the diagonal part; they certainly stay diagonal after shrinking
            for (idx_t p = old_diag->row_ptr[i]; p < old_diag->row_ptr[i + 1]; p++) {
                merged_diag_cids[diag_offset + diag_cnt] = old_diag->col_idx[p] + old_A->beg_row - new_beg_row;
                merged_diag_vals[diag_offset + diag_cnt] = old_diag->vals[p];
                diag_cnt ++;
            }// diag loop
            // keep the re-grouped diagonal part's column indices in ascending order
            bubble_sort<idx_t, setup_t>(diag_cnt, merged_diag_cids + diag_offset, merged_diag_vals + diag_offset);
            // the off-diagonal part needs no explicit sort: col_map_offd itself is
            // ordered, so the reduced off-diagonal part stays ordered
            // update row offsets (stored as per-row counts for now; prefix-summed below)
            merged_diag_rpt[i + 1] = diag_cnt;
            merged_offd_rpt[i + 1] = offd_cnt;
            tot_nnzs[0] += diag_cnt;
            tot_nnzs[1] += offd_cnt;
        }
        // how many nonzeros each process of this group contributes to the
        // diagonal / off-diagonal parts of the shrunken matrix
        idx_t grp_nnzs[2 * stride];
        // group-local collective to learn everyone's nonzero counts
        MPI_Allgather(tot_nnzs, 2, mpi_idx_type, grp_nnzs, 2, mpi_idx_type, sub_comm);
        // bias rpt[0] by the contributions of all lower-ranked group members so the
        // prefix sums below yield offsets directly into the assembled arrays
        for (idx_t k = 0; k < old_pid % stride; k++) {
            merged_diag_rpt[0] += grp_nnzs[k * 2 + 0];
            merged_offd_rpt[0] += grp_nnzs[k * 2 + 1];
        }
        for (idx_t i = 0; i < old_A->end_row - old_A->beg_row; i++) {
            merged_diag_rpt[i + 1] += merged_diag_rpt[i];
            merged_offd_rpt[i + 1] += merged_offd_rpt[i]; 
        }

        if (new_comm != MPI_COMM_NULL) {// master process
            // nonzero counts the assembled matrix will have in its diag / offd parts
            idx_t assem_diag_nnz = 0, assem_offd_nnz = 0;
            for (idx_t k = 0; k < stride; k++) {
                assem_diag_nnz += grp_nnzs[k * 2 + 0];
                assem_offd_nnz += grp_nnzs[k * 2 + 1];
                // printf(" %d [ %d , %d )\n", k, group_ranges[k][0], group_ranges[k][1]);
            }
            // diagonal part
            new_diag = new MyCSRMatrix<idx_t, setup_t, setup_t>(new_loc_nrows, new_loc_nrows);
            new_diag->row_ptr[0] = 0;
            new_diag->nnz = assem_diag_nnz;
            new_diag->col_idx = new idx_t   [assem_diag_nnz];
            new_diag->vals    = new setup_t [assem_diag_nnz];
            // first copy this process's own contribution
            #pragma omp parallel for schedule(static)
            for (idx_t i = 0; i < old_diag->nrows; i++) {
                new_diag->row_ptr[i + 1] = merged_diag_rpt[i + 1];
                for (idx_t p = merged_diag_rpt[i]; p < merged_diag_rpt[i + 1]; p++) {
                    new_diag->col_idx[p] = merged_diag_cids[p];
                    new_diag->vals   [p] = merged_diag_vals[p];
                }
            }
            // then receive from the other processes (a slave's rpt values already
            // carry its group-global offsets, so they drop straight into place)
            shd->num_recvs = stride - 1;
            idx_t accum_offset = old_A->end_row - old_A->beg_row;
            for (idx_t ngb = 1; ngb < stride; ngb++) {
                const idx_t ngb_offset = old_A->rows_partition[old_pid + ngb    ] - old_A->rows_partition[old_pid],
                            ngb_nrows  = old_A->rows_partition[old_pid + ngb + 1] - old_A->rows_partition[old_pid + ngb];
                // printf("Proc old %d recv from %d ngb_offset %d ngb_nrows %d\n", old_pid, old_pid + ngb, ngb_offset, ngb_nrows);
                MPI_Recv(new_diag->row_ptr + ngb_offset + 1, ngb_nrows, mpi_idx_type, 
                    old_pid + ngb, 11, old_comm, MPI_STATUS_IGNORE);
                const idx_t nnz_offset = new_diag->row_ptr[ngb_offset];

                MPI_Recv(new_diag->col_idx + nnz_offset, grp_nnzs[ngb * 2 + 0] * sizeof(idx_t)  , MPI_BYTE,
                    old_pid + ngb, 12, old_comm, MPI_STATUS_IGNORE);
                MPI_Recv(new_diag->vals    + nnz_offset, grp_nnzs[ngb * 2 + 0] * sizeof(setup_t), MPI_BYTE,
                    old_pid + ngb, 13, old_comm, MPI_STATUS_IGNORE);
                
                shd->recv_pids[ngb - 1] = group_pids[ngb];
                shd->recv_offsets[ngb - 1] = accum_offset;
                accum_offset += ngb_nrows;
            }
            assert(accum_offset == new_end_row - new_beg_row);
            shd->recv_offsets[shd->num_recvs] = accum_offset;// final (one-past-last) offset
            // off-diagonal part
            new_offd = new MyCSRMatrix<idx_t, setup_t, setup_t>(new_loc_nrows, -1);
            new_offd->row_ptr[0] = 0;
            new_offd->nnz = assem_offd_nnz;
            new_offd->col_idx = new idx_t   [assem_offd_nnz];
            new_offd->vals    = new setup_t [assem_offd_nnz];
            // first copy this process's own contribution
            #pragma omp parallel for schedule(static)
            for (idx_t i = 0; i < old_offd->nrows; i++) {
                new_offd->row_ptr[i + 1] = merged_offd_rpt[i + 1];
                for (idx_t p = merged_offd_rpt[i]; p < merged_offd_rpt[i + 1]; p++) {
                    new_offd->col_idx[p] = merged_offd_cids[p];
                    new_offd->vals   [p] = merged_offd_vals[p];
                }
            }
            // then receive from the other processes
            for (idx_t ngb = 1; ngb < stride; ngb++) {
                const idx_t ngb_offset = old_A->rows_partition[old_pid + ngb    ] - old_A->rows_partition[old_pid],
                            ngb_nrows  = old_A->rows_partition[old_pid + ngb + 1] - old_A->rows_partition[old_pid + ngb];
                // printf("old_pid + ngb %d\n", old_pid + ngb);
                MPI_Recv(new_offd->row_ptr + ngb_offset + 1, ngb_nrows, mpi_idx_type, 
                    old_pid + ngb, 21, old_comm, MPI_STATUS_IGNORE);
                const idx_t nnz_offset = new_offd->row_ptr[ngb_offset];

                MPI_Recv(new_offd->col_idx + nnz_offset, grp_nnzs[ngb * 2 + 1] * sizeof(idx_t)  , MPI_BYTE,
                    old_pid + ngb, 22, old_comm, MPI_STATUS_IGNORE);
                MPI_Recv(new_offd->vals    + nnz_offset, grp_nnzs[ngb * 2 + 1] * sizeof(setup_t), MPI_BYTE,
                    old_pid + ngb, 23, old_comm, MPI_STATUS_IGNORE);
            }

            // build the shadow (shrunken) matrix
            shadow_A = new MyParCSRMatrix<idx_t, setup_t, setup_t>(new_comm);
            shadow_A->glb_nrows = old_A->glb_nrows;
            shadow_A->beg_row = new_beg_row;
            shadow_A->end_row = new_end_row;
            shadow_A->rows_partition = new idx_t [new_nprocs + 1];
            for (int cp = 0; cp < new_nprocs; cp++) {
                shadow_A->rows_partition[cp] = old_A->rows_partition[cp * stride];
            }
            shadow_A->rows_partition[new_nprocs] = old_A->glb_nrows;

            shadow_A->diag = new_diag;
            shadow_A->offd = new_offd;
            if (old_A->points) {// if the pre-shrink matrix carries point coordinates, gather those too
                shadow_A->points = new Point<data_t> [new_end_row - new_beg_row];
                // first copy this process's own points
                #pragma omp parallel for schedule(static)
                for (idx_t i = 0; i < old_diag->nrows; i++) {
                    shadow_A->points[i] = old_A->points[i];
                }
                // then receive from the other processes
                for (idx_t ngb = 1; ngb < stride; ngb++) {
                    const idx_t ngb_offset = old_A->rows_partition[old_pid + ngb    ] - old_A->rows_partition[old_pid],
                                ngb_nrows  = old_A->rows_partition[old_pid + ngb + 1] - old_A->rows_partition[old_pid + ngb];
                    MPI_Recv(shadow_A->points + ngb_offset, ngb_nrows * sizeof(Point<data_t>), MPI_BYTE,
                        old_pid + ngb, 31, old_comm, MPI_STATUS_IGNORE);
                }
            }
            
            // build the mapping from global column indices to local numbering
            std::unordered_map<idx_t, std::pair<int, idx_t> > gid2pidlid;
            idx_t offd_ncols = 0;// number of columns of the compressed off-diagonal part of the shrunken matrix
            for (idx_t j = 0; j < new_offd->nnz; j++) {
                const idx_t glb_j = new_offd->col_idx[j];// note: the off-diagonal part still stores global indices at this point
                if (gid2pidlid.find( glb_j ) == gid2pidlid.end()) {
                    offd_ncols ++;
                    std::pair<int, idx_t> tmp(-1, -1);
                    gid2pidlid.emplace(glb_j, tmp);// record it (pid/lid filled in later)
                }
            }
            assert(offd_ncols == (idx_t) gid2pidlid.size());
            new_offd->ncols = offd_ncols;
            shadow_A->buildup_comm_handles(gid2pidlid);// this call modifies the hash map

            // also rewrite the off-diagonal column indices to local numbering
            for (idx_t j = 0; j < new_offd->nnz; j++) {
                const idx_t glb_j = new_offd->col_idx[j];// note: the off-diagonal part still stores global indices at this point
                assert(gid2pidlid.find(glb_j) != gid2pidlid.end());
                new_offd->col_idx[j] = gid2pidlid[glb_j].second;
            }
        } else {// slave process
            // record the destination (master) rank and the data length to send
            shd->send_pid = master_old_pid;
            shd->send_len = old_A->end_row - old_A->beg_row;
            // send this process's re-grouped diagonal part to the master
            MPI_Send(merged_diag_rpt + 1, old_diag->nrows, mpi_idx_type,        master_old_pid, 11, old_comm);
            MPI_Send(merged_diag_cids, tot_nnzs[0] * sizeof(idx_t  ), MPI_BYTE, master_old_pid, 12, old_comm);
            MPI_Send(merged_diag_vals, tot_nnzs[0] * sizeof(setup_t), MPI_BYTE, master_old_pid, 13, old_comm);
            // send this process's re-grouped off-diagonal part to the master
            MPI_Send(merged_offd_rpt + 1, old_offd->nrows, mpi_idx_type,        master_old_pid, 21, old_comm);
            MPI_Send(merged_offd_cids, tot_nnzs[1] * sizeof(idx_t  ), MPI_BYTE, master_old_pid, 22, old_comm);
            MPI_Send(merged_offd_vals, tot_nnzs[1] * sizeof(setup_t), MPI_BYTE, master_old_pid, 23, old_comm);
            if (old_A->points) {// if the pre-shrink matrix carries point coordinates, send those too
                MPI_Send(old_A->points, shd->send_len * sizeof(Point<data_t>), MPI_BYTE, master_old_pid, 31, old_comm);
            }
        }

        delete [] merged_diag_rpt; delete [] merged_diag_cids; delete [] merged_diag_vals;
        delete [] merged_offd_rpt; delete [] merged_offd_cids; delete [] merged_offd_vals;
    }
    return shadow_A;
}

/*
template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
void AggMG<idx_t, data_t, calc_t, setup_t>::gather (const idx_t i) const
{
    assert(i < num_levs);
    const MyParCSRVector<idx_t, calc_t> * src = F_array[i];
    MyParCSRVector<idx_t, calc_t> * dst = shadow_F[i];
    if (src == nullptr) return ;// 本进程本身就在该层没有计算，也就谈不上收缩
    const SHADOW & shd = *(shd_dict[i]);

    if (shd.send_pid == MPI_PROC_NULL) {// 本进程为master
        assert(dst);
        MPI_Request recv_reqs[shd.num_recvs];
        for (idx_t r = 0; r < shd.num_recvs; r++) {
            // printf("offset %d %d from %d\n", shd.recv_offsets[r], shd.recv_offsets[r + 1], shd.recv_pids[r]);
            MPI_Irecv(dst->data + shd.recv_offsets[r], (shd.recv_offsets[r + 1] - shd.recv_offsets[r]) * sizeof(calc_t), MPI_BYTE, 
                shd.recv_pids[r], 99, src->comm, & recv_reqs[r]);
        }
        // 自己对应的部分可以直接拷贝
        assert(dst->beg_row == src->beg_row);
        #pragma omp parallel for schedule(static)
        for (idx_t i = 0; i < src->end_row - src->beg_row; i++)
            dst->data[i] = src->data[i];
        MPI_Waitall(shd.num_recvs, recv_reqs, MPI_STATUSES_IGNORE);
    } else {// 本进程为slave
        MPI_Send(src->data, (src->end_row - src->beg_row) * sizeof(calc_t), MPI_BYTE, shd.send_pid, 99, src->comm);
    }
    // 指针变换
    std::swap(U_array[i], shadow_U[i]);
    std::swap(F_array[i], shadow_F[i]);
    std::swap(aux_arr[i], shadow_aux[i]);
}

template<typename idx_t, typename data_t, typename calc_t, typename setup_t>
void AggMG<idx_t, data_t, calc_t, setup_t>::scatter(const idx_t i) const
{
    assert(i < num_levs);
    // 注意此处，因为在scatter之前必定经过了gather，所以要从swap之后的U_array[i]也就是swap之前的shadow_U[i]的数据去发送
    const MyParCSRVector<idx_t, calc_t> * src = U_array[i];
    MyParCSRVector<idx_t, calc_t> * dst = shadow_U[i];
    if (dst == nullptr) return ;// 本进程本身就在该层没有计算，也就谈不上收缩
    const SHADOW & shd = *(shd_dict[i]);

    if (shd.send_pid == MPI_PROC_NULL) {// 本进程为master
        assert(src);
        MPI_Request send_reqs[shd.num_recvs];
        for (idx_t s = 0; s < shd.num_recvs; s++)
            MPI_Isend(src->data + shd.recv_offsets[s], (shd.recv_offsets[s + 1] - shd.recv_offsets[s]) * sizeof(calc_t), MPI_BYTE,
                shd.recv_pids[s], 88, dst->comm, & send_reqs[s]);
        // 自己对应的部分可以直接拷贝
        assert(dst->beg_row == src->beg_row);
        #pragma omp parallel for schedule(static)
        for (idx_t i = 0; i < dst->end_row - dst->beg_row; i++)
            dst->data[i] = src->data[i];
        MPI_Waitall(shd.num_recvs, send_reqs, MPI_STATUSES_IGNORE);
    } else {// 本进程为slave
        MPI_Recv(dst->data, (dst->end_row - dst->beg_row) * sizeof(calc_t), MPI_BYTE, shd.send_pid, 88, dst->comm, MPI_STATUS_IGNORE);
    }
    // 指针变换，恢复正常
    std::swap(U_array[i], shadow_U[i]);
    std::swap(F_array[i], shadow_F[i]);
    std::swap(aux_arr[i], shadow_aux[i]);
}
*/
// Explicit template instantiations: only these <idx_t, data_t, calc_t, setup_t>
// combinations (all-double and all-float, 32-bit indices) are compiled into
// this translation unit; callers must use one of them.
template class AggMG<int, double, double, double>;
template class AggMG<int, float , float , float >;