#include "UnstructMG.hpp"
#include <chrono>
#include <random>
#include <algorithm>
#include <vector>
#include <string>

#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <set>
#include <map>
#include <queue>
#include <string>

#include <vector>
#include "json/json.h"
#include <fstream>
#include <mpi.h>

using big_idx_t = long long;
using idx_t = int;
using data_t = double;

#ifndef TEST_CNT
#define TEST_CNT 1
#endif

// Per-run timing and iteration record for one solver test
// (one entry per TEST_CNT repetition).
typedef struct {
  double coord;            // time spent recovering 3-D coordinates via BFS (see t_coord in main)
  double setup, solve, prec;   // presumably setup / iterative-solve / preconditioner times -- set outside this view
  double total_wo_coord;   // total time excluding the coordinate search
  double total_w_coord;    // total time including the coordinate search
  int iter;                // iteration count reached by the solver
} TEST_RECORD;

// Read `nums` elements of `size_of_elems` bytes each from binary file
// `filename` into `buf`, starting at element offset `start`.
// If `first_num` is true, the file begins with a 4-byte element-count header
// that is skipped before the element offset is applied.
// Returns the number of elements actually read (fread's count), or -1 if the
// file cannot be opened or the seek fails.
big_idx_t read_binary(void * buf, const char * filename, size_t size_of_elems, long long start, big_idx_t nums, bool first_num=false) {
    assert(size_of_elems == 4 || size_of_elems == 8);
    int my_pid; MPI_Comm_rank(MPI_COMM_WORLD, &my_pid);
    if (my_pid == 0) printf("reading binary from %s\n", filename);

    FILE * fp = fopen(filename, "rb");
    if (fp == NULL) {
        printf("cannot open %s \n", filename);
        return -1;
    }
    // Byte offset of the first requested element; skip the optional 4-byte
    // leading count when first_num is set.  (The two branches of the original
    // differed only by this sizeof(int) term.)
    const long long byte_off = (long long)(size_of_elems * start)
                             + (first_num ? (long long) sizeof(int) : 0LL);
    if (fseek(fp, byte_off, SEEK_SET) != 0) {
        printf("Error! cannot move file pointer to %llu-th bytes\n", size_of_elems * start);
        fclose(fp);
        return -1;
    }

    big_idx_t ret = fread(buf, size_of_elems, nums, fp);
    fclose(fp);// BUG FIX: the handle was previously leaked on the success path
    return ret;
}

// Convenience wrapper around MPI_Allgatherv: the caller supplies only the
// (num_procs + 1)-entry prefix array `displs`; the per-rank receive counts
// are derived here as displs[p+1] - displs[p].
void myWrapper_Allgatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
    void *recvbuf, const int *displs, MPI_Datatype recvtype, MPI_Comm comm)
{
    int nprocs; MPI_Comm_size(comm, & nprocs);
    std::vector<int> counts(nprocs);
    for (int rank = 0; rank < nprocs; ++rank)
        counts[rank] = displs[rank + 1] - displs[rank];
    MPI_Allgatherv(sendbuf, sendcount, sendtype, recvbuf, counts.data(), displs, recvtype, comm);
}
// Split `tot_work` items evenly across `num_threads` workers; worker `tid`
// is assigned the half-open range [beg, end).  The first
// (tot_work % num_threads) workers each receive one extra item.
void load_balance(int tot_work, int num_threads, int tid, int & beg, int & end) {
    const int base  = tot_work / num_threads;
    const int extra = tot_work % num_threads;
    // Workers with tid < extra own (base + 1) items; the rest own base items.
    if (tid < extra) {
        beg = tid * (base + 1);
        end = beg + base + 1;
    } else {
        beg = extra * (base + 1) + (tid - extra) * base;
        end = beg + base;
    }
}
// In-place quicksort of v over the inclusive range [left, right], applying
// the identical permutation to the companion arrays w, y and z
// (i.e. the four arrays are sorted together, keyed on v).
void my_qsort_4i(idx_t *v, idx_t *w, idx_t * y, idx_t *z, idx_t  left, idx_t  right ) {
    if (left >= right) return;// ranges of size 0 or 1 are already sorted

    auto exchange = [&](idx_t a, idx_t b) {
        std::swap(v[a], v[b]);
        std::swap(w[a], w[b]);
        std::swap(y[a], y[b]);
        std::swap(z[a], z[b]);
    };

    // Use the middle element as pivot; move it to the front first.
    exchange(left, (left + right) / 2);
    idx_t store = left;// boundary of the "less than pivot" prefix
    for (idx_t k = left + 1; k <= right; ++k) {
        if (v[k] < v[left])
            exchange(++store, k);
    }
    exchange(left, store);// put the pivot into its final slot
    my_qsort_4i(v, w, y, z, left, store - 1);
    my_qsort_4i(v, w, y, z, store + 1, right);
}

// Hard-coded description of this particular reservoir case:
const idx_t glb_nblks = 262143995;// total number of ndof x ndof blocks (global block-rows); NOTE 640^3 = 262144000, so presumably 5 cells are inactive -- TODO confirm
const idx_t well_IDs[] = {8657011, 8657012};// global block IDs of the well rows eliminated by eliminate_wells()
constexpr int ndof = 4;// degrees of freedom per cell, i.e. the BSR block size
const idx_t glb_dims[] = {640, 640, 640};// logical 3-D grid extents used to recover coordinates

#define PRINT_DEBUG
// Everything recorded about one well block-row while it is eliminated from
// the distributed system (see eliminate_wells()).
struct WELL_DESCRIP
{
    data_t rhs [ndof];// right-hand side of the well row
    data_t sol [ndof];// well unknowns; the diagonal block is asserted to be identity, so sol == rhs
    data_t infl2self [ndof*ndof];// the well's own diagonal block (checked to be I)
    std::vector<idx_t> infl2other_ids;// global IDs of blocks coupled to this well (matrix structure is symmetric)
    std::vector<data_t> infl2other_vals;// the ndof*ndof coupling blocks, received from their owning ranks
};
#include <unordered_set>
#include <unordered_map>
void eliminate_wells(std::map<idx_t, WELL_DESCRIP> & well_records, idx_t & bibeg, idx_t & loc_nblks,
    big_idx_t * & bsr_rpt, idx_t * & bsr_cid, data_t * & bsr_val, data_t * & bsr_rhs)
{
    int my_pid; MPI_Comm_rank(MPI_COMM_WORLD, & my_pid);
    int num_procs; MPI_Comm_size(MPI_COMM_WORLD, & num_procs);
    idx_t assumed_partits [num_procs + 1]; assumed_partits[0] = 0; assert(sizeof(idx_t) == 4);
    idx_t tmp = bibeg + loc_nblks;
    MPI_Allgather(& tmp, 1, MPI_INT, assumed_partits + 1, 1, MPI_INT, MPI_COMM_WORLD);
    assert(assumed_partits[num_procs] == glb_nblks);
    const idx_t num_wells = sizeof(well_IDs) / sizeof(idx_t);
    idx_t my_nrecv = 0;
    for (idx_t iw = 0; iw < num_wells; iw ++) {
        const idx_t wid = well_IDs[iw];
        if (bibeg <= wid && wid < bibeg + loc_nblks) {
#ifdef PRINT_DEBUG
            printf("Proc %d own Well %d\n", my_pid, wid);
#endif
            const idx_t loc_wid = wid - bibeg;
            WELL_DESCRIP record;
            for (int f = 0; f < ndof; f++) record.rhs[f] = bsr_rhs[loc_wid * ndof + f];// 拷贝右端项
            for (auto p = bsr_rpt[loc_wid]; p < bsr_rpt[loc_wid + 1]; p ++) {
                if (wid == bsr_cid[p]) {
                    for (int f = 0; f < ndof*ndof; f++) record.infl2self[f] = bsr_val[p * ndof*ndof + f];
                } else {
                    record.infl2other_ids.push_back(bsr_cid[p]);// 利用矩阵结构对称的特性，记录与该井有关联的点
                    for (int f = 0; f < ndof*ndof; f++) assert(bsr_val[p * ndof*ndof + f] == 0.0);
                }
            }
            // solve for well
            assert(ndof == 4);
            assert(record.infl2self[0] == 1.0); assert(record.infl2self[4] == 0.0); assert(record.infl2self[8] == 0.0); assert(record.infl2self[12]== 0.0);
            assert(record.infl2self[1] == 0.0); assert(record.infl2self[5] == 1.0); assert(record.infl2self[9] == 0.0); assert(record.infl2self[13]== 0.0);
            assert(record.infl2self[2] == 0.0); assert(record.infl2self[6] == 0.0); assert(record.infl2self[10]== 1.0); assert(record.infl2self[14]== 0.0);
            assert(record.infl2self[3] == 0.0); assert(record.infl2self[7] == 0.0); assert(record.infl2self[11]== 0.0); assert(record.infl2self[15]== 1.0);
            for (int f = 0; f < ndof; f++) record.sol[f] = record.rhs[f];
            record.infl2other_vals.resize(record.infl2other_ids.size() * ndof*ndof);
            my_nrecv += record.infl2other_ids.size();
            well_records.emplace(wid, record);
        }
    }
    
    std::set<idx_t> toFind;
    std::vector<char*> send_bufs;
    std::vector<MPI_Request> send_reqs, recv_reqs;
    idx_t my_nsend = 0;
    for (idx_t i = 0; i < num_wells; i ++) toFind.emplace(well_IDs[i]);
    for (idx_t bi = bibeg; bi < bibeg + loc_nblks; bi ++) {
        if (toFind.find(bi) != toFind.end()) continue;// 本身是井

        for (auto p = bsr_rpt[bi - bibeg]; p < bsr_rpt[bi - bibeg + 1]; p ++) {
            const idx_t bj = bsr_cid[p];
            if (toFind.find(bj) != toFind.end()) {// 该位置受井的影响
                // 确定给哪个进程发
                int dst_pid = -1;
                for (int pid = 0; pid < num_procs; pid ++) {
                    if (assumed_partits[pid] <= bj && bj < assumed_partits[pid + 1]) { dst_pid = pid; break; }
                } assert(dst_pid != -1);
#ifdef PRINT_DEBUG
                printf("Proc %d own %d affected by %d owned by Proc %d -- asking ... :\n", my_pid, bi, bj, dst_pid);
                    // bsr_rhs[(bi - bibeg)*ndof], bsr_rhs[(bi - bibeg)*ndof+1], bsr_rhs[(bi - bibeg)*ndof+2], bsr_rhs[(bi - bibeg)*ndof + 3]);
#endif
                char * buf = (char *) malloc(sizeof(idx_t)*2 + ndof*ndof*sizeof(data_t));
                *((idx_t*) buf               ) = bj;
                *((idx_t*)(buf+sizeof(idx_t))) = bi;
                memcpy(buf + 2*sizeof(idx_t), bsr_val + p * ndof*ndof, sizeof(data_t)*ndof*ndof);
                MPI_Request req;
                MPI_Isend(buf, sizeof(idx_t)*2 + ndof*ndof*sizeof(data_t), MPI_BYTE, dst_pid, 6677, MPI_COMM_WORLD, & req);
                send_bufs.push_back(buf);
                send_reqs.push_back(req);
                my_nsend ++;
            }
        }
    }

    idx_t already_recv = 0;
    while (already_recv < my_nrecv) {
        char * rbuf = (char *) malloc (sizeof(idx_t)*2 + ndof*ndof*sizeof(data_t));
        MPI_Status stat;
        MPI_Recv(rbuf, sizeof(idx_t)*2 + ndof*ndof*sizeof(data_t), MPI_BYTE,
            MPI_ANY_SOURCE, 6677, MPI_COMM_WORLD, & stat);
        const idx_t wid = * ((idx_t*) rbuf);
        assert(well_records.find(wid) != well_records.end());
        const idx_t ngb = * ((idx_t*)(rbuf + sizeof(idx_t)));
#ifdef PRINT_DEBUG
        printf("Proc %d own Well %d affect %d owned by Proc %d -- answering ...\n", my_pid, wid, ngb, stat.MPI_SOURCE);
#endif
        size_t k = 0;
        for ( ; k < well_records[wid].infl2other_ids.size(); k ++) {
            if (well_records[wid].infl2other_ids[k] == ngb) {
                memcpy(well_records[wid].infl2other_vals.data() + k * ndof*ndof, rbuf + 2*sizeof(idx_t), ndof*ndof*sizeof(data_t));
                // 计算并发回
                char * sbuf = (char *) malloc (sizeof(idx_t) + ndof * sizeof(data_t));
                *((idx_t *) sbuf) = ngb;
                data_t * res = (data_t *) (sbuf + sizeof(idx_t));
                for (int r = 0; r < ndof; r++) {
                    res[r] = 0.0;
                    for (int c = 0; c < ndof; c++)
                        res[r] += well_records[wid].infl2other_vals[k * ndof*ndof + c * ndof + r]
                                * well_records[wid].sol[c];
                }
                MPI_Request req;
                MPI_Isend(sbuf, sizeof(idx_t) + ndof * sizeof(data_t), MPI_BYTE, stat.MPI_SOURCE, 7788, MPI_COMM_WORLD, & req);
                send_reqs.push_back(req);
                send_bufs.push_back(sbuf);
                break;
            }
        } assert(k < well_records[wid].infl2other_ids.size());
        free(rbuf);
        already_recv ++;
    } assert(already_recv == my_nrecv);

    already_recv = 0;
    while (already_recv < my_nsend) {
        char * rbuf = (char *) malloc (sizeof(idx_t) + ndof*sizeof(data_t));
        MPI_Status stat;
        MPI_Recv(rbuf, sizeof(idx_t) + ndof*sizeof(data_t), MPI_BYTE, MPI_ANY_SOURCE, 7788, MPI_COMM_WORLD, & stat);
        idx_t bi = *((idx_t *)rbuf);
        assert(bibeg <= bi && bi < bibeg + loc_nblks);
        data_t * infl = (data_t *)(rbuf + sizeof(idx_t));
#ifdef PRINT_DEBUG
        printf("Proc %d own %d received from Proc %d : %.4e %.4e %.4e %.4e\n", my_pid, bi, stat.MPI_SOURCE,
            infl[0], infl[1], infl[2], infl[3]);
#endif
        bi -= bibeg;// 局部序号
#ifdef PRINT_DEBUG
        printf("    old : ");
        for (int f = 0; f < ndof; f++) printf("%.4e ", bsr_rhs[bi * ndof + f]);
#endif
        for (int f = 0; f < ndof; f++) bsr_rhs[bi * ndof + f] -= infl[f];
#ifdef PRINT_DEBUG
        printf("\n    new : ");
        for (int f = 0; f < ndof; f++) printf("%.4e ", bsr_rhs[bi * ndof + f]);
        printf("\n");
#endif
        free(rbuf);
        already_recv ++;
    } assert(already_recv == my_nsend);

    MPI_Waitall(send_reqs.size(), send_reqs.data(), MPI_STATUSES_IGNORE);
    for (size_t i = 0; i < send_bufs.size(); i ++) free(send_bufs[i]);

    // 修改
    idx_t my_new_nblks = loc_nblks - well_records.size();
    idx_t my_new_bibeg = bibeg;
    big_idx_t new_nnz = bsr_rpt[loc_nblks];
    for (idx_t i = 0; i < num_wells; i ++) {
        const idx_t wid = well_IDs[i];
        if (wid <= bibeg) my_new_bibeg --;// 向前填补井的空缺
        if (well_records.find(wid) != well_records.end()) {
            new_nnz -= bsr_rpt[wid - bibeg + 1] - bsr_rpt[wid - bibeg];
        }
    }
    new_nnz -= my_nsend;

    idx_t p0_r = 0, p1_r = 0;
    big_idx_t p0_cv = 0;
    assert(bsr_rpt[0] == 0);
    for ( ; p1_r < loc_nblks; p1_r ++) {
        assert(p0_r <= p1_r);
        const idx_t glb_i = p1_r + bibeg;
        if (toFind.find(glb_i) == toFind.end()) {// 该行不是井
            for (auto p = bsr_rpt[p1_r]; p < bsr_rpt[p1_r + 1]; p ++) {// 检查邻居是否为井
                assert(p0_cv <= p);
                idx_t old_glb_j = bsr_cid[p];
                if (toFind.find(old_glb_j) == toFind.end()) {// 邻居也不是井
                    idx_t cnt = 0;
                    for (auto it = toFind.begin(); it != toFind.end(); it ++) {
                        if (*it < old_glb_j) cnt ++;// 向前填补井的空缺
                        else break;
                    }
                    bsr_cid[p0_cv] = old_glb_j - cnt;// 需要减掉偏移
                    memcpy(bsr_val + p0_cv *ndof*ndof, bsr_val + p *ndof*ndof, sizeof(data_t)*ndof*ndof);
                    p0_cv ++;
                } else {// 邻居是井
#ifdef PRINT_DEBUG
                    printf("Proc %d skip on row %d col %d\n", my_pid, glb_i, old_glb_j);
#endif
                    // p0_cv 不动
                }
            }
            bsr_rpt[p0_r] = p0_cv;
            memcpy(bsr_rhs + p0_r * ndof, bsr_rhs + p1_r * ndof, sizeof(data_t)*ndof);
            p0_r ++;
        } else {
#ifdef PRINT_DEBUG
            printf("Proc %d skip row %d where %lld nz\n", my_pid, glb_i, bsr_rpt[p1_r + 1] - bsr_rpt[p1_r]);
#endif
            // p0_r 和 p0_cv 均不动
        }
    }
    for (idx_t i = p0_r; i > 0; i --) {
        bsr_rpt[i] = bsr_rpt[i - 1];
    }
    bsr_rpt[0] = 0;
    assert(p0_r == my_new_nblks);
    assert(new_nnz = bsr_rpt[p0_r]);
    if (bsr_rpt[loc_nblks] != new_nnz) {
#ifdef PRINT_DEBUG
        printf("Proc %d : %d nrows elimin %lld nnz (%lld => %lld)\n", my_pid, loc_nblks - p0_r,
            bsr_rpt[loc_nblks] - new_nnz, bsr_rpt[loc_nblks], new_nnz);
#endif
    }

    bibeg = my_new_bibeg;
    loc_nblks = my_new_nblks;
}

#define FILE_NUM 3000

// Level-synchronized distributed BFS over the adjacency graph of matrix A,
// starting from global vertex `source`.  Returns a newly allocated array of
// length (A.end_row - A.beg_row): for each locally-owned vertex, its hop
// distance from `source` (-1 if never reached).  The caller owns the array
// and must delete[] it.
// Requires the graph (i.e. A's sparsity pattern) to be structurally
// symmetric -- checked below via the communication package.
// NOTE(review): a rank whose local vertex count is 0 breaks out immediately
// without notifying its neighbors -- presumably empty partitions never
// occur; verify against the caller.
idx_t * BFS(const par_CSRMatrix<idx_t, data_t, data_t, ndof> & A, const idx_t source)
{
    MPI_Comm comm = A.comm;
    int my_pid; MPI_Comm_rank(comm, & my_pid);
    int num_procs; MPI_Comm_size(comm, & num_procs);
    void *max_tag_buf; int succ = 0;
    MPI_Comm_get_attr(comm, MPI_TAG_UB, &max_tag_buf, &succ); assert(succ);
    const int max_tag = *((int*) max_tag_buf);// largest tag value MPI permits (2 tags consumed per level)

    assert(A.beg_row == A.beg_col && A.end_row == A.end_col);
    const idx_t rbeg = A.beg_row, rend = A.end_row;
    const idx_t loc_num = rend - rbeg;
    // Helpers for the global <-> local index mapping of locally-owned rows.
    auto check_in_range = [rbeg, rend](const idx_t id) {
        return rbeg <= id && id < rend;
    };
    auto transLoc2Glb = [rbeg, loc_num](const idx_t lid) {
        assert(0 <= lid && lid < loc_num);
        return lid + rbeg;
    };
    auto transGlb2Loc = [rbeg, rend](const idx_t gid) {
        assert(rbeg <= gid && gid < rend);
        return gid - rbeg;
    };

    // Sanity check: the undirected graph must be symmetric, so every
    // neighbor we send to is also a neighbor we receive from.
    const par_CSRCommPkg<idx_t> * commpkg = A.commpkg;
    assert(commpkg->num_sends == commpkg->num_recvs);
    const idx_t num_ngbP = commpkg->num_sends;// number of neighboring processes
    std::unordered_map<int, idx_t> map_pid2bufid;// map neighboring process rank to local buf ID
    for (idx_t p = 0; p < num_ngbP; p ++) {
        assert(commpkg->recv_pids[p] == commpkg->send_pids[p]);
        const idx_t ID = map_pid2bufid.size();
        map_pid2bufid.emplace(commpkg->recv_pids[p], ID);
    }
    std::vector<std::vector<idx_t> > send_bufs(num_ngbP, std::vector<idx_t>() );
    std::vector<idx_t> map_offd2pid(A.offd.ncols);// owner rank of each off-diagonal column of this process
    assert(commpkg->recv_vec_starts[num_ngbP] == A.offd.ncols);
    for (idx_t r = 0; r < num_ngbP; r ++) {
        const int src_pid = commpkg->recv_pids[r];
        for (idx_t j = commpkg->recv_vec_starts[r]; j < commpkg->recv_vec_starts[r + 1]; j ++)
            map_offd2pid[j] = src_pid;
        send_bufs[r].clear();
    }

    std::vector<idx_t> FS;// current frontier; entries are GLOBAL vertex IDs

    idx_t * dist2s = new idx_t [loc_num];// distance from source for each locally-owned vertex
    #pragma omp parallel for schedule(static)
    for (idx_t i = 0; i < loc_num; i ++) dist2s[i] = -1;// -1 == not yet discovered
    if (check_in_range(source)) {
        dist2s[source - rbeg] = 0;
        FS.push_back(source);
    }
    
    idx_t level = 1;
    std::vector<idx_t> ngb_flags(num_ngbP, 0);// 1 once a neighbor rank has finished all of its vertices
    idx_t num_ngb_done = 0;// how many neighbor ranks have finished
    idx_t my_cnt = 0;// how many of my vertices have been labeled so far

    std::vector<std::vector<idx_t> > recv_bufs(num_ngbP, std::vector<idx_t>() );
    std::vector<idx_t> NS;// next frontier (neighbors of vertices in FS); GLOBAL IDs
    while (true) {
        if (my_cnt == loc_num) break;// all local vertices labeled: done
        // Two distinct tags per level: `lev_tag` for the 2-int header
        // (count, done-flag) and `lev_tag + 1` for the payload.
        const int lev_tag = level * 2; assert(lev_tag + 1 < max_tag);

        for (size_t u = 0; u < FS.size(); u ++) {// for each u in FS
            const idx_t loc_i = transGlb2Loc(FS[u]);// local ID of u
            // Local (diagonal-part) neighbors are labeled immediately.
            for (auto p = A.diag.row_ptr[loc_i]; p < A.diag.row_ptr[loc_i + 1]; p ++) {
                const idx_t loc_j = A.diag.col_idx[p];
                if (dist2s[loc_j] == -1) {
                    dist2s[loc_j] = level;// record the distance
                    NS.push_back(transLoc2Glb(loc_j));// queue for the next sweep
                }
            }
            // Remote (off-diagonal) neighbors are forwarded to their owner.
            for (auto p = A.offd.row_ptr[loc_i]; p < A.offd.row_ptr[loc_i + 1]; p ++) {
                const idx_t loc_j = A.offd.col_idx[p];
                const idx_t glb_j = A.col_map_offd[loc_j];
                const int ngb_pid =  map_offd2pid [loc_j];
                send_bufs[ map_pid2bufid[ngb_pid] ].push_back(glb_j);
            }
        }
        // Update the count of vertices I have finished.
        my_cnt += FS.size(); assert(my_cnt <= loc_num);
        const idx_t done_flag = (my_cnt == loc_num) ? 1 : 0;// have I labeled all of my vertices?
        // Tell each (still active) neighbor how many of its vertices I have
        // discovered this level, and whether I am done.
        std::vector<MPI_Request> send_reqs;
        for (idx_t is = 0; is < num_ngbP; is ++) {
            const int ngb_pid = commpkg->send_pids[is]; assert(is == map_pid2bufid[ngb_pid]);
            if (ngb_flags[is] == 0) {// only send while the neighbor is still active
                const idx_t ngb_cnt = send_bufs[is].size();
                send_bufs[is].push_back(ngb_cnt);// header appended at the tail of the buffer
                send_bufs[is].push_back(done_flag);// (so one allocation backs header + payload)
                MPI_Request req_num;
                MPI_Isend(send_bufs[is].data() + ngb_cnt, 2, MPI_INT, ngb_pid, lev_tag, comm, & req_num);
                send_reqs.push_back(req_num);
                if (ngb_cnt > 0) {
                    MPI_Request req_dat;
                    MPI_Isend(send_bufs[is].data(), ngb_cnt, MPI_INT, ngb_pid, lev_tag + 1, comm, & req_dat);
                    send_reqs.push_back(req_dat);
                }
            }
        }
        // Expect one header from every neighbor that has not yet finished.
        idx_t  already_recv = num_ngb_done;
        while (already_recv < num_ngbP) {
            idx_t header[2];
            MPI_Status stat;
            MPI_Recv(header, 2, MPI_INT, MPI_ANY_SOURCE, lev_tag, comm, & stat);
            const int ngb_pid = stat.MPI_SOURCE;
            const idx_t bufID = map_pid2bufid[ngb_pid];
            if (header[1] == 1) {
                // printf("Proc %d lev %d know Proc %d done\n", my_pid, level, ngb_pid);
                assert(ngb_flags[bufID] == 0);// the done notification must arrive exactly once
                ngb_flags[bufID] = header[1];
                num_ngb_done ++;// that neighbor finished its work and will drop out
            }
            const idx_t num = header[0];
            if (num > 0) {
                std::vector<idx_t> & buf = recv_bufs[bufID];
                buf.resize(num);
                MPI_Recv(buf.data(), num, MPI_INT, ngb_pid, lev_tag + 1, comm, MPI_STATUS_IGNORE);
                for (idx_t k = 0; k < num; k ++) {
                    const idx_t loc_i = transGlb2Loc(buf[k]);
                    if (dist2s[loc_i] == -1) {// only accept vertices not discovered before
                        dist2s[loc_i] = level;// record the distance
                        NS.push_back(buf[k]);// queue for the next sweep
                    }
                }
            }
            already_recv ++;
        } assert(already_recv == num_ngbP);

        // Swap frontiers for the next level.
        FS = NS;
        NS.clear();
        // Drain the sends, then reset the buffers for the next level.
        MPI_Waitall(send_reqs.size(), send_reqs.data(), MPI_STATUSES_IGNORE);
        for (idx_t is = 0; is < num_ngbP; is ++) send_bufs[is].clear();

        level ++;// newly discovered vertices are one hop farther
    }
    return dist2s;
}

int main(int argc, char * argv[])
{
    setbuf(stdout, NULL);
    int my_pid, num_procs;

    int argc_cnt = 1;
    const idx_t proc_dims[3] = { atoi(argv[argc_cnt++]), atoi(argv[argc_cnt++]), atoi(argv[argc_cnt++]) };// assert(proc_dims[2] == 1);
    const char * pathname = argv[argc_cnt ++];
    const std::string iter_name = std::string(argv[argc_cnt ++]);
    const std::string prec_name = std::string(argv[argc_cnt ++]);
    std::string mg_file;
    if (prec_name == "AMG") mg_file = std::string(argv[argc_cnt ++]);

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_pid);

    // const idx_t glb_nstruct = glb_dims[0]*glb_dims[1]*glb_dims[2];
    idx_t cart_ids[3], my_ilower[3], my_iupper[3], my_dims[3];
    std::vector<idx_t> cart_parti[3];
    assert(proc_dims[0]*proc_dims[1]*proc_dims[2] == num_procs);
    {// 计算本进程在part内处于什么位置
        cart_ids[0] =  my_pid / (proc_dims[1] * proc_dims[2]);
        cart_ids[1] = (my_pid - cart_ids[0] * proc_dims[1] * proc_dims[2]) / proc_dims[2];
        cart_ids[2] = (my_pid - cart_ids[0] * proc_dims[1] * proc_dims[2] - cart_ids[1] * proc_dims[2]);
    }
    for (idx_t k = 0; k < 3; k++) {
        idx_t kbeg = -1, kend = -1;
        idx_t avg_load = glb_dims[k] / proc_dims[k];
        idx_t remain = glb_dims[k] - proc_dims[k] * avg_load;
        kbeg = avg_load * cart_ids[k];
        if (cart_ids[k] < remain) {
            kbeg += cart_ids[k];
            kend = kbeg + avg_load + 1;
        } else {
            kbeg += remain;
            kend = kbeg + avg_load;
        }
        my_ilower[k] = kbeg;
        my_iupper[k] = kend - 1;
        my_dims  [k] = kend - kbeg;
        cart_parti[k].resize(proc_dims[k] + 1);
        cart_parti[k][0] = 0;
        for (int p = 0; p < proc_dims[k]; p ++) {
            if (p < remain) cart_parti[k][p + 1] = cart_parti[k][p] + avg_load + 1;
            else            cart_parti[k][p + 1] = cart_parti[k][p] + avg_load;
        }
    }

    // const idx_t loc_nstruct = my_dims[0]*my_dims[1]*my_dims[2];
    idx_t loc_nrows, glb_nrows = glb_nblks * ndof;
    idx_t loc_nblks = -1, iblk_lower = -1;
    idx_t ilower, iupper;// 本进程负责的起始行号 和末尾行号（闭区间）
    if (my_pid == 0) {
        printf("# MPI Procs %d # OMP Threads %d\n", num_procs, omp_get_max_threads());
        printf("using blk-partition!\n");
    }

    {
        loc_nblks = glb_nblks / num_procs;
        iblk_lower = my_pid * loc_nblks;
        if (glb_nblks > loc_nblks * num_procs) {
            idx_t remain_nblks = glb_nblks - loc_nblks * num_procs;
            if (my_pid < remain_nblks) {
                loc_nblks ++;
            }
            iblk_lower += MIN(my_pid, remain_nblks);
        }
    }
    loc_nrows = loc_nblks * ndof;
    ilower = iblk_lower * ndof;
    iupper = ilower + loc_nrows - 1;

    size_t ret;
    char filename[200];
    if (my_pid == 0) printf("reading data from %s\n", pathname);
    MPI_Barrier(MPI_COMM_WORLD);
    double timing_IO = MPI_Wtime();

    // 读入二进制的向量数据
    data_t* dist_b = (data_t*) malloc (loc_nrows * sizeof(data_t));
    data_t* dist_x = (data_t*) malloc (loc_nrows * sizeof(data_t));
    sprintf(filename, "%s/b.bin", pathname);
    if ((ret = read_binary(dist_b, filename, sizeof(data_t), ilower, loc_nrows, true)) != loc_nrows) {
        printf("Error! not enough b %zu\n", ret);
        MPI_Abort(MPI_COMM_WORLD, 4);
    }

    #pragma omp parallel for schedule(static)
    for (big_idx_t i = 0; i < loc_nrows; i++)
        dist_x[i] = 0.0;
    
    // 读入二进制的矩阵数据
    big_idx_t * dist_bsr_rpt = (big_idx_t *) malloc(sizeof(big_idx_t) * (loc_nblks + 1));// 分布式存储的行指针（数值为全局号）
    sprintf(filename, "%s/Bi.bin", pathname);
    if ((ret = read_binary(dist_bsr_rpt, filename, sizeof(big_idx_t), iblk_lower, loc_nblks + 1)) != loc_nblks + 1) {
        printf("Error! not enough rows\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    const big_idx_t loc_nnz = dist_bsr_rpt[loc_nblks] - dist_bsr_rpt[0];// 本进程负责的行内一共的非零元数
    
    idx_t * dist_bsr_cid = (idx_t *) malloc(loc_nnz * sizeof(idx_t));// 分布式存储的列序号（数值为全局号）
    data_t* dist_bsr_val = (data_t*) malloc(loc_nnz * sizeof(data_t) * ndof*ndof);
    sprintf(filename, "%s/Bj.bin", pathname);
    if ((ret = read_binary(dist_bsr_cid, filename, sizeof(int), dist_bsr_rpt[0], loc_nnz)) != loc_nnz) {
        printf("Error! not enough dist_col_idx: %zu\n", ret);
        MPI_Abort(MPI_COMM_WORLD, 2);
    }
    MPI_Barrier(MPI_COMM_WORLD);
    sprintf(filename, "%s/Bv.bin", pathname);
    if ((ret = read_binary(dist_bsr_val, filename, sizeof(data_t), dist_bsr_rpt[0] *ndof*ndof, loc_nnz *ndof*ndof)) != loc_nnz *ndof*ndof) {
        printf("Error! not enough dist_vals: %zu\n", ret);
        MPI_Abort(MPI_COMM_WORLD, 3);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    big_idx_t offset = dist_bsr_rpt[0];
    for (idx_t i = 0; i <= loc_nblks; i++)// 修改成符合trans2BSR输入的CSR
        dist_bsr_rpt[i] -= offset;

    for (idx_t loc_bi = 0; loc_bi < loc_nblks; loc_bi ++) {
        const idx_t glb_bi = loc_bi + iblk_lower;
        if (glb_bi == 8657011 || glb_bi == 8657012) continue;// 井跳过
        bool all_zero = true;
        big_idx_t p = dist_bsr_rpt[loc_bi];
        const double * vals = nullptr;
        for ( ; p < dist_bsr_rpt[loc_bi + 1]; p ++) {
            if (dist_bsr_cid[p] == glb_bi) continue;// 主对角元跳过
            vals = dist_bsr_val + p * ndof*ndof;
            if (vals[0] != 0.0 || vals[ndof] != 0.0 || vals[2*ndof] != 0.0 || vals[3*ndof] != 0.0) {
                all_zero = false;
                break;
            }
        }
        if (all_zero == false) {
            printf("NOTZero !! Proc %d glb_bi %d glb_bj %d : %.4e %.4e %.4e %.4e\n", my_pid, glb_bi, dist_bsr_cid[p],
                vals[0], vals[ndof], vals[2*ndof], vals[3*ndof]);
        }
    }

    idx_t elim_well = 1;
    std::map<idx_t, WELL_DESCRIP> well_records;
    if (elim_well) {
#ifdef PRINT_DEBUG
        if (my_pid == 0) printf("First eliminate wells\n");
#endif
        eliminate_wells(well_records, iblk_lower, loc_nblks, dist_bsr_rpt, dist_bsr_cid, dist_bsr_val, dist_b);
    }
    
    timing_IO = MPI_Wtime() - timing_IO;

    TEST_RECORD records[TEST_CNT];
    for (int test = 0; test < TEST_CNT; test ++) {
        par_CSRMatrix<idx_t, data_t, data_t, ndof> my_A4(MPI_COMM_WORLD,
            glb_nblks - elim_well * 2, iblk_lower, iblk_lower + loc_nblks,
            glb_nblks - elim_well * 2, iblk_lower, iblk_lower + loc_nblks);
        par_Vector<idx_t, data_t, ndof> my_x4(MPI_COMM_WORLD, glb_nblks - elim_well * 2, iblk_lower, iblk_lower + loc_nblks),
                                        my_b4(MPI_COMM_WORLD, glb_nblks - elim_well * 2, iblk_lower, iblk_lower + loc_nblks);
        assert((idx_t) my_b4.local_vector->tot_len == loc_nblks * ndof);
        for (long long i = 0; i < my_b4.local_vector->tot_len; i++) {
            my_b4.local_vector->data[i] = dist_b[i];
            my_x4.local_vector->data[i] = 0.0;
        }
        my_A4.set_values_distributed(dist_bsr_rpt, dist_bsr_cid, dist_bsr_val);
        
        MPI_Barrier(MPI_COMM_WORLD);
        double t_coord = MPI_Wtime();
        if (my_pid == 0) printf("TEST %d :: Searching Coordinates\n", test);

#ifdef PRINT_DEBUG
        {// 自己做点积检查数据是否传对
            double my_b_dot = vec_dot<idx_t, data_t, double>(my_b4, my_b4);
            double my_x_dot = vec_dot<idx_t, data_t, double>(my_x4, my_x4);
            if (my_pid == 0) {
                printf("My  calc dot\n");
                printf("(  b,   b) = %.15e\n",  my_b_dot);
                printf("(  x,   x) = %.15e\n",  my_x_dot);
            }
            my_A4.Mult(1.0, my_b4, 0.0, my_x4, my_x4);// my_x <- A*my_b
            my_x_dot = vec_dot<idx_t, data_t, double>(my_x4, my_x4);
            if (my_pid == 0) {
                printf("(A*b, A*b) = %.15e\n",  my_x_dot);
            }
        }
#endif

        std::vector<double> t_remaps;
        // remap
        int16_t * loc_map_to_3D = new int16_t [loc_nblks * 3];// 记录本进程内每行对应的三维坐标
        {// 进行三次搜索，并求解得到点的三维坐标
            std::vector<idx_t> corner_ids;
            corner_ids.push_back(8569209);// 原点
            corner_ids.push_back(1224271); corner_ids.push_back(5252520);
            idx_t * my_dist2s[3];
            for (size_t iter = 0; iter < sizeof(my_dist2s)/sizeof(idx_t*); iter ++) {
                my_dist2s[iter] = BFS(my_A4, corner_ids[iter]);
            }
            const idx_t s0 = corner_ids[0];
            const idx_t s1 = corner_ids[1], d1 = 0;// s1 对应 (dims[0]-1, 0, 0) 位置
            const idx_t s2 = corner_ids[2], d2 = 1;// s2 对应 (0, dims[1]-1, 0) 位置
            const idx_t * rec0 = my_dist2s[0],
                        * rec1 = my_dist2s[1],
                        * rec2 = my_dist2s[2];
            // 与s0距离为C的面可以表示为  X+Y+Z=C
            // 与s1距离为D的面可以表示为 -X+Y+Z=D+F，待定系数F，注意到s0距离s1为rec0[s1].dist或者rec1[s0].dist，均为dims[d1]-1
            const idx_t offset_s1 = glb_dims[d1] - 1;
            if (my_A4.beg_row <= s1 && s1 < my_A4.end_row) assert(rec0[s1 - my_A4.beg_row] == offset_s1);
            if (my_A4.beg_row <= s0 && s0 < my_A4.end_row) assert(rec1[s0 - my_A4.beg_row] == offset_s1);
            // 则代入s0的坐标(0,0,0)可得到0=offset_s1+F，故F=-offset_s1
            // 与s2距离为E的面可以表示为  X-Y+Z=E+G，待定系数G，注意到s0距离s2为rec0[s2].dist或者rec2[s0].dist，均为dims[d2]-1
            const idx_t offset_s2 = glb_dims[d2] - 1;
            if (my_A4.beg_row <= s2 && s2 < my_A4.end_row) assert(rec0[s2 - my_A4.beg_row] == offset_s2);
            if (my_A4.beg_row <= s0 && s0 < my_A4.end_row) assert(rec2[s0 - my_A4.beg_row] == offset_s2);
            // 则代入s0的坐标(0,0,0)可得到0=offset_s2+G，故F=-offset_s2
            // 对于任意点i，需求它的(X,Y,Z)
            // 已知它距离s0为rec0[i].dist，距离s1为rec1[i].dist，距离s2为rec2[i].dist
            // {  X + Y + Z = rec0[i].dist             }
            // { -X + Y + Z = rec1[i].dist - offset_s1 } 
            // {  X - Y + Z = rec2[i].dist - offset_s2 } 联立求解得到
            // X = ( rec0[i].dist - rec1[i].dist + offset_s1 ) / 2
            // Y = ( rec0[i].dist - rec2[i].dist + offset_s2 ) / 2
            // Z = ( rec1[i].dist + rec2[i].dist - offset_s1 - offset_s2 ) / 2
            // std::unordered_map<idx_t, idx_t> loc_map_to_dict[omp_get_max_threads()];// 记录三维坐标的一维字典序所对应的原行号
            if (my_pid == 0) printf("begin solve coordinates ...\n");
            #pragma omp parallel
            {
                // int tid = omp_get_thread_num();
                // std::unordered_map<idx_t, idx_t> & thread_map_to_dict = loc_map_to_dict[tid];
                #pragma omp for schedule(static)
                for (idx_t i = 0; i < loc_nblks; i++) {
                    int16_t * dst_ptr = loc_map_to_3D + i * 3;
                    if (elim_well == 0 && // 没有消去井的情况下才检查
                        well_records.find(i + iblk_lower) != well_records.end()) {// 该全局序号对应的是井
                        dst_ptr[0] = dst_ptr[1] = dst_ptr[2] = -1;// 置为-1
                        continue;
                    }// 不是井才计算坐标
                    const idx_t t0 = rec0[i] - rec1[i] + offset_s1,
                                t1 = rec0[i] - rec2[i] + offset_s2,
                                t2 = rec1[i] + rec2[i] - offset_s1 - offset_s2;
                    assert((t0 & 0b1) == 0);
                    assert((t1 & 0b1) == 0);
                    assert((t2 & 0b1) == 0);
                    dst_ptr[0] = t0 / 2; assert(0 <= dst_ptr[0] && dst_ptr[0] < glb_dims[0]);
                    dst_ptr[1] = t1 / 2; assert(0 <= dst_ptr[1] && dst_ptr[1] < glb_dims[1]);
                    dst_ptr[2] = t2 / 2; assert(0 <= dst_ptr[2] && dst_ptr[2] < glb_dims[2]);
                    // 该三维坐标转换为一维字典序
                    // const idx_t dict_id = (dst_ptr[0] * glb_dims[1] + dst_ptr[1]) * glb_dims[2] + dst_ptr[2];
                    // assert(thread_map_to_dict.find(dict_id) == thread_map_to_dict.end());
                    // thread_map_to_dict.emplace(dict_id, i + iblk_lower);
                }
            }
            for (int i = 0; i < 3; i++) { delete [] my_dist2s[i]; my_dist2s[i] = nullptr; }
        }

        MPI_Barrier(MPI_COMM_WORLD);
        t_coord = MPI_Wtime() - t_coord;// end of coordinate-reconstruction timing
        if (my_pid == 0) printf("TEST %d :: Distributing Data\n", test);

        // Storage for the matrix/vector data under the NEW (Cartesian) partition.
        idx_t my_final_cnt = 0;// number of rows this process owns in the new partition
        big_idx_t my_final_nnz = 0;// number of block-nonzeros this process owns in the new partition
        idx_t * final_parti   = nullptr;// new row partition, length num_procs + 1
        big_idx_t * final_bsr_rpt = nullptr;// BSR row pointers (local)
        idx_t     * final_bsr_cid = nullptr;// BSR column indices (renumbered to new ordering)
        data_t    * final_bsr_val = nullptr;// BSR block values
        data_t    * final_rhs     = nullptr;// right-hand side, reordered
// #define GLB_MAP
#ifdef GLB_MAP
        // Global (replicated on every rank) old<->final index maps.
        idx_t * map_final2old = new idx_t [glb_nblks - elim_well*2];
        idx_t * map_old2final = new idx_t [glb_nblks - elim_well*2];
#else
        idx_t * lcmp_final2old = nullptr;// idx_t [my_final_cnt]; allocated after my_final_cnt is known
        idx_t * lcmp_old2final = new idx_t [loc_nblks];// local slice of the old->final mapping
#endif
        {
            // Each process determines, for every row it owns under the OLD
            // partition, which process owns that row under the NEW Cartesian
            // partition, and packs the row data accordingly.
            struct PACKAGE {
                idx_t nrows = 0;
                std::vector<idx_t>  row_ids;// original global row indices
                std::vector<idx_t>  row_key;// 1D lexicographic keys of the rows
                std::vector<idx_t>  row_nnz;// nonzero count of each original row
                std::vector<idx_t>  row_cid;// column indices, concatenated over rows
                std::vector<data_t> row_val;// ndof*ndof block values, concatenated
                std::vector<data_t> row_rhs;// ndof rhs entries per row, concatenated
            };
            std::unordered_map<int, PACKAGE> send_records;// per-destination data this process must send
            for (idx_t i = 0; i < loc_nblks; i ++) {
                idx_t c0 = loc_map_to_3D[i*3 + 0], c1 = loc_map_to_3D[i*3 + 1], c2 = loc_map_to_3D[i*3 + 2];
                // Locate which interval of the Cartesian partition each coordinate falls into.
                idx_t p0 = (std::upper_bound(cart_parti[0].begin(), cart_parti[0].end(), c0) - cart_parti[0].begin()) - 1;
                idx_t p1 = (std::upper_bound(cart_parti[1].begin(), cart_parti[1].end(), c1) - cart_parti[1].begin()) - 1;
                idx_t p2 = (std::upper_bound(cart_parti[2].begin(), cart_parti[2].end(), c2) - cart_parti[2].begin()) - 1; assert(p2 == 0);
                int pid = (p0 * proc_dims[1] + p1) * proc_dims[2] + p2; assert(0 <= pid && pid < num_procs);
                if (send_records.find(pid) == send_records.end()) send_records.emplace(pid, PACKAGE());
                send_records[pid].row_ids.push_back(i + iblk_lower);
                send_records[pid].row_nnz.push_back(dist_bsr_rpt[i + 1] - dist_bsr_rpt[i]); assert(dist_bsr_rpt[i + 1] - dist_bsr_rpt[i] >= 0);
                for (auto ptr = dist_bsr_rpt[i]; ptr < dist_bsr_rpt[i + 1]; ptr ++) {
                    send_records[pid].row_cid.push_back(dist_bsr_cid[ptr]);
                    for (int f = 0; f < ndof*ndof; f++)
                    send_records[pid].row_val.push_back(dist_bsr_val[ptr*ndof*ndof + f]);
                }
                for (int f = 0; f < ndof; f++) {
                    send_records[pid].row_rhs.push_back(dist_b[i*ndof + f]);
                }
                int16_t * glb_3D = loc_map_to_3D + i * 3;
                idx_t glb_1D = (glb_3D[0] * glb_dims[1] + glb_3D[1]) * glb_dims[2] + glb_3D[2];
                send_records[pid].row_key.push_back(glb_1D);
            }
            // Tell every peer how many rows / nonzeros to expect from us (tag 501).
            // NOTE(review): these are variable-length arrays — a GCC/Clang extension,
            // not standard C++; consider std::vector if portability matters.
            idx_t send_nrows [num_procs], send_nnzs [num_procs];
            for (int p = 0; p < num_procs; p ++) { send_nrows[p] = send_nnzs[p] = 0; }
            for (auto it = send_records.begin(); it != send_records.end(); it ++) {
                it->second.nrows = it->second.row_ids.size();
                send_nrows[it->first] = it->second.nrows; assert(it->second.row_ids.size()*ndof == it->second.row_rhs.size());
                send_nnzs [it->first] = it->second.row_cid.size(); assert(it->second.row_cid.size()*ndof*ndof == it->second.row_val.size());
            }
            idx_t send_buf [2*num_procs]; std::vector<MPI_Request> send_reqs;
            for (int p = 0; p < num_procs; p ++) {
                send_buf[p*2 + 0] = send_nrows[p];
                send_buf[p*2 + 1] = send_nnzs [p];
                MPI_Request req_num;
                MPI_Isend(send_buf + p*2, 2, MPI_INT, p, 501, my_A4.comm, & req_num);
                send_reqs.push_back(req_num);
            }
            idx_t recv_buf [2*num_procs]; std::vector<MPI_Request> recv_reqs;
            for (int p = 0; p < num_procs; p ++) {
                MPI_Request req_num;
                MPI_Irecv(recv_buf + p*2, 2, MPI_INT, p, 501, my_A4.comm, & req_num);
                recv_reqs.push_back(req_num);
            }
            MPI_Waitall(recv_reqs.size(), recv_reqs.data(), MPI_STATUSES_IGNORE); recv_reqs.clear();
            MPI_Waitall(send_reqs.size(), send_reqs.data(), MPI_STATUSES_IGNORE); send_reqs.clear();
            // Sum over all senders: how many rows / nonzeros this rank will own.
            for (int p = 0; p < num_procs; p++) {
                my_final_cnt += recv_buf[p*2    ];
                my_final_nnz += recv_buf[p*2 + 1];
            }
            // Establish the final row partition across all processes.
            final_parti = new idx_t [num_procs + 1]; final_parti[0] = 0;
            MPI_Allgather(& my_final_cnt, 1, MPI_INT, final_parti + 1, 1, MPI_INT, my_A4.comm);
            for (int p = 0; p < num_procs; p ++) final_parti[p + 1] += final_parti[p];// prefix sum -> offsets
            assert(final_parti[num_procs] == my_A4.rows_partition[num_procs]);// total row count must be preserved
            // Allocate receive-side storage.
            idx_t * my_final_duty = new idx_t     [my_final_cnt];// old global IDs of the rows this rank now owns
            idx_t * my_final_rnnz = new idx_t     [my_final_cnt];// per-row nonzero counts
            final_bsr_rpt         = new big_idx_t [my_final_cnt + 1];
            final_bsr_cid         = new idx_t     [my_final_nnz];
            final_bsr_val         = new data_t    [my_final_nnz*ndof*ndof];
            final_rhs             = new data_t    [my_final_cnt*ndof];
            // Post the row-data sends: counts (801), old IDs (802), per-row nnz (803),
            // column IDs (804), block values (805), rhs (806), lexicographic keys (807).
            for (auto it = send_records.begin(); it != send_records.end(); it ++) {
                MPI_Request req_num, req_dat0, req_dat1, req_dat2, req_dat3, req_dat4, req_dat5;
                MPI_Isend(& it->second.nrows         ,                         1, MPI_INT   , it->first, 801, my_A4.comm, & req_num);
                MPI_Isend(  it->second.row_ids.data(), it->second.nrows         , MPI_INT   , it->first, 802, my_A4.comm, & req_dat0);
                MPI_Isend(  it->second.row_nnz.data(), it->second.nrows         , MPI_INT   , it->first, 803, my_A4.comm, & req_dat1);
                MPI_Isend(  it->second.row_cid.data(), it->second.row_cid.size(), MPI_INT   , it->first, 804, my_A4.comm, & req_dat2);
                MPI_Isend(  it->second.row_val.data(), it->second.row_val.size(), MPI_DOUBLE, it->first, 805, my_A4.comm, & req_dat3);
                MPI_Isend(  it->second.row_rhs.data(), it->second.row_rhs.size(), MPI_DOUBLE, it->first, 806, my_A4.comm, & req_dat4);
                MPI_Isend(  it->second.row_key.data(), it->second.nrows         , MPI_INT   , it->first, 807, my_A4.comm, & req_dat5);
                assert(send_nrows[it->first] == it->second.nrows);
                assert(send_nnzs [it->first] == it->second.row_cid.size());
                send_reqs.push_back(req_num);
                send_reqs.push_back(req_dat0); send_reqs.push_back(req_dat1); send_reqs.push_back(req_dat2);
                send_reqs.push_back(req_dat3); send_reqs.push_back(req_dat4); send_reqs.push_back(req_dat5);
            }

            // big_idx_t nnz_ptr = 0;
            // Receive buffers in ARRIVAL order; rows are later permuted into
            // lexicographic order via invmap.
            // NOTE(review): rbuf_rpt only ever uses indices [0, my_final_cnt], so
            // 'my_final_nnz + 1' over-allocates (harmless as long as nnz >= cnt) —
            // 'my_final_cnt + 1' appears to be the intended size; confirm.
            big_idx_t * rbuf_rpt = new big_idx_t [my_final_nnz + 1]; rbuf_rpt[0] = 0;
            idx_t * rbuf_cid = new idx_t [my_final_nnz];
            data_t* rbuf_val = new data_t[my_final_nnz*ndof*ndof];
            data_t* rbuf_rhs = new data_t[my_final_cnt*ndof];
            idx_t * rbuf_key = new idx_t [my_final_cnt];
            idx_t * invmap   = new idx_t [my_final_cnt];
            idx_t  already_recv = 0;
            while (already_recv < my_final_cnt) {
                // Match the header (tag 801) from any source, then drain the rest
                // of that source's messages by tag to keep streams aligned.
                idx_t num; MPI_Status stat;
                MPI_Recv(& num                       ,   1, MPI_INT,  MPI_ANY_SOURCE, 801, my_A4.comm, & stat);
                MPI_Recv(my_final_duty + already_recv, num, MPI_INT, stat.MPI_SOURCE, 802, my_A4.comm, MPI_STATUS_IGNORE);
                MPI_Recv(my_final_rnnz + already_recv, num, MPI_INT, stat.MPI_SOURCE, 803, my_A4.comm, MPI_STATUS_IGNORE);
                for (idx_t i = 0; i < num; i ++) { assert(my_final_rnnz[already_recv + i] >= 0);
                    rbuf_rpt[already_recv + i + 1] = rbuf_rpt[already_recv + i] + my_final_rnnz[already_recv + i];
                    invmap  [already_recv + i] = already_recv + i;
                }
                big_idx_t off = rbuf_rpt[already_recv];
                big_idx_t cnt = rbuf_rpt[already_recv + num] - off; assert(cnt > 0);
                MPI_Recv(rbuf_cid +     off          , cnt          , MPI_INT   , stat.MPI_SOURCE, 804, my_A4.comm, MPI_STATUS_IGNORE);
                MPI_Recv(rbuf_val +     off*ndof*ndof, cnt*ndof*ndof, MPI_DOUBLE, stat.MPI_SOURCE, 805, my_A4.comm, MPI_STATUS_IGNORE);
                MPI_Recv(rbuf_rhs + already_recv*ndof, num*ndof     , MPI_DOUBLE, stat.MPI_SOURCE, 806, my_A4.comm, MPI_STATUS_IGNORE);
                MPI_Recv(rbuf_key + already_recv     , num          , MPI_INT   , stat.MPI_SOURCE, 807, my_A4.comm, MPI_STATUS_IGNORE);
                already_recv += num;
            } assert(already_recv == my_final_cnt); 
            assert(rbuf_rpt[my_final_cnt] == my_final_nnz);
            MPI_Waitall(send_reqs.size(), send_reqs.data(), MPI_STATUSES_IGNORE); send_reqs.clear();
            send_records.clear();// safe to discard once all sends have completed

            // Reorder the received rows by their 1D lexicographic keys; the three
            // companion arrays are permuted in lockstep with the keys.
            for (idx_t i = 0; i < my_final_cnt; i ++) invmap[i] = i;
            my_qsort_4i(rbuf_key, my_final_duty, my_final_rnnz, invmap, 0, my_final_cnt - 1);
            // After the sort, my_final_duty IS the local final->old mapping.
#ifdef GLB_MAP
            myWrapper_Allgatherv(my_final_duty, my_final_cnt, MPI_INT, map_final2old, final_parti, MPI_INT, my_A4.comm);
            for (int fin_i = 0; fin_i < glb_nblks - elim_well*2; fin_i ++)
                map_old2final[map_final2old[fin_i]] = fin_i;
#else // keep only this process's slice of the mapping
            lcmp_final2old      = new idx_t     [my_final_cnt];
            // Send each (old ID, final ID) pair back to the rank that owned the
            // old ID, so every rank can build its slice of old->final.
            for (idx_t i = 0; i < my_final_cnt; i ++) {
                const idx_t fin_i = i + final_parti[my_pid];// final (new) global ID
                const idx_t old_i = my_final_duty[i];// old global ID
                lcmp_final2old[i] = old_i;
                int pid = (std::upper_bound(my_A4.rows_partition,
                                            my_A4.rows_partition + num_procs + 1, old_i) - my_A4.rows_partition) - 1;
                assert(0 <= pid && pid < num_procs);
                if (send_records.find(pid) == send_records.end()) send_records.emplace(pid, PACKAGE());
                send_records[pid].row_ids.push_back(old_i);// old index
                send_records[pid].row_key.push_back(fin_i);// new index
            }
            for (auto it = send_records.begin(); it != send_records.end(); it ++) {
                const int pid = it->first;
                it->second.nrows = it->second.row_ids.size();
                // send_buf[it->first] = it->second.nrows;
                MPI_Request req_num, req_dat0, req_dat1;
                MPI_Isend(& it->second.nrows         ,                1, MPI_INT, pid, 901, my_A4.comm, & req_num);
                MPI_Isend(  it->second.row_ids.data(), it->second.nrows, MPI_INT, pid, 902, my_A4.comm, & req_dat0);
                MPI_Isend(  it->second.row_key.data(), it->second.nrows, MPI_INT, pid, 903, my_A4.comm, & req_dat1);
                send_reqs.push_back(req_num); send_reqs.push_back(req_dat0); send_reqs.push_back(req_dat1);
            }
            already_recv = 0;
            idx_t * old_ID_buf = new idx_t [loc_nblks];
            while (already_recv < loc_nblks) {
                idx_t num; MPI_Status stat;
                MPI_Recv(& num                        ,   1, MPI_INT,  MPI_ANY_SOURCE, 901, my_A4.comm, & stat);
                MPI_Recv(old_ID_buf     + already_recv, num, MPI_INT, stat.MPI_SOURCE, 902, my_A4.comm, MPI_STATUS_IGNORE);
                MPI_Recv(lcmp_old2final + already_recv, num, MPI_INT, stat.MPI_SOURCE, 903, my_A4.comm, MPI_STATUS_IGNORE);
                already_recv += num;
            } assert(already_recv == loc_nblks);
            // Sort by old ID so lcmp_old2final[i] answers "old (iblk_lower + i) -> final".
            hypre_qsort2i(old_ID_buf, lcmp_old2final, 0, loc_nblks - 1);

            if (old_ID_buf[0] != iblk_lower) {
                assert(my_A4.rows_partition[my_pid] == iblk_lower);
                printf("Error !! Proc %d [%d,%d) old_ID_buf[0] %d\n", my_pid, my_A4.rows_partition[my_pid], my_A4.rows_partition[my_pid + 1],
                    old_ID_buf[0]);
            }
            // The received old IDs must exactly cover [iblk_lower, iblk_lower + loc_nblks).
            assert(old_ID_buf[0] == iblk_lower); assert(old_ID_buf[loc_nblks - 1] == iblk_lower + loc_nblks - 1);
            delete [] old_ID_buf; old_ID_buf = nullptr;
            MPI_Waitall(send_reqs.size(), send_reqs.data(), MPI_STATUSES_IGNORE); send_reqs.clear();
            send_records.clear();// safe to discard once all sends have completed
#endif
#if 0 // debug-only: dump each owned row's 3D position to a per-process file
            if (my_final_duty) {
                const idx_t dims[3] = { 640, 640, 640 };// NOTE(review): hard-coded grid size for this debug dump
                if (my_pid % 4 == 0) {
                    FILE * fp = fopen("map_3D", "rb");
                    idx_t * map_3D = new idx_t [glb_nblks * 3];
                    fread(map_3D, sizeof(*map_3D), glb_nblks * 3, fp);
                    fclose(fp);
                    idx_t * glb_1D = new idx_t [my_final_cnt];
                    for (idx_t i = 0; i < my_final_cnt; i ++) {
                        idx_t dictID = my_final_duty[i];
                        if (elim_well == 1 && dictID >= well_IDs[0]) dictID += 2;// undo the well elimination shift
                        const idx_t * my_3D = map_3D + dictID * 3;
                        assert(my_3D[0] != -1 && my_3D[1] != -1 && my_3D[2] != -1);
                        glb_1D[i] = (my_3D[0] * dims[1] + my_3D[1]) * dims[2] + my_3D[2];
                    }
                    // std::sort(glb_1D, glb_1D + my_final_cnt);
                    char filename[1024];
                    sprintf(filename, "Proc%d.pos", my_pid);
                    fp = fopen(filename, "w+");
                    for (idx_t k = 0; k < my_final_cnt; k ++) {
                        const idx_t my_1D = glb_1D[k];
                        const idx_t i0 =  my_1D / (dims[1] * dims[2]),
                                    i1 = (my_1D -  dims[1] * dims[2] * i0) / dims[2],
                                    i2 =  my_1D -  dims[1] * dims[2] * i0  - dims[2] * i1;
                        fprintf(fp, "old %d (%d,%d,%d)\n", my_final_duty[k], i0, i1, i2);
                    }
                    fclose(fp);
                }
            }
#endif
            // Compute row offsets of the final (lexicographically ordered) matrix.
            final_bsr_rpt[0] = 0;
            for (idx_t i = 0; i < my_final_cnt; i ++) final_bsr_rpt[i + 1] = final_bsr_rpt[i] + my_final_rnnz[i];
            assert(my_final_nnz == final_bsr_rpt[my_final_cnt]);
            // Gather each row from its arrival position (invmap) into sorted position.
            for (idx_t dst_i = 0; dst_i < my_final_cnt; dst_i ++) {
                const idx_t buf_i = invmap[dst_i]; assert(0 <= buf_i && buf_i < my_final_cnt);
                assert(rbuf_rpt[buf_i + 1] - rbuf_rpt[buf_i] == final_bsr_rpt[dst_i + 1] - final_bsr_rpt[dst_i]);
                memcpy(final_bsr_cid + final_bsr_rpt[dst_i]          , rbuf_cid + rbuf_rpt[buf_i]          ,
                    sizeof( idx_t)*(rbuf_rpt[buf_i + 1] - rbuf_rpt[buf_i])          );
                memcpy(final_bsr_val + final_bsr_rpt[dst_i]*ndof*ndof, rbuf_val + rbuf_rpt[buf_i]*ndof*ndof,
                    sizeof(data_t)*(rbuf_rpt[buf_i + 1] - rbuf_rpt[buf_i])*ndof*ndof);
                memcpy(final_rhs + dst_i*ndof, rbuf_rhs + buf_i*ndof, sizeof(data_t)*ndof);
#ifdef GLB_MAP
                // With the replicated map, column renumbering is a direct lookup.
                for (auto p = final_bsr_rpt[dst_i]; p < final_bsr_rpt[dst_i + 1]; p ++) {
                    idx_t old_glb_j = final_bsr_cid[p]; assert(0 <= old_glb_j && old_glb_j < glb_nblks - elim_well*2);
                    final_bsr_cid[p] = map_old2final[old_glb_j];
                }
#endif
            }
#ifndef GLB_MAP // must query the rank that owns each old ID for its new ID
            std::unordered_map<idx_t, idx_t> glmp_old2final;// global map of old => final
            // Collect the set of distinct old column IDs this process must translate.
            for (big_idx_t k = 0; k < final_bsr_rpt[my_final_cnt]; k ++) {
                const idx_t old_ID = final_bsr_cid[k];
                if (glmp_old2final.find(old_ID) != glmp_old2final.end()) continue;// already queued
                int pid = (std::upper_bound(my_A4.rows_partition,
                                            my_A4.rows_partition + num_procs + 1, old_ID) - my_A4.rows_partition) - 1;
                assert(0 <= pid && pid < num_procs);
                if (send_records.find(pid) == send_records.end()) send_records.emplace(pid, PACKAGE());
                send_records[pid].row_ids.push_back(old_ID);// old ID we want translated
                glmp_old2final.emplace(old_ID, -1);// placeholder until the reply arrives
            }
            // Post inquiries: counts (tag 1001) and old IDs (tag 1002).
            for (int p = 0; p < num_procs; p ++) {
                send_buf[p] = recv_buf[p] = 0;
                if (send_records.find(p) != send_records.end()) {
                    send_records[p].nrows = send_records[p].row_ids.size();
                    send_buf[p] = send_records[p].nrows;
                    MPI_Request req_num, req_dat;
                    MPI_Isend(& send_records[p].nrows         ,                     1, MPI_INT, p, 1001, my_A4.comm, & req_num);
                    MPI_Isend(  send_records[p].row_ids.data(), send_records[p].nrows, MPI_INT, p, 1002, my_A4.comm, & req_dat);
                    send_reqs.push_back(req_num); send_reqs.push_back(req_dat);
                }
            }

            // Learn how many inquiries to expect from each peer.
            MPI_Alltoall(send_buf, 1, MPI_INT, recv_buf, 1, MPI_INT, my_A4.comm);
            idx_t inq_num = 0;// number of inquiries
            for (int p = 0; p < num_procs; p ++) inq_num += recv_buf[p];
            already_recv = 0;// inquiries handled so far
            while (already_recv < inq_num) {
                idx_t num; MPI_Status stat;
                MPI_Recv(& num                        ,   1, MPI_INT,  MPI_ANY_SOURCE, 1001, my_A4.comm, & stat);
                int pid = stat.MPI_SOURCE;
                assert(num == recv_buf[pid]);
                if (send_records.find(pid) == send_records.end()) send_records.emplace(pid, PACKAGE());
                send_records[pid].row_key.resize(num);
                MPI_Recv(send_records[pid].row_key.data(), num, MPI_INT, pid, 1002, my_A4.comm, MPI_STATUS_IGNORE);
                // Answer the inquiry: translate each old ID through our local slice.
                for (idx_t k = 0; k < num; k ++) {
                    idx_t old_ID = send_records[pid].row_key[k]; assert(iblk_lower <= old_ID && old_ID < iblk_lower + loc_nblks);
                    idx_t fin_ID = lcmp_old2final[old_ID - iblk_lower];
                    send_records[pid].row_key[k] = fin_ID;// translate in place
                }
                MPI_Request req_reply;
                MPI_Isend(send_records[pid].row_key.data(), num, MPI_INT, pid, 1003, my_A4.comm, & req_reply);
                send_reqs.push_back(req_reply);
                already_recv += num;
            } assert(already_recv == inq_num);
            already_recv = 0;// replies received so far
            const idx_t num_reply = glmp_old2final.size();
            while (already_recv < num_reply) {
                MPI_Status stat;
                MPI_Probe(MPI_ANY_SOURCE, 1003, my_A4.comm, & stat);
                int pid = stat.MPI_SOURCE;
                assert(send_records.find(pid) != send_records.end() && send_records[pid].nrows > 0);
                const idx_t num = send_records[pid].nrows;
                idx_t * reply_buf = new idx_t [num];// scratch receive buffer
                MPI_Recv(reply_buf, num, MPI_INT, pid, 1003, my_A4.comm, MPI_STATUS_IGNORE);
                const idx_t * inq_buf = send_records[pid].row_ids.data();// old IDs we asked about (same order as replies)
                for (idx_t k = 0; k < num; k ++) {
                    idx_t old_ID = inq_buf[k]; assert(glmp_old2final.find(old_ID) != glmp_old2final.end());
                    glmp_old2final[old_ID] = reply_buf[k];// record the new index
                }
                delete [] reply_buf;
                already_recv += num;
            } assert(already_recv == num_reply);
            // Renumber every column index into the final ordering.
            for (big_idx_t k = 0; k < final_bsr_rpt[my_final_cnt]; k ++) {
                const idx_t old_ID = final_bsr_cid[k];
                final_bsr_cid[k] = glmp_old2final[old_ID];
            }
            MPI_Waitall(send_reqs.size(), send_reqs.data(), MPI_STATUSES_IGNORE); send_reqs.clear();
            send_records.clear();// safe to discard once all sends have completed
#endif
            delete [] rbuf_rpt; delete [] rbuf_cid; delete [] rbuf_val; delete [] rbuf_rhs; delete [] rbuf_key;
            rbuf_rpt = nullptr; rbuf_cid = nullptr; rbuf_val = nullptr; rbuf_rhs = nullptr; rbuf_key = nullptr;
            delete [] my_final_duty; delete [] my_final_rnnz; delete [] invmap; // delete [] keyw; 
            my_final_duty = nullptr; my_final_rnnz = nullptr; invmap = nullptr; // keyw = nullptr; 
        }
        delete [] loc_map_to_3D; loc_map_to_3D = nullptr;
        // delete [] glb_map_to_3D; glb_map_to_3D = nullptr;
       
        MPI_Barrier(my_A4.comm);
        if (my_pid == 0) printf("TEST %d :: Filling Matrix and Vectors\n", test);

        // Build the reordered parallel vectors (rhs, solution, scratch) on the new partition.
        par_Vector<idx_t, data_t, ndof> rer_b(MPI_COMM_WORLD, final_parti[num_procs], final_parti[my_pid], final_parti[my_pid + 1]),
                                        rer_x(rer_b), rer_y(rer_b);
        assert((idx_t) rer_b.local_vector->tot_len == my_final_cnt * ndof);
        for (long long i = 0; i < rer_b.local_vector->tot_len; i++) {
            rer_b.local_vector->data[i] = final_rhs[i];
        }
        delete [] final_rhs; final_rhs = nullptr;
#ifdef PRINT_DEBUG
        double ck_dot = vec_dot<idx_t, data_t, double>(rer_b, rer_b);
        if (my_pid == 0) printf("after reorder (b,b) = %.15e\n", ck_dot);
#endif
        // Build the reordered parallel matrix and hand over the assembled BSR arrays.
        par_CSRMatrix<idx_t, data_t, data_t, ndof> rer_A(MPI_COMM_WORLD, final_parti[num_procs], final_parti[my_pid], final_parti[my_pid + 1],
                                                                         final_parti[num_procs], final_parti[my_pid], final_parti[my_pid + 1]);
        rer_A.set_values_distributed(final_bsr_rpt, final_bsr_cid, final_bsr_val);
        delete [] final_bsr_rpt; delete [] final_bsr_cid; delete [] final_bsr_val;
        final_bsr_rpt = nullptr; final_bsr_cid = nullptr; final_bsr_val = nullptr;
#ifdef PRINT_DEBUG
        // Sanity check: the norm of A*b must be reproducible across reorderings.
        rer_A.Mult(rer_b, rer_x, false);
        ck_dot = vec_dot<idx_t, data_t, double>(rer_x, rer_x);
        if (my_pid == 0) {
            printf("after reorder (A*b, A*b) = %.15e\n", ck_dot);
        }
        rer_x.set_val(0.0);
#endif
        double t_setup = 0.0, t_calc = 0.0;
        idx_t num_iterations = 0;
// #if 0
        MPI_Barrier(MPI_COMM_WORLD);
        t_setup -= MPI_Wtime();
        if (my_pid == 0) printf("TEST %d :: Setting Up UnstructMG\n", test);

        // Select and set up the preconditioner (double-precision variants).
        // NOTE(review): an unrecognized prec_name leaves prec_4 == nullptr and the
        // SetOperator call below dereferences it — consider an explicit abort.
        Solver<int, double, double, ndof> * prec_4 = nullptr;
        if      (prec_name == "ILU") { prec_4 = new BoxILU_I32K64P64D32_DOF4; }
        else if (prec_name == "ILUT"){ prec_4 = new BoxILU_I32K64P64D32_DOF4(1); }
        else if (prec_name == "LU" ) { prec_4 = new GlobalLU_I32K64P64D32_DOF4; }
        else if (prec_name == "AMG") { prec_4 = new UnstructMG_I32K64P64D32_DOF4(mg_file); }
        prec_4->SetOperator(rer_A);// build the multigrid/ILU/LU preconditioner
        MPI_Barrier(MPI_COMM_WORLD);
        t_setup += MPI_Wtime();
if (false) {// mixed-precision experimental path, currently disabled
        t_setup -= MPI_Wtime();
        Solver<idx_t, double, float, ndof> * low_prec_4 = nullptr;// setup in double, apply in float
        if      (prec_name == "ILU") { low_prec_4 = new BoxILU_I32K64P32D32_DOF4; }
        else if (prec_name == "ILUT"){ low_prec_4 = new BoxILU_I32K64P32D32_DOF4(1); }
        else if (prec_name == "LU" ) { low_prec_4 = new GlobalLU_I32K64P32D32_DOF4; }
        else if (prec_name == "AMG") {
            // Reuse the hierarchy already built by the double-precision AMG.
            low_prec_4 = new UnstructMG_I32K64P32D32_DOF4(mg_file);
            UnstructMG_I32K64P32D32_DOF4* dst = (UnstructMG_I32K64P32D32_DOF4*) low_prec_4;
            UnstructMG_I32K64P64D32_DOF4* src = (UnstructMG_I32K64P64D32_DOF4*)     prec_4;
            BorrowMG<int, float, double, double, double, float, 4>(src, dst);
        }
        t_setup += MPI_Wtime();
        t_calc  -= MPI_Wtime();
        if (iter_name == "GMRES") {
#if 0
            // GMRESSolver_I32K64P32D32_DOF4 gmres4;
            // gmres4.SetMaxIter(2000);
            // gmres4.SetRestartlen(30);
            // gmres4.SetRelTol(2.0e-6);
            // gmres4.SetOperator(rer_A);
            // gmres4.SetPreconditioner(* low_prec_4);
            // gmres4.Mult(rer_b, rer_x, false);
            // num_iterations += gmres4.GetNumIterations();
#else
            // Shallow-copy the matrix structure into a single-precision clone;
            // only the value arrays are duplicated (structure pointers are shared
            // and nulled out before destruction to avoid a double free).
            par_CSRMatrix<int, float, float, ndof> low_A (rer_A.comm, rer_A.rows_partition, rer_A.cols_partition);
            low_A.col_map_offd = rer_A.col_map_offd;
            low_A.commpkg      = rer_A.commpkg;
            low_A.diag.nrows   = rer_A.diag.nrows  ;      low_A.offd.nrows   = rer_A.offd.nrows  ;
            low_A.diag.ncols   = rer_A.diag.ncols  ;      low_A.offd.ncols   = rer_A.offd.ncols  ;
            low_A.diag.nnz     = rer_A.diag.nnz    ;      low_A.offd.nnz     = rer_A.offd.nnz    ;
            low_A.diag.row_ptr = rer_A.diag.row_ptr;      low_A.offd.row_ptr = rer_A.offd.row_ptr;
            low_A.diag.col_idx = rer_A.diag.col_idx;      low_A.offd.col_idx = rer_A.offd.col_idx;
            low_A.diag.vals = new float [low_A.diag.nnz*ndof*ndof];
            low_A.offd.vals = new float [low_A.offd.nnz*ndof*ndof];
            // NOTE(review): int loop bound nnz*ndof*ndof can overflow for very
            // large local nnz — confirm nnz stays below INT_MAX/(ndof*ndof).
            for (int j = 0; j < low_A.diag.nnz*ndof*ndof; j++) low_A.diag.vals[j] = rer_A.diag.vals[j];
            for (int j = 0; j < low_A.offd.nnz*ndof*ndof; j++) low_A.offd.vals[j] = rer_A.offd.vals[j];

            par_Vector<int, float, ndof> low_b (rer_b.comm, rer_b.glb_nrows, rer_b.beg_row, rer_b.end_row);
            par_Vector<int, float, ndof> low_x (low_b);
            for (int i = 0; i < (low_b.end_row - low_b.beg_row)*ndof; i++) {
                low_b.local_vector->data[i] = rer_b.local_vector->data[i];
                low_x.local_vector->data[i] = 0.0;
            }
            GMRESSolver_I32All32_DOF4 gmres4;
            gmres4.SetMaxIter(60);
            gmres4.SetRestartlen(30);
            gmres4.SetRelTol(2.0e-6);
            gmres4.SetOperator(low_A);
            gmres4.SetPreconditioner(* low_prec_4);
            gmres4.Mult(low_b, low_x, false);
            num_iterations += gmres4.GetNumIterations();
            // Return the borrowed structure pointers so low_A's destructor
            // does not free memory still owned by rer_A.
            low_A.diag.row_ptr = nullptr;      low_A.offd.row_ptr = nullptr;
            low_A.diag.col_idx = nullptr;      low_A.offd.col_idx = nullptr;
            for (int i = 0; i < (low_x.end_row - low_x.beg_row)*ndof; i++) {
                rer_x.local_vector->data[i] = low_x.local_vector->data[i];
            }
#endif
        } else {
            MPI_Abort(MPI_COMM_WORLD, -404);
        }
        t_calc  += MPI_Wtime();
        t_setup -= MPI_Wtime();
        if (prec_name == "AMG") {
            // Give the borrowed hierarchy back before deleting the float preconditioner.
            UnstructMG_I32K64P32D32_DOF4* dst = (UnstructMG_I32K64P32D32_DOF4*) low_prec_4;
            ReturnMG<int, float, double, float, 4>(dst);
        }
        t_setup += MPI_Wtime();
        delete low_prec_4; low_prec_4 = nullptr;
}
// #else
        t_calc  -= MPI_Wtime();
        if (my_pid == 0) printf("TEST %d :: Solving Equations\n", test);
{
        // Full double-precision solve: preconditioned restarted GMRES(30).
        if (iter_name == "GMRES") {
            GMRESSolver_I32K64P64D32_DOF4 gmres4;
            gmres4.SetMaxIter(200);
            gmres4.SetRestartlen(30);
            gmres4.SetRelTol(1.0e-6);
            gmres4.SetOperator(rer_A);
            gmres4.SetPreconditioner(* prec_4);
            gmres4.Mult(rer_b, rer_x, false);
            num_iterations += gmres4.GetNumIterations();
        } else {
            MPI_Abort(MPI_COMM_WORLD, -404);// only GMRES is supported here
        }
        delete prec_4; prec_4 = nullptr;
}
        MPI_Barrier(MPI_COMM_WORLD);
        t_calc += MPI_Wtime();// end of the solve phase
        if (my_pid == 0) printf("TEST %d :: Finished\n", test);
// #endif
#ifdef DEBUG_PRINT
        // NOTE(review): guard is DEBUG_PRINT here but PRINT_DEBUG earlier in this
        // file — verify which macro name is intended; one of the two never fires.
        // Compute the true residual r = b - A*x and report its 2-norm.
        par_Vector<idx_t, data_t, ndof> my_y4(rer_b);
        rer_A.Mult(-1.0, rer_x, 1.0, rer_b, my_y4);

        double real_r_nrm2 = vec_dot<idx_t, double, double, ndof>(my_y4, my_y4);
        if (my_pid == 0) {
            printf("True ||r|| = %e\n", sqrt(real_r_nrm2));
        }
#endif
        // Record this test's timings for the post-loop statistics.
        records[test].iter = num_iterations;
        records[test].setup = t_setup;
        records[test].solve = t_calc;
        records[test].coord = t_coord;
        records[test].total_w_coord = t_setup + t_calc + t_coord;
        records[test].total_wo_coord= t_setup + t_calc;
        if (my_pid == 0) {
            printf("Coord          : %.5f\n", t_coord);
            printf("Setup          : %.5f\n", t_setup);
            printf("Solve          : %.5f\n", t_calc );
            printf("Total          : %.5f\n", t_setup + t_calc);
            printf("Total (+ Coord): %.5f\n", t_coord + t_setup + t_calc);
            printf("#Iterations    : %d\n", num_iterations);
        }
    
        if (test == TEST_CNT - 1) {// after the last test, write the solution vector to disk
            idx_t  * send_buf_ID   = new idx_t  [my_final_cnt];// original indices, in the numbering that includes wells
            #pragma omp parallel for schedule(static)
            for (idx_t i = 0; i < my_final_cnt; i ++) {
#ifdef GLB_MAP
                idx_t old_ID = map_final2old[i + final_parti[my_pid]];
#else
                idx_t old_ID = lcmp_final2old[i];
#endif
                if (elim_well == 1 && well_IDs[0] <= old_ID) old_ID += 2;// undo the well-elimination index shift
                send_buf_ID[i] = old_ID;
            }
            // Root-only gather buffers (MPI_Gatherv ignores recv args on non-root ranks).
            double * sol_buf = nullptr;
            idx_t  * ID_buf  = nullptr;
            int    * displs  = nullptr, * recv_cnts = nullptr;
            if (my_pid == 0) {
                sol_buf = new double [glb_nblks * ndof];
                ID_buf  = new idx_t  [glb_nblks];
                displs    = new int [num_procs];
                recv_cnts = new int [num_procs];
                for (int p = 0; p < num_procs; p ++) {
                    displs[p] = rer_A.rows_partition[p];
                    recv_cnts[p] = rer_A.rows_partition[p + 1] - rer_A.rows_partition[p];
                }
            }
            MPI_Gatherv(send_buf_ID, my_final_cnt, MPI_INT,
                        ID_buf, recv_cnts, displs, MPI_INT, 0, MPI_COMM_WORLD);

            if (my_pid == 0) printf("collect ID\n");
            // Scale the counts/offsets from block rows to scalar entries.
            if (my_pid == 0) {
                for (int p = 0; p < num_procs; p ++) {
                    displs   [p] *= ndof;
                    recv_cnts[p] *= ndof;
                }
            }
            MPI_Gatherv(rer_x.local_vector->data, my_final_cnt*ndof, MPI_DOUBLE,
                        sol_buf, recv_cnts, displs, MPI_DOUBLE, 0, MPI_COMM_WORLD);
            if (my_pid == 0) printf("collect val\n");
            // Collect the eliminated wells' solutions onto the root.
            if (elim_well == 1) {
                idx_t num = well_records.size();
                idx_t * well_id_buf = new idx_t [num];
                double* well_sol_buf = new double [num * ndof];
                std::vector<MPI_Request> send_reqs;
                if (num > 0) {
                    idx_t cnt = 0; 
                    for (auto it = well_records.begin(); it != well_records.end(); it ++) {
                        well_id_buf[cnt] = it->first;
                        for (int f = 0; f < ndof; f++) well_sol_buf[cnt*ndof + f] = it->second.sol[f];
                        cnt ++;
                    }
                    MPI_Request req_num, req_id, req_dat;
                    MPI_Isend(& num       ,   1     , MPI_INT   , 0, 901, MPI_COMM_WORLD, & req_num);
                    MPI_Isend(well_id_buf , num     , MPI_INT   , 0, 902, MPI_COMM_WORLD, & req_id );
                    MPI_Isend(well_sol_buf, num*ndof, MPI_DOUBLE, 0, 903, MPI_COMM_WORLD, & req_dat);
                    send_reqs.push_back(req_num); send_reqs.push_back(req_id); send_reqs.push_back(req_dat);
                }
                if (my_pid == 0) {
                    idx_t recv_off = final_parti[num_procs];// wells are appended after all regular rows
                    idx_t  already_recv = 0;
                    // NOTE(review): hard-coded expectation of exactly 2 well blocks
                    // globally (matches the elim_well*2 sizing used above) — confirm.
                    while (already_recv < 2) {
                        idx_t num; MPI_Status stat;
                        MPI_Recv(& num                  ,   1     , MPI_INT   , MPI_ANY_SOURCE , 901, MPI_COMM_WORLD, & stat);
                        MPI_Recv( ID_buf + recv_off     , num     , MPI_INT   , stat.MPI_SOURCE, 902, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                        MPI_Recv(sol_buf + recv_off*ndof, num*ndof, MPI_DOUBLE, stat.MPI_SOURCE, 903, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                        recv_off += num;
                        already_recv += num;
                    } assert(already_recv == 2);
                    assert(recv_off == glb_nblks);
                }
                MPI_Waitall(send_reqs.size(), send_reqs.data(), MPI_STATUSES_IGNORE);
                delete [] well_id_buf; delete [] well_sol_buf;
            }
            if (my_pid == 0) printf("collect Well\n");
            if (my_pid == 0) {
                // Scatter the gathered values back into the ORIGINAL row ordering.
                double * orig_sol = new double [glb_nblks * ndof];
                for (idx_t i = 0; i < glb_nblks; i ++) {
                    idx_t pos = ID_buf[i]; assert(0 <= pos && pos < glb_nblks);
                    for (int f = 0; f < ndof; f++) orig_sol[pos*ndof + f] = sol_buf[i*ndof + f];
                }
                const std::string solu_name = std::string(pathname) + "/x.bin";
                printf("Writing solution to %s\n", solu_name.c_str());
                FILE * fp = fopen(solu_name.c_str(), "wb+");
                // Binary layout: leading int row count, then glb_nrows doubles.
                ret = fwrite(& glb_nrows, sizeof(int), 1, fp);
                ret = fwrite(orig_sol, sizeof(*sol_buf), glb_nrows, fp);
                fclose(fp);
                // printf("%.4e %.4e %.4e\n", sol_buf[0], sol_buf[12], sol_buf[25]);
                delete [] orig_sol;
            }
            delete [] send_buf_ID;
            delete [] sol_buf; delete [] ID_buf;
            delete [] recv_cnts; delete [] displs;
        }
#ifdef GLB_MAP
        // Release the per-test replicated index maps.
        delete [] map_final2old; delete [] map_old2final;
        map_final2old = nullptr; map_old2final = nullptr;
#else
        // Release the per-test local index-map slices.
        delete [] lcmp_final2old; delete [] lcmp_old2final;
        lcmp_final2old = nullptr; lcmp_old2final= nullptr;
#endif
    }

    if (my_pid == 0) {
        // Summarize the TEST_CNT repetitions: best and average total times,
        // reported both with and without the coordinate-reconstruction phase.
        double best_tot_w = records[0].total_w_coord;
        double best_tot_wo= records[0].total_wo_coord;
        double avg_tot_w  = 0.0;
        double avg_tot_wo = 0.0;
        double avg_setup  = 0.0, avg_solve = 0.0;
        for (int i = 0; i < TEST_CNT; i++) {
            best_tot_w = std::min(best_tot_w , records[i].total_w_coord);
            best_tot_wo= std::min(best_tot_wo, records[i].total_wo_coord);
            avg_tot_w += records[i].total_w_coord;
            avg_tot_wo+= records[i].total_wo_coord;
            avg_setup += records[i].setup;
            avg_solve += records[i].solve;
        }
        printf("AVG. Setup time         %.5f\n", avg_setup  / TEST_CNT);
        printf("AVG. Solve time         %.5f\n", avg_solve  / TEST_CNT);
        printf("AVG. Total time         %.5f\n", avg_tot_wo / TEST_CNT);
        printf("AVG. Total time(+coord) %.5f\n", avg_tot_w  / TEST_CNT);
        printf("BEST Total time         %.5f\n", best_tot_wo);
        printf("BEST Total time(+coord) %.5f\n", best_tot_w);
    }
    // Release the distributed input data read at startup (new[] vs malloc
    // matches how each buffer was allocated).
    delete [] dist_bsr_rpt; delete [] dist_bsr_cid; delete [] dist_bsr_val;
    free(dist_b); free(dist_x);
    if (my_pid == 0) printf("IO time %.5f\n", timing_IO);

    MPI_Finalize();

    return 0;
}