#define DOF 1

#include "test.hpp"
#include <algorithm>

using big_idx_t = idx_t;

// Read `nums` elements of `size_of_elems` bytes each from binary file `filename`
// into `buf`, starting at element offset `start`.
// If `first_num` is true, the file carries a leading `int` header which is
// skipped before seeking to the requested element.
// Returns: the number of elements actually read (as reported by fread),
//          0  if the file could not be opened,
//          -1 if the seek failed.
// Only rank 0 logs the "reading binary" message; errors are printed by any rank.
big_idx_t read_binary(void * buf, const char * filename, size_t size_of_elems, long long start, big_idx_t nums, bool first_num=false) {
    assert(size_of_elems == 4 || size_of_elems == 8);
    int my_pid; MPI_Comm_rank(MPI_COMM_WORLD, &my_pid);
    if (my_pid == 0) printf("reading binary from %s\n", filename);

    FILE * fp = fopen(filename, "rb");
    if (fp == NULL) {
        printf("cannot open %s \n", filename);
        return 0;
    }
    // Compute the byte offset once: element offset plus the optional int header.
    // (Previously the two branches duplicated the error handling, and the
    //  first_num branch reported an offset that omitted the header it seeked past.)
    const long long byte_off = (long long)size_of_elems * start
                             + (first_num ? (long long)sizeof(int) : 0LL);
    if (fseek(fp, byte_off, SEEK_SET) != 0) {
        printf("Error! cannot move file pointer to %lld-th bytes\n", byte_off);
        fclose(fp);
        return -1;
    }

    big_idx_t ret = fread(buf, size_of_elems, nums, fp);
    fclose(fp);  // fix: the success path previously leaked the FILE handle
    return ret;
}

// Driver: reads a distributed block-CSR matrix (Ai/Aj/Av) and rhs (b) from
// binary files, optionally repartitions from a 1D row split into a 3D
// Cartesian split, then builds the solver and runs setup/solve.
// argv: control_file [px py pz]  its_name  rtol  prc_name  [config_mg_file]
int main(int argc, char* argv[])
{
    int my_pid, num_procs;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_pid);
    MPI_Comm_size(MPI_COMM_WORLD, &num_procs);

    int arg_cnt = 1;
    const std::string control_file(argv[arg_cnt++]);
    idx_t glb_nblks = 0;
    std::string data_path;
    idx_t case_dim[3] = {-1, -1, -1};// stays {-1,-1,-1} unless the control file declares order 3 with grid sizes
    {// Parse the plain-text control file: case name, dof, global nrows/ncols, data path, and (order==3) 3D grid sizes.
        FILE * fp = fopen(control_file.c_str(), "r");
        char line[1024], buf [1024];
        int ndof;
        idx_t glb_ncols = 0;
        idx_t order;
        while (fgets(line, 1024, fp))
        {
            // printf("%s", buf);
            if (sscanf(line, "name %s", buf) == 1) {
                if (my_pid == 0) printf("READ case name: %s\n", buf);
            } else if (sscanf(line, "dof %d", & ndof) == 1) {
                // The binary data layout is baked for compile-time DOF; abort on mismatch.
                if (ndof != DOF) {
                    printf("Error !! DOF %d ndof %d\n", DOF, ndof);
                    MPI_Abort(MPI_COMM_WORLD, -999);
                }
            } else if (sscanf(line, "nrows %d", & glb_nblks) == 1) {// NOTE(review): "%d" into idx_t assumes idx_t is int — confirm if idx_t may be 64-bit
                if (my_pid == 0) printf("READ glb nrows: %d\n", glb_nblks);
            } else if (sscanf(line, "ncols %d", & glb_ncols) == 1) {
                if (my_pid == 0) printf("READ glb ncols: %d\n", glb_ncols);
                // Only square matrices are supported.
                if (glb_ncols != glb_nblks) {
                    printf("Error !! glb nrows %d ncols %d\n", glb_nblks, glb_ncols);
                    MPI_Abort(MPI_COMM_WORLD, -999);
                }
            } else if (sscanf(line, "path %s", buf) == 1) {
                data_path = std::string(buf);
                if (my_pid == 0) printf("READ data path: %s\n", data_path.c_str());
            } else if (sscanf(line, "order %d", & order) == 1) {
                if (my_pid == 0) printf("READ     order: %d\n", order);
                if (order == 3) {
                    // An order-3 case is a structured 3D grid: the next line carries its sizes.
                    fgets(line, 1024, fp);
                    sscanf(line, "sizes %d %d %d", & case_dim[0], & case_dim[1], & case_dim[2]);
                    if (my_pid == 0) printf("READ glb sizes: %d %d %d\n", case_dim[0], case_dim[1], case_dim[2]);
                }
            }
        }
        fclose(fp);
        MPI_Barrier(MPI_COMM_WORLD);
    }

    if (my_pid == 0) {
        printf("# MPI Procs %d # OMP Threads %d\n", num_procs, omp_get_max_threads());
        printf("using blk-partition!\n");
    }
    // 1D block row partition for the initial read: glb_nblks rows split as
    // evenly as possible; the first `remain_nblks` ranks take one extra row.
    idx_t assumed_loc_nblks = glb_nblks / num_procs;
    idx_t assumed_iblk_beg  = my_pid * assumed_loc_nblks;
    if (glb_nblks > assumed_loc_nblks * num_procs) {
        idx_t remain_nblks = glb_nblks - assumed_loc_nblks * num_procs;
        if (my_pid < remain_nblks) {
            assumed_loc_nblks ++;
        }
        assumed_iblk_beg += MIN(my_pid, remain_nblks);
    }
    idx_t assumed_iblk_end  = assumed_iblk_beg + assumed_loc_nblks;// exclusive
    idx_t assumed_loc_nrows = assumed_loc_nblks * DOF;
    idx_t assumed_ilower = assumed_iblk_beg * DOF;// first scalar row owned (global)
    idx_t assumed_iupper = assumed_iblk_end * DOF - 1;// last scalar row owned (global, inclusive)
    ksp_t * dist_b = new ksp_t [assumed_loc_nrows];
    ksp_t * dist_x = new ksp_t [assumed_loc_nrows];
    big_idx_t * dist_row_ptr = new big_idx_t [assumed_loc_nblks + 1];
    idx_t     * dist_col_idx = nullptr;
    ksp_t     * dist_vals    = nullptr;
    {// Read the binary vector data
        char filename[200]; size_t ret;
        sprintf(filename, "%s/b.bin", data_path.c_str());
        // Missing b.bin (read_binary returns 0) falls back to rhs = all ones;
        // a short read is a hard error.
        if ((ret = read_binary(dist_b, filename, sizeof(ksp_t), assumed_ilower, assumed_loc_nrows)) != (size_t) assumed_loc_nrows) {
            if (ret == 0) {
                if (my_pid == 0) printf("Set rhs as one\n");
                for (idx_t i = 0; i < assumed_loc_nrows; i ++) dist_b[i] = 1.0;
            } else {
                printf("Error! not enough b %zu\n", ret);
                MPI_Abort(MPI_COMM_WORLD, 4);
            }
        }
        // Zero initial guess.
        #pragma omp parallel for schedule(static)
        for (big_idx_t i = 0; i < assumed_loc_nrows; i++)
            dist_x[i] = 0.0;

        // Read the binary matrix data (CSR: row pointers, column indices, values)
        sprintf(filename, "%s/Ai.bin", data_path.c_str());
        if ((ret = read_binary(dist_row_ptr, filename, sizeof(big_idx_t), assumed_iblk_beg, assumed_loc_nblks + 1)) != (size_t) assumed_loc_nblks+1) {
            printf("Error! not enough rows\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        const big_idx_t loc_nnz = dist_row_ptr[assumed_loc_nblks] - dist_row_ptr[0];// total number of nonzeros within the rows owned by this process

        dist_col_idx = new idx_t [loc_nnz];// distributed column indices (values are GLOBAL block ids)
        dist_vals    = new ksp_t [loc_nnz * DOF*DOF];// DOF x DOF dense sub-block per nonzero
        sprintf(filename, "%s/Aj.bin", data_path.c_str());
        if ((ret = read_binary(dist_col_idx, filename, sizeof(int), dist_row_ptr[0], loc_nnz)) != (size_t) loc_nnz) {
            printf("Error! not enough dist_col_idx: %zu\n", ret);
            MPI_Abort(MPI_COMM_WORLD, 2);
        }
        MPI_Barrier(MPI_COMM_WORLD);
        sprintf(filename, "%s/Av.bin", data_path.c_str());
        // NOTE(review): Av is seeked by dist_row_ptr[0] elements, not
        // dist_row_ptr[0]*DOF*DOF — correct only for DOF==1; verify if DOF>1.
        if ((ret = read_binary(dist_vals, filename, sizeof(ksp_t), dist_row_ptr[0], loc_nnz*DOF*DOF)) != (size_t) loc_nnz*DOF*DOF) {
            printf("Error! not enough dist_vals: %zu\n", ret);
            MPI_Abort(MPI_COMM_WORLD, 3);
        }
        MPI_Barrier(MPI_COMM_WORLD);

        big_idx_t offset = dist_row_ptr[0];
        for (idx_t i = 0; i <= assumed_loc_nblks; i++)// shift row pointers to local offsets, matching the CSR layout trans2BSR expects
            dist_row_ptr[i] -= offset;
    }

    // Final (possibly repartitioned) local problem: row range [final_ibeg, final_iend)
    // plus local CSR arrays, rhs and initial guess.
    idx_t final_ibeg = -1, final_iend = -1;
    big_idx_t * final_rpt = nullptr;
    idx_t     * final_cid = nullptr;
    ksp_t     * final_val = nullptr, * final_rhs = nullptr, * final_init = nullptr;

    if (case_dim[0] == -1 && case_dim[1] == -1 && case_dim[2] == -1) {// 1D partition: keep the read-time split, just transfer ownership of the arrays
        final_ibeg = assumed_iblk_beg;
        final_iend = assumed_iblk_end;
        final_rpt = dist_row_ptr; dist_row_ptr = nullptr;
        final_cid = dist_col_idx; dist_col_idx = nullptr;
        final_val = dist_vals   ; dist_vals    = nullptr;
        final_rhs = dist_b      ; dist_b       = nullptr;
        final_init= dist_x      ; dist_x       = nullptr;
    }
    else {
        // 3D Cartesian repartition: px*py*pz process grid from the command line.
        int num_proc_dim[3] = { atoi(argv[arg_cnt++]), atoi(argv[arg_cnt++]), atoi(argv[arg_cnt++]) };
        assert(num_proc_dim[0] * num_proc_dim[1] * num_proc_dim[2] == num_procs);

        idx_t cart_ids[3];
        {// compute this process's Cartesian position within the partition (z fastest)
            cart_ids[0] =  my_pid / (num_proc_dim[1] * num_proc_dim[2]);
            cart_ids[1] = (my_pid - cart_ids[0] * num_proc_dim[1] * num_proc_dim[2]) / num_proc_dim[2];
            cart_ids[2] =  my_pid - cart_ids[0] * num_proc_dim[1] * num_proc_dim[2] - cart_ids[1] * num_proc_dim[2];
        }

        // Per-dimension balanced split of the global grid: first `remain`
        // ranks along each dimension get one extra cell.
        idx_t my_ilower[3], my_iupper[3], box_ends[3], my_dims[3];
        idx_t my_nelems = 1;
        for (idx_t d = 0; d < 3; d++) {
            idx_t avg_load = case_dim[d] / num_proc_dim[d];
            idx_t remain   = case_dim[d] - avg_load * num_proc_dim[d];
        
            my_ilower[d] =  cart_ids[d]     * avg_load;
            idx_t my_load = -1;
            if (cart_ids[d] < remain) { my_ilower[d] += cart_ids[d]; my_load = avg_load + 1; } 
            else                      { my_ilower[d] +=   remain   ; my_load = avg_load    ; }

            my_iupper[d] = my_ilower[d] + my_load - 1;// inclusive upper bound
            box_ends[d] = my_iupper[d] + 1;
            my_dims[d] = my_iupper[d] - my_ilower[d] + 1;
            my_nelems *= my_dims[d];
        }
        const idx_t tot_elems = case_dim[0] * case_dim[1] * case_dim[2]; assert(tot_elems > 0);
        // row_partits[p] = first new-numbering row owned by rank p (prefix sums of per-rank element counts).
        idx_t * row_partits = new idx_t [num_procs + 1];
        row_partits[0] = 0;
        constexpr MPI_Datatype mpi_idx_type = sizeof(idx_t) == 8 ? MPI_LONG_LONG : MPI_INT;
        MPI_Allgather(& my_nelems, 1, mpi_idx_type, row_partits + 1, 1, mpi_idx_type, MPI_COMM_WORLD);
        for (int p = 0; p < num_procs; p++)
            row_partits[p + 1] += row_partits[p];
        assert(tot_elems == row_partits[num_procs]);

        idx_t * proc_dims = new idx_t [3 * num_procs];
        idx_t * proc_begs = new idx_t [3 * num_procs];
        MPI_Allgather(my_dims  , 3, mpi_idx_type, proc_dims, 3, mpi_idx_type, MPI_COMM_WORLD);
        MPI_Allgather(my_ilower, 3, mpi_idx_type, proc_begs, 3, mpi_idx_type, MPI_COMM_WORLD);
        assert( proc_dims[my_pid*3 + 0] == my_dims[0] &&
                proc_dims[my_pid*3 + 1] == my_dims[1] &&
                proc_dims[my_pid*3 + 2] == my_dims[2]);
        
        // Print each rank's box in turn (barrier serializes the output).
        for (int p = 0; p < num_procs; p ++) {
            if (my_pid == p) {
                printf("Proc %d [%d, %d) x [%d, %d) x [%d, %d)\n", my_pid,
                    my_ilower[0], my_iupper[0] + 1, 
                    my_ilower[1], my_iupper[1] + 1,
                    my_ilower[2], my_iupper[2] + 1 );
            }
            MPI_Barrier(MPI_COMM_WORLD);
        }

        // new2old maps: new (Cartesian-ordered) index -> old (read-time) global index.
        idx_t * locmap_new2old = new idx_t [my_nelems];
        idx_t * glbmap_new2old = new idx_t [glb_nblks];
        idx_t recv_cnts[num_procs];// NOTE(review): VLA — non-standard C++; fine on gcc/clang, confirm portability requirements
        for (int p = 0; p < num_procs; p ++) {
            recv_cnts[p] = row_partits[p + 1] - row_partits[p];
        }
        // Send_Package: rows this rank owns (old numbering) that some other rank needs.
        struct Send_Package {
            idx_t num = 0;
            std::vector<idx_t> old_ids;
            std::vector<idx_t> nnz_cnt;
            std::vector<idx_t> new_cid;
            std::vector<ksp_t> values;
            std::vector<ksp_t> rhs;
            std::vector<ksp_t> sol;
        };
        // Recv_Package: old-numbered rows this rank needs from a given owner.
        struct Recv_Package {
            idx_t num = 0;
            std::vector<idx_t> need_ids;
        };
        std::map<int, Send_Package > send_records;
        std::map<int, Recv_Package > recv_records;
        // For each locally-owned new element, compute its old global id and
        // record which rank (under the old 1D split) currently holds that row.
        for (idx_t i = 0; i < my_nelems; i ++) {
            idx_t loc_3D[3], glb_3D[3];
            loc_3D[0] =  i / (my_dims[1] * my_dims[2]);
            loc_3D[1] = (i -  my_dims[1] * my_dims[2] * loc_3D[0]) / my_dims[2];
            loc_3D[2] =  i -  my_dims[1] * my_dims[2] * loc_3D[0]  - my_dims[2] * loc_3D[1];
            glb_3D[0] = loc_3D[0] + my_ilower[0];
            glb_3D[1] = loc_3D[1] + my_ilower[1];
            glb_3D[2] = loc_3D[2] + my_ilower[2];
            locmap_new2old[i] = (glb_3D[0] * case_dim[1] + glb_3D[1]) * case_dim[2] + glb_3D[2];
            // Owner = rank whose [row_partits[p], row_partits[p+1]) range contains the old id.
            int pid = (std::upper_bound(row_partits, row_partits + num_procs, locmap_new2old[i]) - row_partits) - 1;
            assert(0 <= pid && pid < num_procs);

            if (recv_records.find(pid) == recv_records.end()) recv_records.emplace(pid, Recv_Package());

            recv_records[pid].num ++;
            recv_records[pid].need_ids.push_back(locmap_new2old[i]);
        }

        // NOTE(review): MPI_INT here hard-codes idx_t==int, unlike the
        // mpi_idx_type used above — confirm for 64-bit idx_t builds.
        MPI_Allgatherv(locmap_new2old, my_nelems, MPI_INT, glbmap_new2old, recv_cnts, row_partits, MPI_INT, MPI_COMM_WORLD);

        // Invert the global map: old global id -> new global id.
        idx_t * glbmap_old2new = new idx_t [glb_nblks];
        #pragma omp parallel for schedule(static)
        for (idx_t new_i = 0; new_i < glb_nblks; new_i ++) {
            glbmap_old2new[glbmap_new2old[new_i]] = new_i;
        }

        // Renumber column indices of the locally-read matrix in place (old -> new).
        #pragma omp parallel for schedule(static)
        for (idx_t i = 0; i < assumed_loc_nblks; i ++) {
            for (auto p = dist_row_ptr[i]; p < dist_row_ptr[i + 1]; p ++) {
                idx_t old_j = dist_col_idx[p];
                idx_t new_j = glbmap_old2new[old_j];
                dist_col_idx[p] = new_j;
            }
        }

        // Phase 1: tell each old-owner which rows we need (tags 101/102).
        std::vector<MPI_Request> send_reqs;
        for (auto it = recv_records.begin(); it != recv_records.end(); it ++) {
            const int pid = it->first;
            MPI_Request req_num, req_dat;
            MPI_Isend(& it->second.num , 1, MPI_INT, pid, 101, MPI_COMM_WORLD, & req_num);
            MPI_Isend(it->second.need_ids.data(), it->second.num, MPI_INT, pid, 102, MPI_COMM_WORLD, & req_dat);
            send_reqs.push_back(req_num); send_reqs.push_back(req_dat);
        }

        // Phase 2: as the old owner, serve requests until every locally-read
        // row has been claimed; ship nnz counts, column ids, values, rhs, sol
        // (tags 201..205).
        idx_t  already_recv = 0;
        while (already_recv < assumed_loc_nblks) {
            idx_t num;
            MPI_Status stat;
            MPI_Recv(& num, 1, MPI_INT, MPI_ANY_SOURCE, 101, MPI_COMM_WORLD, & stat);
            int pid = stat.MPI_SOURCE;
            assert(send_records.find(pid) == send_records.end());
            send_records.emplace(pid, Send_Package());
            Send_Package & package = send_records[pid];
            package.num = num;
            package.old_ids.resize(num);
            MPI_Recv(package.old_ids.data(), num, MPI_INT, pid, 102, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

            // Pack the requested rows from the locally-read CSR arrays.
            for (idx_t i = 0; i < num; i ++) {
                idx_t old_i = package.old_ids[i]; assert(assumed_iblk_beg <= old_i && old_i < assumed_iblk_end);
                package.nnz_cnt.push_back(  dist_row_ptr[old_i - assumed_iblk_beg + 1] -
                                            dist_row_ptr[old_i - assumed_iblk_beg]);
                for (auto   p = dist_row_ptr[old_i - assumed_iblk_beg];
                            p < dist_row_ptr[old_i - assumed_iblk_beg + 1]; p ++)
                {
                    package.new_cid.push_back(dist_col_idx[p]);
                    for (int f = 0; f < DOF*DOF; f++)
                    package.values .push_back(dist_vals   [p*DOF*DOF + f]);
                }
                for (int f = 0; f < DOF; f++) {
                    package.rhs.push_back(dist_b[(old_i - assumed_iblk_beg)*DOF + f]);
                    package.sol.push_back(dist_x[(old_i - assumed_iblk_beg)*DOF + f]);
                }
            }
            assert(num == (idx_t) package.nnz_cnt.size());
            // package outlives the loop (lives in send_records), so the Isend
            // buffers stay valid until the final Waitall.
            MPI_Request req_0, req_1, req_2, req_3, req_4;
            MPI_Isend(package.nnz_cnt.data(), num                   , MPI_INT   , pid, 201, MPI_COMM_WORLD, & req_0);
            MPI_Isend(package.new_cid.data(), package.new_cid.size(), MPI_INT   , pid, 202, MPI_COMM_WORLD, & req_1);
            MPI_Isend(package.values .data(), package.values .size(), MPI_DOUBLE, pid, 203, MPI_COMM_WORLD, & req_2);
            MPI_Isend(package.rhs    .data(), package.rhs    .size(), MPI_DOUBLE, pid, 204, MPI_COMM_WORLD, & req_3);
            MPI_Isend(package.sol    .data(), package.sol    .size(), MPI_DOUBLE, pid, 205, MPI_COMM_WORLD, & req_4);
            send_reqs.push_back(req_0); send_reqs.push_back(req_1); send_reqs.push_back(req_2);
            send_reqs.push_back(req_3); send_reqs.push_back(req_4); 
            already_recv += num;
        }
        assert(already_recv == assumed_loc_nblks);

        final_rpt = new big_idx_t [my_nelems + 1]; final_rpt[0] = 0;
        final_rhs = new ksp_t [my_nelems*DOF];
        final_init= new ksp_t [my_nelems*DOF];
        
        // Phase 3: receive per-row nnz counts, rhs and sol (tags 201/204/205),
        // scattering each row into its new local position.
        already_recv = 0;
        while (already_recv < my_nelems) {
            MPI_Status stat;
            MPI_Probe(MPI_ANY_SOURCE, 201, MPI_COMM_WORLD, & stat);
            const int pid = stat.MPI_SOURCE;
            const idx_t num = recv_records[pid].num;
            idx_t * rbuf_1 = new idx_t [num];
            ksp_t * rbuf_4 = new ksp_t [num*DOF];
            ksp_t * rbuf_5 = new ksp_t [num*DOF];
            MPI_Recv(rbuf_1, num    , MPI_INT   , pid, 201, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            MPI_Recv(rbuf_4, num*DOF, MPI_DOUBLE, pid, 204, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            MPI_Recv(rbuf_5, num*DOF, MPI_DOUBLE, pid, 205, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            const idx_t * need_ids = recv_records[pid].need_ids.data();
            for (idx_t k = 0; k < num; k ++) {
                idx_t old_i = need_ids[k];
                idx_t glb_new_i = glbmap_old2new[old_i]; assert(row_partits[my_pid] <= glb_new_i && glb_new_i < row_partits[my_pid + 1]);
                idx_t loc_new_i = glb_new_i - row_partits[my_pid];
                final_rpt[loc_new_i + 1] = rbuf_1[k];// row nnz count; prefix-summed into offsets below
                for (int f = 0; f < DOF; f++) {
                    final_rhs [loc_new_i*DOF + f] = rbuf_4[k*DOF + f];
                    final_init[loc_new_i*DOF + f] = rbuf_5[k*DOF + f];
                }
            }
            delete [] rbuf_1; delete [] rbuf_4; delete [] rbuf_5;
            already_recv += num;
        }
        assert(already_recv == my_nelems);
        // Turn per-row counts into CSR row offsets.
        for (idx_t i = 0; i < my_nelems; i ++) final_rpt[i + 1] += final_rpt[i];
        final_cid = new idx_t [final_rpt[my_nelems]];
        final_val = new ksp_t [final_rpt[my_nelems]*DOF*DOF];
        
        // Phase 4: receive column ids and values (tags 202/203); sender order
        // within each message matches need_ids order, so rows can be scattered
        // using the row offsets computed above.
        already_recv = 0;
        while (already_recv < my_nelems) {
            MPI_Status stat;
            MPI_Probe(MPI_ANY_SOURCE, 203, MPI_COMM_WORLD, & stat);
            const int pid = stat.MPI_SOURCE;
            const idx_t num = recv_records[pid].num;
            const idx_t * need_ids = recv_records[pid].need_ids.data();
            // count the nonzeros to be received from this sender
            big_idx_t recv_nnz = 0;
            for (idx_t k = 0; k < num; k ++) {
                idx_t old_i = need_ids[k];
                idx_t glb_new_i = glbmap_old2new[old_i]; assert(row_partits[my_pid] <= glb_new_i && glb_new_i < row_partits[my_pid + 1]);
                idx_t loc_new_i = glb_new_i - row_partits[my_pid];
                recv_nnz += final_rpt[loc_new_i + 1] - final_rpt[loc_new_i];
            }
            idx_t * rbuf_2 = new idx_t [recv_nnz];
            ksp_t * rbuf_3 = new ksp_t [recv_nnz*DOF*DOF];
            MPI_Recv(rbuf_2, recv_nnz        , MPI_INT   , pid, 202, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            MPI_Recv(rbuf_3, recv_nnz*DOF*DOF, MPI_DOUBLE, pid, 203, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            big_idx_t ptr = 0;
            for (idx_t k = 0; k < num; k ++) {
                idx_t old_i = need_ids[k];
                idx_t glb_new_i = glbmap_old2new[old_i];
                idx_t loc_new_i = glb_new_i - row_partits[my_pid];
                idx_t row_nnz = final_rpt[loc_new_i + 1] - final_rpt[loc_new_i];
                for (idx_t p = 0; p < row_nnz; p ++) {
                    final_cid[ final_rpt[loc_new_i] + p             ] = rbuf_2[ ptr + p             ];
                    for (int f = 0; f < DOF*DOF; f++)
                    final_val[(final_rpt[loc_new_i] + p)*DOF*DOF + f] = rbuf_3[(ptr + p)*DOF*DOF + f];
                }
                ptr += row_nnz;
            } assert(ptr == recv_nnz);
            delete [] rbuf_2; delete [] rbuf_3;
            already_recv += num;
        }
        assert(already_recv == my_nelems);

        // All receives matched: now it is safe to complete the sends and free the read-time arrays.
        MPI_Waitall(send_reqs.size(), send_reqs.data(), MPI_STATUSES_IGNORE);

        final_ibeg = row_partits[my_pid    ];
        final_iend = row_partits[my_pid + 1];

        delete [] locmap_new2old; delete [] glbmap_new2old;
        delete [] dist_row_ptr;
        delete [] dist_col_idx;
        delete [] dist_vals   ;
        delete [] dist_b      ;
        delete [] dist_x      ;
        delete [] row_partits; delete [] proc_dims; delete [] proc_begs;
    }

    // const idx_t avg_ngb = (double)final_rpt[final_iend - final_ibeg] / (final_iend - final_ibeg);
    // for (idx_t i = 0; i < final_iend - final_ibeg; i ++) {
    //     // if (final_rpt[i + 1] - final_rpt[i] > avg_ngb * 10) {
    //     //     printf("Warning !! Proc %d glbI %d num %d (%d)\n", my_pid, i + final_ibeg,
    //     //         final_rpt[i + 1] - final_rpt[i], avg_ngb);
    //     // }
    //     if (i + final_ibeg == 2204270) {
    //         printf("%d num ngb %d\n", i + final_ibeg, final_rpt[i + 1] - final_rpt[i]);
    //     }
    // }

    // Remaining command-line args: solver name, relative tolerance,
    // preconditioner name, and (GMG only) a multigrid config file.
    const std::string   its_name = std::string(argv[arg_cnt++]);
    const ksp_t         rtol     = atof       (argv[arg_cnt++]);
    const std::string   prc_name = std::string(argv[arg_cnt++]);
    std::string config_mg_file;
	if (prc_name == "GMG") config_mg_file = std::string(argv[arg_cnt++]);
    std::vector<TEST_RECORD> records;
    const int test_cnt = 1;
    for (int test = 0; test < test_cnt; test ++) {
        par_CSRMatrix<idx_t, ksp_t, ksp_t, DOF> my_A(MPI_COMM_WORLD,
            glb_nblks, final_ibeg, final_iend,
            glb_nblks, final_ibeg, final_iend);
        par_Vector<idx_t, ksp_t, DOF>   my_x(MPI_COMM_WORLD, glb_nblks, final_ibeg, final_iend),
                                        my_b(MPI_COMM_WORLD, glb_nblks, final_ibeg, final_iend),
                                        my_y(MPI_COMM_WORLD, glb_nblks, final_ibeg, final_iend);
        for (long long i = 0; i < my_b.local_vector->tot_len; i++) {
            my_b.local_vector->data[i] = final_rhs [i];
            my_x.local_vector->data[i] = final_init[i];
        }
        my_A.set_values_distributed(final_rpt, final_cid, final_val);
        {// sanity check: dot products of b and x, and of A*b, before solving
            double my_b_dot = vec_dot<idx_t, ksp_t, double>(my_b, my_b);
            double my_x_dot = vec_dot<idx_t, ksp_t, double>(my_x, my_x);
            if (my_pid == 0) {
                printf("My  calc dot\n");
                printf("(  b,   b) = %.15e\n",  my_b_dot);
                printf("(  x,   x) = %.15e\n",  my_x_dot);
            }
            my_A.Mult(1.0, my_b, 0.0, my_x, my_x);// my_x <- A*my_b
            my_x_dot = vec_dot<idx_t, ksp_t, double>(my_x, my_x);
            if (my_pid == 0) {
                printf("(A*b, A*b) = %.15e\n",  my_x_dot);
            }
        }

        // Restore zero initial guess (the sanity check above clobbered my_x).
        my_x.set_val(0.0);
        
        TEST_CONFIG config;
        config.config_mg_file = config_mg_file;
        config.restart_len = 30;
        config.max_iter = 1000;
        config.rtol = rtol;
        TEST_RECORD rec;

        if (my_pid == 0) {
            printf("%s\n", config.config_mg_file.c_str());
        }
        MPI_Barrier(MPI_COMM_WORLD);

        buildup_solver(its_name, prc_name, config);
        setup_and_solve(my_A, my_b, my_x, rec);

        // Verify the solve by recomputing the true residual norm.
        double true_r_norm, b_norm;
        check_residual(my_A, my_x, my_b, my_y, true_r_norm, b_norm);

        if (my_pid == 0) {
            printf("\033[1;35mtrue ||r|| = %20.16e ||r||/||b||= %20.16e\033[0m\n", 
                true_r_norm, true_r_norm / b_norm);
            printf("Proc %d Setup, Solve costs %.6f %.6f s\n",
                my_pid, rec.setup, rec.solve);
        }

        stat_part_times(rec);
        records.push_back(rec);

        destroy_solver();
    }

    MPI_Finalize();
    return 0;
}