#define DOF 3

#include "test.hpp"

#include <chrono>
#include <random>
#include <algorithm>
#include <vector>
#include <string>

#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <set>
#include <map>
#include <string>

#include <vector>
#include "json/json.h"
#include <fstream>

using big_idx_t = long long;

/**
 * Read `nums` elements of `size_of_elems` bytes each from `filename`,
 * starting at element offset `start`. If `first_num` is true, the file
 * begins with a single `int` header that is skipped before seeking.
 *
 * Returns the number of elements actually read, or -1 if the file could
 * not be opened or the seek failed. Only rank 0 logs the read.
 */
big_idx_t read_binary(void * buf, const char * filename, size_t size_of_elems, long long start, big_idx_t nums, bool first_num=false) {
    assert(size_of_elems == 4 || size_of_elems == 8);
    int my_pid; MPI_Comm_rank(MPI_COMM_WORLD, &my_pid);
    if (my_pid == 0) printf("reading binary from %s\n", filename);

    FILE * fp = fopen(filename, "rb");
    if (fp == NULL) {
        printf("cannot open %s \n", filename);
        return -1;
    }
    // Byte offset of the first requested element, plus the optional int header.
    // (fseek takes a `long`; offsets beyond LONG_MAX would need fseeko.)
    const long long offset = (long long) size_of_elems * start
                           + (first_num ? (long long) sizeof(int) : 0);
    if (fseek(fp, offset, SEEK_SET) != 0) {
        printf("Error! cannot move file pointer to %lld-th bytes\n", offset);
        fclose(fp);
        return -1;
    }

    big_idx_t ret = fread(buf, size_of_elems, nums, fp);
    fclose(fp); // BUGFIX: the handle was previously leaked on the success path
    return ret;
}

/**
 * Sanity-check that all `num` entries of `input` have magnitude below 1e20.
 * NaN entries also trip the assert, because any comparison with NaN is false.
 */
void check_input(const double * input, big_idx_t num) {
    for (big_idx_t i = 0; i < num; i++)
        // fabs instead of abs: the C `int abs(int)` overload would silently
        // truncate the double argument if the C++ overloads are not visible.
        assert(fabs(input[i]) < 1e20);
}

/**
 * Read the local CSR slice [beg_row, end_row) of matrix `matname` from the
 * binary files <matname>_Ai.bin / _Aj.bin / _Av.bin.
 *
 * The three output arrays are allocated here with new[] (caller owns them;
 * eliminateZeros below frees them with delete[]). Row-pointer values keep
 * their global numbering: dist_row_ptr[0] is this rank's global nnz offset,
 * which is also the element offset used to read the col-idx/value files.
 * Aborts the MPI job if any file is short.
 */
void read_matrix(const std::string matname, const idx_t beg_row, const idx_t end_row,
    big_idx_t * & dist_row_ptr, idx_t * & dist_col_idx, double * & dist_vals)
{
    const idx_t loc_nrows = end_row - beg_row;
    char filename [1024];
    size_t ret = 0;

    dist_row_ptr = new big_idx_t [loc_nrows + 1];
    // snprintf (not sprintf) so an over-long matname cannot overflow filename.
    snprintf(filename, sizeof(filename), "%s_Ai.bin", matname.c_str());
    if ((ret = read_binary(dist_row_ptr, filename, sizeof(big_idx_t), beg_row, loc_nrows + 1)) != (size_t) loc_nrows + 1) {
        printf("Error! not enough rows\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    big_idx_t nnz = dist_row_ptr[loc_nrows] - dist_row_ptr[0];// total nonzeros in the rows owned by this process
    dist_col_idx = new idx_t [nnz];
    dist_vals    = new double   [nnz];

    snprintf(filename, sizeof(filename), "%s_Aj.bin", matname.c_str());
    if ((ret = read_binary(dist_col_idx, filename, sizeof(idx_t), dist_row_ptr[0], nnz)) != (size_t) nnz) {
        printf("Error! not enough dist_col_idx: %zu   %lld\n", ret, nnz);
        MPI_Abort(MPI_COMM_WORLD, 2);
    }
    
    snprintf(filename, sizeof(filename), "%s_Av.bin", matname.c_str());
    if ((ret = read_binary(dist_vals, filename, sizeof(double), dist_row_ptr[0], nnz)) != (size_t) nnz) {
        printf("Error! not enough dist_vals: %zu\n", ret);
        MPI_Abort(MPI_COMM_WORLD, 3);
    }
    check_input(dist_vals   , nnz);
}

/**
 * Drop explicit zeros (entries exactly == 0.0) from the local CSR slice.
 * The three arrays are replaced by newly compressed ones (delete[] on the
 * old pointers, so the inputs must have been allocated with new[], as by
 * read_matrix above). Row-pointer values keep their global offset:
 * cprs_row_ptr[0] inherits dist_row_ptr[0].
 * Finally, rank 0 reports the global nnz reduction ratio.
 */
void eliminateZeros(const idx_t loc_nrows, big_idx_t * & dist_row_ptr, idx_t * & dist_col_idx, double * & dist_vals)
{
    big_idx_t * cprs_row_ptr = new big_idx_t [loc_nrows + 1]; cprs_row_ptr[0] = dist_row_ptr[0];
    // Pass 1 (parallel): count surviving entries per row into cprs_row_ptr[i+1].
    #pragma omp parallel for schedule(static)
    for (idx_t i = 0; i < loc_nrows; i++) {
        big_idx_t cnt = 0;
        // dist_row_ptr holds global offsets; subtract dist_row_ptr[0] to
        // index the local dist_vals array.
        for (auto   p = dist_row_ptr[i  ] - dist_row_ptr[0];
                    p < dist_row_ptr[i+1] - dist_row_ptr[0]; p ++)
        {
            if (dist_vals[p] != 0.0) cnt ++;
        }
        cprs_row_ptr[i + 1] = cnt;
    }

    // Serial prefix sum turns per-row counts into row pointers.
    for (idx_t i = 0; i < loc_nrows; i++)
        cprs_row_ptr[i + 1] += cprs_row_ptr[i];

    const big_idx_t orig_nnz = dist_row_ptr[loc_nrows] - dist_row_ptr[0];
    const big_idx_t cprs_nnz = cprs_row_ptr[loc_nrows] - cprs_row_ptr[0];
    idx_t * cprs_col_idx = new idx_t [cprs_nnz];
    double* cprs_vals    = new double[cprs_nnz];

    // Pass 2 (parallel): copy the surviving entries; each row writes into its
    // own disjoint range, so no synchronization is needed.
    #pragma omp parallel for schedule(static)
    for (idx_t i = 0; i < loc_nrows; i++) {
        big_idx_t cnt = 0;
        big_idx_t offset = cprs_row_ptr[i] - cprs_row_ptr[0];
        for (auto   p = dist_row_ptr[i  ] - dist_row_ptr[0];
                    p < dist_row_ptr[i+1] - dist_row_ptr[0]; p ++)
        {
            if (dist_vals[p] != 0.0) {
                cprs_col_idx[offset + cnt] = dist_col_idx[p];
                cprs_vals   [offset + cnt] = dist_vals   [p];
                cnt ++;
            }
        }
        // Pass-2 count must match pass-1 count for this row.
        assert(cnt + cprs_row_ptr[i] == cprs_row_ptr[i + 1]);
    }
    delete [] dist_row_ptr; dist_row_ptr = cprs_row_ptr;
    delete [] dist_col_idx; dist_col_idx = cprs_col_idx;
    delete [] dist_vals   ; dist_vals    = cprs_vals;

    // Report the global compression ratio on rank 0.
    big_idx_t tmp_nnz[2] = { orig_nnz, cprs_nnz };
    big_idx_t glb_nnz[2];
    MPI_Reduce(tmp_nnz, glb_nnz, 2, MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
    int my_pid; MPI_Comm_rank(MPI_COMM_WORLD, & my_pid);
    if (my_pid == 0) printf("Original %lld Compressed %lld reduction %.4f\n", glb_nnz[0], glb_nnz[1], (double)glb_nnz[1]/(double)glb_nnz[0]);
}

// Case-specific problem sizes (the commented-out pair is a smaller case).
// // constexpr int water_well_bid = 68599;
// const idx_t glb_nblks = 1094426;

// NOTE(review): water_well_bid is not referenced anywhere in this file —
// presumably used by code elsewhere or kept for reference; confirm before removing.
constexpr int water_well_bid = 84014869;
const idx_t glb_nblks = 143615993;

/**
 * Convert a zero-based local CSR matrix (csr_nrows rows, csr_rpt[0] == 0)
 * into BSR with square blocks of size `bsize`. csr_nrows must be a multiple
 * of bsize. Output arrays are allocated here with new[] (caller owns them).
 *
 * Each bsize x bsize block is stored column-major (sub_j * bsize + sub_i,
 * see the final write below); positions with no CSR entry are zero-filled.
 * Block-column ids within a block row are emitted in ascending order
 * (std::map iteration order).
 */
void trans2BSR(const idx_t csr_nrows, big_idx_t *   csr_rpt, idx_t *   csr_cid, double *   csr_vals,
                const idx_t bsize,    big_idx_t * & bsr_rpt, idx_t * & bsr_cid, double * & bsr_vals )
{
    assert(csr_nrows % bsize == 0);
    assert(csr_rpt[0] == 0);
    const idx_t bsr_nrows = csr_nrows / bsize;
    bsr_rpt = new big_idx_t [bsr_nrows + 1]; bsr_rpt[0] = 0;

    // Pass 1 (parallel): count distinct block columns per block row.
    #pragma omp parallel for schedule(static)
    for (idx_t bi = 0; bi < bsr_nrows; bi ++) {
        std::set<idx_t> bjs;
        for (auto p = csr_rpt[bi * bsize]; p < csr_rpt[(bi + 1) * bsize]; p++) {
            const idx_t cj = csr_cid[p];
            const idx_t bj = cj / bsize; assert(0 <= bj && bj < glb_nblks);
            bjs.emplace(bj);
        }
        bsr_rpt[bi + 1] = bjs.size();
    }
    // Serial prefix sum: per-row counts -> block row pointers.
    for (idx_t bi = 0; bi < bsr_nrows; bi ++) bsr_rpt[bi + 1] += bsr_rpt[bi];
    bsr_cid = new idx_t [bsr_rpt[bsr_nrows]];
    bsr_vals= new double[bsr_rpt[bsr_nrows] * bsize * bsize];

    // Pass 2 (parallel): fill column ids and values; each block row writes a
    // disjoint range, so no synchronization is needed.
    #pragma omp parallel for schedule(static)
    for (idx_t bi = 0; bi < bsr_nrows; bi ++) {
        // Map block-column id -> position within this block row (filled below).
        std::map<idx_t, idx_t> bjs;
        for (auto p = csr_rpt[bi * bsize]; p < csr_rpt[(bi + 1) * bsize]; p++) {
            const idx_t cj = csr_cid[p];
            const idx_t bj = cj / bsize; assert(0 <= bj && bj < glb_nblks);
            bjs.emplace(bj, -1);
        }
        // Must rediscover exactly the count from pass 1.
        assert((big_idx_t) bjs.size() + bsr_rpt[bi] == bsr_rpt[bi + 1]);

        idx_t cnt = 0;
        for (auto it = bjs.begin(); it != bjs.end(); it ++) {
            bsr_cid[bsr_rpt[bi] + cnt] = it->first;
            it->second = cnt;
            cnt ++;
        }

        // Zero-fill this block row's value storage before scattering entries.
        double * vptr = bsr_vals + bsr_rpt[bi] * bsize * bsize;
        for (idx_t j = 0; j < cnt * bsize * bsize; j++)
            vptr[j] = 0.0;
        
        for (idx_t ci = bi * bsize; ci < (bi + 1) * bsize; ci ++) {
            const idx_t sub_i = ci - bi * bsize;
            for (auto p = csr_rpt[ci]; p < csr_rpt[ci + 1]; p++) {
                const idx_t cj = csr_cid[p];
                const idx_t bj = cj / bsize, sub_j = cj - bj * bsize;
                assert(bjs.find(bj) != bjs.end());
                // Column-major layout within each bsize x bsize block.
                vptr[bjs[bj] * bsize * bsize + sub_j * bsize + sub_i] = csr_vals[p];
            }
        }
    }
}


/**
 * MPI driver: read a distributed sparse system (b, A in CSR) from binary
 * files, convert the local slice to BSR with ndof x ndof blocks, run the
 * configured iterative solver `test_cnt` times, and on the last run gather
 * the solution onto rank 0 and write it to <pathname>/x.bin.
 */
int main(int argc, char * argv[])
{
    setbuf(stdout, NULL);// unbuffered stdout so prints from all ranks appear promptly
    int my_pid, num_procs;
    constexpr int ndof = DOF;// rows per block: the BSR block size

    // Command line: <data path> <iterative method> <preconditioner> [GMG config file]
    // NOTE(review): argc is never validated before indexing argv — missing
    // arguments read past the end of argv; confirm launch scripts always pass them.
    int argc_cnt = 1;
    const char * pathname = argv[argc_cnt ++];
    const std::string iter_name = std::string(argv[argc_cnt ++]);
    const std::string prec_name = std::string(argv[argc_cnt ++]);
    std::string config_mg_file;
    if (prec_name == "GMG") config_mg_file = std::string(argv[argc_cnt ++]);

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_pid);

    idx_t loc_nrows, glb_nrows = glb_nblks * ndof;
    idx_t loc_nblks = -1;
    idx_t ilower, iupper;// first and last global row owned by this process (inclusive range)
    
    if (my_pid == 0) {
        printf("# MPI Procs %d # OMP Threads %d\n", num_procs, omp_get_max_threads());
        printf("using blk-partition!\n");
    }
    // 1D block partition: every rank gets glb_nblks/num_procs blocks, and the
    // first (glb_nblks % num_procs) ranks each take one extra block.
    loc_nblks = glb_nblks / num_procs;
    idx_t iblk_lower = my_pid * loc_nblks;
    if (glb_nblks > loc_nblks * num_procs) {
        idx_t remain_nblks = glb_nblks - loc_nblks * num_procs;
        if (my_pid < remain_nblks) {
            loc_nblks++;
        }
        iblk_lower += MIN(my_pid, remain_nblks);
    }
    loc_nrows = loc_nblks * ndof;
    ilower = iblk_lower * ndof;
    iupper = ilower + loc_nrows - 1;

    if (my_pid == 0) printf("glb_nrows %d loc_nrows %d\n", glb_nrows, loc_nrows);
    if (my_pid == 0) printf("ilower %d iupper %d\n", ilower, iupper);

    if (my_pid == 0) printf("idx_t %lu bytes, big_idx_t %lu bytes real %lu bytes\n", 
        sizeof(idx_t), sizeof(big_idx_t), sizeof(double));
    size_t ret;
    // char pathname[100], filename[100];
    // sprintf(pathname, "%s/%s/%dx%dx%d", dir_name, case_name, gx, gy, gz);
    char filename[200];
    if (my_pid == 0) printf("reading data from %s\n", pathname);

    // Read the binary vector data (this rank's slice of the RHS b).
    // b.bin starts with an int header, which read_binary skips (first_num=true).
    ksp_t * dist_b = (ksp_t *) malloc (loc_nrows * sizeof(ksp_t));
    ksp_t * dist_x = (ksp_t *) malloc (loc_nrows * sizeof(ksp_t));
    sprintf(filename, "%s/b.bin", pathname);
    if ((ret = read_binary(dist_b, filename, sizeof(ksp_t), ilower, loc_nrows, true)) != (size_t) loc_nrows) {
        printf("Error! not enough b %zu\n", ret);
        MPI_Abort(MPI_COMM_WORLD, 4);
    }
    // Zero initial guess.
    #pragma omp parallel for schedule(static)
    for (big_idx_t i = 0; i < loc_nrows; i++)
        dist_x[i] = 0.0;

    check_input(dist_b, loc_nrows);
    check_input(dist_x, loc_nrows);

    
    // Read the binary matrix data (this rank's CSR slice of A).
    big_idx_t * dist_row_ptr = (big_idx_t *) malloc(sizeof(big_idx_t) * (loc_nrows+1));// distributed row pointer (values are global offsets)
    sprintf(filename, "%s/Ai.bin", pathname);
    if ((ret = read_binary(dist_row_ptr, filename, sizeof(big_idx_t), ilower, loc_nrows+1)) != (size_t) loc_nrows+1) {
        printf("Error! not enough rows\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    const big_idx_t loc_nnz = dist_row_ptr[loc_nrows] - dist_row_ptr[0];// total nonzeros in the rows owned by this process
    
    if (my_pid == 0) printf(" row_ptr[-1]: %lld row_ptr[-2]: %lld row_ptr[-3]: %lld\n", dist_row_ptr[loc_nrows], dist_row_ptr[loc_nrows-1], dist_row_ptr[loc_nrows-2]);

    // printf(" proc %d il %d iu %d nnz %d start %d\n", my_pid, ilower, iupper, loc_nnz, dist_row_ptr[0]);
    idx_t * dist_col_idx = (idx_t *) malloc(loc_nnz * sizeof(idx_t));// distributed column indices (values are global column numbers)
    ksp_t * dist_vals    = (ksp_t *) malloc(loc_nnz * sizeof(ksp_t));
    sprintf(filename, "%s/Aj.bin", pathname);
    // NOTE(review): element size is sizeof(int) here but the buffer is idx_t —
    // mismatched if idx_t is ever not a 4-byte type; confirm against test.hpp.
    if ((ret = read_binary(dist_col_idx, filename, sizeof(int), dist_row_ptr[0], loc_nnz)) != (size_t) loc_nnz) {
        printf("Error! not enough dist_col_idx: %zu\n", ret);
        MPI_Abort(MPI_COMM_WORLD, 2);
    }
    MPI_Barrier(MPI_COMM_WORLD);
    sprintf(filename, "%s/Av.bin", pathname);
    if ((ret = read_binary(dist_vals, filename, sizeof(ksp_t), dist_row_ptr[0], loc_nnz)) != (size_t) loc_nnz) {
        printf("Error! not enough dist_vals: %zu\n", ret);
        MPI_Abort(MPI_COMM_WORLD, 3);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    check_input(dist_vals   , loc_nnz);
    MPI_Barrier(MPI_COMM_WORLD);

    big_idx_t * dist_bsr_rpt = nullptr;
    idx_t * dist_bsr_cid = nullptr;
    double* dist_bsr_val = nullptr;
    big_idx_t offset = dist_row_ptr[0];
    for (idx_t i = 0; i <= loc_nrows; i++)// shift row pointers to zero-based, as trans2BSR expects
        dist_row_ptr[i] -= offset;

    trans2BSR(loc_nrows, dist_row_ptr, dist_col_idx, dist_vals, ndof, dist_bsr_rpt, dist_bsr_cid, dist_bsr_val);
    free(dist_row_ptr); free(dist_col_idx); free(dist_vals);

    // Shift BSR row pointers back to global numbering, as set_values_distributed
    // expects: add the (exclusive) prefix sum of block-nnz over lower ranks.
    big_idx_t loc_bsr_nnz = dist_bsr_rpt[loc_nblks], prev_bsr_nnz;
    MPI_Scan(& loc_bsr_nnz, & prev_bsr_nnz, 1, sizeof(big_idx_t) == 8 ? MPI_LONG_LONG : MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    prev_bsr_nnz -= loc_bsr_nnz;// MPI_Scan is inclusive; subtract own count for the exclusive prefix

    if (my_pid == 0) printf("Scan done\n");

    for (idx_t i = 0; i <= loc_nblks; i++)
        dist_bsr_rpt[i] += prev_bsr_nnz;
    
    // Run the full setup+solve test_cnt times (fresh operator/vectors each run)
    // so timings can be aggregated over multiple runs at the end.
    const int test_cnt = 3;
    std::vector<TEST_RECORD> records;
    for (int test = 0; test < test_cnt; test ++) {
        par_CSRMatrix<idx_t, ksp_t, ksp_t, 3> my_A3(MPI_COMM_WORLD,
            glb_nblks, iblk_lower, iblk_lower + loc_nblks,
            glb_nblks, iblk_lower, iblk_lower + loc_nblks);
        par_Vector<idx_t, ksp_t, 3>    my_x3(MPI_COMM_WORLD, glb_nblks, iblk_lower, iblk_lower + loc_nblks),
                                        my_b3(MPI_COMM_WORLD, glb_nblks, iblk_lower, iblk_lower + loc_nblks),
                                        my_y3(MPI_COMM_WORLD, glb_nblks, iblk_lower, iblk_lower + loc_nblks);
        assert((idx_t) my_b3.local_vector->tot_len == loc_nrows);
        for (long long i = 0; i < my_b3.local_vector->tot_len; i++) {
            my_b3.local_vector->data[i] = dist_b[i];
            my_x3.local_vector->data[i] = 0.0;
        }
        my_A3.set_values_distributed(dist_bsr_rpt, dist_bsr_cid, dist_bsr_val);
        {// compute dot products ourselves as a sanity check of the assembled data
            double my_b_dot = vec_dot<idx_t, ksp_t, double>(my_b3, my_b3);
            double my_x_dot = vec_dot<idx_t, ksp_t, double>(my_x3, my_x3);
            if (my_pid == 0) {
                printf("My  calc dot\n");
                printf("(  b,   b) = %.15e\n",  my_b_dot);
                printf("(  x,   x) = %.15e\n",  my_x_dot);
            }
#ifdef PROFILE
            const int __test_cnt = 100;
            double t_perf_spmv = wall_time();
            for (int __test = 0; __test < __test_cnt; __test ++) {
#endif
            my_A3.Mult(1.0, my_b3, 0.0, my_x3, my_x3);// my_x <- A*my_b
#ifdef PROFILE
            }
            t_perf_spmv = wall_time() - t_perf_spmv;
            printf("Proc %d perf_spmv %.5f s per count\n", my_pid, t_perf_spmv / __test_cnt);
#endif
            my_x_dot = vec_dot<idx_t, ksp_t, double>(my_x3, my_x3);
            if (my_pid == 0) {
                printf("(A*b, A*b) = %.15e\n",  my_x_dot);
            }
        }

        // Reset the initial guess after the SpMV sanity check above.
        my_x3.set_val(0.0);
        
        TEST_CONFIG config;
        config.config_mg_file = config_mg_file;
        config.restart_len = 30;
        config.max_iter = 100;
        config.rtol = 1.0e-6;
        TEST_RECORD rec;

        if (my_pid == 0) {
            printf("%s\n", config.config_mg_file.c_str());
        }
        MPI_Barrier(MPI_COMM_WORLD);

        buildup_solver(iter_name, prec_name, config);
        setup_and_solve(my_A3, my_b3, my_x3, rec);

        double true_r_norm, b_norm;
        check_residual(my_A3, my_x3, my_b3, my_y3, true_r_norm, b_norm);

        if (my_pid == 0) {
            printf("\033[1;35mtrue ||r|| = %20.16e ||r||/||b||= %20.16e\033[0m\n", 
                true_r_norm, true_r_norm / b_norm);
            printf("Proc %d Setup, Solve costs %.6f %.6f s\n",
                my_pid, rec.setup, rec.solve);
        }

        stat_part_times(rec);
        records.push_back(rec);

        destroy_solver();

        if (test == test_cnt - 1) {// on the last run, write the solution vector to file
            double * send_buf = new double [loc_nrows];

            #pragma omp parallel for schedule(static)
            for (idx_t i = 0; i < loc_nrows; i++) {
                send_buf[i] = my_x3.local_vector->data[i];
            }
            double * sol_buf = nullptr;
            int * recv_cnts = nullptr, * displs = nullptr;
            if (my_pid == 0) {
                sol_buf = new double [glb_nrows];
                recv_cnts = new int [num_procs];
                displs    = new int [num_procs + 1];
            }
            // Gather per-rank row counts onto rank 0, then build displacements.
            MPI_Gather(&loc_nrows, 1, sizeof(idx_t) == 4 ? MPI_INT : MPI_LONG_LONG,
                        recv_cnts, 1, sizeof(idx_t) == 4 ? MPI_INT : MPI_LONG_LONG, 0, MPI_COMM_WORLD);

            if (my_pid == 0) {
                displs[0] = 0;
                for (int p = 0; p < num_procs; p++) {
                    displs[p + 1] = displs[p] + recv_cnts[p];
                }
                assert(displs[num_procs] == glb_nrows);
            }

            MPI_Gatherv(send_buf, loc_nrows, MPI_DOUBLE,
                        sol_buf, recv_cnts, displs, MPI_DOUBLE, 0, MPI_COMM_WORLD);

            if (my_pid == 0) {
                const std::string solu_name = std::string(pathname) + "/x.bin";
                printf("Writing solution to %s\n", solu_name.c_str());
                // NOTE(review): fopen result and fwrite count are not checked —
                // a write failure here goes unnoticed; consider verifying.
                FILE * fp = fopen(solu_name.c_str(), "wb+");
                ret = fwrite(sol_buf, sizeof(*sol_buf), glb_nrows, fp);
                fclose(fp);
                // printf("%.4e %.4e %.4e\n", sol_buf[0], sol_buf[12], sol_buf[25]);
            }
            delete [] send_buf;
            delete [] sol_buf;
            delete [] recv_cnts; delete [] displs;
        }
    }

    // Report the best times over all runs (rank 0 only).
    if (my_pid == 0) {
        stat_multi_runs(records);
    }
    // These three were allocated with new[] inside trans2BSR.
    delete [] dist_bsr_rpt; delete [] dist_bsr_cid; delete [] dist_bsr_val;
    free(dist_b); free(dist_x);

    MPI_Finalize();

    return 0;
}