#include "global_LU.hpp"

// Default constructor: delegates everything to the Solver base class;
// all owned buffers (displs, recvcnts, vec_buf, ...) stay nullptr until SetOperator().
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
GlobalLU<idx_t, data_t, setup_t, calc_t, dof>::GlobalLU(): Solver<idx_t, setup_t, calc_t, dof>() {}

// Destructor: release every buffer allocated in SetOperator().
// delete/delete[] on a null pointer is a guaranteed no-op, so no guards are needed;
// pointers are re-nulled defensively in case the object is inspected afterwards.
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
GlobalLU<idx_t, data_t, setup_t, calc_t, dof>::~GlobalLU()
{
    delete [] displs;       displs       = nullptr;
    delete [] recvcnts;     recvcnts     = nullptr;
    delete [] vec_buf;      vec_buf      = nullptr;
    delete [] global_x;     global_x     = nullptr;
    delete [] global_b;     global_b     = nullptr;
    delete [] proc_map;     proc_map     = nullptr;
    delete [] proc_row_ids; proc_row_ids = nullptr;
    delete    glbA_csr;     glbA_csr     = nullptr;
}

// Intentionally a no-op: an exact LU factorization has nothing to truncate
// (required override of the Solver interface).
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void GlobalLU<idx_t, data_t, setup_t, calc_t, dof>::truncate() {}

// Gather the distributed block-CSR operator onto the root process, assemble it
// into one global scalar CSR matrix, and perform the (sparse-direct) LU
// factorization there.  Also precomputes the Gatherv/Scatterv layouts that
// Mult_ipl() later uses to move vectors between the processes and the root.
//
// @param op  must actually be a par_CSRMatrix<idx_t, setup_t, setup_t, dof>.
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void GlobalLU<idx_t, data_t, setup_t, calc_t, dof>::SetOperator(const Operator<idx_t, setup_t, dof> & op)
{
    assert(this->setup_called == false);
    this->setup_time = - wall_time();
    const par_CSRMatrix<idx_t, setup_t, setup_t, dof> & par_A = (const par_CSRMatrix<idx_t, setup_t, setup_t, dof> &) op;
    
    int my_pid; MPI_Comm_rank(par_A.comm, & my_pid);
    if (my_pid == 0) printf("Setup Glb_LU #dof %d\n", dof);

    static_assert(sizeof(idx_t) == 4 || sizeof(idx_t) == 8);
    // BUGFIX: idx_t is an integer index type, so it must be sent with an
    // integer MPI datatype.  The old MPI_FLOAT/MPI_DOUBLE mapping only worked
    // by accident on homogeneous clusters because the byte widths match; it
    // violates MPI type-matching and breaks on heterogeneous systems.
    constexpr MPI_Datatype mpi_idx_type = (sizeof(idx_t) == 4) ? MPI_INT32_T : MPI_INT64_T;
    constexpr int e_size = dof*dof;// scalar entries per dof x dof block

    int glb_pid; MPI_Comm_rank(par_A.comm, &glb_pid);
    int glb_np ; MPI_Comm_size(par_A.comm, &glb_np );
    root_pid = 0;
    
    std::vector<idx_t> my_row_idx;// global (scalar) row index of each row this process owns
    std::vector<idx_t> loc_row_ptr;
    std::vector<idx_t> loc_col_idx;
    std::vector<calc_t> loc_vals;

    my_nrows  = par_A.diag.nrows * dof;// scalar rows owned by this process (block rows * dof)
    glb_nrows = par_A. glb_nrows * dof;
    
    // Assembling this process's rows needs both the diagonal and the
    // off-diagonal part of the parallel CSR matrix.
    assert(par_A.diag.nrows == par_A.offd.nrows);
    loc_row_ptr.resize(my_nrows + 1);
    loc_row_ptr[0] = 0;
    my_row_idx .resize(my_nrows);
    // Pass 1: count the nonzeros of every scalar row (explicit zeros in the
    // dense dof x dof blocks are dropped).
    #pragma omp parallel for schedule(static)
    for (idx_t bi = 0; bi < par_A.diag.nrows; bi ++) {
        idx_t cnts[dof];
        for (int f = 0; f < dof; f++) cnts[f] = 0;
        for (int f = 0; f < dof; f++) my_row_idx[bi * dof + f] = (par_A.beg_row + bi) * dof + f;

        // walk the diagonal block
        for (auto p = par_A.diag.row_ptr[bi]; p < par_A.diag.row_ptr[bi + 1]; p++) {
            const setup_t * bv = par_A.diag.vals + p * e_size;
            for (int r = 0; r < dof; r++)
            for (int c = 0; c < dof; c++) {
#ifdef COL_MAJOR
                const setup_t cv = bv[c * dof + r];
#else // ROW_MAJOR
                const setup_t cv = bv[r * dof + c];
#endif
                if (cv != 0.0) cnts[r] ++;
            }
        }// diag loop
        // walk the off-diagonal block
        for (auto p = par_A.offd.row_ptr[bi]; p < par_A.offd.row_ptr[bi + 1]; p++) {
            const setup_t * bv = par_A.offd.vals + p * e_size;
            for (int r = 0; r < dof; r++)
            for (int c = 0; c < dof; c++) {
#ifdef COL_MAJOR
                const setup_t cv = bv[c * dof + r];
#else // ROW_MAJOR
                const setup_t cv = bv[r * dof + c];
#endif
                if (cv != 0.0) cnts[r] ++;
            }
        }// offd loop

        for (int f = 0; f < dof; f++)
            loc_row_ptr[bi * dof + f + 1] = cnts[f];
    }// bi loop

    // prefix-sum the per-row counts into CSR row offsets (serial: carries a dependency)
    for (idx_t i = 0; i < my_nrows; i++)
        loc_row_ptr[i + 1] += loc_row_ptr[i];

    loc_col_idx.resize(loc_row_ptr[my_nrows]);
    loc_vals   .resize(loc_row_ptr[my_nrows]);

    // Pass 2: fill column indices and values
    #pragma omp parallel for schedule(static)
    for (idx_t bi = 0; bi < par_A.diag.nrows; bi ++) {
        idx_t cnts[dof];
        for (int f = 0; f < dof; f++) cnts[f] = 0;
        const idx_t * offsets = & loc_row_ptr[bi * dof];

        // walk the diagonal block
        for (auto p = par_A.diag.row_ptr[bi]; p < par_A.diag.row_ptr[bi + 1]; p++) {
            const idx_t bj = par_A.diag.col_idx[p] + par_A.beg_col;// convert to a global block-column index
            const setup_t * bv = par_A.diag.vals + p * e_size;
            for (int r = 0; r < dof; r++)
            for (int c = 0; c < dof; c++) {
#ifdef COL_MAJOR
                const setup_t cv = bv[c * dof + r];
#else // ROW_MAJOR
                const setup_t cv = bv[r * dof + c];
#endif
                if (cv != 0.0) {
                    loc_col_idx[offsets[r] + cnts[r]] = bj * dof + c;
                    loc_vals   [offsets[r] + cnts[r]] = cv;
                    cnts[r] ++;
                }
            }
        }// diag loop
        // walk the off-diagonal block
        for (auto p = par_A.offd.row_ptr[bi]; p < par_A.offd.row_ptr[bi + 1]; p++) {
            const idx_t bj = par_A.col_map_offd[par_A.offd.col_idx[p]];// convert to a global block-column index
            const setup_t * bv = par_A.offd.vals + p * e_size;
            for (int r = 0; r < dof; r++)
            for (int c = 0; c < dof; c++) {
#ifdef COL_MAJOR
                const setup_t cv = bv[c * dof + r];
#else // ROW_MAJOR
                const setup_t cv = bv[r * dof + c];
#endif
                if (cv != 0.0) {
                    loc_col_idx[offsets[r] + cnts[r]] = bj * dof + c;
                    loc_vals   [offsets[r] + cnts[r]] = cv;
                    cnts[r] ++;
                }
            }
        }// offd loop

        for (int f = 0; f < dof; f++) {
            assert(offsets[f] + cnts[f] == offsets[f + 1]);
            // sort each row's column indices into ascending order
            sort_indices_with_vals<idx_t, calc_t, 1>(cnts[f], & loc_col_idx[offsets[f]], & loc_vals[offsets[f]]);
        }

    }// bi loop

    // Every process allocates its send/recv staging buffer for Mult_ipl().
    vec_buf = new calc_t [my_nrows];

    idx_t my_nnz = loc_vals.size();
    glb_nrows = 0;// recomputed below on root only; stays 0 on other ranks (unused there)
    idx_t glb_nnz = 0;
    idx_t * proc_nrows = nullptr, * proc_nnz = nullptr;
    size_t recvbuf_len = 0;
    {// collective: learn how many rows and nonzeros every process owns
        idx_t send_buf[2] = { my_nrows, my_nnz };
        idx_t * recv_buf = nullptr;
        if (glb_pid == 0) {
            recv_buf = new idx_t [glb_np * 2];
            recvcnts = new int [glb_np];
            displs   = new int [glb_np];
        }
        MPI_Gather( send_buf, 2, mpi_idx_type,
                    recv_buf, 2, mpi_idx_type, 0, par_A.comm);
        if (glb_pid == 0) {
            proc_nrows = new idx_t [glb_np];
            proc_nnz   = new idx_t [glb_np];
            for (int p = 0; p < glb_np; p++) {
                proc_nrows[p] = recv_buf[p*2 + 0];
                proc_nnz  [p] = recv_buf[p*2 + 1];
                glb_nrows += proc_nrows[p];
                glb_nnz   += proc_nnz  [p];
                // byte count of process p's packed CSR payload:
                // row_ptr + col_idx + vals + row_idx
                recvcnts[p] = sizeof(idx_t)  * (proc_nrows[p] + 1)
                            + sizeof(idx_t)  * proc_nnz[p]
                            + sizeof(calc_t) * proc_nnz[p]
                            + sizeof(idx_t)  * proc_nrows[p] ;
                displs[p] = (p == 0) ? 0 : (displs[p - 1] + recvcnts[p - 1]);
                recvbuf_len += recvcnts[p];
                // printf("Proc %d nrows %d nnz %d displs %d cnts %d\n", p, proc_nrows[p], proc_nnz[p], displs[p], recvcnts[p]);
            }
            delete [] recv_buf; recv_buf = nullptr;
            assert(glb_nrows == par_A.glb_nrows * dof);
            printf("Assembled global matrix: %d nrows %d nnz\n", glb_nrows, glb_nnz);
        }
    }
    
    // Pack this process's CSR pieces into one contiguous byte buffer:
    // [row_ptr | col_idx | vals | row_idx]
    const size_t sendbuf_len = sizeof(idx_t)  * (my_nrows + 1)
                            +  sizeof(idx_t)  * my_nnz
                            +  sizeof(calc_t) * my_nnz
                            +  sizeof(idx_t)  * my_nrows ;
    
    void * send_buf = malloc( sendbuf_len );
    void * recv_buf = nullptr;
    idx_t * send_row_ptr = (idx_t*)  send_buf;
    idx_t * send_col_idx = (idx_t*) ( (char*) send_row_ptr + sizeof(idx_t)  * (my_nrows + 1) );
    calc_t* send_vals    = (calc_t*)( (char*) send_col_idx + sizeof(idx_t)  * my_nnz );
    idx_t * send_row_idx = (idx_t*) ( (char*) send_vals    + sizeof(calc_t) * my_nnz );

    // copy the data into the send buffer
    #pragma omp parallel for schedule(static)
    for (idx_t i = 0; i < my_nrows; i++) {
        send_row_ptr[i] = loc_row_ptr[i];
        send_row_idx[i] = my_row_idx [i];
    }
    send_row_ptr[my_nrows] = loc_row_ptr[my_nrows];

    #pragma omp parallel for schedule(static)
    for (idx_t i = 0; i < my_nnz; i++) {
        send_col_idx[i] = loc_col_idx[i];
        send_vals   [i] = loc_vals   [i];
    }

    // printf("Proc %d send buf %ld bytes recv buf %ld bytes\n", glb_pid, sendbuf_len, recvbuf_len);
    if (glb_pid == 0) {
        recv_buf = malloc( recvbuf_len );
    }

    // NOTE: MPI counts are int, so a per-process payload over 2 GiB would
    // overflow here — acceptable for the intended problem sizes.
    MPI_Gatherv(send_buf, sendbuf_len, MPI_BYTE, recv_buf, recvcnts, displs, MPI_BYTE, 0, par_A.comm);

    if (glb_pid == 0) {
        proc_map = new idx_t [glb_np + 1]; proc_map[0] = 0;
        proc_row_ids = new idx_t [glb_nrows];
        for (int p = 0; p < glb_np; p++) proc_map[p + 1] = proc_map[p] + proc_nrows[p];
        global_b = new calc_t [glb_nrows];
        global_x = new calc_t [glb_nrows];
        // assemble the global CSR matrix from the gathered per-process payloads
        idx_t * glb_row_ptr = new idx_t [glb_nrows + 1];
        idx_t * glb_col_idx = new idx_t [glb_nnz];
        calc_t* glb_vals    = new calc_t[glb_nnz];
        // first determine the nonzero count of every global row
        #pragma omp parallel for schedule(dynamic,1)
        for (int p = 0; p < glb_np; p++) {
            void * proc_buf = (void*)( (char*) recv_buf + displs[p] );// start of the data sent by process p
            idx_t * proc_row_ptr = (idx_t*) proc_buf;
            idx_t * proc_col_idx = (idx_t*) (((char*) proc_row_ptr) + sizeof(idx_t) * (proc_nrows[p] + 1) );
            calc_t* proc_vals    = (calc_t*)(((char*) proc_col_idx) + sizeof(idx_t) * (proc_nnz[p]) );
            idx_t * proc_row_idx = (idx_t*) (((char*) proc_vals   ) + sizeof(calc_t)* (proc_nnz[p]) );
            for (idx_t i = 0; i < proc_nrows[p]; i++) {
                const idx_t rid = proc_row_idx[i];
                assert(rid < glb_nrows);
                proc_row_ids[proc_map[p] + i] = rid;// remember which global row this process's i-th row is
                const idx_t row_nnz = proc_row_ptr[i+1] - proc_row_ptr[i];// nonzeros of that row
                glb_row_ptr[rid] = row_nnz;
                // printf("Row %d from Proc %d : %d\n", rid, p, row_nnz);
            }
        }
        idx_t accum_nnz = 0;
        // exclusive prefix sum -> global row offsets
        for (idx_t i = 0; i < glb_nrows; i++) {
            idx_t row_nnz = glb_row_ptr[i];
            glb_row_ptr[i] = accum_nnz;
            accum_nnz += row_nnz;
        }
        assert(accum_nnz == glb_nnz);
        glb_row_ptr[glb_nrows] = accum_nnz;
        // then fill the global column indices and values
        #pragma omp parallel for schedule(dynamic,1)
        for (int p = 0; p < glb_np; p++) {
            void * proc_buf = (void*)( (char*) recv_buf + displs[p] );// start of the data sent by process p
            idx_t * proc_row_ptr = (idx_t*) proc_buf;
            idx_t * proc_col_idx = (idx_t*) (((char*) proc_row_ptr) + sizeof(idx_t) * (proc_nrows[p] + 1) );
            calc_t* proc_vals    = (calc_t*)(((char*) proc_col_idx) + sizeof(idx_t) * (proc_nnz[p]) );
            idx_t * proc_row_idx = (idx_t*) (((char*) proc_vals   ) + sizeof(calc_t)* (proc_nnz[p]) );
            for (idx_t i = 0; i < proc_nrows[p]; i++) {
                const idx_t rid = proc_row_idx[i];
                const idx_t row_nnz = proc_row_ptr[i+1] - proc_row_ptr[i];// nonzeros of that row
                assert(row_nnz == glb_row_ptr[rid+1] - glb_row_ptr[rid]);
                const idx_t src_pos = proc_row_ptr[i],
                            dst_pos =  glb_row_ptr[rid];
                for (idx_t k = 0; k < row_nnz; k++) {
                    glb_col_idx[dst_pos + k] = proc_col_idx[src_pos + k];
                    glb_vals   [dst_pos + k] = proc_vals   [src_pos + k];
                }
            }
        }
        // global CSR assembly complete
#ifdef DEBUG_PRINT
        for (idx_t i = 0; i < glb_nrows; i++) {
            printf("%d:\n", i);
            for (idx_t p = glb_row_ptr[i]; p < glb_row_ptr[i+1]; p++) {
                printf("%d %.5e\n", glb_col_idx[p], glb_vals[p]);
            }
            printf("\n");
        }
#endif
        glbA_csr = new CSR_sparseMat<idx_t, calc_t, calc_t>(root_pid, glb_nrows, glb_row_ptr, glb_col_idx, glb_vals);
        // blkA_csr->fprint_COO("sparseA.txt");

        bool succ;
        succ = glbA_csr->initA(0, CSR_sparseMat<idx_t, calc_t, calc_t>::PATTERN::NONSYMMETRIC); assert(succ);
        succ = glbA_csr->initB(1, 0); assert(succ);
        succ = glbA_csr->initX(1, 0); assert(succ);
        succ = glbA_csr->initSolver(omp_get_max_threads(), 1, 0); assert(succ);
        // symbolic analysis time is excluded from setup_time
        this->setup_time += wall_time();
        glbA_csr->analyse(1, 0, 1, omp_get_max_threads(), 1);
        if (my_pid == 0) printf("Analyze done.\n");
        this->setup_time -= wall_time();

        glbA_csr->factorize(1e-4);
        if (my_pid == 0) printf("Factorize done.\n");
        // Repurpose recvcnts/displs: from byte layout (matrix gather) to
        // element layout (vector Gatherv/Scatterv in Mult_ipl).
        for (int p = 0; p < glb_np; p++) {
            recvcnts[p] = proc_nrows[p];
            displs[p] = (p == 0) ? 0 : (displs[p - 1] + recvcnts[p - 1]);
        }

        delete [] proc_nrows; delete [] proc_nnz;
        free(recv_buf);
    }// Root process performs the LU factorization
    free(send_buf);

    this->setup_called = true;
    this->setup_time += wall_time();
}

// Apply the precomputed global LU factorization as a preconditioner/solver:
// gather the distributed RHS onto the root, do the triangular solves there,
// and scatter the solution back.  Only meaningful with a zero initial guess;
// otherwise the call is a no-op.
template<typename idx_t, typename data_t, typename setup_t, typename calc_t, int dof>
void GlobalLU<idx_t, data_t, setup_t, calc_t, dof>::Mult_ipl(const par_Vector<idx_t, calc_t, dof> & input, 
    par_Vector<idx_t, calc_t, dof> & output) const
{    
    static_assert(sizeof(calc_t) == 4 || sizeof(calc_t) == 8);
    constexpr MPI_Datatype etype = (sizeof(calc_t) == 4) ? MPI_FLOAT : MPI_DOUBLE;
    int rank; MPI_Comm_rank(input.comm, &rank);

    if (!this->zero_guess)
        return;// nothing to do without a zero initial guess

    // collect the distributed RHS into global_b on the root
    MPI_Gatherv(input.local_vector->data, my_nrows, etype,
                global_b, recvcnts, displs, etype, 0, input.comm);

    if (rank == 0) {
#if 0 // diagnostic: check norms of b and A*b on the assembled matrix
        calc_t * Ab_res = new calc_t [glb_nrows];
        #pragma omp parallel for schedule(static)
        for (idx_t i = 0; i < glb_nrows; i++) {
            calc_t tmp = 0.0;
            for (auto p = glbA_csr->row_ptr[i]; p < glbA_csr->row_ptr[i + 1]; p++) {
                const idx_t j = glbA_csr->col_idx[p];
                tmp += glbA_csr->vals[p] * global_b[j];
            }
            Ab_res[i] = tmp;
        }

        double ck_b_dot = 0.0, ck_Ab_dot = 0.0;
        #pragma omp parallel for schedule(static) reduction(+:ck_b_dot, ck_Ab_dot)
        for (idx_t i = 0; i < glb_nrows; i++) {
            ck_b_dot  += global_b[i] * global_b[i];
            ck_Ab_dot += Ab_res  [i] * Ab_res  [i];
        }
        printf("( b,  b) = %.15e\n", ck_b_dot);
        printf("(Ab, Ab) = %.15e\n", ck_Ab_dot);
#endif
        // forward/backward substitution with the stored factors
        glbA_csr->apply(global_b, global_x, 0);
    }

    // distribute the root's solution back to every process
    MPI_Scatterv(global_x, recvcnts, displs, etype,
                 output.local_vector->data, my_nrows, etype, 0, output.comm);
}

// Explicit instantiations: <idx_t, data_t, setup_t, calc_t, dof>.
// Full double precision, dof = 1..4.
template class GlobalLU<int, double, double, double, 1>;
template class GlobalLU<int, double, double, double, 2>;
template class GlobalLU<int, double, double, double, 3>;
template class GlobalLU<int, double, double, double, 4>;

// Mixed precision: float storage, double setup and compute.
template class GlobalLU<int, float , double, double, 1>;
template class GlobalLU<int, float , double, double, 2>;
template class GlobalLU<int, float , double, double, 3>;
template class GlobalLU<int, float , double, double, 4>;

// Mixed precision: float storage/compute, double setup.
template class GlobalLU<int, float , double, float , 1>;
template class GlobalLU<int, float , double, float , 2>;
template class GlobalLU<int, float , double, float , 3>;
template class GlobalLU<int, float , double, float , 4>;

// Full single precision.
template class GlobalLU<int, float , float , float , 1>;
template class GlobalLU<int, float , float , float , 2>;
template class GlobalLU<int, float , float , float , 3>;
template class GlobalLU<int, float , float , float , 4>;
#ifdef USE_FP16
template class GlobalLU<int, __fp16, double, float , 1>;
template class GlobalLU<int, __fp16, double, float , 2>;
template class GlobalLU<int, __fp16, double, float , 3>;
template class GlobalLU<int, __fp16, double, float , 4>;

template class GlobalLU<int, __fp16, float , float , 1>;
template class GlobalLU<int, __fp16, float , float , 2>;
template class GlobalLU<int, __fp16, float , float , 3>;
template class GlobalLU<int, __fp16, float , float , 4>;
#endif