#include "par_CSRCommPkg.hpp"

#include <algorithm>

#include "_hypre_parcsr_mv.h"

template<typename idx_t>
par_CSRCommPkg<idx_t>::par_CSRCommPkg()
{
    // Intentionally empty: the communication pattern is built later by
    // prepare() or copy_hypre().
    // NOTE(review): the pointer/count members are assumed to be initialized
    // (to nullptr / 0) in the class definition, which is not visible here —
    // the destructor deletes them and prepare() asserts they are nullptr.
    // Confirm in par_CSRCommPkg.hpp.
}

template<typename idx_t>
par_CSRCommPkg<idx_t>::~par_CSRCommPkg()
{
    // Release the pattern arrays built by prepare()/copy_hypre() and the
    // persistent nonblocking-request buffers. delete[] on nullptr is a no-op,
    // so an unprepared package destructs safely.
    delete [] send_pids;
    delete [] send_map_starts;
    delete [] send_map_elmts;
    delete [] recv_pids;
    delete [] recv_vec_starts;
    delete [] send_reqs;
    delete [] recv_reqs;
}


template<typename idx_t>
void par_CSRCommPkg<idx_t>::prepare(const idx_t num_needs, const idx_t * glb_ids, const idx_t * partitions)
{
    // Build the point-to-point communication pattern from the list of remote
    // (off-diagonal) global column ids this rank needs.
    //   num_needs  : how many remote columns are needed (>= 0)
    //   glb_ids    : their global ids, strictly ascending (asserted below)
    //   partitions : ownership ranges; rank p owns [partitions[p], partitions[p+1])
    //                (length nprocs + 1, ascending)
    // On exit: recv_pids/recv_vec_starts describe incoming messages,
    // send_pids/send_map_starts/send_map_elmts describe outgoing ones, and
    // send_reqs/recv_reqs are allocated for later init_update_halo() calls.
    assert(num_needs >= 0);
    assert(glb_ids);   // off-diagonal column map must already exist, ascending
    for (idx_t j = 1; j < num_needs; j++) assert(glb_ids[j - 1] < glb_ids[j]);
    assert(partitions);
    assert(comm != MPI_COMM_NULL);

    // idx_t-sized MPI element type (same selection as init_update_halo).
    // BUGFIX: the original code hard-coded MPI_INT in every exchange below,
    // which corrupts data for the par_CSRCommPkg<long long> instantiation.
    constexpr MPI_Datatype etype = (sizeof(idx_t) == 8) ? MPI_LONG_LONG : MPI_INT;

    int my_pid; MPI_Comm_rank(comm, & my_pid);
    int nprocs; MPI_Comm_size(comm, & nprocs);
    const idx_t beg_col = partitions[my_pid];

    // Reuse the (still empty) starts arrays as per-rank counters first; they
    // become exclusive prefix sums and are finally compacted to
    // neighbors-only form at the end of this function.
    idx_t * & recv_cnts = recv_vec_starts; assert(recv_cnts == nullptr);
    idx_t * & send_cnts = send_map_starts; assert(send_cnts == nullptr);
    recv_cnts = new idx_t [nprocs + 1];
    send_cnts = new idx_t [nprocs + 1];
    for (int p = 0; p <= nprocs; p++) {
        recv_cnts[p] = 0;
        send_cnts[p] = 0;
    }

    // Map every needed global id to its owning rank. partitions is ascending,
    // so a binary search replaces the original per-id O(nprocs) linear scan.
    for (idx_t lid = 0; lid < num_needs; lid++) {
        const idx_t gid = glb_ids[lid];
        const int src_pid = (int)(std::upper_bound(partitions, partitions + nprocs + 1, gid) - partitions) - 1;
        assert(src_pid >= 0 && src_pid < nprocs);
        assert(gid >= partitions[src_pid] && gid < partitions[src_pid + 1]);
        recv_cnts[src_pid] ++;   // one more element to receive from its owner
    }

    // Exchange counts: what I will receive from p equals what p sends to me.
    assert(send_reqs == nullptr);
    assert(recv_reqs == nullptr);
    send_reqs = new MPI_Request [nprocs];
    recv_reqs = new MPI_Request [nprocs];
    for (int p = 0; p < nprocs; p++) {
        MPI_Isend(&recv_cnts[p], 1, etype, p, 11, comm, send_reqs + p);
        MPI_Irecv(&send_cnts[p], 1, etype, p, 11, comm, recv_reqs + p);
    }
    MPI_Waitall(nprocs, send_reqs, MPI_STATUSES_IGNORE); delete [] send_reqs; send_reqs = nullptr;
    MPI_Waitall(nprocs, recv_reqs, MPI_STATUSES_IGNORE); delete [] recv_reqs; recv_reqs = nullptr;

    // Count actual neighbors. Reset explicitly first — prepare() must not
    // rely on the members having been zero-initialized elsewhere.
    num_recvs = 0;
    num_sends = 0;
    for (int p = 0; p < nprocs; p++) {
        num_recvs += (recv_cnts[p] > 0);
        num_sends += (send_cnts[p] > 0);
    }
    recv_pids = new int [num_recvs];
    send_pids = new int [num_sends];

    // Exclusive prefix sums over the counts; record neighbor ranks on the way.
    idx_t prefix_sum[2] = {0, 0};
    num_recvs = num_sends = 0;
    for (int p = 0; p < nprocs; p++) {
        if (recv_cnts[p] > 0) recv_pids[num_recvs ++] = p;
        if (send_cnts[p] > 0) send_pids[num_sends ++] = p;

        const idx_t tmp[2] = {recv_cnts[p], send_cnts[p]};
        recv_cnts[p] = prefix_sum[0];
        send_cnts[p] = prefix_sum[1];
        prefix_sum[0] += tmp[0];
        prefix_sum[1] += tmp[1];
    }
    recv_cnts[nprocs] = prefix_sum[0];
    send_cnts[nprocs] = prefix_sum[1];

    // Now: receive recv_cnts[p+1]-recv_cnts[p] elements from rank p, with
    // global ids glb_ids[recv_cnts[p]] .. glb_ids[recv_cnts[p+1]-1].
    // Send send_cnts[p+1]-send_cnts[p] elements to rank p; their global ids
    // are only known on the receiving side, so exchange them next (tag 22).
    idx_t * & send_col_ids = send_map_elmts; assert(send_col_ids == nullptr);
    send_col_ids = new idx_t [send_cnts[nprocs]];

    send_reqs = new MPI_Request [num_recvs];
    recv_reqs = new MPI_Request [num_sends];
    for (idx_t r = 0; r < num_recvs; r++) {
        const int src_pid = recv_pids[r];
        const idx_t num = recv_cnts[src_pid + 1] - recv_cnts[src_pid]; //assert(num > 0);
        const idx_t * send_buf = glb_ids + recv_cnts[src_pid];
        MPI_Isend(send_buf, num, etype, src_pid, 22, comm, send_reqs + r);
    }
    for (idx_t s = 0; s < num_sends; s++) {
        const int dst_pid = send_pids[s];
        const idx_t num = send_cnts[dst_pid + 1] - send_cnts[dst_pid]; //assert(num > 0);
        idx_t * recv_buf = send_col_ids + send_cnts[dst_pid];
        MPI_Irecv(recv_buf, num, etype, dst_pid, 22, comm, recv_reqs + s);
    }
    MPI_Waitall(num_recvs, send_reqs, MPI_STATUSES_IGNORE); delete [] send_reqs; send_reqs = nullptr;
    MPI_Waitall(num_sends, recv_reqs, MPI_STATUSES_IGNORE); delete [] recv_reqs; recv_reqs = nullptr;

    // Convert the global column ids we must serve into local row indices.
    for (idx_t i = 0; i < send_cnts[nprocs]; i++) {// note: send_cnts not yet compacted here
        send_col_ids[i] -= beg_col;
    }

    // Compact the starts arrays to neighbors only (most ranks are not neighbors).
    idx_t * shrinked_recv_cnts = new idx_t [num_recvs + 1]; shrinked_recv_cnts[0] = 0;
    idx_t * shrinked_send_cnts = new idx_t [num_sends + 1]; shrinked_send_cnts[0] = 0;
    for (int p = 0, tr = 0, ts = 0; p < nprocs; p++) {
        if (recv_cnts[p + 1] > recv_cnts[p]) {
            // assert(recv_pids[tr] == p);
            shrinked_recv_cnts[tr + 1] = recv_cnts[p + 1];
            tr ++;
        }
        if (send_cnts[p + 1] > send_cnts[p]) {
            // assert(send_pids[ts] == p);
            shrinked_send_cnts[ts + 1] = send_cnts[p + 1];
            ts ++;
        }
    }
    delete [] recv_cnts; recv_cnts = shrinked_recv_cnts; shrinked_recv_cnts = nullptr;
    delete [] send_cnts; send_cnts = shrinked_send_cnts; shrinked_send_cnts = nullptr;

    // Allocate persistent request buffers for subsequent halo exchanges.
    send_reqs = new MPI_Request [num_sends];
    recv_reqs = new MPI_Request [num_recvs];
}

template<typename idx_t>
void par_CSRCommPkg<idx_t>::copy_hypre(const hypre_ParCSRCommPkg hypre_pkg)
{
    // Deep-copy the communication pattern out of a hypre_ParCSRCommPkg.
    // Only single-component packages are supported.
    assert(hypre_pkg.num_components == 1);

    comm      = hypre_pkg.comm;
    num_sends = hypre_pkg.num_sends;
    num_recvs = hypre_pkg.num_recvs;

    send_pids       = new int   [num_sends];
    send_map_starts = new idx_t [num_sends + 1];
    recv_pids       = new int   [num_recvs];
    recv_vec_starts = new idx_t [num_recvs + 1];

    // Neighbor ranks and the CSR-like "starts" arrays (one extra entry each).
    for (int i = 0; i < num_sends; i++) {
        send_pids      [i] = hypre_pkg.send_procs     [i];
        send_map_starts[i] = hypre_pkg.send_map_starts[i];
    }
    send_map_starts[num_sends] = hypre_pkg.send_map_starts[num_sends];

    for (int i = 0; i < num_recvs; i++) {
        recv_pids      [i] = hypre_pkg.recv_procs     [i];
        recv_vec_starts[i] = hypre_pkg.recv_vec_starts[i];
    }
    recv_vec_starts[num_recvs] = hypre_pkg.recv_vec_starts[num_recvs];

    // Local indices used to pack outgoing data.
    const idx_t total_send = send_map_starts[num_sends];
    send_map_elmts = new idx_t [total_send];
    #pragma omp parallel for schedule(static)
    for (idx_t j = 0; j < total_send; j++)
        send_map_elmts[j] = hypre_pkg.send_map_elmts[j];

    // Persistent request buffers for later halo exchanges.
    send_reqs = new MPI_Request [num_sends];
    recv_reqs = new MPI_Request [num_recvs];
}

template<typename idx_t>
void par_CSRCommPkg<idx_t>::init_update_halo(const int dof, idx_t * send_buf, idx_t * recv_vec) const
{
    // Post the nonblocking halo exchange for already-packed integer data;
    // each entry carries `dof` consecutive values (hence the *dof offsets).
    // Complete the exchange with wait_update_halo().
    constexpr MPI_Datatype etype = (sizeof(idx_t) == 8) ? MPI_LONG_LONG : MPI_INT;

    for (idx_t r = 0; r < num_recvs; r++) {
        const idx_t beg = recv_vec_starts[r];
        const idx_t cnt = recv_vec_starts[r + 1] - beg;
        MPI_Irecv(recv_vec + beg * dof, cnt * dof, etype, recv_pids[r], 33, comm, recv_reqs + r);
    }
    for (idx_t s = 0; s < num_sends; s++) {
        const idx_t beg = send_map_starts[s];
        const idx_t cnt = send_map_starts[s + 1] - beg;
        MPI_Isend(send_buf + beg * dof, cnt * dof, etype, send_pids[s], 33, comm, send_reqs + s);
    }
}

template<typename idx_t>
void par_CSRCommPkg<idx_t>::init_update_halo(const int dof, const idx_t * loc_vec, idx_t * send_buf, idx_t * recv_vec) const
{
    // Gather the loc_vec entries addressed by send_map_elmts into send_buf,
    // then post the nonblocking exchange (dof values per entry, tag 33).
    // Complete the exchange with wait_update_halo().
    constexpr MPI_Datatype etype = (sizeof(idx_t) == 8) ? MPI_LONG_LONG : MPI_INT;

    for (idx_t r = 0; r < num_recvs; r++) {
        const idx_t beg = recv_vec_starts[r];
        const idx_t cnt = recv_vec_starts[r + 1] - beg;
        MPI_Irecv(recv_vec + beg * dof, cnt * dof, etype, recv_pids[r], 33, comm, recv_reqs + r);
    }
    for (idx_t s = 0; s < num_sends; s++) {
        const idx_t beg = send_map_starts[s];
        const idx_t cnt = send_map_starts[s + 1] - beg;
        idx_t * pack = send_buf + beg * dof;   // destination slice for this neighbor
        for (idx_t k = 0; k < cnt; k++) {
            const idx_t * src = loc_vec + send_map_elmts[beg + k] * dof;
            for (int f = 0; f < dof; f++)
                pack[k * dof + f] = src[f];
        }
        MPI_Isend(pack, cnt * dof, etype, send_pids[s], 33, comm, send_reqs + s);
    }
}


template<typename idx_t>
void par_CSRCommPkg<idx_t>::init_update_halo(const int dof, const float * loc_vec, float * send_buf, float * recv_vec) const
{
    // Single-precision variant: gather loc_vec entries addressed by
    // send_map_elmts into send_buf, then post the nonblocking exchange
    // (dof values per entry, tag 33). Complete with wait_update_halo().
    constexpr MPI_Datatype etype = MPI_FLOAT;

    for (idx_t r = 0; r < num_recvs; r++) {
        const idx_t beg = recv_vec_starts[r];
        const idx_t cnt = recv_vec_starts[r + 1] - beg;
        MPI_Irecv(recv_vec + beg * dof, cnt * dof, etype, recv_pids[r], 33, comm, recv_reqs + r);
    }
    for (idx_t s = 0; s < num_sends; s++) {
        const idx_t beg = send_map_starts[s];
        const idx_t cnt = send_map_starts[s + 1] - beg;
        float * pack = send_buf + beg * dof;   // destination slice for this neighbor
        for (idx_t k = 0; k < cnt; k++) {
            const float * src = loc_vec + send_map_elmts[beg + k] * dof;
            for (int f = 0; f < dof; f++)
                pack[k * dof + f] = src[f];
        }
        MPI_Isend(pack, cnt * dof, etype, send_pids[s], 33, comm, send_reqs + s);
    }
}

template<typename idx_t>
void par_CSRCommPkg<idx_t>::init_update_halo(const int dof, const double * loc_vec, double* send_buf, double* recv_vec) const
{
    // Double-precision variant: gather loc_vec entries addressed by
    // send_map_elmts into send_buf, then post the nonblocking exchange
    // (dof values per entry, tag 33). Complete with wait_update_halo().
    constexpr MPI_Datatype etype = MPI_DOUBLE;

    for (idx_t r = 0; r < num_recvs; r++) {
        const idx_t beg = recv_vec_starts[r];
        const idx_t cnt = recv_vec_starts[r + 1] - beg;
        MPI_Irecv(recv_vec + beg * dof, cnt * dof, etype, recv_pids[r], 33, comm, recv_reqs + r);
    }
    for (idx_t s = 0; s < num_sends; s++) {
        const idx_t beg = send_map_starts[s];
        const idx_t cnt = send_map_starts[s + 1] - beg;
        double * pack = send_buf + beg * dof;   // destination slice for this neighbor
        for (idx_t k = 0; k < cnt; k++) {
            const double * src = loc_vec + send_map_elmts[beg + k] * dof;
            for (int f = 0; f < dof; f++)
                pack[k * dof + f] = src[f];
        }
        MPI_Isend(pack, cnt * dof, etype, send_pids[s], 33, comm, send_reqs + s);
    }
}

template<typename idx_t>
void par_CSRCommPkg<idx_t>::wait_update_halo() const
{
    // Block until every outstanding halo message posted by init_update_halo()
    // has completed — receives first (so incoming data is usable), then sends
    // (so the send buffer may be reused afterwards).
    MPI_Waitall(num_recvs, recv_reqs, MPI_STATUSES_IGNORE);
    MPI_Waitall(num_sends, send_reqs, MPI_STATUSES_IGNORE);
}

/* NOTE(review): unfinished draft of transpose_commpkg — it references
   undeclared symbols (rec_cid_nz, dst, col_map_offd, mpi_idx_type), shadows
   the member name `send_reqs`, and never returns `transpkg`. Kept commented
   out until completed; do not enable as-is.
template<typename idx_t, typename data_t, typename calc_t, int dof>
par_CSRCommPkg<idx_t>* transpose_commpkg(const CSRMatrix<idx_t, data_t, calc_t, dof> & offd_trans,
    const par_CSRCommPkg<idx_t>* commpkg)
{
    if (commpkg == nullptr) { assert(offd_trans.nrows == 0); return nullptr; }

    assert(commpkg->recv_vec_starts[commpkg->num_recvs] == offd_trans.nrows);// offd.ncols

    par_CSRCommPkg<idx_t> * transpkg = new par_CSRCommPkg<idx_t>;
    // 发送和接收的消息数反转
    transpkg->comm       = commpkg->comm;
    transpkg->num_recvs  = commpkg->num_sends;
    transpkg->num_sends  = commpkg->num_recvs;
    transpkg->recv_reqs  = new MPI_Request [transpkg->num_recvs];
    transpkg->send_reqs  = new MPI_Request [transpkg->num_sends];
    // 发送和接收的对象反转
    transpkg->recv_pids = new int [transpkg->num_recvs];
    transpkg->send_pids = new int [transpkg->num_sends];
    for (int p = 0; p < transpkg->num_recvs; p++)
        transpkg->recv_pids[p] = commpkg->send_pids[p];
    for (int p = 0; p < transpkg->num_sends; p++)
        transpkg->send_pids[p] = commpkg->recv_pids[p];
    // 发送的位置和个数可以推理得到
    transpkg->send_map_starts = new idx_t [transpkg->num_sends + 1];
    transpkg->send_map_starts[0] = 0;
    std::vector<idx_t> buf;
    idx_t * send_nnzs = new idx_t [transpkg->num_sends];
    idx_t * recv_nnzs = new idx_t [transpkg->num_recvs];
    
    std::vector<std::pair<idx_t, idx_t> > * rec_cid = new std::vector<std::pair<idx_t, idx_t> > [transpkg->num_sends];
    std::vector<MPI_Request> send_reqs;

    for (int s = 0; s < transpkg->num_sends; s ++) {
        rec_cid[s].clear();
        send_nnzs[s] = 0;
        std::set<idx_t> uniquified_cids;
        for (idx_t  recv_cid = commpkg->recv_vec_starts[s];// 遍历转置前要从该进程recv的局部列序
                    recv_cid < commpkg->recv_vec_starts[s + 1]; recv_cid ++) {
            const idx_t send_rid = recv_cid;// 即为转置后的行号
            for (idx_t k = offd_trans.row_ptr[send_rid]; k < offd_trans.row_ptr[send_rid + 1]; k ++) {

                std::pair<idx_t, idx_t> tmp;
                tmp.first  = col_map_offd[recv_cid];// global row id
                tmp.second = dst.beg_col + offd_trans.col_idx[k];// global colum id
                
                rec_cid[s].emplace_back(tmp);
                uniquified_cids.emplace(offd_trans.col_idx[k]);// local colum id
            }
            send_nnzs[s] += (offd_trans.row_ptr[send_rid + 1] - offd_trans.row_ptr[send_rid]);
        }
        for (auto it = uniquified_cids.begin(); it != uniquified_cids.end(); it ++) {
            buf.push_back(*it);
        }
        transpkg->send_map_starts[s + 1] = buf.size();

        MPI_Request req_num, req_dat;
        MPI_Isend(& send_nnzs[s], 1, mpi_idx_type,
            dst.commpkg->send_pids[s], 501, dst.commpkg->comm, & req_num);

        assert(send_nnzs[s] == (idx_t) rec_cid_nz[s].size());
        MPI_Isend(rec_cid_nz[s].data(), rec_cid_nz[s].size() * sizeof(COO<idx_t, data_t2, dof>), MPI_BYTE,
            dst.commpkg->send_pids[s], 502, dst.commpkg->comm, & req_dat);
        send_reqs.emplace_back(req_num);
        send_reqs.emplace_back(req_dat);
    }
}
*/
template class par_CSRCommPkg<      int>;
template class par_CSRCommPkg<long long>;