#include "my_parcsr.hpp"
#include "utils.hpp"

/// @brief Build a distributed CSR matrix from this rank's slice of a global CSR matrix.
/// The local slice is split into a "diag" block (columns this rank owns, local numbering)
/// and an "offd" block (external columns, compressed numbering via col_map_offd).
/// @param _comm        communicator the matrix lives on
/// @param _glb_nrows   total number of rows of the global matrix
/// @param _beg,_end    half-open global row range [_beg, _end) owned by this rank
/// @param dist_row_ptr CSR row pointers of the local slice (dist_row_ptr[0] may be nonzero)
/// @param dist_col_idx GLOBAL column indices of the local nonzeros
/// @param dist_vals    nonzero values, always double; converted to data_t when they differ
template<typename idx_t, typename data_t, typename calc_t>
MyParCSRMatrix<idx_t, data_t, calc_t>::MyParCSRMatrix(MPI_Comm _comm, const idx_t _glb_nrows,
    const idx_t _beg, const idx_t _end, const idx_t * dist_row_ptr, const idx_t * dist_col_idx, const double * dist_vals)
    : comm(_comm), glb_nrows(_glb_nrows), beg_row(_beg), end_row(_end)
{
    int my_pid; MPI_Comm_rank(comm, &my_pid);
    int nprocs; MPI_Comm_size(comm, &nprocs);

    const idx_t loc_nrows = end_row - beg_row;
    const idx_t beg_col = beg_row, end_col = end_row;

#ifdef USE_GPU
    // Single process: the whole local matrix is treated as one big diagonal block.
    diag = new MyCSRMatrix<idx_t, data_t, calc_t>(loc_nrows, loc_nrows , dist_row_ptr[loc_nrows]); 
    // Transfer row_ptr and col_idx to the device.
    // NOTE(review): cuda_memcpy appears to take an ELEMENT count (cuda_malloc below
    // multiplies by sizeof while these calls do not) — confirm against its declaration.
    cuda_memcpy(diag->row_ptr, dist_row_ptr, (loc_nrows + 1), 0);
    cuda_memcpy(diag->col_idx, dist_col_idx, dist_row_ptr[loc_nnz_unused := 0, loc_nrows], 0);
    // If the storage precision matches double, transfer vals directly;
    // otherwise stage the doubles on the GPU and convert precision there.
    if constexpr (sizeof(data_t) == sizeof(double)) {
        cuda_memcpy(diag->vals, dist_vals, dist_row_ptr[loc_nrows], 0);
    } else {
        double *vals_setup;
        cuda_malloc((void **)(&vals_setup), dist_row_ptr[loc_nrows] * sizeof(double));
        cuda_memcpy(vals_setup, dist_vals, dist_row_ptr[loc_nrows], 0);
        cuda_vec_copy(diag->vals, vals_setup, dist_row_ptr[loc_nrows]);// precision-converting copy
        cuda_free(vals_setup);
    }
#else

    rows_partition = new idx_t [nprocs + 1];
    
    // Gather every rank's first owned row: rows_partition[p] = beg_row of rank p and
    // rows_partition[nprocs] = glb_nrows, so rank p owns [rows_partition[p], rows_partition[p+1]).
    MPI_Allgather(&beg_row, 1, MPI_INT, rows_partition, 1, MPI_INT, comm);
    rows_partition[nprocs] = glb_nrows;

    // Maps an external global column id -> (owner rank, compressed local offd column).
    std::unordered_map<idx_t, std::pair<int, idx_t> > gid2pidlid;

    // First pass: count diag/offd nonzeros and collect the distinct external columns.
    idx_t diag_nnz = 0, offd_nnz = 0;
    idx_t offd_ncols = 0;
    for (idx_t loc_i = 0; loc_i < loc_nrows; loc_i++) {// every row owned by this rank
        for (idx_t  p = dist_row_ptr[loc_i  ] - dist_row_ptr[0]; 
                    p < dist_row_ptr[loc_i+1] - dist_row_ptr[0]; p++) {// every nonzero of that row
            idx_t glb_j = dist_col_idx[p];
            if (glb_j >= beg_col && glb_j < end_col) {
                diag_nnz ++;
            } else {// belongs to the off-diagonal block
                offd_nnz ++;
                if (gid2pidlid.find(glb_j) == gid2pidlid.end()) {// first time this column is seen
                    offd_ncols ++;
                    std::pair<int, idx_t> tmp(-1, -1);
                    gid2pidlid.emplace(glb_j, tmp);// placeholder; filled by buildup_comm_handles
                }
            }
        }
    }
    assert(offd_ncols == (idx_t) gid2pidlid.size());

    diag = new MyCSRMatrix<idx_t, data_t, calc_t>(loc_nrows, loc_nrows , diag_nnz);
    offd = new MyCSRMatrix<idx_t, data_t, calc_t>(loc_nrows, offd_ncols, offd_nnz);

    buildup_comm_handles(gid2pidlid);// fills (owner rank, local id) for each external column

    // Second pass: scatter the nonzeros into diag (local column numbering)
    // and offd (compressed external column numbering).
    idx_t ptr_diag = 0, ptr_offd = 0;
    for (idx_t loc_i = 0; loc_i < loc_nrows; loc_i++) {// every row owned by this rank
        diag->row_ptr[loc_i] = ptr_diag;
        offd->row_ptr[loc_i] = ptr_offd;
    
        for (idx_t  p = dist_row_ptr[loc_i  ] - dist_row_ptr[0]; 
                    p < dist_row_ptr[loc_i+1] - dist_row_ptr[0]; p++) {// every nonzero of that row
            idx_t glb_j = dist_col_idx[p];
            if (glb_j >= beg_col && glb_j < end_col) {// inside the range this rank owns
                diag->col_idx[ptr_diag] = glb_j - beg_col;
                diag->vals   [ptr_diag] = dist_vals[p];
                ptr_diag ++;
            } else {// otherwise look up its compressed local index
                // assert(gid2pidlid.find(glb_j) != gid2pidlid.end());
                idx_t dst = gid2pidlid[glb_j].second;
                offd->col_idx[ptr_offd] = dst;
                offd->vals   [ptr_offd] = dist_vals[p];
                ptr_offd ++;
            }
        }// row done
    }
    assert(ptr_diag == diag_nnz);
    assert(ptr_offd == offd_nnz);
    diag->row_ptr[loc_nrows] = ptr_diag;
    offd->row_ptr[loc_nrows] = ptr_offd;

    // Debug statistics: dump rank 0's diag row_ptr so nnz-per-row can be inspected.
    // NOTE(review): fopen result is unchecked, and "%d" assumes idx_t == int.
    if (my_pid == 0) {
        FILE * fp = fopen("stat.nnz", "w+");
        for (idx_t i = 0; i <= diag->nrows; i++) {
            fprintf(fp, "%d\n", diag->row_ptr[i]);
        }
        fclose(fp);
    }
#endif
}

/// @brief Set up the halo-exchange communication pattern for the offd block.
/// On entry gid2pidlid contains every external global column id (values (-1,-1));
/// on exit each entry maps to (owner rank, compressed local column id), and the
/// members col_map_offd, recv_pids/send_pids, recv_cnts/send_cnts (stored as
/// neighbor-compacted exclusive prefix sums), send_row_ids and send_buf are
/// allocated and filled for later update_halo() calls.
template<typename idx_t, typename data_t, typename calc_t>
void MyParCSRMatrix<idx_t, data_t, calc_t>::buildup_comm_handles(
        std::unordered_map<idx_t, std::pair<int, idx_t> > & gid2pidlid)
{
    const idx_t offd_ncols = offd->ncols; assert(offd_ncols >= 0);
    int nprocs; MPI_Comm_size(comm, & nprocs);

    // Collect all external global column ids...
    col_map_offd = new idx_t [offd_ncols];
    idx_t rp = 0;
    for (auto it = gid2pidlid.begin(); it != gid2pidlid.end(); it++)
        col_map_offd[rp ++] = it->first;

    // ...and sort them ascending, so columns owned by the same rank become contiguous.
    qsort(col_map_offd, offd_ncols, sizeof(idx_t), cmpfunc<idx_t>);

    assert(recv_cnts == nullptr && send_cnts == nullptr);
    recv_cnts = new idx_t [nprocs + 1];
    send_cnts = new idx_t [nprocs + 1];
    for (int p = 0; p <= nprocs; p++) {
        recv_cnts[p] = 0;// zero first
        send_cnts[p] = 0;
    }
    // Note: col_map_offd is guaranteed to be ascending here!
    // Determine the mapping global id -> (owner rank, local id).
    for (idx_t lid = 0; lid < offd_ncols; lid++) {
        idx_t gid = col_map_offd[lid];// global id
        int pid = MPI_PROC_NULL;
        for (int src_pid = 0; src_pid < nprocs; src_pid++) {// find which rank we receive it from
            // TODO: rows_partition is sorted — a binary search would avoid O(ncols*nprocs).
            if (gid >= rows_partition[src_pid] && gid < rows_partition[src_pid + 1]) {
                recv_cnts[src_pid] ++;
                pid = src_pid;
                break;
            }
        }
        // assert(pid != MPI_PROC_NULL);
        gid2pidlid[gid].first  = pid;
        gid2pidlid[gid].second = lid;
    }

    // Exchange counts: what we receive from rank p is exactly what p must send to us.
    send_reqs = new MPI_Request [nprocs];
    recv_reqs = new MPI_Request [nprocs];
    for (int p = 0; p < nprocs; p++) {
        MPI_Isend(&recv_cnts[p], 1, MPI_INT, p, 11, comm, send_reqs + p);
        MPI_Irecv(&send_cnts[p], 1, MPI_INT, p, 11, comm, recv_reqs + p);
    }
    MPI_Waitall(nprocs, send_reqs, MPI_STATUSES_IGNORE); delete [] send_reqs; send_reqs = nullptr;
    MPI_Waitall(nprocs, recv_reqs, MPI_STATUSES_IGNORE); delete [] recv_reqs; recv_reqs = nullptr;

    // Count the actual neighbors (ranks with a nonzero exchange volume).
    // NOTE(review): assumes num_recvs/num_sends start at 0 — confirm member initializers.
    for (int p = 0; p < nprocs; p++) {
        num_recvs += (recv_cnts[p] > 0);
        num_sends += (send_cnts[p] > 0);
    }
    recv_pids = new int [num_recvs];
    send_pids = new int [num_sends];

    // Turn the per-rank counts into exclusive prefix sums (displacements),
    // recording the neighbor rank ids along the way.
    idx_t prefix_sum[2] = {0, 0};
    num_recvs = num_sends = 0;
    for (int p = 0; p < nprocs; p++) {
        if (recv_cnts[p] > 0) recv_pids[num_recvs ++] = p;
        if (send_cnts[p] > 0) send_pids[num_sends ++] = p;

        const idx_t tmp[2] = {recv_cnts[p], send_cnts[p]};
        recv_cnts[p] = prefix_sum[0];
        send_cnts[p] = prefix_sum[1];
        prefix_sum[0] += tmp[0];
        prefix_sum[1] += tmp[1];
    }
    recv_cnts[nprocs] = prefix_sum[0];
    send_cnts[nprocs] = prefix_sum[1];
    // At this point we receive recv_cnts[p+1] - recv_cnts[p] elements from rank p,
    // whose global indices are col_map_offd[recv_cnts[p]] ~ col_map_offd[recv_cnts[p+1]].
    // We must send send_cnts[p+1] - send_cnts[p] elements to rank p, whose global
    // indices will live in send_row_ids[send_cnts[p]] ~ send_row_ids[send_cnts[p+1]],
    // but the concrete values are still unknown.
    send_row_ids = new idx_t [send_cnts[nprocs]];

    // Tell each neighbor WHICH of its rows we need; symmetrically learn which of
    // our rows each neighbor needs (this fills send_row_ids).
    send_reqs = new MPI_Request [num_recvs];
    recv_reqs = new MPI_Request [num_sends];
    for (idx_t r = 0; r < num_recvs; r++) {
        const int src_pid = recv_pids[r];
        const idx_t num = recv_cnts[src_pid + 1] - recv_cnts[src_pid]; //assert(num > 0);
        const idx_t * send_buf = col_map_offd + recv_cnts[src_pid];
        MPI_Isend(send_buf, num, MPI_INT, src_pid, 22, comm, send_reqs + r);
    }
    for (idx_t s = 0; s < num_sends; s++) {
        const int dst_pid = send_pids[s];
        const idx_t num = send_cnts[dst_pid + 1] - send_cnts[dst_pid]; //assert(num > 0);
        idx_t * recv_buf = send_row_ids + send_cnts[dst_pid];
        MPI_Irecv(recv_buf, num, MPI_INT, dst_pid, 22, comm, recv_reqs + s);
    }
    MPI_Waitall(num_recvs, send_reqs, MPI_STATUSES_IGNORE); delete [] send_reqs; send_reqs = nullptr;
    MPI_Waitall(num_sends, recv_reqs, MPI_STATUSES_IGNORE); delete [] recv_reqs; recv_reqs = nullptr;

    // Compact the displacement arrays: most ranks are not neighbors, so reindex
    // from "per rank" (size nprocs+1) down to "per neighbor" (size num_* + 1).
    idx_t * shrinked_recv_cnts = new idx_t [num_recvs + 1]; shrinked_recv_cnts[0] = 0;
    idx_t * shrinked_send_cnts = new idx_t [num_sends + 1]; shrinked_send_cnts[0] = 0;
    for (int p = 0, tr = 0, ts = 0; p < nprocs; p++) {
        if (recv_cnts[p + 1] > recv_cnts[p]) {
            // assert(recv_pids[tr] == p);
            shrinked_recv_cnts[tr + 1] = recv_cnts[p + 1];
            tr ++;
        }
        if (send_cnts[p + 1] > send_cnts[p]) {
            // assert(send_pids[ts] == p);
            shrinked_send_cnts[ts + 1] = send_cnts[p + 1];
            ts ++;
        }
    }
    delete [] recv_cnts; recv_cnts = shrinked_recv_cnts; shrinked_recv_cnts = nullptr;
    delete [] send_cnts; send_cnts = shrinked_send_cnts; shrinked_send_cnts = nullptr;

    // Persistent request arrays and the packing buffer for later halo exchanges.
    send_reqs = new MPI_Request [num_sends];
    recv_reqs = new MPI_Request [num_recvs];
    send_buf = new calc_t [send_cnts[num_sends]];
}


/// Bare constructor: only binds the communicator. All structural members are
/// left at their defaults (presumably in-class initializers — the destructor
/// relies on them being valid) and must be populated by the caller.
template<typename idx_t, typename data_t, typename calc_t>
MyParCSRMatrix<idx_t, data_t, calc_t>::MyParCSRMatrix(MPI_Comm _comm) : comm(_comm) {}

/// Destructor: releases every heap allocation owned by the matrix.
/// `delete` / `delete[]` on a null pointer is a no-op, so no guards are needed,
/// and nulling the members afterwards is pointless in a destructor.
template<typename idx_t, typename data_t, typename calc_t>
MyParCSRMatrix<idx_t, data_t, calc_t>::~MyParCSRMatrix()
{
    // Partition table
    delete [] rows_partition;

    // Receive-side communication metadata
    delete [] recv_pids;
    delete [] recv_cnts;
    delete [] col_map_offd;

    // Send-side communication metadata
    delete [] send_pids;
    delete [] send_cnts;
    delete [] send_row_ids;

    // Matrix blocks
    delete diag;
    delete offd;

    // Halo-exchange scratch state
    delete [] send_buf;
    delete [] send_reqs;
    delete [] recv_reqs;

    // Optional point coordinates
    delete [] points;
}

/// @brief Load per-row point coordinates from a text file of the form
///        "Vec_s = [" followed by lines "<id> <val> <x> <y> <z>".
/// Every rank reads the whole file but keeps only its own rows [beg_row, end_row).
/// Silently returns (with a message on rank 0) if the file cannot be opened.
/// Fix: the fscanf return values were previously ignored, so a truncated or
/// malformed file filled `points` with indeterminate values; now parsing stops
/// with a warning as soon as a row fails to scan all 5 fields.
template<typename idx_t, typename data_t, typename calc_t>
void MyParCSRMatrix<idx_t, data_t, calc_t>::init_point_coords(const std::string filename)
{
    int my_pid; MPI_Comm_rank(comm, &my_pid);
    FILE * fp = fopen(filename.c_str(), "r");
    if (fp == nullptr) {
        if (my_pid == 0) printf("Cannot find coordinates\n");
        return;
    }
    assert(points == nullptr);
    points = new Point<data_t> [end_row - beg_row];
    fscanf(fp, "Vec_s = [\n"); // skip header; a pure-literal format returns 0 on match
    for (idx_t gi = 0; gi < glb_nrows; gi++) {
        idx_t tmp_i;
        data_t tmp_f;
        Point<data_t> tmp_p;
        int nread = 0;
        // Pick the conversion specifiers matching data_t's width (%le = double, %e = float).
        if constexpr (sizeof(data_t) == 8)
            nread = fscanf(fp, "%d %le %le %le %le", &tmp_i, &tmp_f, &tmp_p.coord[0], &tmp_p.coord[1], &tmp_p.coord[2]);
        else if constexpr (sizeof(data_t) == 4)
            nread = fscanf(fp, "%d %e %e %e %e", &tmp_i, &tmp_f, &tmp_p.coord[0], &tmp_p.coord[1], &tmp_p.coord[2]);
        if (nread != 5) {// truncated/malformed file: stop instead of storing garbage
            if (my_pid == 0) printf("Warning: coordinate file truncated at row %d\n", (int) gi);
            break;
        }
        if (beg_row <= gi && gi < end_row)
            points[gi - beg_row] = tmp_p;
    }
    fclose(fp);
    MPI_Barrier(comm);
    if (my_pid == 0) printf("Done load points' coordinates\n");
}

/// @brief Start the non-blocking halo exchange for a vector.
/// @param my_vec    this rank's owned entries (indexed by local row)
/// @param other_vec destination for the external (halo) entries, laid out in
///                  the compressed offd column order; valid after wait_update_halo()
template<typename idx_t, typename data_t, typename calc_t>
void MyParCSRMatrix<idx_t, data_t, calc_t>::init_update_halo(const calc_t * my_vec, calc_t * other_vec) const
{
#ifndef USE_GPU
    static_assert(sizeof(calc_t) == 4 || sizeof(calc_t) == 8);
    const MPI_Datatype etype = (sizeof(calc_t) == 8) ? MPI_DOUBLE : MPI_FLOAT;
    // Pack the rows each neighbor asked for and post one send per neighbor.
    for (idx_t si = 0; si < num_sends; si++) {
        const idx_t lo = send_cnts[si], hi = send_cnts[si + 1];
        calc_t * pack = send_buf + lo;
        for (idx_t k = lo; k < hi; k++)
            pack[k - lo] = my_vec[send_row_ids[k] - beg_row];
        MPI_Isend(pack, hi - lo, etype, send_pids[si], 33, comm, send_reqs + si);
    }
    // Post one receive per neighbor; halo values land directly in other_vec.
    for (idx_t ri = 0; ri < num_recvs; ri++) {
        const idx_t lo = recv_cnts[ri], hi = recv_cnts[ri + 1];
        MPI_Irecv(other_vec + lo, hi - lo, etype, recv_pids[ri], 33, comm, recv_reqs + ri);
    }
#endif
}

template<typename idx_t, typename data_t, typename calc_t>
void MyParCSRMatrix<idx_t, data_t, calc_t>::wait_update_halo() const
{
#ifndef USE_GPU
    // Complete the non-blocking exchange started by init_update_halo();
    // afterwards the receive buffer passed there holds valid halo data
    // and send_buf/send_reqs/recv_reqs may be reused.
    MPI_Waitall(num_sends, send_reqs, MPI_STATUSES_IGNORE);
    MPI_Waitall(num_recvs, recv_reqs, MPI_STATUSES_IGNORE);
#endif
}

template<typename idx_t, typename data_t, typename calc_t>
void MyParCSRMatrix<idx_t, data_t, calc_t>::update_halo(const calc_t * my_vec, calc_t * other_vec) const
{
#ifndef USE_GPU
    // Blocking convenience wrapper: start the halo exchange and wait for it
    // (no computation/communication overlap — callers wanting overlap use
    // init_update_halo()/wait_update_halo() directly, as Mult does).
    init_update_halo(my_vec, other_vec);
    wait_update_halo();
#endif
}

/// @brief Distributed SpMV: y = alpha * A * x + beta * b.
/// The diag (local) block is multiplied while the halo values of x are in
/// flight, overlapping communication with computation; the offd block then
/// accumulates the external contribution into y.
/// Fix: the halo scratch buffer was a raw new[]/delete[] pair that leaked if
/// anything between them threw; it is now an RAII std::vector.
template<typename idx_t, typename data_t, typename calc_t>
void MyParCSRMatrix<idx_t, data_t, calc_t>::Mult(calc_t alpha, const MyParCSRVector<idx_t, calc_t> & x,
    calc_t beta, const MyParCSRVector<idx_t, calc_t> & b, MyParCSRVector<idx_t, calc_t> & y) const
{
    assert(glb_nrows == y.glb_nrows && beg_row == y.beg_row && end_row == y.end_row);

#ifdef USE_GPU
    diag->Mult(alpha, x.data,         beta, b.data, y.data);
#else
    std::vector<calc_t> offd_x((size_t) offd->ncols); // halo values of x, RAII-owned
    init_update_halo(x.data, offd_x.data()); // start non-blocking halo exchange
    diag->Mult(alpha, x.data,         beta, b.data, y.data); // local block, overlapped
    wait_update_halo(); // halo values now valid
    offd->Mult(alpha, offd_x.data(), (calc_t) 1.0, y.data); // accumulate external part
#endif
}

/// @brief In-place distributed SpMV: y = alpha * A * x + beta * y,
/// implemented by aliasing y as the additive input b of the 5-arg overload.
template<typename idx_t, typename data_t, typename calc_t>
void MyParCSRMatrix<idx_t, data_t, calc_t>::Mult(calc_t alpha, const MyParCSRVector<idx_t, calc_t> & x,
            calc_t beta, MyParCSRVector<idx_t, calc_t> & y) const
{
    Mult(alpha, x, beta, y, y);
}

// Explicit instantiations: <index type, matrix storage type, computation type>.
template class MyParCSRMatrix<int, float , double>;
template class MyParCSRMatrix<int, double, double>;
template class MyParCSRMatrix<int, float , float >;