#include "my_parcsr.hpp"

#include <algorithm>
#include <cassert>
#include <vector>


/// Build a distributed ParCSR matrix from this process's contiguous row slice
/// [_beg, _end) of a global CSR matrix.
///
/// The local rows are split into a square "diag" block (columns inside the
/// owned range) and an "offd" block (all other columns, compressed via
/// col_map_offd).  The constructor also negotiates, with all other processes,
/// which vector entries must be exchanged before an off-diagonal SpMV
/// (recv_*/send_* members), and allocates the persistent request handles and
/// pack buffer used by init_update_halo()/wait_update_halo().
///
/// @param dist_row_ptr  row pointers of the local slice (may carry a global
///                      offset: entries are rebased by dist_row_ptr[0])
/// @param dist_col_idx  GLOBAL column indices of the local nonzeros
/// @param dist_vals     values of the local nonzeros
template<typename idx_t, typename data_t, typename calc_t>
MyParCSRMatrix<idx_t, data_t, calc_t>::MyParCSRMatrix(MPI_Comm _comm, const idx_t _glb_nrows,
    const idx_t _beg, const idx_t _end, const idx_t * dist_row_ptr, const idx_t * dist_col_idx, const calc_t * dist_vals)
    : comm(_comm), glb_nrows(_glb_nrows), beg_row(_beg), end_row(_end)
{
    int my_pid; MPI_Comm_rank(comm, &my_pid);
    int nprocs; MPI_Comm_size(comm, &nprocs);

    const idx_t loc_nrows = end_row - beg_row;
    // Square partition: this process owns the same column range as its row range.
    const idx_t beg_col = beg_row, end_col = end_row;

    // Count, for every global column, how many of my nonzeros reference it,
    // so the diagonal and off-diagonal blocks can be separated.
    idx_t * stat_cnts = new idx_t [glb_nrows];
    #pragma omp parallel for schedule(static)
    for (idx_t i = 0; i < glb_nrows; i++)// zero the counters first
        stat_cnts[i] = 0;

    idx_t diag_nnz = 0, offd_nnz = 0;
    idx_t offd_ncols = 0;
    for (idx_t loc_i = 0; loc_i < loc_nrows; loc_i++) {// every row owned by this process
        for (idx_t  p = dist_row_ptr[loc_i  ] - dist_row_ptr[0]; 
                    p < dist_row_ptr[loc_i+1] - dist_row_ptr[0]; p++) {// every nonzero of the row
            idx_t glb_j = dist_col_idx[p];
            assert(glb_j >= 0 && glb_j < glb_nrows);
            if (glb_j >= beg_col && glb_j < end_col) {
                diag_nnz ++;
            } else {// belongs to the off-diagonal block
                offd_nnz ++;
                if (stat_cnts[glb_j] == 0) offd_ncols ++;// first time this column is seen
            }
            stat_cnts[glb_j] ++;
        }
    }

    diag = new MyCSRMatrix<idx_t, data_t, calc_t>(loc_nrows, loc_nrows , diag_nnz);
    offd = new MyCSRMatrix<idx_t, data_t, calc_t>(loc_nrows, offd_ncols, offd_nnz);

    // Map from compressed offd column index to global column index.
    col_map_offd = new idx_t [offd_ncols];
    idx_t rp = 0;
    for (idx_t j = 0; j < glb_nrows; j++) {
        if (j < beg_col || j >= end_col) {// belongs to offd
            if (stat_cnts[j] > 0) {
                col_map_offd[rp] = j;
                rp ++;
            }
        }
    }
    assert(rp == offd_ncols);
    // NOTE: col_map_offd is sorted ascending by construction (j scans upward).
    delete [] stat_cnts;// FIX: this buffer was previously leaked

    // Split the distributed row pointers / column indices / values of my row
    // range into the diagonal and off-diagonal CSR blocks.
    idx_t ptr_diag = 0, ptr_offd = 0;
    for (idx_t loc_i = 0; loc_i < loc_nrows; loc_i++) {// every row owned by this process
        diag->row_ptr[loc_i] = ptr_diag;
        offd->row_ptr[loc_i] = ptr_offd;
    
        for (idx_t  p = dist_row_ptr[loc_i  ] - dist_row_ptr[0]; 
                    p < dist_row_ptr[loc_i+1] - dist_row_ptr[0]; p++) {// every nonzero of the row
            idx_t glb_j = dist_col_idx[p];
            if (glb_j >= beg_col && glb_j < end_col) {// column inside my range: diagonal block
                diag->col_idx[ptr_diag] = glb_j - beg_col;
                diag->vals   [ptr_diag] = dist_vals[p];
                ptr_diag ++;
            } else {// otherwise off-diagonal block
                // col_map_offd is sorted, so locate the compressed column with a
                // binary search instead of the original O(offd_ncols) linear scan.
                const idx_t * pos = std::lower_bound(col_map_offd, col_map_offd + offd_ncols, glb_j);
                assert(pos != col_map_offd + offd_ncols && *pos == glb_j);
                offd->col_idx[ptr_offd] = (idx_t)(pos - col_map_offd);
                offd->vals   [ptr_offd] = dist_vals[p];
                ptr_offd ++;
            }
        }// row done
    }
    assert(ptr_diag == diag_nnz);
    assert(ptr_offd == offd_nnz);
    diag->row_ptr[loc_nrows] = ptr_diag;
    offd->row_ptr[loc_nrows] = ptr_offd;

    rows_partition = new idx_t [nprocs + 1];
    recv_cnts = new idx_t [nprocs + 1];
    send_cnts = new idx_t [nprocs + 1];
    // Gather every process's partition start.
    // NOTE: MPI_INT assumes idx_t == int (true for the explicit instantiations
    // at the bottom of this file).
    MPI_Allgather(&beg_row, 1, MPI_INT, rows_partition, 1, MPI_INT, comm);
    rows_partition[nprocs] = glb_nrows;

    for (int p = 0; p <= nprocs; p++) {
        recv_cnts[p] = 0;// zero first
        send_cnts[p] = 0;
    }

    // For each needed halo column, find the owning process.
    // col_map_offd is sorted ascending; ownership ranges are disjoint.
    for (idx_t j = 0; j < offd_ncols; j++) {
        idx_t glb_col = col_map_offd[j];
        for (int src_pid = 0; src_pid < nprocs; src_pid++) {// locate the owner to receive from
            if (glb_col >= rows_partition[src_pid] && glb_col < rows_partition[src_pid + 1]) {
                recv_cnts[src_pid] ++;
                break;// at most one owner — no need to scan further
            }
        }
    }

    // Exchange counts: tell each process how much I need from it, and learn
    // how much it needs from me.
    send_reqs = new MPI_Request [nprocs];
    recv_reqs = new MPI_Request [nprocs];
    for (int p = 0; p < nprocs; p++) {
        MPI_Isend(&recv_cnts[p], 1, MPI_INT, p, 11, comm, send_reqs + p);
        MPI_Irecv(&send_cnts[p], 1, MPI_INT, p, 11, comm, recv_reqs + p);
    }
    MPI_Waitall(nprocs, send_reqs, MPI_STATUSES_IGNORE); delete [] send_reqs; send_reqs = nullptr;
    MPI_Waitall(nprocs, recv_reqs, MPI_STATUSES_IGNORE); delete [] recv_reqs; recv_reqs = nullptr;

    num_recvs = num_sends = 0;// defensive: do not rely on in-class initializers
    for (int p = 0; p < nprocs; p++) {
        num_recvs += (recv_cnts[p] > 0);
        num_sends += (send_cnts[p] > 0);
    }
    recv_pids = new int [num_recvs];
    send_pids = new int [num_sends];

    // Record neighbor pids and convert counts into exclusive prefix sums.
    idx_t prefix_sum[2] = {0, 0};
    num_recvs = num_sends = 0;
    for (int p = 0; p < nprocs; p++) {
        if (recv_cnts[p] > 0) recv_pids[num_recvs ++] = p;
        if (send_cnts[p] > 0) send_pids[num_sends ++] = p;

        const idx_t tmp[2] = {recv_cnts[p], send_cnts[p]};
        recv_cnts[p] = prefix_sum[0];
        send_cnts[p] = prefix_sum[1];
        prefix_sum[0] += tmp[0];
        prefix_sum[1] += tmp[1];
    }
    recv_cnts[nprocs] = prefix_sum[0];
    send_cnts[nprocs] = prefix_sum[1];
    // Now recv_cnts[p+1] - recv_cnts[p] elements come from process p, with
    // global indices col_map_offd[recv_cnts[p]] .. col_map_offd[recv_cnts[p+1]-1].
    // send_cnts[p+1] - send_cnts[p] elements go to process p; their global
    // indices (send_row_ids) are unknown yet and are exchanged right below.
    send_row_ids = new idx_t [send_cnts[nprocs]];

    send_reqs = new MPI_Request [num_recvs];
    recv_reqs = new MPI_Request [num_sends];
    for (idx_t r = 0; r < num_recvs; r++) {
        const int src_pid = recv_pids[r];
        const idx_t num = recv_cnts[src_pid + 1] - recv_cnts[src_pid]; assert(num > 0);
        const idx_t * send_buf = col_map_offd + recv_cnts[src_pid];
        MPI_Isend(send_buf, num, MPI_INT, src_pid, 22, comm, send_reqs + r);
    }
    for (idx_t s = 0; s < num_sends; s++) {
        const int dst_pid = send_pids[s];
        const idx_t num = send_cnts[dst_pid + 1] - send_cnts[dst_pid]; assert(num > 0);
        idx_t * recv_buf = send_row_ids + send_cnts[dst_pid];
        MPI_Irecv(recv_buf, num, MPI_INT, dst_pid, 22, comm, recv_reqs + s);
    }
    MPI_Waitall(num_recvs, send_reqs, MPI_STATUSES_IGNORE); delete [] send_reqs; send_reqs = nullptr;
    MPI_Waitall(num_sends, recv_reqs, MPI_STATUSES_IGNORE); delete [] recv_reqs; recv_reqs = nullptr;

    // Compact the offset arrays: most processes are not neighbors, so index
    // them by neighbor rank (0..num_recvs / 0..num_sends) instead of by pid.
    idx_t * shrinked_recv_cnts = new idx_t [num_recvs + 1]; shrinked_recv_cnts[0] = 0;
    idx_t * shrinked_send_cnts = new idx_t [num_sends + 1]; shrinked_send_cnts[0] = 0;
    for (int p = 0, tr = 0, ts = 0; p < nprocs; p++) {
        if (recv_cnts[p + 1] > recv_cnts[p]) {
            assert(recv_pids[tr] == p);
            shrinked_recv_cnts[tr + 1] = recv_cnts[p + 1];
            tr ++;
        }
        if (send_cnts[p + 1] > send_cnts[p]) {
            assert(send_pids[ts] == p);
            shrinked_send_cnts[ts + 1] = send_cnts[p + 1];
            ts ++;
        }
    }
    delete [] recv_cnts; recv_cnts = shrinked_recv_cnts; shrinked_recv_cnts = nullptr;
    delete [] send_cnts; send_cnts = shrinked_send_cnts; shrinked_send_cnts = nullptr;

    // Persistent nonblocking request handles and pack buffer for halo updates.
    send_reqs = new MPI_Request [num_sends];
    recv_reqs = new MPI_Request [num_recvs];
    send_buf = new calc_t [send_cnts[num_sends]];
}

/// Release all resources owned by the matrix.
/// `delete`/`delete[]` on nullptr is a no-op, so no null checks are needed.
template<typename idx_t, typename data_t, typename calc_t>
MyParCSRMatrix<idx_t, data_t, calc_t>::~MyParCSRMatrix()
{
    delete [] rows_partition;

    delete [] recv_pids;
    delete [] recv_cnts;
    delete [] col_map_offd;

    delete [] send_pids;
    delete [] send_cnts;
    delete [] send_row_ids;

    // FIX: these three are allocated at the end of the constructor but were
    // never freed here, leaking on every matrix destruction.
    delete [] send_reqs;
    delete [] recv_reqs;
    delete [] send_buf;

    delete diag;
    delete offd;
}

/// Start a nonblocking halo exchange.
/// @param my_vec    values of this process's owned range (indexed from beg_row)
/// @param other_vec destination for the halo values, laid out in compressed
///                  offd column order (matches col_map_offd)
/// Completed by wait_update_halo(); requests live in send_reqs/recv_reqs.
template<typename idx_t, typename data_t, typename calc_t>
void MyParCSRMatrix<idx_t, data_t, calc_t>::init_update_halo(const calc_t * my_vec, calc_t * other_vec)
{
    // Only 32- and 64-bit calc_t are supported for the wire format.
    static_assert(sizeof(calc_t) == 4 || sizeof(calc_t) == 8);
    const MPI_Datatype mpi_type = (sizeof(calc_t) == 8) ? MPI_DOUBLE : MPI_FLOAT;

    // Pack and post sends: entries of my owned range that neighbors'
    // off-diagonal SpMV depends on.
    for (idx_t s_idx = 0; s_idx < num_sends; s_idx++) {
        const idx_t first = send_cnts[s_idx];
        const idx_t count = send_cnts[s_idx + 1] - first;
        calc_t * pack = send_buf + first;
        for (idx_t k = 0; k < count; k++)
            pack[k] = my_vec[send_row_ids[first + k] - beg_row];
        MPI_Isend(pack, count, mpi_type, send_pids[s_idx], 33, comm, send_reqs + s_idx);
    }
    // Post receives: halo entries owned by neighboring processes that my own
    // off-diagonal SpMV depends on.
    for (idx_t r_idx = 0; r_idx < num_recvs; r_idx++) {
        const idx_t first = recv_cnts[r_idx];
        const idx_t count = recv_cnts[r_idx + 1] - first;
        MPI_Irecv(other_vec + first, count, mpi_type, recv_pids[r_idx], 33, comm, recv_reqs + r_idx);
    }
}

/// Block until the halo exchange started by init_update_halo() has finished.
/// Both request sets must complete; the order of the two waits is irrelevant.
template<typename idx_t, typename data_t, typename calc_t>
void MyParCSRMatrix<idx_t, data_t, calc_t>::wait_update_halo()
{
    MPI_Waitall(num_recvs, recv_reqs, MPI_STATUSES_IGNORE);
    MPI_Waitall(num_sends, send_reqs, MPI_STATUSES_IGNORE);
}

/// Blocking halo exchange: post the nonblocking sends/receives, then
/// immediately wait for all of them to complete.
template<typename idx_t, typename data_t, typename calc_t>
void MyParCSRMatrix<idx_t, data_t, calc_t>::update_halo(const calc_t * my_vec, calc_t * other_vec)
{
    this->init_update_halo(my_vec, other_vec);
    this->wait_update_halo();
}

/// y = alpha * A * x + beta * b, overlapping communication with computation:
/// the halo exchange for the off-diagonal block is started first, the
/// diagonal-block SpMV runs while the messages are in flight, and the
/// off-diagonal contribution is accumulated after the exchange completes.
template<typename idx_t, typename data_t, typename calc_t>
void MyParCSRMatrix<idx_t, data_t, calc_t>::Mult(calc_t alpha, const MyParCSRVector<idx_t, calc_t> & x,
            calc_t beta, const MyParCSRVector<idx_t, calc_t> & b, MyParCSRVector<idx_t, calc_t> & y)
{
    assert(glb_nrows == y.glb_nrows && beg_row == y.beg_row && end_row == y.end_row);
    // FIX: RAII buffer instead of raw new[]/delete[] — no leak if anything
    // below throws.
    std::vector<calc_t> offd_x(offd->ncols);
    // Start the communication needed by the off-diagonal block first.
    init_update_halo(x.data, offd_x.data());
    // Overlap: diagonal-block SpMV needs no remote data.
    diag->Mult(alpha, x.data,         beta, b.data, y.data);
    // Wait for the halo values, then accumulate the off-diagonal contribution.
    wait_update_halo();
    offd->Mult(alpha, offd_x.data(), (calc_t) 1.0, y.data);
}

/// Convenience overload: y = alpha * A * x + beta * y
/// (the output vector also supplies the additive term b).
template<typename idx_t, typename data_t, typename calc_t>
void MyParCSRMatrix<idx_t, data_t, calc_t>::Mult(calc_t alpha, const MyParCSRVector<idx_t, calc_t> & x,
            calc_t beta, MyParCSRVector<idx_t, calc_t> & y)
{
    // Delegate to the general form with b == y.
    this->Mult(alpha, x, beta, y, y);
}

// Explicit instantiations: <index type, storage precision, computation precision>.
// All use int indices (the MPI_INT calls in this file rely on idx_t == int).
template class MyParCSRMatrix<int, float , double>;
template class MyParCSRMatrix<int, double, double>;
template class MyParCSRMatrix<int, float , float >;