#ifndef COARSEN_HPP
#define COARSEN_HPP
#include "my_parcsr.hpp"
#include "utils.hpp"

/// @brief Wrapper holding the interpolation matrix, the restriction matrix and
///        the coarse-grid matrix produced by one coarsening step.
/// @tparam idx_t  index type
/// @tparam data_t floating-point type used for storage
/// @tparam calc_t floating-point type used for computation
template<typename idx_t, typename data_t, typename calc_t>
class CoarsenWrapper
{
public:
    // Interpolation matrix. Because non-smoothed aggregation is used, the
    // interpolation operator is fully described by a table of length
    // (number of fine points) recording which coarse point each fine point
    // is mapped to.
    idx_t * loc_map_f2c = nullptr;

    // Restriction matrix. With non-smoothed aggregation only the row offsets
    // and column indices are needed; the nonzero values are not.
    idx_t * R_rp = nullptr;// row offsets
    idx_t * R_ci = nullptr;// column indices

    // The generated coarse matrix
    MyParCSRMatrix<idx_t, data_t, calc_t> * coar_mat = nullptr;
    /// @brief constructor: all members start out as nullptr
    CoarsenWrapper() {}
    // The wrapper owns raw arrays released in the destructor; copying it would
    // lead to double-free, so copy operations are disabled (Rule of Five).
    CoarsenWrapper(const CoarsenWrapper &) = delete;
    CoarsenWrapper & operator=(const CoarsenWrapper &) = delete;
    /// @brief destructor: releases every owned array and the coarse matrix
    ///        (deleting a null pointer is a no-op, so no guards are needed)
    ~CoarsenWrapper() {
        delete [] loc_map_f2c; loc_map_f2c = nullptr;
        delete [] R_rp; R_rp = nullptr;
        delete [] R_ci; R_ci = nullptr;
        delete coar_mat; coar_mat = nullptr;
    }
};
// Convenience aliases for the common instantiations:
// I32 = 32-bit integer indices, F32/F64 = storage / computation precision.
using CoarsenWrapper_I32F32F64 = CoarsenWrapper<int, float , double>;
using CoarsenWrapper_I32F64F64 = CoarsenWrapper<int, double, double>;
using CoarsenWrapper_I32F32F32 = CoarsenWrapper<int, float , float >;

/// @brief Coarsening routine.
/// @tparam idx_t  index type
/// @tparam data_t floating-point type used for storage
/// @tparam calc_t floating-point type used for computation
/// @param fine_mat matrix of the fine level
/// @param use_coord whether to use the coordinate-based coarsening method
/// @param threshold threshold deciding which neighbors of a point count as
///        strongly coupled/influencing, usually 0.25 (only effective when the
///        min-heap-based coarsening method is used)
/// @param num number of pairwise-aggregation passes, usually 1
/// @param alpha scaling used when computing the coarse matrix via AC = R*AF*P;
///        the doc historically said "usually 1.0" while the declared default
///        is 0.5 — NOTE(review): confirm which value is intended
/// @return wrapper holding the interpolation matrix, restriction matrix and
///         coarse-grid matrix produced by the coarsening
template<typename idx_t, typename data_t, typename calc_t>
CoarsenWrapper<idx_t, data_t, calc_t>* coarsen(const MyParCSRMatrix<idx_t, data_t, calc_t> & fine_mat,
    const idx_t use_coord, const calc_t threshold, const idx_t num, const calc_t alpha = 0.5);
// Explicit instantiation declarations (the definitions live in a .cpp file).
// Default arguments must NOT be repeated here: the C++ standard forbids
// default arguments on explicit instantiations ([dcl.fct.default]).
extern template CoarsenWrapper<int, double, double>* coarsen(const MyParCSRMatrix<int, double, double> & fine_mat,
    const int  use_coord, const double threshold, const int num, const double alpha);
extern template CoarsenWrapper<int, float , double>* coarsen(const MyParCSRMatrix<int, float , double> & fine_mat,
    const int  use_coord, const double threshold, const int num, const double alpha);
extern template CoarsenWrapper<int, float , float >* coarsen(const MyParCSRMatrix<int, float , float > & fine_mat,
    const int  use_coord, const float  threshold, const int num, const float  alpha);

/// @brief Compute the coarse-grid matrix from the restriction matrix, the
///        interpolation map and the fine-grid matrix. Note that both the input
///        and the output here are plain CSR matrices (NOT parallel CSR ones).
/// @tparam idx_t  index type
/// @tparam data_t floating-point type used for storage
/// @tparam calc_t floating-point type used for computation
/// @param fine_mat matrix of the fine level
/// @param num_coar number of coarse points owned by this process; it is also
///                 the number of rows of the generated coarse matrix
/// @param num_cols number of columns of the generated coarse matrix
/// @param Rstr_rp row offsets of the restriction matrix
/// @param Rstr_ci column indices of the restriction matrix
/// @param col_map_f2c interpolation map: fine column id -> coarse column id
/// @param coar_mat coarse matrix (output parameter, updated on return;
///                 must be nullptr on entry)
/// @param alpha scaling used when computing AC = R*AF*P; usually 1.0
template<typename idx_t, typename data_t, typename calc_t>
void aggregate_mat(const MyCSRMatrix<idx_t, data_t, calc_t> & fine_mat,
    const idx_t num_coar, const idx_t num_cols,
    const idx_t * Rstr_rp, const idx_t * Rstr_ci,
    const idx_t * col_map_f2c, MyCSRMatrix<idx_t, data_t, calc_t> * & coar_mat, const calc_t alpha)
{
    assert(coar_mat == nullptr);
    coar_mat = new MyCSRMatrix<idx_t, data_t, calc_t>(num_coar, num_cols);

    // First an intermediate ("assumed") result is computed, in which one row
    // may contain duplicate nonzeros (equal column ids); the duplicates are
    // merged afterwards. All intermediate data carries the assum_ prefix.
    idx_t * assum_rpt = new idx_t [num_coar + 1];// assumed row offsets
    assum_rpt[0] = 0;
    idx_t * assum_cids = nullptr;// assumed column indices
    data_t* assum_vals = nullptr;// assumed nonzero values

    coar_mat->row_ptr[0] = 0;
    data_t * rstr_coeff = new data_t [num_coar];// per-row restriction coefficients; temporary, need not be stored afterwards
    
    // The coarse matrix is assembled by multiple OpenMP threads that advance
    // through numbered phases; each thread publishes its progress in
    // thread_flags and its running totals in thread_shared, so the prefix
    // sums can be chained thread-by-thread without a full barrier.
    // Phase meaning per thread: 1 = assumed-nnz total published,
    // 2 = assum_rpt filled, 3 = (tid 0 only) assum arrays allocated,
    // 4 = unique-nnz total published, 5 = coar_mat->row_ptr filled,
    // 6 = (tid 0 only) final col_idx/vals arrays allocated.
    // NOTE(review): these are variable-length arrays — a GCC/Clang extension,
    // not standard C++.
    idx_t thread_flags [omp_get_max_threads()];// per-thread flag: which phase the thread has completed
    idx_t thread_shared[omp_get_max_threads()];// data area shared between threads (running totals)
    for (idx_t t = 0; t < omp_get_max_threads(); t++) thread_flags[t] = -1;// phase number initialized to -1
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int nt  = omp_get_num_threads();
        int tbeg = 0, tend = 0;
        thread_load_balance(num_coar, nt, tid, tbeg, tend);// balance the row range across threads

        idx_t * tl_rpt = new idx_t [tend - tbeg + 1];// thread-local row offsets
        tl_rpt[0] = 0;
        for (idx_t ci = tbeg; ci < tend; ci++) {// for every coarse row owned by this thread
            idx_t sum_nnz = 0;// nnz of this coarse row = sum of the nnz of the fine rows restricted into it
            for (idx_t p = Rstr_rp[ci]; p < Rstr_rp[ci + 1]; p++) {// for every fine row that may restrict into it
                idx_t fi = Rstr_ci[p];// fine row id
                sum_nnz += fine_mat.row_ptr[fi + 1] - fine_mat.row_ptr[fi];// how many nonzeros this fine row contributes
            }
            tl_rpt[ci - tbeg + 1] = tl_rpt[ci - tbeg] + sum_nnz;// local prefix sum
        }

        while (tid > 0 && __atomic_load_n( & thread_flags[tid - 1], __ATOMIC_ACQUIRE) < 1) {  }// wait for the previous thread to publish its total (phase 1)
        idx_t prev_tsum = (tid > 0) ? thread_shared[tid - 1] : 0;
        thread_shared[tid] = prev_tsum + tl_rpt[tend - tbeg];// publish this thread's running total to the shared area
        __atomic_store_n( & thread_flags[tid], 1, __ATOMIC_RELEASE);
        
        for (idx_t ci = tbeg; ci < tend; ci++) {// fill the intermediate row offsets of this thread's range
            assum_rpt[ci + 1] = prev_tsum + tl_rpt[ci - tbeg + 1];
        }
        __atomic_store_n( & thread_flags[tid], 2, __ATOMIC_RELEASE);
        
        if (tid == 0) {
            while (__atomic_load_n( & thread_flags[nt - 1], __ATOMIC_ACQUIRE) < 2) {  }// wait until the last thread has filled its offsets
            const idx_t assum_nnz = assum_rpt[num_coar];
            assum_cids = new idx_t [assum_nnz];// assumed column ids (may contain duplicates)
            assum_vals = new data_t[assum_nnz];// assumed nonzeros (may contain duplicates)
            __atomic_store_n( & thread_flags[tid], 3, __ATOMIC_RELEASE);
        }
        
        while (tid > 0 && __atomic_load_n( & thread_flags[tid - 1], __ATOMIC_ACQUIRE) < 2) {  }// wait for the previous thread to finish filling the assumed (assum) row offsets
        while (tid > 0 && __atomic_load_n( & thread_flags[  0    ], __ATOMIC_ACQUIRE) < 3) {  }// wait for thread 0 to allocate the intermediate buffers
        for (idx_t ci = tbeg; ci < tend; ci++) {
            const idx_t max_nnz = assum_rpt[ci + 1] - assum_rpt[ci];
            idx_t * _cids = assum_cids + assum_rpt[ci];
            data_t* _vals = assum_vals + assum_rpt[ci];

            idx_t dst_off = 0;
            for (idx_t p = Rstr_rp[ci]; p < Rstr_rp[ci + 1]; p++) {// for every fine row that may restrict into this coarse row
                const idx_t fi = Rstr_ci[p];// fine row id
                const idx_t fnnz = fine_mat.row_ptr[fi + 1] - fine_mat.row_ptr[fi];
                const idx_t src_off = fine_mat.row_ptr[fi]; 
                for (idx_t j = 0; j < fnnz; j++) {
                    _cids[dst_off + j] = col_map_f2c [fine_mat.col_idx[src_off + j]];// map the column id from fine to coarse
                    _vals[dst_off + j] = fine_mat.vals[src_off + j];
                }
                dst_off += fnnz;
            } assert(dst_off == max_nnz);
            // detect and merge nonzeros that share the same column id
            bubble_sort<idx_t, data_t>(max_nnz, _cids, _vals);// sort by column id first
            idx_t uniq_cnt = uniquify<idx_t, data_t>(max_nnz, _cids, _vals);// then merge the duplicate entries

            tl_rpt[ci - tbeg + 1] = tl_rpt[ci - tbeg] + uniq_cnt;
            coar_mat->row_ptr[ci + 1] = uniq_cnt;// temporary; overwritten by the prefix sum below
            // coefficient = alpha / (number of fine rows aggregated into this coarse row)
            // NOTE(review): divides by zero if a coarse row aggregates no fine rows — presumably excluded upstream; verify
            rstr_coeff[ci] = alpha / (Rstr_rp[ci + 1] - Rstr_rp[ci]);
        }
        // at this point the intermediate result is complete

        while (tid > 0 && __atomic_load_n( & thread_flags[tid - 1], __ATOMIC_ACQUIRE) < 4) {  }// wait for the previous thread to publish its unique total (phase 4)
        prev_tsum = (tid > 0) ? thread_shared[tid - 1] : 0;
        thread_shared[tid] = prev_tsum + tl_rpt[tend - tbeg];// publish this thread's running total to the shared area
        __atomic_store_n( & thread_flags[tid], 4, __ATOMIC_RELEASE);

        for (idx_t ci = tbeg; ci < tend; ci++) {// fill the coarse-matrix row offsets of this thread's range
            coar_mat->row_ptr[ci + 1] = prev_tsum + tl_rpt[ci - tbeg + 1];
        }
        __atomic_store_n( & thread_flags[tid], 5, __ATOMIC_RELEASE);
        delete [] tl_rpt; tl_rpt = nullptr;
        // thread 0 allocates the storage of the coarse matrix
        if (tid == 0) {
            while (__atomic_load_n( & thread_flags[nt - 1], __ATOMIC_ACQUIRE) < 5) {  }// wait until the last thread has filled its offsets
            coar_mat->nnz = coar_mat->row_ptr[num_coar];
            coar_mat->col_idx = new idx_t [coar_mat->nnz];
            coar_mat->vals    = new data_t[coar_mat->nnz];
            __atomic_store_n( & thread_flags[tid], 6, __ATOMIC_RELEASE);
        }

        while (tid > 0 && __atomic_load_n( & thread_flags[tid - 1], __ATOMIC_ACQUIRE) < 5) {  }// wait for the previous thread to finish filling the coarse row offsets
        while (tid > 0 && __atomic_load_n( & thread_flags[  0    ], __ATOMIC_ACQUIRE) < 6) {  }// wait for thread 0 to allocate the storage
        // assemble the column indices and nonzeros of the coarse matrix row by row
        for (idx_t ci = tbeg; ci < tend; ci++) {
            const idx_t dst_off = coar_mat->row_ptr[ci],
                        src_off = assum_rpt[ci];
            const idx_t uniq_cnt = coar_mat->row_ptr[ci + 1] - dst_off;
            for (idx_t j = 0; j < uniq_cnt; j++) {
                idx_t tmp = assum_cids[src_off + j];
                coar_mat->col_idx[dst_off + j] = tmp;
                coar_mat->vals   [dst_off + j] = assum_vals[src_off + j] * rstr_coeff[ci];// scale by the restriction coefficient
            }
        }
    }// implicit sync

    delete [] assum_rpt ; assum_rpt  = nullptr;
    delete [] assum_cids; assum_cids = nullptr;
    delete [] assum_vals; assum_vals = nullptr;
    delete [] rstr_coeff; rstr_coeff = nullptr;
}

/// @brief Compute the coarse-grid matrix; note that the input and the output
///        here are both parallel (MPI-distributed) CSR matrices.
/// @tparam idx_t  index type
/// @tparam data_t floating-point type used for storage
/// @tparam calc_t floating-point type used for computation
/// @param fine_mat matrix of the fine level
/// @param num_coar number of coarse points owned by this process; it is also
///                 the number of rows of the generated coarse matrix
/// @param rstr_rpt row offsets of the restriction matrix
/// @param rstr_cid column indices of the restriction matrix
/// @param loc_map_f2c interpolation map: local fine row id -> local coarse row id
/// @param alpha scaling used when computing AC = R*AF*P; usually 1.0
/// @return the coarse-grid matrix (ownership passes to the caller)
template<typename idx_t, typename data_t, typename calc_t>
MyParCSRMatrix<idx_t, data_t, calc_t> * calc_coar_mat(const MyParCSRMatrix<idx_t, data_t, calc_t> & fine_mat,
    const idx_t num_coar, const idx_t * rstr_rpt, const idx_t * rstr_cid, const idx_t * loc_map_f2c, const calc_t alpha)
{
    int my_pid; MPI_Comm_rank(fine_mat.comm, &my_pid);
    int nprocs; MPI_Comm_size(fine_mat.comm, &nprocs);
    const MyCSRMatrix<idx_t, data_t, calc_t> & fine_diag = *(fine_mat.diag);// diagonal block of the fine matrix
    MyParCSRMatrix<idx_t, data_t, calc_t> * coar_mat = new MyParCSRMatrix<idx_t, data_t, calc_t>(fine_mat.comm);// coarse-grid matrix
    // compute the process partition of the coarse grid
    idx_t * & rows_partition = coar_mat->rows_partition;
    rows_partition = new idx_t [nprocs + 1];
    rows_partition[0] = 0;
    // const instead of constexpr: MPI_Datatype handles are not guaranteed to be
    // constant expressions on every MPI implementation (e.g. Open MPI defines
    // them as casts of global addresses)
    const MPI_Datatype mpi_idx_type = (sizeof(idx_t) == 4) ? MPI_INT : MPI_LONG_LONG;
    MPI_Allgather(&num_coar, 1, mpi_idx_type, rows_partition + 1, 1, mpi_idx_type, fine_mat.comm);
    for (int p = 1; p <= nprocs; p++) {
        rows_partition[p] += rows_partition[p - 1];// prefix sum: local counts -> global row offsets
    }
    coar_mat->beg_row   = rows_partition[my_pid    ];
    coar_mat->end_row   = rows_partition[my_pid + 1]; assert(coar_mat->end_row - coar_mat->beg_row == num_coar);
    coar_mat->glb_nrows = rows_partition[nprocs];
    // the diagonal and off-diagonal parts can be computed separately
    // the diagonal part first
    aggregate_mat<idx_t, data_t, calc_t>(fine_diag, num_coar, num_coar, rstr_rpt, rstr_cid, loc_map_f2c, coar_mat->diag, alpha);
    // then the off-diagonal part
    const MyCSRMatrix<idx_t, data_t, calc_t> & fine_offd = *(fine_mat.offd);// off-diagonal block of the fine matrix
    idx_t * remote_map_f2c = new idx_t [fine_offd.ncols];// for each off-diagonal column of the fine matrix: the global column it maps to on the coarse grid
    {
        // Each process tells its fine-grid neighbors the global coarse column
        // ids of the (locally owned) columns that make up the neighbor's halo.
        // The fine matrix's calc_t send buffer is reused as an idx_t buffer,
        // hence the size requirement below.
        static_assert(sizeof(idx_t) <= sizeof(calc_t));
        
        for (idx_t s = 0; s < fine_mat.num_sends; s++) {
            const idx_t offset = fine_mat.send_cnts[s],
                        num = fine_mat.send_cnts[s + 1] - offset;
            idx_t * buf = ( idx_t *)(fine_mat.send_buf     + offset);// send buffer
            const idx_t * glb_rows = fine_mat.send_row_ids + offset;
            #pragma omp parallel for schedule(static)
            for (idx_t k = 0; k < num; k++) {
                const idx_t loc_fid = glb_rows[k] - fine_mat.beg_row,// local fine point id
                            loc_cid = loc_map_f2c[loc_fid];// note: this is the LOCAL coarse point id
                buf[k] = loc_cid + coar_mat->beg_row;// convert the local coarse id into a global one
            }
            MPI_Isend(buf, num, mpi_idx_type, fine_mat.send_pids[s], 55, fine_mat.comm, fine_mat.send_reqs + s);
        }
        for (idx_t r = 0; r < fine_mat.num_recvs; r++) {
            const idx_t offset = fine_mat.recv_cnts[r],
                        num = fine_mat.recv_cnts[r + 1] - offset;
            idx_t * buf = remote_map_f2c + offset;// receive straight into the map
            MPI_Irecv(buf, num, mpi_idx_type, fine_mat.recv_pids[r], 55, fine_mat.comm, fine_mat.recv_reqs + r);
        }
        MPI_Waitall(fine_mat.num_sends, fine_mat.send_reqs, MPI_STATUSES_IGNORE);
        MPI_Waitall(fine_mat.num_recvs, fine_mat.recv_reqs, MPI_STATUSES_IGNORE);
        // remote_map_f2c now holds, for every original local off-diagonal
        // column of the fine matrix, its global column id in the coarse matrix.
        // Next, build the coarse matrix's col_map_offd (local -> global column
        // map). Two fine columns may map to the same coarse column, so the
        // ids must be deduplicated.

        const idx_t num = fine_offd.ncols;
        std::unordered_map<idx_t, idx_t> aux_map;
        // maps a global coarse column id to its local off-diagonal column id
        idx_t * arr = new idx_t [num];
        #pragma omp parallel for schedule(static)
        for (idx_t k = 0; k < num; k++)
            arr[k] = remote_map_f2c[k];
        // sort a copy of remote_map
        qsort(arr, num, sizeof(idx_t), cmpfunc<idx_t>);
        // merging the duplicates yields the column count of the coarse off-diagonal part
        const idx_t uniq_cnt = uniquify<idx_t, data_t>(num, arr);

        // build the off-diagonal column map of coar_mat
        assert(coar_mat->col_map_offd == nullptr);
        coar_mat->col_map_offd = new idx_t [uniq_cnt];
        for (idx_t j = 0; j < uniq_cnt; j++) {
            coar_mat->col_map_offd[j] = arr[j];
            aux_map.emplace(arr[j], j);// must stay single-threaded (map insertion)
        }
        // build the f2c map needed to coarsen the off-diagonal part; stored in arr
        #pragma omp parallel for schedule(static) // concurrent READS of the map are safe
        for (idx_t f_loc = 0; f_loc < num; f_loc++) {
            const idx_t c_glb = remote_map_f2c[f_loc];
            // Use find() instead of operator[]: operator[] would INSERT a
            // default-constructed entry if the key were ever missing — a data
            // race under OpenMP, and the guarding assert vanishes in NDEBUG builds.
            const auto hit = aux_map.find(c_glb);
            assert(hit != aux_map.end());
            arr[f_loc] = hit->second;
        }
        // with the localized f2c map ready, coarsen the off-diagonal block
        aggregate_mat<idx_t, data_t, calc_t>(fine_offd, num_coar, uniq_cnt, rstr_rpt, rstr_cid, arr, coar_mat->offd, alpha);
        delete [] arr; arr = nullptr;
    }
    // everything of the coarse matrix except the communication metadata is now ready
    // next step: build the communication metadata
    {
        const idx_t num_sends = fine_mat.num_sends,
                    num_recvs = fine_mat.num_recvs;
        // without cross-process coarsening, aggregation-type AMG keeps the
        // same set of neighbor processes
        // build the sender-side metadata
        {
            coar_mat->num_sends = num_sends;
            coar_mat->send_pids = new int   [num_sends];
            coar_mat->send_cnts = new idx_t [num_sends + 1];
            idx_t * write_buf = new idx_t [fine_mat.send_cnts[num_sends]];// sized for the worst case
            // derive the coarse metadata from the fine metadata
            coar_mat->send_cnts[0] = 0;
            #pragma omp parallel for schedule(static)
            for (idx_t s = 0; s < num_sends; s ++) {
                coar_mat->send_pids[s] = fine_mat.send_pids[s];// the neighbor rank stays the same
                const idx_t offset = fine_mat.send_cnts[s];
                const idx_t * f_glbs = fine_mat.send_row_ids + offset;
                idx_t * dst_buf = write_buf + offset;

                const idx_t f_num = fine_mat.send_cnts[s + 1] - offset;
                for (idx_t fk = 0; fk < f_num; fk++) {
                    const idx_t f_loc = f_glbs[fk] - fine_mat.beg_row,// local id on the fine grid
                                c_loc = loc_map_f2c[f_loc];// local id on the coarse grid
                    dst_buf[fk] = c_loc + coar_mat->beg_row;// convert to a global coarse id
                }
                qsort(dst_buf, f_num, sizeof(idx_t), cmpfunc<idx_t>);
                const idx_t uniq_cnt = uniquify<idx_t, data_t>(f_num, dst_buf);// several fine-level message targets may merge into one
                coar_mat->send_cnts[s + 1] = uniq_cnt;
            }
            
            for (idx_t s = 0; s < num_sends; s ++)// single-threaded prefix sum
                coar_mat->send_cnts[s + 1] += coar_mat->send_cnts[s];
            coar_mat->send_row_ids = new idx_t [coar_mat->send_cnts[num_sends]];
            
            #pragma omp parallel for schedule(static)
            for (idx_t s = 0; s < num_sends; s ++) {
                const idx_t f_offset = fine_mat.send_cnts[s],
                            c_offset = coar_mat->send_cnts[s];
                const idx_t c_num = coar_mat->send_cnts[s + 1] - c_offset;
                for (idx_t j = 0; j < c_num; j++)
                    coar_mat->send_row_ids[c_offset + j] = write_buf[f_offset + j];
            }
            delete [] write_buf; write_buf = nullptr;
        }
        // build the receiver-side metadata
        {
            coar_mat->num_recvs = num_recvs;
            coar_mat->recv_pids = new int   [num_recvs];
            coar_mat->recv_cnts = new idx_t [num_recvs + 1];
            // derive the coarse metadata from the fine metadata
            coar_mat->recv_cnts[0] = 0;
            assert(fine_offd.ncols == fine_mat.recv_cnts[num_recvs]);
            #pragma omp parallel for schedule(static)
            for (idx_t r = 0; r < num_recvs; r++) {
                coar_mat->recv_pids[r] = fine_mat.recv_pids[r];// the neighbor rank stays the same
                const idx_t offset = fine_mat.recv_cnts[r];
                const idx_t * c_glbs = remote_map_f2c + offset;// global coarse point ids
                const idx_t f_num = fine_mat.recv_cnts[r + 1] - offset;
                // heap allocation instead of a VLA: VLAs are non-standard C++
                // and a large halo could overflow the thread stack
                idx_t * _poss = new idx_t [f_num];
                for (idx_t j = 0; j < f_num; j++)
                    _poss[j] = c_glbs[j];
                qsort(_poss, f_num, sizeof(idx_t), cmpfunc<idx_t>);
                const idx_t uniq_cnt = uniquify<idx_t, data_t>(f_num, _poss);// several fine-level message sources may merge into one
                delete [] _poss;
                coar_mat->recv_cnts[r + 1] = uniq_cnt;
            }
            for (idx_t r = 0; r < num_recvs; r++)// single-threaded prefix sum
                coar_mat->recv_cnts[r + 1] += coar_mat->recv_cnts[r];
        }
        // allocate the communication handles and the send buffer
        coar_mat->send_reqs = new MPI_Request [num_sends];
        coar_mat->recv_reqs = new MPI_Request [num_recvs];
        coar_mat->send_buf = new calc_t [coar_mat->send_cnts[num_sends]];
    }
    delete [] remote_map_f2c; remote_map_f2c = nullptr;
    return coar_mat;
}

#endif