#ifndef UnstructMG_PARCSR_MAT_OP
#define UnstructMG_PARCSR_MAT_OP

#include "par_CSRMatrix.hpp"
#include "csr_matop.hpp"

/**
 * @brief dst = transpose(src) for a distributed block-CSR matrix.
 *
 * The diagonal block is transposed locally (optionally skipped via
 * need2trans_diag).  Off-diagonal entries are transposed locally, shipped to
 * the owning neighbor processes as (global row, global col, dof*dof values)
 * COO triplets (tag 501 carries the count, tag 502 the triplets), then
 * reassembled here into dst.offd.  A new communication package for dst is
 * built by reversing src's send/recv pattern.
 *
 * @param src  input matrix; src.commpkg must exist
 * @param dst  output matrix — partitions, diag (optional), offd,
 *             col_map_offd and commpkg are all (re)built here
 * @param need2trans_diag  if false, dst.diag is left untouched
 */
template<typename idx_t,    typename data_t1, typename calc_t1,
                            typename data_t2, typename calc_t2, int dof>
void par_CSRMatrixTranspose(par_CSRMatrix<idx_t, data_t1, calc_t1, dof> & src,
                            par_CSRMatrix<idx_t, data_t2, calc_t2, dof> & dst,
                            bool need2trans_diag=true)
{
    assert(src.commpkg);
    // FIX: `const`, not `constexpr`. MPI predefined datatype handles are
    // integer constants only under MPICH; Open MPI defines them as addresses
    // of library globals, which are not constant expressions.
    const MPI_Datatype mpi_idx_type = (sizeof(idx_t) == 8) ? MPI_LONG_LONG : MPI_INT;
    constexpr int e_size = dof*dof;// entries per dof x dof block

    // Transposition swaps the row and column partitions.
    dst.comm           = src.comm;
    dst.rows_partition = src.cols_partition;
    dst.cols_partition = src.rows_partition;
    dst.calc_ranges();

    if (need2trans_diag) {
        // The diagonal block is purely local: transpose it directly.
        CSRMatrixTranspose(src.diag, dst.diag);
    }

    CSRMatrix<idx_t, data_t2, calc_t2, dof> src_offd_trans;
    CSRMatrixTranspose(src.offd, src_offd_trans);

    {// Build the communication package of the transposed matrix.
        dst.owns_commpkg = true;
        dst.commpkg = new par_CSRCommPkg<idx_t>;
        // The numbers of sent and received messages are swapped.
        dst.commpkg->comm       = src.commpkg->comm;
        dst.commpkg->num_recvs  = src.commpkg->num_sends;
        dst.commpkg->num_sends  = src.commpkg->num_recvs;
        dst.commpkg->recv_reqs  = new MPI_Request [dst.commpkg->num_recvs];
        dst.commpkg->send_reqs  = new MPI_Request [dst.commpkg->num_sends];
        // The peer ranks for sends and recvs are swapped as well.
        dst.commpkg->recv_pids = new int [dst.commpkg->num_recvs];
        dst.commpkg->send_pids = new int [dst.commpkg->num_sends];
        for (int p = 0; p < dst.commpkg->num_recvs; p++)
            dst.commpkg->recv_pids[p] = src.commpkg->send_pids[p];
        for (int p = 0; p < dst.commpkg->num_sends; p++)
            dst.commpkg->send_pids[p] = src.commpkg->recv_pids[p];
        // Send positions and counts can be derived locally from src's recv pattern.
        dst.commpkg->send_map_starts = new idx_t [dst.commpkg->num_sends + 1];
        dst.commpkg->send_map_starts[0] = 0;
        std::vector<idx_t> buf;
        idx_t * send_nnzs = new idx_t [dst.commpkg->num_sends];
        idx_t * recv_nnzs = new idx_t [dst.commpkg->num_recvs];
        std::vector<COO<idx_t, data_t2, dof> > * rec_cid_nz = new std::vector<COO<idx_t, data_t2, dof> > [dst.commpkg->num_sends];
        std::vector<MPI_Request> send_reqs;
        
        for (int s = 0; s < dst.commpkg->num_sends; s ++) {
            rec_cid_nz[s].clear();
            send_nnzs[s] = 0;
            std::set<idx_t> uniquified_cids;
            for (idx_t  recv_cid = src.commpkg->recv_vec_starts[s];// local column ids src used to receive from this peer
                        recv_cid < src.commpkg->recv_vec_starts[s + 1]; recv_cid ++) {
                const idx_t send_rid = recv_cid;// becomes the row id after transposition
                for (idx_t k = src_offd_trans.row_ptr[send_rid]; k < src_offd_trans.row_ptr[send_rid + 1]; k ++) {
                    COO<idx_t, data_t2, dof> tmp;
                    tmp.r = src.col_map_offd[recv_cid];// global row id
                    tmp.c = dst.beg_col + src_offd_trans.col_idx[k];// global column id
                    for (int f = 0; f < e_size; f++)
                        tmp.v[f] = src_offd_trans.vals[k * e_size + f];

                    rec_cid_nz[s].emplace_back(tmp);
                    uniquified_cids.emplace(src_offd_trans.col_idx[k]);// local column id
                }
                send_nnzs[s] += (src_offd_trans.row_ptr[send_rid + 1] - src_offd_trans.row_ptr[send_rid]);
            }
            // std::set iteration is ordered, so send_map_elmts stays sorted per peer.
            for (auto it = uniquified_cids.begin(); it != uniquified_cids.end(); it ++) {
                buf.push_back(*it);
            }
            dst.commpkg->send_map_starts[s + 1] = buf.size();

            MPI_Request req_num, req_dat;
            MPI_Isend(& send_nnzs[s], 1, mpi_idx_type,
                dst.commpkg->send_pids[s], 501, dst.commpkg->comm, & req_num);

            assert(send_nnzs[s] == (idx_t) rec_cid_nz[s].size());
            MPI_Isend(rec_cid_nz[s].data(), rec_cid_nz[s].size() * sizeof(COO<idx_t, data_t2, dof>), MPI_BYTE,
                dst.commpkg->send_pids[s], 502, dst.commpkg->comm, & req_dat);
            send_reqs.emplace_back(req_num);
            send_reqs.emplace_back(req_dat);
        }
        dst.commpkg->send_map_elmts  = new idx_t [buf.size()];
        for (size_t j = 0; j < buf.size(); j++)
            dst.commpkg->send_map_elmts[j] = buf[j];
        // The recv counts and positions are unknown locally: obtain them via MPI.

        for (idx_t r = 0; r < dst.commpkg->num_recvs; r ++) {
            MPI_Irecv(& recv_nnzs[r], 1, mpi_idx_type,
                dst.commpkg->recv_pids[r], 501, dst.commpkg->comm, & dst.commpkg->recv_reqs[r]);
        }
        MPI_Waitall(dst.commpkg->num_recvs, dst.commpkg->recv_reqs, MPI_STATUSES_IGNORE);
        
        idx_t dst_offd_nnz = 0;
        for (idx_t r = 0; r < dst.commpkg->num_recvs; r ++) {
            dst_offd_nnz += recv_nnzs[r];
        }

        dst.offd.nrows = src.diag.ncols;
        dst.offd.ncols = -1;// determined below, once the column map is built
        dst.offd.nnz   = dst_offd_nnz;
        dst.offd.alloc_mem();// row_ptr is zero-initialized by alloc_mem
        dst.offd.row_ptr[0] = 0;

        COO<idx_t, data_t2, dof> * recv_buf = new COO<idx_t, data_t2, dof> [dst.offd.nnz];
        for (idx_t r = 0, cnt = 0; r < dst.commpkg->num_recvs; r ++) {
            MPI_Irecv(recv_buf + cnt, recv_nnzs[r] * sizeof(COO<idx_t, data_t2, dof>), MPI_BYTE,
                dst.commpkg->recv_pids[r], 502, dst.commpkg->comm, & dst.commpkg->recv_reqs[r]);
            cnt += recv_nnzs[r];
        }
        MPI_Waitall(dst.commpkg->num_recvs, dst.commpkg->recv_reqs, MPI_STATUSES_IGNORE);

        // Sort the triplets into (row, col) order so the CSR arrays can be
        // filled by a single linear pass.
        std::sort(recv_buf, recv_buf + dst.offd.nnz);

        std::unordered_map<idx_t, idx_t> glb2loc;// global -> local column map of the transposed offd
        for (idx_t j = 0; j < dst.offd.nnz; j++) {
            const idx_t glb_r = recv_buf[j].r,
                        glb_c = recv_buf[j].c;
            assert(dst.beg_row <= glb_r && glb_r < dst.end_row);

            dst.offd.row_ptr[glb_r - dst.beg_row + 1] ++;
            dst.offd.col_idx[j] = glb_c;// stash the GLOBAL column id for now
            for (int f = 0; f < e_size; f++)
                dst.offd.vals[j * e_size + f] = recv_buf[j].v[f];
            
            glb2loc.emplace(glb_c, -1);
        }

        // Prefix-sum the per-row counts into row pointers.
        for (idx_t i = 0; i < dst.offd.nrows; i++) {
            dst.offd.row_ptr[i + 1] += dst.offd.row_ptr[i];
        }
        assert(dst.offd.row_ptr[dst.offd.nrows] == dst_offd_nnz);

        dst.offd.ncols = glb2loc.size();
        dst.owns_map_offd = true;
        dst.col_map_offd = new idx_t [dst.offd.ncols];
        idx_t cnt = 0;
        for (auto it = glb2loc.begin(); it != glb2loc.end(); it ++) {
            dst.col_map_offd[cnt ++] = it->first;
        }
        assert(cnt == dst.offd.ncols);
        std::sort(dst.col_map_offd, dst.col_map_offd + cnt);
        for (idx_t loc_j = 0; loc_j < dst.offd.ncols; loc_j++) {
            const idx_t glb_j = dst.col_map_offd[loc_j];
            glb2loc[glb_j] = loc_j;
        }
        // Rewrite the stashed global column ids as local ones.
        for (idx_t j = 0; j < dst.offd.nnz; j++) {
            const idx_t glb_c = dst.offd.col_idx[j];
            dst.offd.col_idx[j] = glb2loc[glb_c];
        }

        // Recv positions follow from the sorted column map and the column
        // partition: each peer owns a contiguous global-column range.
        dst.commpkg->recv_vec_starts = new idx_t [dst.commpkg->num_recvs + 1];
        dst.commpkg->recv_vec_starts[0] = 0;
        cnt = 0;
        for (idx_t r = 0; r < dst.commpkg->num_recvs; r ++) {
            const int pid = dst.commpkg->recv_pids[r];
            const idx_t cbeg = dst.cols_partition[pid], cend = dst.cols_partition[pid + 1];
            assert(cnt == dst.offd.ncols || cbeg <= dst.col_map_offd[cnt]);
            while (cnt < dst.offd.ncols && dst.col_map_offd[cnt] < cend) cnt ++;

            dst.commpkg->recv_vec_starts[r + 1] = cnt;
        }
        assert(cnt == dst.offd.ncols);

        // Sends must complete before their buffers (send_nnzs, rec_cid_nz) die.
        MPI_Waitall(send_reqs.size(), send_reqs.data(), MPI_STATUSES_IGNORE);
        delete [] send_nnzs; delete [] recv_nnzs;
        delete [] rec_cid_nz;
        delete [] recv_buf;
    }
}

/**
 * @brief Extract "B_ext": the rows of the distributed matrix `src` (B) that
 *        live on neighbor processes but are referenced through the
 *        off-diagonal (halo) columns of `tmpl` (A) — i.e. the remote rows
 *        needed locally for a product A*B.  The communication pattern is
 *        taken entirely from tmpl's commpkg.
 * @param tmpl  matrix supplying the commpkg / halo pattern
 * @param src   matrix whose remote rows are fetched
 * @param dst   OUTPUT: a *sequential* CSR matrix with one row per halo
 *              column of tmpl; column indices are GLOBAL.  dst takes
 *              ownership of the buffers allocated here.
 * @param want_data  NOTE(review): ignored in this body — values are always
 *              exchanged; confirm intended semantics against callers.
 */
template<typename idx_t,    typename data_t, typename calc_t, int dof1, int dof2>
void par_CSRMatrixExtractBExt(  const par_CSRMatrix<idx_t, data_t, calc_t, dof1> & tmpl,
                                const par_CSRMatrix<idx_t, data_t, calc_t, dof2> & src, 
                                CSRMatrix<idx_t, data_t, calc_t, dof2> & dst,// NOTE: dst is a plain CSR matrix, not a par_CSRMatrix
                                bool want_data)
{
    // constexpr MPI_Datatype mpi_idx_type = (sizeof(idx_t) == 8) ? MPI_LONG_LONG : MPI_INT;
    constexpr int e_size = dof2*dof2;
    const par_CSRCommPkg<idx_t> * commpkg = tmpl.commpkg;
    const idx_t offd_ncols = tmpl.offd.ncols;
    if (commpkg == nullptr) { assert(offd_ncols == 0);
        // No neighbors: return an empty (but allocated) matrix.
        dst.nrows = 0;
        dst.ncols = 0;
        dst.nnz   = 0;
        dst.alloc_mem();
        return ;
    }

    const idx_t num_sends = commpkg->num_sends,
                num_recvs = commpkg->num_recvs;
    const idx_t nrows_send = commpkg->send_map_starts[num_sends],
                nrows_recv = commpkg->recv_vec_starts[num_recvs];
    assert(offd_ncols == nrows_recv);

    idx_t * send_nnzs = new idx_t [nrows_send    ];// nnz count of each row to be sent
    idx_t * recv_nnzs = new idx_t [nrows_recv + 1];// nnz count of each received row, offset by 1 so a prefix sum turns it into row_ptr in place
    idx_t tot_send_nnz = 0;
    for (idx_t sr = 0; sr < nrows_send; sr ++) {
        const idx_t i = commpkg->send_map_elmts[sr];// which local row of this process to send
        // A full row of B is its diag part plus its offd part.
        send_nnzs[sr] = src.diag.row_ptr[i + 1] - src.diag.row_ptr[i]
                    +   src.offd.row_ptr[i + 1] - src.offd.row_ptr[i];
        tot_send_nnz += send_nnzs[sr];
    }
    // Start the nonblocking exchange of per-row nnz counts (1 value per row).
    commpkg->init_update_halo(1, send_nnzs, recv_nnzs + 1);

    // prepare data to send out
    idx_t * send_rpts = new idx_t [nrows_send + 1]; send_rpts[0] = 0;
    idx_t * send_cids = new idx_t [tot_send_nnz];
    data_t* send_vals = new data_t[tot_send_nnz * e_size];
    for (idx_t i = 0; i < nrows_send; i++) {
        send_rpts[i + 1] = send_rpts[i] + send_nnzs[i];
    }
    assert(send_rpts[nrows_send] == tot_send_nnz);

    idx_t * send_rpts_procs = new idx_t [num_sends + 1];// pointers to each proc in send_cids
    for (idx_t s = 0; s < num_sends + 1; s ++) {
        send_rpts_procs[s] = send_rpts[ commpkg->send_map_starts[s] ];
    }
    assert(send_rpts_procs[num_sends] == tot_send_nnz);

    // fill the CSR matrix: cid and val (column ids are globalized here)
    #pragma omp parallel for schedule(static)
    for (idx_t sr = 0; sr < nrows_send; sr ++) {
        const idx_t i = commpkg->send_map_elmts[sr];// which local row of this process to send
        idx_t offset = send_rpts[sr];// write offset into send_cids/send_vals
        for (idx_t p = src.diag.row_ptr[i]; p < src.diag.row_ptr[i + 1]; p++) {
            send_cids[offset] = src.beg_col + src.diag.col_idx[p];// diag: local -> global via column offset
            for (int f = 0; f < e_size; f++)
                send_vals[offset * e_size + f] = src.diag.vals[p * e_size + f];
            offset ++;
        }
        for (idx_t p = src.offd.row_ptr[i]; p < src.offd.row_ptr[i + 1]; p++) {
            send_cids[offset] = src.col_map_offd[src.offd.col_idx[p]];// offd: local -> global via column map
            for (int f = 0; f < e_size; f++)
                send_vals[offset * e_size + f] = src.offd.vals[p * e_size + f];
            offset ++;
        }
        assert(offset == send_rpts[sr + 1]);
    }

    // overlap comm.: wait for the nnz-count exchange started above
    commpkg->wait_update_halo();
    // adjust recv_nnzs to ptr (in-place prefix sum -> row pointers of dst)
    recv_nnzs[0] = 0;
    for (idx_t ri = 0; ri < nrows_recv; ri ++) {
        recv_nnzs[ri + 1] += recv_nnzs[ri]; 
    }
    const idx_t tot_recv_nnz = recv_nnzs[nrows_recv];
    
    idx_t * recv_cids = new idx_t [tot_recv_nnz];
    data_t* recv_vals = new data_t[tot_recv_nnz * e_size];
    idx_t * recv_rpts_procs = new idx_t [num_recvs + 1]; recv_rpts_procs[0] = 0;// per-peer offsets into recv buffers
    for (idx_t r = 1; r <= num_recvs; r++) {
        const idx_t j = commpkg->recv_vec_starts[r];
        recv_rpts_procs[r] = recv_nnzs[j];
    }

    // One (cids, vals) message pair per peer; raw bytes, matched by tags 1031/1032.
    std::vector<MPI_Request> send_reqs, recv_reqs;
    for (idx_t s = 0; s < num_sends; s ++) {
        MPI_Request req_cid, req_val;
        MPI_Isend(send_cids + send_rpts_procs[s]         , (send_rpts_procs[s + 1] - send_rpts_procs[s]) * sizeof(idx_t), MPI_BYTE,
            commpkg->send_pids[s], 1031, commpkg->comm, & req_cid);
        MPI_Isend(send_vals + send_rpts_procs[s] * e_size, (send_rpts_procs[s + 1] - send_rpts_procs[s]) * e_size * sizeof(data_t), MPI_BYTE,
            commpkg->send_pids[s], 1032, commpkg->comm, & req_val);
        send_reqs.emplace_back(req_cid);
        send_reqs.emplace_back(req_val);
    }
    for (idx_t r = 0; r < num_recvs; r ++) {
        MPI_Request req_cid, req_val;
        MPI_Irecv(recv_cids + recv_rpts_procs[r]         , (recv_rpts_procs[r + 1] - recv_rpts_procs[r]) * sizeof(idx_t), MPI_BYTE,
            commpkg->recv_pids[r], 1031, commpkg->comm, & req_cid);
        MPI_Irecv(recv_vals + recv_rpts_procs[r] * e_size, (recv_rpts_procs[r + 1] - recv_rpts_procs[r]) * e_size * sizeof(data_t), MPI_BYTE,
            commpkg->recv_pids[r], 1032, commpkg->comm, & req_val);
        recv_reqs.emplace_back(req_cid);
        recv_reqs.emplace_back(req_val);
    }

    // wait comm.
    // dst adopts the recv buffers (row_ptr/col_idx/vals) — do NOT free them here.
    dst.nrows = nrows_recv;
    dst.ncols = src.glb_ncols;// column indices stay GLOBAL in dst
    dst.nnz   = tot_recv_nnz;
    dst.row_ptr = recv_nnzs;
    dst.col_idx = recv_cids;
    dst.vals    = recv_vals;

    // send_nnzs was only needed for init_update_halo (completed above);
    // send_rpts is no longer referenced by any pending message.
    delete [] send_nnzs;
    delete [] send_rpts;

    MPI_Waitall(recv_reqs.size(), recv_reqs.data(), MPI_STATUSES_IGNORE);
    MPI_Waitall(send_reqs.size(), send_reqs.data(), MPI_STATUSES_IGNORE);

    // Safe to free send payloads only after the send Waitall above.
    delete [] send_rpts_procs;
    delete [] send_cids; delete [] send_vals;
    delete [] recv_rpts_procs;
}


/**
 * @brief Distributed sparse matrix-matrix product C = A * B (SpGEMM).
 *
 * Steps: (1) fetch B_ext — the remote rows of B referenced by A's halo
 * columns; (2) split B_ext by B's owned column range into a diag-range part
 * and an offd-range part, merging column maps into C's; (3) form the four
 * local products A.diag*B.diag, A.diag*B.offd, A.offd*Bext_diag,
 * A.offd*Bext_offd; (4) remap AB_offd's columns into C's column space and
 * add the pieces; (5) build C's communication package.
 *
 * dof constraints: block sizes must match or one operand must be scalar
 * (dof==1); the result block size is the larger of the two.
 */
template<typename idx_t,    typename data_t, typename calc_t, int dof1, int dof2, int dof3>
void par_CSRMatrixMultiply( const par_CSRMatrix<idx_t, data_t, calc_t, dof1> & A,
                            const par_CSRMatrix<idx_t, data_t, calc_t, dof2> & B,
                            par_CSRMatrix<idx_t, data_t, calc_t, dof3> & C)
{
    static_assert(dof1 == dof2 || dof1 == 1 || dof2 == 1);
    static_assert(dof3 == (MAX(dof1, dof2)));
    assert(A.glb_ncols  == B.glb_nrows);// global sizes must match
    assert(A.diag.ncols == B.diag.nrows);// local sizes must match as well

    // Extract B_ext, i.e. portion of B that is stored on neighbor procs
    // and needed locally for matrix matrix product
    CSRMatrix<idx_t, data_t, calc_t, dof2> Bs_ext, Bext_diag, Bext_offd;
    par_CSRMatrixExtractBExt(A, B, Bs_ext, true);
#ifdef DEBUG
    int my_pid; MPI_Comm_rank(A.comm, & my_pid);
    int num_procs; MPI_Comm_size(A.comm, & num_procs);
    for (int p = 0; p < num_procs; p++) {
        if (my_pid == p) {
            printf("Proc %d Be_ext nrows %d ncols %d nnz %d\n", my_pid, Bs_ext.nrows, Bs_ext.ncols, Bs_ext.nnz);
            for (idx_t i = 0; i < Bs_ext.nrows; i++) {
                printf("  %d : ", i);
                for (idx_t p = Bs_ext.row_ptr[i]; p < Bs_ext.row_ptr[i + 1]; p++)
                    printf("%d %.3f ", Bs_ext.col_idx[p], Bs_ext.vals[p]);
                printf("\n");
            }
        }
        MPI_Barrier(A.comm);
    }
    MPI_Barrier(A.comm);
#endif
    // Split Bs_ext (global column ids) by B's owned column range
    // [B.beg_col, B.end_col): inside -> Bext_diag (localized columns),
    // outside -> Bext_offd.  C's column map is produced here by merging
    // B's map with the new columns found in Bs_ext.
    idx_t & C_offd_ncols = C.offd.ncols;
    C_offd_ncols = 0;
    C.owns_map_offd = true; assert(C.col_map_offd == nullptr);
    CSRMatrixSplit(Bs_ext, B.beg_col, B.end_col, B.offd.ncols, B.col_map_offd,
        C_offd_ncols, C.col_map_offd, Bext_diag, Bext_offd);
#ifdef DEBUG
    for (int p = 0; p < num_procs; p++) {
        if (my_pid == p) {
            printf("Proc %d Bext_diag nrows %d ncols %d nnz %d C_offd_ncols %d\n",
                my_pid, Bext_diag.nrows, Bext_diag.ncols, Bext_diag.nnz, C_offd_ncols);
            for (idx_t i = 0; i < Bext_diag.nrows; i++) {
                printf("  %d : ", i);
                for (idx_t p = Bext_diag.row_ptr[i]; p < Bext_diag.row_ptr[i + 1]; p++)
                    printf("%d %.3f ", Bext_diag.col_idx[p], Bext_diag.vals[p]);
                printf("\n");
            }
            printf("Proc %d Bext_offd nrows %d ncols %d nnz %d C_offd_ncols %d\n",
                my_pid, Bext_offd.nrows, Bext_offd.ncols, Bext_offd.nnz, C_offd_ncols);
            for (idx_t i = 0; i < Bext_offd.nrows; i++) {
                printf("  %d : ", i);
                for (idx_t p = Bext_offd.row_ptr[i]; p < Bext_offd.row_ptr[i + 1]; p++)
                    printf("%d %.3f ", Bext_offd.col_idx[p], Bext_offd.vals[p]);
                printf("\n");
            }
        }
        MPI_Barrier(A.comm);
    }
#endif
    CSRMatrix<idx_t, data_t, calc_t, dof3> AB_diag, AB_offd, ABext_diag, ABext_offd;
    // These are local and could be overlapped with communication
    CSRMatrixMultiply(A.diag, B.diag, AB_diag);
    CSRMatrixMultiply(A.diag, B.offd, AB_offd);
    // These require data from other processes
    CSRMatrixMultiply(A.offd, Bext_diag, ABext_diag);
    CSRMatrixMultiply(A.offd, Bext_offd, ABext_offd);

    if (B.offd.ncols > 0) {
        // Build map_B2C: B's local offd column -> C's local offd column.
        // Relies on both maps being sorted and C's being a superset of B's.
        idx_t * map_B2C = new idx_t [B.offd.ncols], cnt = 0;
        for (idx_t i = 0; i < C_offd_ncols; i ++) {
            if (C.col_map_offd[i] == B.col_map_offd[cnt]) {
                map_B2C[cnt ++] = i;
                if (cnt == B.offd.ncols) { break; }
            }
        }

        for (idx_t p = 0; p < AB_offd.nnz; p ++) {// remap column ids so the addition below can align entries
            AB_offd.col_idx[p] = map_B2C[AB_offd.col_idx[p]];
        }
        delete [] map_B2C; map_B2C = nullptr;
    } else {
        assert(B.offd.ncols == 0);
        assert(AB_offd.nnz == 0);
    }

    // Normalize column counts so each addition operates on matching shapes.
    AB_diag   .ncols = B.diag.ncols;
    ABext_diag.ncols = B.diag.ncols;
    AB_offd   .ncols = C_offd_ncols;
    ABext_offd.ncols = C_offd_ncols;

    CSRMatrixAdd<idx_t, data_t, calc_t, dof3>(1.0, AB_diag, 1.0, ABext_diag, C.diag);
    CSRMatrixAdd<idx_t, data_t, calc_t, dof3>(1.0, AB_offd, 1.0, ABext_offd, C.offd);

    // Build the communication package of the result matrix.
    C.owns_commpkg = true;
    C.commpkg = new par_CSRCommPkg<idx_t>;
    C.commpkg->comm = C.comm;
    C.commpkg->prepare(C_offd_ncols, C.col_map_offd, C.cols_partition);
}


/**
 * @brief Move a parallel CSR matrix into (possibly) different storage/compute
 *        types.  Structural arrays (row_ptr/col_idx), partitions, column map
 *        and communication packages are STOLEN from src_mat (ownership flags
 *        handed over, pointers nulled); only the values are re-allocated and
 *        copied element-wise, converting data_t1 -> data_t2.
 * @param src_mat  donor matrix; left without its structural arrays afterwards
 * @param dst_mat  OUTPUT: must be nullptr on entry; freshly allocated here
 *                 (stays nullptr when src_mat is nullptr)
 */
template<typename idx_t,    typename data_t1, typename calc_t1,
                            typename data_t2, typename calc_t2, int dof>
void par_CSRMatrixTransfer( par_CSRMatrix<idx_t, data_t1, calc_t1, dof> * src_mat,
                            par_CSRMatrix<idx_t, data_t2, calc_t2, dof> * & dst_mat)
{
    assert(dst_mat == nullptr);
    if (src_mat == nullptr) return ;

    dst_mat = new par_CSRMatrix<idx_t, data_t2, calc_t2, dof>(src_mat->comm, src_mat->rows_partition, src_mat->cols_partition);

    // Hand an ownership flag over: dst becomes the owner, src gives it up.
    auto take_flag = [](bool & owner_bit) { const bool was = owner_bit; owner_bit = false; return was; };
    dst_mat->owns_row_partits = take_flag(src_mat->owns_row_partits);
    dst_mat->owns_col_partits = take_flag(src_mat->owns_col_partits);

    dst_mat->col_map_offd  = src_mat->col_map_offd;
    dst_mat->owns_map_offd = take_flag(src_mat->owns_map_offd);

    dst_mat->commpkg                = src_mat->commpkg;
    dst_mat->owns_commpkg           = take_flag(src_mat->owns_commpkg);
    dst_mat->commpkg_transpose      = src_mat->commpkg_transpose;
    dst_mat->owns_commpkg_transpose = take_flag(src_mat->owns_commpkg_transpose);

    // Steal the sparsity pattern of one CSR part and allocate fresh values
    // in the destination's value type (src keeps its own vals array).
    auto grab_pattern = [](auto & from, auto & to) {
        to.nrows   = from.nrows;
        to.ncols   = from.ncols;
        to.nnz     = from.nnz;
        to.row_ptr = from.row_ptr; from.row_ptr = nullptr;
        to.col_idx = from.col_idx; from.col_idx = nullptr;
        to.vals    = new data_t2 [to.nnz * dof*dof];
    };
    grab_pattern(src_mat->diag, dst_mat->diag);
    grab_pattern(src_mat->offd, dst_mat->offd);

    // Convert the values in parallel; nowait lets threads flow from the
    // diag copy straight into the offd copy.
    const idx_t diag_len = dst_mat->diag.nnz * dof*dof;
    const idx_t offd_len = dst_mat->offd.nnz * dof*dof;
    #pragma omp parallel
    {
        #pragma omp for schedule(static) nowait
        for (idx_t e = 0; e < diag_len; e ++)
            dst_mat->diag.vals[e] = src_mat->diag.vals[e];

        #pragma omp for schedule(static)
        for (idx_t e = 0; e < offd_len; e ++)
            dst_mat->offd.vals[e] = src_mat->offd.vals[e];
    }
}

#endif