#include "par_CSRMatrix.hpp"

#include <unordered_map>
#include <vector>

template<typename idx_t, typename data_t, typename calc_t, int dof>
par_CSRMatrix<idx_t, data_t, calc_t, dof>::par_CSRMatrix(MPI_Comm _comm) : comm(_comm)
{
    // Minimal constructor: only binds the MPI communicator. Sizes, partition
    // arrays and matrix data are filled in later (see the other constructors
    // and the set_values_* methods).
}

template<typename idx_t, typename data_t, typename calc_t, int dof>
par_CSRMatrix<idx_t, data_t, calc_t, dof>::par_CSRMatrix(MPI_Comm _comm,
    const idx_t _glb_nrows, const idx_t _beg_row, const idx_t _end_row,
    const idx_t _glb_ncols, const idx_t _beg_col, const idx_t _end_col)
    : comm(_comm),  glb_nrows(_glb_nrows), beg_row(_beg_row), end_row(_end_row),
                    glb_ncols(_glb_ncols), beg_col(_beg_col), end_col(_end_col)
{
    // Build the row/column partition arrays (length num_procs + 1) by
    // gathering every rank's (beg_row, beg_col); entry p holds the first
    // row/column owned by rank p and the sentinel entry holds the global size.
    int num_procs; MPI_Comm_size(comm, & num_procs);

    owns_row_partits = owns_col_partits = true;
    rows_partition = new idx_t [num_procs + 1];
    cols_partition = new idx_t [num_procs + 1];

    idx_t my_row_col[2] = { beg_row, beg_col };
    // std::vector instead of a variable-length array: VLAs are not standard C++.
    std::vector<idx_t> all_row_cols(2 * (size_t) num_procs);
    // NOTE: must not be constexpr — predefined MPI datatypes are runtime
    // handles (not constant expressions) on some MPI implementations.
    const MPI_Datatype etype = (sizeof(idx_t) == 4) ? MPI_INT : MPI_LONG_LONG;
    MPI_Allgather(my_row_col, 2, etype, all_row_cols.data(), 2, etype, comm);

    for (int p = 0; p < num_procs; p++) {
        rows_partition[p] = all_row_cols[p * 2 + 0];
        cols_partition[p] = all_row_cols[p * 2 + 1];
    }
    rows_partition[num_procs] = glb_nrows;
    cols_partition[num_procs] = glb_ncols;
}

template<typename idx_t, typename data_t, typename calc_t, int dof>
par_CSRMatrix<idx_t, data_t, calc_t, dof>::par_CSRMatrix(MPI_Comm _comm,
    idx_t * _rows_partition, idx_t * _cols_partition)
    : comm(_comm), rows_partition(_rows_partition), cols_partition(_cols_partition)
{
    // Adopts caller-provided partition arrays without copying. The ownership
    // flags are NOT set here, so the destructor will not free these arrays —
    // presumably owns_row_partits/owns_col_partits default to false in the
    // class definition; TODO(review): confirm in par_CSRMatrix.hpp.
    // calc_ranges() derives glb_nrows/glb_ncols and the local [beg, end)
    // row/column ranges from the partitions.
    calc_ranges();
}

template<typename idx_t, typename data_t, typename calc_t, int dof>
par_CSRMatrix<idx_t, data_t, calc_t, dof>::~par_CSRMatrix()
{
    // Release only the resources this object owns (each owns_* flag is set by
    // whichever code path allocated the resource), and null the pointers to
    // guard against accidental reuse.
    if (owns_row_partits)       { delete [] rows_partition   ; rows_partition    = nullptr; }
    if (owns_col_partits)       { delete [] cols_partition   ; cols_partition    = nullptr; }
    if (owns_map_offd)          { delete [] col_map_offd     ; col_map_offd      = nullptr; }
    if (owns_commpkg)           { delete    commpkg          ; commpkg           = nullptr; }
    if (owns_commpkg_transpose) { delete    commpkg_transpose; commpkg_transpose = nullptr; }
}

template<typename idx_t, typename data_t, typename calc_t, int dof>
void par_CSRMatrix<idx_t, data_t, calc_t, dof>::prepare_commpkg(
    std::unordered_map<idx_t, std::pair<int, idx_t> > & glb2pidloc)
{
    // Builds the halo-communication package. On entry glb2pidloc maps each
    // off-diagonal global column id to a (pid, local id) pair; on exit the
    // local-id half holds the column's position in the sorted off-diagonal
    // column map col_map_offd.
    assert(commpkg == nullptr);
    owns_commpkg = true;
    commpkg = new par_CSRCommPkg<idx_t>;
    commpkg->comm = comm;

    const idx_t offd_ncols = glb2pidloc.size();
    assert(col_map_offd == nullptr);
    owns_map_offd = true;
    col_map_offd = new idx_t [offd_ncols];

    // Collect the off-diagonal global column ids ...
    idx_t cnt = 0;
    for (const auto & kv : glb2pidloc)
        col_map_offd[cnt ++] = kv.first;
    // ... and sort them into ascending order.
    std::sort(col_map_offd, col_map_offd + offd_ncols);

    // Record each global column's compressed local index back into the map.
    for (idx_t lid = 0; lid < offd_ncols; lid++) {
        const idx_t gid = col_map_offd[lid];
        assert(glb2pidloc.find(gid) != glb2pidloc.end());
        glb2pidloc[gid].second = lid;
    }

    commpkg->prepare(offd_ncols, col_map_offd, cols_partition);
}

template<typename idx_t, typename data_t, typename calc_t, int dof>
void par_CSRMatrix<idx_t, data_t, calc_t, dof>::set_values_distributed(
    const int * dist_row_ptr, const idx_t * dist_col_idx, const data_t * dist_vals, const bool squeeze_zero)
{
    // Widen the 32-bit row pointer to 64 bits and forward to the long long
    // overload. std::vector gives exception-safe (RAII) ownership of the
    // temporary: the raw new[]/delete[] it replaces leaked the buffer if the
    // forwarded call threw.
    const idx_t loc_nrows = end_row - beg_row;
    std::vector<long long> i64_rpt(dist_row_ptr, dist_row_ptr + loc_nrows + 1);
    set_values_distributed(i64_rpt.data(), dist_col_idx, dist_vals, squeeze_zero);
}

template<typename idx_t, typename data_t, typename calc_t, int dof>
void par_CSRMatrix<idx_t, data_t, calc_t, dof>::set_values_distributed(
    const long long * dist_row_ptr, const idx_t * dist_col_idx, const data_t * dist_vals, const bool squeeze_zero)
{
    // Splits the distributed CSR input (this process's rows [beg_row, end_row)
    // of the global matrix) into the diagonal block (columns in
    // [beg_col, end_col)) and the off-diagonal block, and builds the halo
    // communication package. Two passes: pass 1 counts nonzeros and discovers
    // off-diagonal columns; pass 2 fills both CSR structures.
    // Each logical entry is a dof*dof dense element block; when squeeze_zero
    // is true, element blocks that are entirely zero are dropped.
    std::unordered_map<idx_t, std::pair<int, idx_t> > gc2pidlc;// glb_col => (pid, loc_col)
    const idx_t loc_nrows = end_row - beg_row;

    constexpr int e_size = dof*dof;
    idx_t diag_nnz = 0, offd_nnz = 0;
    idx_t offd_ncols = 0;
    for (idx_t loc_i = 0; loc_i < loc_nrows; loc_i++) {// walk every row owned by this process
        for (long long  p = dist_row_ptr[loc_i  ] - dist_row_ptr[0]; 
                        p < dist_row_ptr[loc_i+1] - dist_row_ptr[0]; p++) {// walk every nonzero of this row
            if (squeeze_zero) {
                bool all_zero = true;
                const data_t * vpt = dist_vals + p * e_size;
                for (int f = 0; f < e_size; f++) all_zero = all_zero && (vpt[f] == 0.0);
                if (all_zero) continue;// squeeze out numerically-zero element blocks
            }
            // if (squeeze_zero == true && dist_vals[p] == 0.0) continue;

            idx_t glb_j = dist_col_idx[p]; assert(glb_j < glb_ncols);
            if (glb_j >= beg_col && glb_j < end_col) {
                diag_nnz ++;
            } else {// belongs to the off-diagonal block
                offd_nnz ++;
                if (gc2pidlc.find(glb_j) == gc2pidlc.end()) {// first time this column is seen
                    offd_ncols ++;
                    std::pair<int, idx_t> tmp(-1, -1);// placeholders; filled in by prepare_commpkg
                    gc2pidlc.emplace(glb_j, tmp);// record it
                }
            }
        }
    }
    assert(offd_ncols == (idx_t) gc2pidlc.size());

    diag.nrows = loc_nrows; diag.ncols = end_col - beg_col; diag.nnz = diag_nnz; diag.alloc_mem(false);
    offd.nrows = loc_nrows; offd.ncols = offd_ncols       ; offd.nnz = offd_nnz; offd.alloc_mem(false);

    // Build the communication topology (also assigns each off-diagonal
    // column its compressed local id in gc2pidlc).
    prepare_commpkg(gc2pidlc);
    
    // Fill this process's diagonal and off-diagonal CSR data.
    idx_t ptr_diag = 0, ptr_offd = 0;
    for (idx_t loc_i = 0; loc_i < loc_nrows; loc_i++) {// walk every row owned by this process
        diag.row_ptr[loc_i] = ptr_diag;
        offd.row_ptr[loc_i] = ptr_offd;

        for (auto   p = dist_row_ptr[loc_i  ] - dist_row_ptr[0]; 
                    p < dist_row_ptr[loc_i+1] - dist_row_ptr[0]; p++) {// walk every nonzero of this row
            if (squeeze_zero) {
                bool all_zero = true;
                const data_t * vpt = dist_vals + p * e_size;
                for (int f = 0; f < e_size; f++) all_zero = all_zero && (vpt[f] == 0.0);
                if (all_zero) continue;// squeeze out numerically-zero element blocks
            }
            // if (squeeze_zero == true && dist_vals[p] == 0.0) continue;// squeeze out numerically-zero elements

            idx_t glb_j = dist_col_idx[p]; assert(glb_j < glb_ncols);
            if (glb_j >= beg_col && glb_j < end_col) {// falls inside this process's column range
                diag.col_idx[ptr_diag] = glb_j - beg_col;
                for (int f = 0; f < e_size; f++) {// diag.vals   [ptr_diag] = dist_vals[p];
                    diag.vals[ptr_diag * e_size + f] = dist_vals[p * e_size + f];
                }
                ptr_diag ++;
            } else {// otherwise look up its compressed local column id
                assert(gc2pidlc.find(glb_j) != gc2pidlc.end());
                idx_t dst = gc2pidlc[glb_j].second;
                offd.col_idx[ptr_offd] = dst;
                for (int f = 0; f < e_size; f++) {// offd.vals   [ptr_offd] = dist_vals[p];
                    offd.vals[ptr_offd * e_size + f] = dist_vals[p * e_size + f];
                }
                ptr_offd ++;
            }
        }// done with this row

        // sort each row's entries by ascending column index
        sort_indices_with_vals<idx_t, data_t, dof>(ptr_diag - diag.row_ptr[loc_i],
            diag.col_idx + diag.row_ptr[loc_i],
            diag.vals    + diag.row_ptr[loc_i] * e_size);
        sort_indices_with_vals<idx_t, data_t, dof>(ptr_offd - offd.row_ptr[loc_i],
            offd.col_idx + offd.row_ptr[loc_i],
            offd.vals    + offd.row_ptr[loc_i] * e_size);
    }
    assert(ptr_diag == diag_nnz);
    assert(ptr_offd == offd_nnz);
    diag.row_ptr[loc_nrows] = ptr_diag;
    offd.row_ptr[loc_nrows] = ptr_offd;
}

template<typename idx_t, typename data_t, typename calc_t, int dof>
void par_CSRMatrix<idx_t, data_t, calc_t, dof>::set_values_hypre(const HYPRE_ParCSRMatrix hypre_mat)
// void par_CSRMatrix<idx_t, data_t, calc_t, dof>::set_values_hypre(const void * hypre_ptr)
{
    // Deep-copies a hypre ParCSR matrix into this object: the diag/offd CSR
    // blocks, the off-diagonal column map and the communication package(s).
    // Scalar entries only: the sort calls below pass value pointers that are
    // not scaled by e_size, which is only correct for dof == 1.
    assert(dof == 1);
    // const HYPRE_ParCSRMatrix hypre_mat = *((const HYPRE_ParCSRMatrix *) hypre_ptr);
    // check size matched
    assert(hypre_mat->global_num_rows == glb_nrows);
    assert(hypre_mat->global_num_cols == glb_ncols);
    assert(hypre_mat->first_row_index == beg_row);
    assert(hypre_mat->last_row_index  == end_row - 1);
    assert(hypre_mat->first_col_diag  == beg_col);
    assert(hypre_mat->last_col_diag   == end_col - 1);
    int my_pid; MPI_Comm_rank(comm, & my_pid);
    assert(rows_partition[my_pid] == beg_row);
    assert(cols_partition[my_pid] == beg_col);

    const hypre_CSRMatrix * hypre_diag = hypre_mat->diag, * hypre_offd = hypre_mat->offd;

    const idx_t offd_ncols = hypre_offd->num_cols;
    diag.nrows = hypre_diag->num_rows; diag.ncols = hypre_diag->num_cols; diag.nnz = hypre_diag->num_nonzeros; diag.alloc_mem(false);
    offd.nrows = hypre_offd->num_rows; offd.ncols = offd_ncols          ; offd.nnz = hypre_offd->num_nonzeros; offd.alloc_mem(false);

    owns_map_offd = true;
    col_map_offd = new idx_t [offd_ncols];

    // Copy both CSR blocks and the off-diagonal column map.
    // Implicit conversions from hypre's index/value types to idx_t/data_t
    // happen here — assumes they are lossless for the instantiated types.
    const idx_t loc_nrows = end_row - beg_row;
    #pragma omp parallel
    {
        #pragma omp for schedule(static) nowait
        for (idx_t i = 0; i <= loc_nrows; i++) {
            diag.row_ptr[i] = hypre_diag->i[i];
            offd.row_ptr[i] = hypre_offd->i[i];
        }

        #pragma omp for schedule(static) nowait
        for (idx_t j = 0; j < diag.nnz; j++) {
            diag.col_idx[j] = hypre_diag->j   [j];
            diag.vals   [j] = hypre_diag->data[j];
        }

        #pragma omp for schedule(static) nowait
        for (idx_t j = 0; j < offd.nnz; j++) {
            offd.col_idx[j] = hypre_offd->j   [j];
            offd.vals   [j] = hypre_offd->data[j];
        }

        #pragma omp for schedule(static) nowait
        for (idx_t j = 0; j < offd_ncols; j++) {
            col_map_offd[j] = hypre_mat->col_map_offd[j];
        }
    }

    for (idx_t loc_i = 0; loc_i < loc_nrows; loc_i++) {
        // sort each row's entries by ascending column index
        sort_indices_with_vals<idx_t, data_t, dof>(diag.row_ptr[loc_i + 1] - diag.row_ptr[loc_i],
            diag.col_idx + diag.row_ptr[loc_i],
            diag.vals    + diag.row_ptr[loc_i]);
        sort_indices_with_vals<idx_t, data_t, dof>(offd.row_ptr[loc_i + 1] - offd.row_ptr[loc_i],
            offd.col_idx + offd.row_ptr[loc_i],
            offd.vals    + offd.row_ptr[loc_i]);
    }

    // Copy the communication package (and the transpose package, if present).
    owns_commpkg = true;
    commpkg = new par_CSRCommPkg<idx_t>;
    commpkg->copy_hypre(*(hypre_mat->comm_pkg));
    if (hypre_mat->comm_pkgT) {
        owns_commpkg_transpose = true;
        commpkg_transpose = new par_CSRCommPkg<idx_t>;
        commpkg_transpose->copy_hypre(*(hypre_mat->comm_pkgT));
    }
}

template<typename idx_t, typename data_t, typename calc_t, int dof>
void par_CSRMatrix<idx_t, data_t, calc_t, dof>::Mult(  calc_t alpha, const par_Vector<idx_t, calc_t, dof> & x,
                calc_t beta , const par_Vector<idx_t, calc_t, dof> & b, par_Vector<idx_t, calc_t, dof> & y) const
{
    // Distributed SpMV: y = alpha * A * x + beta * b.
    // The asynchronous halo exchange of off-process x entries is overlapped
    // with the local diagonal-block product.
    assert(glb_nrows == y.glb_nrows && beg_row == y.beg_row && end_row == y.end_row);
    assert(glb_ncols == x.glb_nrows && beg_col == x.beg_row && end_col == x.end_row);

    assert(commpkg);

    // RAII buffers: the raw new[]/delete[] pair they replace leaked if
    // anything below threw before the deletes.
    std::vector<calc_t> offd_x((size_t) offd.ncols * dof);
    std::vector<calc_t> send_buf((size_t) commpkg->send_map_starts[commpkg->num_sends] * dof);

    // Kick off the halo exchange, then do the diagonal block while the
    // messages are in flight: y = alpha * diag * x_local + beta * b.
    commpkg->init_update_halo(dof, x.local_vector->data, send_buf.data(), offd_x.data());
    diag.Mult(alpha, x.local_vector->data,         beta, b.local_vector->data, y.local_vector->data);

    // Wait for the halo values, then accumulate the off-diagonal
    // contribution: y += alpha * offd * x_halo.
    commpkg->wait_update_halo();
    offd.Mult(alpha,         offd_x.data()      , (calc_t) 1.0, y.local_vector->data, y.local_vector->data);
}

template<typename idx_t, typename data_t, typename calc_t, int dof>
void par_CSRMatrix<idx_t, data_t, calc_t, dof>::Mult(const par_Vector<idx_t, calc_t, dof> & x, 
                    par_Vector<idx_t, calc_t, dof> & y, bool use_zero_guess) const
{
    // Plain SpMV: y = A * x, delegated to the alpha/beta overload with
    // alpha = 1 and beta = 0. The use_zero_guess flag belongs to the generic
    // operator interface and is not consulted here.
    this->Mult(static_cast<calc_t>(1.0), x, static_cast<calc_t>(0.0), y, y);
}

template<typename idx_t, typename data_t, typename calc_t, int dof>
void par_CSRMatrix<idx_t, data_t, calc_t, dof>::truncate()
{
    // Intentionally a no-op: entry truncation is not implemented for the
    // CSR storage. NOTE(review): confirm callers expect a no-op here rather
    // than an error.
}

template<typename idx_t, typename data_t, typename calc_t, int dof>
void par_CSRMatrix<idx_t, data_t, calc_t, dof>::calc_ranges()
{
    // Derive the global sizes and this process's local [beg, end) row/column
    // ranges from the partition arrays. Entry p of a partition array is the
    // first index owned by rank p; entry num_procs is the global size.
    int my_pid, num_procs;
    MPI_Comm_rank(comm, &my_pid);
    MPI_Comm_size(comm, &num_procs);

    beg_row   = rows_partition[my_pid];
    end_row   = rows_partition[my_pid + 1];
    glb_nrows = rows_partition[num_procs];

    beg_col   = cols_partition[my_pid];
    end_col   = cols_partition[my_pid + 1];
    glb_ncols = cols_partition[num_procs];
}

template<typename idx_t, typename data_t, typename calc_t, int dof>
calc_t par_CSRMatrix<idx_t, calc_t, calc_t, dof>::power_iteration(idx_t max_iter, const Operator<idx_t, calc_t, dof> * prec) const
{
    assert(glb_nrows == glb_ncols);
    int my_pid; MPI_Comm_rank(comm, & my_pid);
    par_Vector<idx_t, calc_t, dof> x(comm, glb_nrows, beg_row, end_row);
    par_Vector<idx_t, calc_t, dof> tmp(x), y(x);
    // initialize a random start vector
    for (idx_t i = 0; i < (end_row - beg_row)*dof; i++) {
        y.local_vector->data[i] = (calc_t) rand() / (calc_t) RAND_MAX;
    }
    double lambda = 0.0;
    for (idx_t i = 0; i < max_iter; i ++) {
        double y_nrm2 = vec_dot<idx_t, calc_t, double, dof>(y, y);
        y_nrm2 = 1.0 / sqrt(y_nrm2);
        vec_mul_by_scalar<idx_t, calc_t, calc_t, dof>(y_nrm2, y, x);// x <- y/||y|| (normalize)
        this->Mult(x, y, false);// y <- A*x
        if (prec != nullptr) {// a preconditioner was supplied: y <- M^{-1} * A * x
            tmp.set_val(0.0);
            prec->Mult(y, tmp, true);
            vec_copy(tmp, y);
        } else {
            // nothing to do
        }

        lambda = vec_dot<idx_t, calc_t, double, dof>(x, y);
        if (my_pid == 0) printf("Round %3d lambda %.10e\n", i, lambda);
    }
    return lambda;
}


template class par_CSRMatrix<int, float , float , 1>;
template class par_CSRMatrix<int, float , float , 2>;
template class par_CSRMatrix<int, float , float , 3>;
template class par_CSRMatrix<int, float , float , 4>;

template class par_CSRMatrix<int, float , double, 1>;
template class par_CSRMatrix<int, float , double, 2>;
template class par_CSRMatrix<int, float , double, 3>;
template class par_CSRMatrix<int, float , double, 4>;

template class par_CSRMatrix<int, double, double, 1>;
template class par_CSRMatrix<int, double, double, 2>;
template class par_CSRMatrix<int, double, double, 3>;
template class par_CSRMatrix<int, double, double, 4>;
#ifdef USE_FP16
template class par_CSRMatrix<int, __fp16, float , 1>;
template class par_CSRMatrix<int, __fp16, float , 2>;
template class par_CSRMatrix<int, __fp16, float , 3>;
template class par_CSRMatrix<int, __fp16, float , 4>;

template class par_CSRMatrix<int, __fp16, double, 1>;
template class par_CSRMatrix<int, __fp16, double, 2>;
template class par_CSRMatrix<int, __fp16, double, 3>;
template class par_CSRMatrix<int, __fp16, double, 4>;
#endif

// template class par_CSRMatrix<int, long long, float , float , 1>;
// template class par_CSRMatrix<int, long long, float , float , 2>;
// template class par_CSRMatrix<int, long long, float , float , 3>;
// template class par_CSRMatrix<int, long long, float , float , 4>;

// template class par_CSRMatrix<int, long long, float , double, 1>;
// template class par_CSRMatrix<int, long long, float , double, 2>;
// template class par_CSRMatrix<int, long long, float , double, 3>;
// template class par_CSRMatrix<int, long long, float , double, 4>;

// template class par_CSRMatrix<int, long long, double, double, 1>;
// template class par_CSRMatrix<int, long long, double, double, 2>;
// template class par_CSRMatrix<int, long long, double, double, 3>;
// template class par_CSRMatrix<int, long long, double, double, 4>;
// #ifdef USE_FP16
// template class par_CSRMatrix<int, long long, __fp16, float , 1>;
// template class par_CSRMatrix<int, long long, __fp16, float , 2>;
// template class par_CSRMatrix<int, long long, __fp16, float , 3>;
// template class par_CSRMatrix<int, long long, __fp16, float , 4>;

// template class par_CSRMatrix<int, long long, __fp16, double, 1>;
// template class par_CSRMatrix<int, long long, __fp16, double, 2>;
// template class par_CSRMatrix<int, long long, __fp16, double, 3>;
// template class par_CSRMatrix<int, long long, __fp16, double, 4>;
// #endif




