#include "GCRSolver.hpp"

#include <cassert>
#include <cstdio>
#include <cstring>

#include <memory>
#include <vector>

// Default-construct a GCR solver. All configuration (operator, preconditioner,
// tolerances, restart length) is supplied later through the base class and setters.
template<typename idx_t, typename pc_data_t, typename pc_calc_t, typename ksp_t, int dof>
GCRSolver<idx_t, pc_data_t, pc_calc_t, ksp_t, dof>::GCRSolver()
    : IterativeSolver<idx_t, pc_data_t, pc_calc_t, ksp_t, dof>()
{
}

// Set the GCR restart length, i.e. the maximum number of search directions
// kept before the inner iteration wraps around.
template<typename idx_t, typename pc_data_t, typename pc_calc_t, typename ksp_t, int dof>
void GCRSolver<idx_t, pc_data_t, pc_calc_t, ksp_t, dof>::SetInnerIterMax(int num)
{
    this->inner_iter_max = num;
}

// Preconditioned, restarted GCR (Generalized Conjugate Residual) iteration:
// approximately solves oper * x = b, optionally applying this->prec as a
// (right) preconditioner, restarting after inner_iter_max stored directions.
//
// Parameters:
//   b : right-hand side (read-only)
//   x : on entry the initial guess (treated as zero when this->zero_guess),
//       on exit the computed solution
//
// Notes:
//   * ksp_t is the Krylov working precision, pc_calc_t the precision the
//     preconditioner computes in. When their sizes differ, data is staged
//     through pc_buf_b/pc_buf_x via VecTransPrec around each prec->Mult.
//   * Per-phase wall times are accumulated into this->part_times (OPER/PREC).
//   * Convergence: stops when (r,r) <= abs_tol or (r,r)/(b,b) <= rel_tol.
template<typename idx_t, typename pc_data_t, typename pc_calc_t, typename ksp_t, int dof>
void GCRSolver<idx_t, pc_data_t, pc_calc_t, ksp_t, dof>::Mult_ipl(const par_Vector<idx_t, ksp_t, dof> & b, par_Vector<idx_t, ksp_t, dof> & x) const 
{
    assert(this->oper != nullptr);
    assert(this->inner_iter_max > 0);
    
    MPI_Comm comm = x.comm;
    int my_pid; MPI_Comm_rank(comm, &my_pid);
// #ifndef NDEBUG
    if (my_pid == 0) printf("GCR with inner_iter_max = %d\n", inner_iter_max);
// #endif
    double * record = this->part_times;
    // BUGFIX: part_times is an array of double, but the old code zeroed only
    // sizeof(ksp_t) * NUM_KRYLOV_RECORD bytes, leaving half of the array dirty
    // whenever ksp_t is float. (memset's fill value is an int, not 0.0.)
    memset(record, 0, sizeof(double) * NUM_KRYLOV_RECORD);

    // Allocate the search directions p[i] and their images ap[i] = A*p[i].
    // std::allocator_traits is used because std::allocator's construct/destroy
    // members are deprecated in C++17 and removed in C++20.
    std::allocator<par_Vector<idx_t, ksp_t, dof> > alloc;
    using alloc_traits = std::allocator_traits<std::allocator<par_Vector<idx_t, ksp_t, dof> > >;
    par_Vector<idx_t, ksp_t, dof> * p = alloc.allocate(inner_iter_max), 
                                   *ap = alloc.allocate(inner_iter_max);

    for (int i = 0; i < inner_iter_max; i++) {
        alloc_traits::construct(alloc,  p + i, x);
        alloc_traits::construct(alloc, ap + i, x);
        p [i].set_val(0.0);// a full clear is not strictly needed: only the halo zone must be zeroed
        ap[i].set_val(0.0);
    }

    par_Vector<idx_t, ksp_t, dof> r(x), ar(x), gcr(x);
    r.set_val(0.0);
    ar.set_val(0.0);
    gcr.set_val(0.0);

    ksp_t b_dot = this->Dot(b, b);

    // Auxiliary scalars. std::vector instead of the previous VLAs:
    // variable-length arrays are a compiler extension, not standard C++.
    ksp_t res = 0.0;
    std::vector<ksp_t> aps(inner_iter_max), 
            c1(inner_iter_max), c2(inner_iter_max), beta(inner_iter_max);

    // When the preconditioner computes in a different precision than the
    // Krylov loop, stage its input/output through conversion buffers.
    par_Vector<idx_t, pc_calc_t, dof> * pc_buf_b = nullptr, * pc_buf_x = nullptr;
    if constexpr (sizeof(pc_calc_t) != sizeof(ksp_t)) {
        pc_buf_b = new par_Vector<idx_t, pc_calc_t, dof>(b.comm, b.glb_nrows, b.beg_row, b.end_row);
        pc_buf_x = new par_Vector<idx_t, pc_calc_t, dof>(*pc_buf_b);
        pc_buf_b->set_val(0.0);
        pc_buf_x->set_val(0.0);
    }

    // Initial residual r = b - A*x
    if (this->zero_guess) {// x is zero here, so r = b can simply be copied
        vec_copy(b, r);
        res = b_dot;
    } else {
        record[OPER] -= wall_time();
        this->oper->Mult(x, r, false);
        record[OPER] += wall_time();
        vec_add(b, (ksp_t) -1.0, r, r);
        res = this->Dot(r, r);
    }

    if (my_pid == 0) printf("begin of gcr %20.16e\n", (double)res);

    // Apply the preconditioner: r => M^{-1}*r, then multiply M^{-1}*r by A
    if (this->prec) {
        bool zero_guess = true;
        if constexpr (sizeof(pc_calc_t) != sizeof(ksp_t)) {
            VecTransPrec(r, *pc_buf_b);// copy in
            if (zero_guess) pc_buf_x->set_val(0.0);
            else
                VecTransPrec(gcr, *pc_buf_x); // copy in
            record[PREC] -= wall_time();
            this->prec->Mult(*pc_buf_b, *pc_buf_x, zero_guess);
            record[PREC] += wall_time();
            VecTransPrec(*pc_buf_x, gcr);// copy out
        } else {
            if (zero_guess) gcr.set_val(0.0);
            record[PREC] -= wall_time();
            this->prec->Mult(r, gcr, zero_guess);
            record[PREC] += wall_time();
        }
    } else {
        vec_copy(r, gcr);
    }
    record[OPER] -= wall_time();// CONSISTENCY: this matvec was previously the only untimed one
    this->oper->Mult(gcr, ar, false);// ar = A*r
    record[OPER] += wall_time();

    // p[0] = r, ap[0] = ar
    vec_copy(gcr,  p[0]);
    vec_copy( ar, ap[0]);

    // Main loop
    int mm = 0;
    for ( ; mm < this->max_iter; mm++) {
        int m = mm % (inner_iter_max - 1);
        // TODO: save one copy
        if (m == 0 && mm > 0) {// inner index wrapped: seed the next restart cycle with the last direction of the previous one
            vec_copy(  p[inner_iter_max - 1],  p[0] );
            vec_copy( ap[inner_iter_max - 1], ap[0] );
        }

        // c1[0] = (r, ap[m]), c1[1] = (ap[m], ap[m]); accumulate in double
        // when ksp_t is a narrower type to limit round-off in the reduction.
        // TODO: kernel fusion
        if constexpr (sizeof(ksp_t) < 8) {
            c1[0] = c1[1] = 0.0;
            c1[0] += vec_dot_intra_proc<idx_t, ksp_t, double, dof>(    r, ap[m]);
            c1[1] += vec_dot_intra_proc<idx_t, ksp_t, double, dof>(ap[m], ap[m]);
        } else {
            c1[0] = c1[1] = 0.0;
            c1[0] += vec_dot_intra_proc<idx_t, ksp_t, ksp_t, dof>(    r, ap[m]);
            c1[1] += vec_dot_intra_proc<idx_t, ksp_t, ksp_t, dof>(ap[m], ap[m]);
        }
#ifdef __x86_64__
        // CONSISTENCY: guard was spelled "__x86_64" here but "__x86_64__" in
        // the reduction further below; unified to the double-underscore form.
        if constexpr (std::is_same<ksp_t, __float128>::value)
            MPI_Allreduce(c1.data(), c2.data(), 2, MPI_LONG_DOUBLE, MPIU_SUM_FP128, comm);
        else
#endif
            MPI_Allreduce(c1.data(), c2.data(), 2, sizeof(ksp_t) == 16 ? MPI_LONG_DOUBLE : (sizeof(ksp_t) == 8 ? MPI_DOUBLE : MPI_FLOAT), MPI_SUM, comm);
        ksp_t ac = c2[0] / c2[1];// step length: (r, ap[m]) / (ap[m], ap[m])
        aps[m]    = c2[1];

        vec_add(x,  ac,  p[m], x);// update the solution vector
        vec_add(r, -ac, ap[m], r);// update the residual vector
#ifdef MONITOR
        {// Monitor Real residual
            par_Vector<idx_t, ksp_t, dof> tmp(b);
            this->oper->Mult(x, tmp, false);
            vec_add(b, (ksp_t) -1.0, tmp, tmp);
            double true_r_norm = this->Norm(tmp);
            if (my_pid == 0) printf("\ttrue ||r|| = %.10e\n", true_r_norm);
        }
#endif
        ksp_t loc_err, glb_err;
        if constexpr (sizeof(ksp_t) < 8) {
            loc_err = 0.0;
            loc_err += vec_dot_intra_proc<idx_t, ksp_t, double, dof>(r, r);// the global reduce for the convergence check is batched with c1/c2 below
        } else {
            loc_err = 0.0;
            loc_err += vec_dot_intra_proc<idx_t, ksp_t, ksp_t , dof>(r, r);// the global reduce for the convergence check is batched with c1/c2 below
        }
        // Precondition again: r => M^{-1}*r, then ar = A * (M^{-1}*r)
        if (this->prec) {
            bool zero_guess = true;
            if constexpr (sizeof(pc_calc_t) != sizeof(ksp_t)) {
                VecTransPrec(r, *pc_buf_b);// copy in
                if (zero_guess) pc_buf_x->set_val(0.0);
                else
                    VecTransPrec(gcr, *pc_buf_x);// copy in
                record[PREC] -= wall_time();
                this->prec->Mult(*pc_buf_b, *pc_buf_x, zero_guess);
                record[PREC] += wall_time();
                VecTransPrec(*pc_buf_x, gcr);// copy out
            } else {
                if (zero_guess) gcr.set_val(0.0);
                record[PREC] -= wall_time();
                this->prec->Mult(r, gcr, zero_guess);
                record[PREC] += wall_time();
            }
        } else {
            vec_copy(r, gcr);
        }
        record[OPER] -= wall_time();
        this->oper->Mult(gcr, ar, false);
        record[OPER] += wall_time();
        // c1[l] = (ar, ap[l]) for l = 0..m; c1[m+1] carries the local residual norm
        // TODO: kernel fusion
        for (int l = 0; l <= m; l++) {
            c1[l] = 0.0;
            if constexpr (sizeof(ksp_t) < 8)
                c1[l] += vec_dot_intra_proc<idx_t, ksp_t, double, dof>(ar, ap[l]);
            else
                c1[l] += vec_dot_intra_proc<idx_t, ksp_t, ksp_t , dof>(ar, ap[l]);
        }
        c1[m + 1] = loc_err;
#ifdef __x86_64__
        if constexpr (std::is_same<ksp_t, __float128>::value)
            MPI_Allreduce(c1.data(), c2.data(), m + 2, MPI_LONG_DOUBLE, MPIU_SUM_FP128, comm);
        else
#endif
            MPI_Allreduce(c1.data(), c2.data(), m + 2, sizeof(ksp_t) == 16 ? MPI_LONG_DOUBLE : (sizeof(ksp_t) == 8 ? MPI_DOUBLE : MPI_FLOAT), MPI_SUM, comm);// one reduce updates entries [0,1,...,m+1], i.e. m+2 numbers
        glb_err = c2[m + 1];
        ksp_t _rel = glb_err / b_dot;

// #ifndef NDEBUG
        if (my_pid == 0) printf("  res of gcr %20.16e at %3d iter (r,r)/(b,b) = %.16e\n", (double)glb_err, mm, (double)_rel);
// #endif
        // NOTE(review): "mm == this->max_iter" can never hold inside this loop
        // (the loop condition is mm < max_iter); kept only as a safeguard.
        if (glb_err <= this->abs_tol || _rel <= this->rel_tol || mm == this->max_iter) goto finish;

        // Compute beta and build the direction for the next inner iteration
        for (int l = 0; l <= m; l++)
            beta[l] = - c2[l] / aps[l];
        vec_copy(gcr,  p[m+1]);
        vec_copy( ar, ap[m+1]);
        // TODO: kernel fusion
        for (int l = 0; l <= m; l++) {// orthogonalize direction m+1 against the m+1 stored directions [0,1,...,m]
            vec_add(  p[m+1], beta[l],  p[l],  p[m+1]);
            vec_add( ap[m+1], beta[l], ap[l], ap[m+1]);
        }
    }

finish:
    // Cleanup
    for (int i = 0; i < inner_iter_max; i++) {
        alloc_traits::destroy(alloc,  p + i);
        alloc_traits::destroy(alloc, ap + i);
    }
    alloc.deallocate( p, inner_iter_max);
    alloc.deallocate(ap, inner_iter_max);

    delete pc_buf_x;// delete on nullptr is a no-op
    delete pc_buf_b;
    this->final_iter = mm;
}

// Explicit instantiations: GCRSolver<idx_t, pc_data_t, pc_calc_t, ksp_t, dof>.
// NOTE(review): from their use in Mult_ipl, pc_calc_t appears to be the
// precision the preconditioner computes in (conversion buffers are used when
// sizeof(pc_calc_t) != sizeof(ksp_t)) and ksp_t the Krylov precision;
// pc_data_t is presumably the preconditioner's storage precision — confirm
// against the class declaration. dof is the block size per grid point (1..4).

// Full double precision.
template class GCRSolver<int, double, double, double, 1>;
template class GCRSolver<int, double, double, double, 2>;
template class GCRSolver<int, double, double, double, 3>;
template class GCRSolver<int, double, double, double, 4>;

// Preconditioner stored in float, computed in double; double Krylov loop.
template class GCRSolver<int, float , double, double, 1>;
template class GCRSolver<int, float , double, double, 2>;
template class GCRSolver<int, float , double, double, 3>;
template class GCRSolver<int, float , double, double, 4>;

// Preconditioner fully in float; double Krylov loop (mixed precision).
template class GCRSolver<int, float , float , double, 1>;
template class GCRSolver<int, float , float , double, 2>;
template class GCRSolver<int, float , float , double, 3>;
template class GCRSolver<int, float , float , double, 4>;

// Everything in single precision.
template class GCRSolver<int, float , float , float , 1>;
template class GCRSolver<int, float , float , float , 2>;
template class GCRSolver<int, float , float , float , 3>;
template class GCRSolver<int, float , float , float , 4>;
#ifdef USE_FP16
// Half-precision preconditioner storage (GCC/Clang __fp16 extension).
template class GCRSolver<int, __fp16, float , double, 1>;
template class GCRSolver<int, __fp16, float , double, 2>;
template class GCRSolver<int, __fp16, float , double, 3>;
template class GCRSolver<int, __fp16, float , double, 4>;

template class GCRSolver<int, __fp16, float , float , 1>;
template class GCRSolver<int, __fp16, float , float , 2>;
template class GCRSolver<int, __fp16, float , float , 3>;
template class GCRSolver<int, __fp16, float , float , 4>;
#endif
