//----------------------------------------------------------------------------
// M A R I T I M E  R E S E A R C H  I N S T I T U T E  N E T H E R L A N D S
//----------------------------------------------------------------------------
// Copyright (C) 2011 - MARIN - All rights reserved - http://www.marin.nl
//----------------------------------------------------------------------------
// Program    : mermaid
// Module     : vbm
// File       : CudaRrbSolver.cu
//----------------------------------------------------------------------------

#include <math.h>
#include <cuda.h>
#include <stdio.h>
#include <sys/time.h>
#include "FloatUtils.h"
#include "Timer.h"
#include "assertplus.h"
#include "CudaRrbSolver.h"
#include "CUDAPACKAGE/common/defs.h"
#include "CUDAPACKAGE/common/macros.h"
#include "CUDAPACKAGE/common/misc.h"
#include "CUDAPACKAGE/rrbsolver/prec.h"
#include "CUDAPACKAGE/rrbsolver/solv.h"
#include "CUDAPACKAGE/rrbsolver/matv.h"
#include "CUDAPACKAGE/rrbsolver/dotp.h"
#include "CUDAPACKAGE/rrbsolver/axpy.h"
#include "CUDAPACKAGE/rrbsolver/subs.h"

/// Constructor.
///
/// Stores the host stencil arrays, computes the padded grid hierarchy
/// (determineGrids), allocates all device/host storage (setup) and builds
/// the RRB preconditioner (preconditioner), so that later solve() calls
/// only run the CG iteration itself.
///
/// @param pcc        center stencil coefficients (host array)
/// @param pcs        stencil coefficients that feed the west/east stencils
///                   in preconditioner() -- NOTE(review): the 's' suffix
///                   suggests "south"; looks swapped with pcw, confirm
///                   naming against the caller
/// @param pcw        stencil coefficients that feed the south/north
///                   stencils in preconditioner()
/// @param Nx1        number of grid nodes in the first dimension (maps to y)
/// @param Nx2        number of grid nodes in the second dimension (maps to x)
/// @param tolerance  CG stopping tolerance (squared inside cg())
CudaRrbSolver::CudaRrbSolver(Array2D<REAL> *pcc,
                             Array2D<REAL> *pcs,
                             Array2D<REAL> *pcw,
                             int Nx1,
                             int Nx2,
                             REAL tolerance)
: base(Nx1, Nx2, tolerance)
, m_pcc(pcc)
, m_pcs(pcs)
, m_pcw(pcw)
, m_prec()
, m_orig()
, m_dcc()
, m_dss()
, m_dww()
, m_dX()
, m_dB()
, m_dXr1r2b1b2()
, m_dYr1r2b1b2()
, m_dBr1r2b1b2()
, m_dPr1r2b1b2()
, m_dQr1r2b1b2()
, m_dRr1r2b1b2()
, m_dZr1r2b1b2()
, m_drem()
, m_hrem()
, m_nx()
, m_ny()
, m_cx()
, m_cy()
, m_remSize(0)
, m_numLevels(0)
{
    // NOTE(review): rows() is compared against COLUMNS and columns() against
    // ROWS -- the arrays are apparently stored transposed; confirm.
    ASSERT(pcc->rows() == COLUMNS && pcc->columns() == ROWS, 
        "c array size mismatch");
    ASSERT(pcs->rows() == COLUMNS && pcs->columns() == ROWS, 
        "s array size mismatch");
    ASSERT(pcw->rows() == COLUMNS && pcw->columns() == ROWS, 
        "w array size mismatch");

    // Computes the sizes of the grids
    determineGrids(Nx1, Nx2);

    // Sets up all grids
    setup();

    // Constructs the preconditioner
    preconditioner();

} // constructor


/// destructor: releases all device and host memory via cleanup()
CudaRrbSolver::~CudaRrbSolver()
{
    cleanup();
} // destructor


/// Releases every device and host allocation made in setup() and
/// preconditioner(). Called once, from the destructor.
void CudaRrbSolver::cleanup()
{
    // free the full-grid vectors and the CG work vectors on the device
    common::freeGpuVector<REAL>(m_dX);
    common::freeGpuVector<REAL>(m_dB);
    common::freeGpuVector<REAL>(m_dXr1r2b1b2);
    // fix: m_dYr1r2b1b2 is allocated in setup() but was never freed here
    common::freeGpuVector<REAL>(m_dYr1r2b1b2);
    common::freeGpuVector<REAL>(m_dBr1r2b1b2);
    common::freeGpuVector<REAL>(m_dPr1r2b1b2);
    common::freeGpuVector<REAL>(m_dQr1r2b1b2);
    common::freeGpuVector<REAL>(m_dRr1r2b1b2);

    // march through all levels and free memory
    // (the ne/se/sw/nw stencils were already freed in preconditioner())
    for (unsigned int k = 1; k <= m_numLevels; ++k) 
    {
        common::freeGpuVector<REAL>(m_prec[k].cc);
        common::freeGpuVector<REAL>(m_prec[k].nn);
        common::freeGpuVector<REAL>(m_prec[k].ee);
        common::freeGpuVector<REAL>(m_prec[k].ss);
        common::freeGpuVector<REAL>(m_prec[k].ww);
        common::freeGpuVector<REAL>(m_dZr1r2b1b2[k]);
    }

    // free the untouched copy of the level-1 stencils
    common::freeGpuVector<REAL>(m_orig.cc);
    common::freeGpuVector<REAL>(m_orig.nn);
    common::freeGpuVector<REAL>(m_orig.ee);
    common::freeGpuVector<REAL>(m_orig.ss);
    common::freeGpuVector<REAL>(m_orig.ww);

    // partial-sum buffers used by dot()
    common::freeGpuVector<REAL>(m_drem);
    common::freeCpuVector<REAL>(m_hrem);

    // fix: the host-side arrays themselves (allocated with new[] in setup())
    // were previously leaked; delete[] is a no-op on a null pointer, so this
    // is also safe when setup() was never reached
    delete[] m_prec;
    m_prec = 0;
    delete[] m_dZr1r2b1b2;
    m_dZr1r2b1b2 = 0;
}

/// Computes the dimensions of all coarsening levels of the RRB hierarchy:
/// m_nx/m_ny hold the total (padded) sizes, m_cx/m_cy the compute part
/// without the ghost border. The number of levels ends up in m_numLevels.
void CudaRrbSolver::determineGrids(unsigned int Nx1, unsigned int Nx2)
{
    // Level 0: round the user grid up to a multiple of DIM_COMPUTE_BLOCK
    // and add a ghost border of BORDER_WIDTH2 cells.
    // Note: Nx2 maps to the x direction and Nx1 to the y direction.
    m_nx[0] = DIM_COMPUTE_BLOCK * (int)ceil((float)Nx2 / DIM_COMPUTE_BLOCK) 
               + BORDER_WIDTH2;

    m_ny[0] = DIM_COMPUTE_BLOCK * (int)ceil((float)Nx1 / DIM_COMPUTE_BLOCK) 
               + BORDER_WIDTH2;

    // compute sizes = padded sizes minus the border
    m_cx[0] = m_nx[0] - BORDER_WIDTH2;
    m_cy[0] = m_ny[0] - BORDER_WIDTH2;

    // Keep coarsening (halving) until the compute part of a level fits
    // inside a single compute block in both directions.
    int k = 1;
    while (m_cx[k-1] > DIM_COMPUTE_BLOCK || m_cy[k-1] > DIM_COMPUTE_BLOCK) 
    {
        // NOTE(review): m_cx[k-1] / 2 is *integer* division, so an odd
        // compute size is floored before ceil() rounds the result up to a
        // block multiple -- presumably intentional for the red-black
        // coarsening, confirm.
        m_cx[k] = DIM_COMPUTE_BLOCK * (int)ceil((float)(m_cx[k-1] / 2) 
                   / DIM_COMPUTE_BLOCK);

        m_cy[k] = DIM_COMPUTE_BLOCK * (int)ceil((float)(m_cy[k-1] / 2) 
                   / DIM_COMPUTE_BLOCK);

        // total size: two half-grids plus borders
        m_nx[k] = 2 * m_cx[k] + BORDER_WIDTH + BORDER_WIDTH2;
        m_ny[k] = 2 * m_cy[k] + BORDER_WIDTH + BORDER_WIDTH2;
        ++k;
    }
    m_numLevels = k - 1;
} // determineGrids


/// Allocates every grid, stencil and work vector used by the solver.
void CudaRrbSolver::setup()
{
    // Per-level preconditioner grids and z-vectors. Index 0 is never used;
    // the hierarchy runs from level 1 (finest) to m_numLevels (coarsest).
    m_prec       = new Grid[1 + m_numLevels];
    m_dZr1r2b1b2 = new REAL *[1 + m_numLevels]; // needs full depth

    for (unsigned int lvl = 1; lvl <= m_numLevels; ++lvl)
    {
        // center stencil defaults to 1, all coupling stencils to 0
        m_prec[lvl].cc = common::initGpuVector<REAL,INIT>(m_nx[lvl] * m_ny[lvl], 1.0);
        m_prec[lvl].nn = common::initGpuVector<REAL,INIT>(m_nx[lvl] * m_ny[lvl], 0.0);
        m_prec[lvl].ee = common::initGpuVector<REAL,INIT>(m_nx[lvl] * m_ny[lvl], 0.0);
        m_prec[lvl].ss = common::initGpuVector<REAL,INIT>(m_nx[lvl] * m_ny[lvl], 0.0);
        m_prec[lvl].ww = common::initGpuVector<REAL,INIT>(m_nx[lvl] * m_ny[lvl], 0.0);
        m_prec[lvl].ne = common::initGpuVector<REAL,INIT>(m_nx[lvl] * m_ny[lvl], 0.0);
        m_prec[lvl].se = common::initGpuVector<REAL,INIT>(m_nx[lvl] * m_ny[lvl], 0.0);
        m_prec[lvl].sw = common::initGpuVector<REAL,INIT>(m_nx[lvl] * m_ny[lvl], 0.0);
        m_prec[lvl].nw = common::initGpuVector<REAL,INIT>(m_nx[lvl] * m_ny[lvl], 0.0);
        m_prec[lvl].nx = m_nx[lvl];
        m_prec[lvl].ny = m_ny[lvl];
        m_prec[lvl].cx = m_cx[lvl];
        m_prec[lvl].cy = m_cy[lvl];

        // per-level solution vector of the preconditioner solve
        m_dZr1r2b1b2[lvl] = common::initGpuVector<REAL,INIT>(m_nx[lvl] * m_ny[lvl], 0.0);
    }

    // full (level-0) device vectors for the right-hand side and the solution
    m_dX = common::initGpuVector<REAL,INIT>(m_nx[0] * m_ny[0], 0.0);
    m_dB = common::initGpuVector<REAL,INIT>(m_nx[0] * m_ny[0], 0.0);

    // CG work vectors in r1/r2/b1/b2 (2x2) storage on the device
    m_dXr1r2b1b2 = common::initGpuVector<REAL,INIT>(m_nx[1] * m_ny[1], 0.0);
    m_dYr1r2b1b2 = common::initGpuVector<REAL,INIT>(m_nx[1] * m_ny[1], 0.0);
    m_dBr1r2b1b2 = common::initGpuVector<REAL,INIT>(m_nx[1] * m_ny[1], 0.0);
    m_dPr1r2b1b2 = common::initGpuVector<REAL,INIT>(m_nx[1] * m_ny[1], 0.0);
    m_dQr1r2b1b2 = common::initGpuVector<REAL,INIT>(m_nx[1] * m_ny[1], 0.0);
    m_dRr1r2b1b2 = common::initGpuVector<REAL,INIT>(m_nx[1] * m_ny[1], 0.0);

    // untouched copy of the level-1 stencils (filled in preconditioner(),
    // used later by matvec() and the forward/backward substitutions)
    m_orig.cc = common::initGpuVector<REAL,INIT>(m_nx[1] * m_ny[1], 1.0);
    m_orig.nn = common::initGpuVector<REAL,INIT>(m_nx[1] * m_ny[1], 0.0);
    m_orig.ee = common::initGpuVector<REAL,INIT>(m_nx[1] * m_ny[1], 0.0);
    m_orig.ss = common::initGpuVector<REAL,INIT>(m_nx[1] * m_ny[1], 0.0);
    m_orig.ww = common::initGpuVector<REAL,INIT>(m_nx[1] * m_ny[1], 0.0);
    m_orig.nx = m_nx[1];
    m_orig.ny = m_ny[1];
    m_orig.cx = m_cx[1];
    m_orig.cy = m_cy[1];

    // buffers for the partial sums produced by the dot-product reduction;
    // one entry per level-1 compute block
    m_remSize = (m_cx[1] * m_cy[1]) / (DIM_COMPUTE_BLOCK * DIM_COMPUTE_BLOCK);
    m_hrem = common::initCpuVector<REAL,1>(m_remSize, 0.0); // host side
    m_drem = common::initGpuVector<REAL,1>(m_remSize, 0.0); // device side

} // setup


/// Builds the RRB preconditioner from the host stencil arrays
/// m_pcc/m_pcs/m_pcw: fills m_prec[1..m_numLevels] level by level and keeps
/// an unmodified copy of the level-1 stencils in m_orig for matvec() and
/// the forward/backward substitutions in cg().
void CudaRrbSolver::preconditioner()
{
    // geometry of the padded level-0 grid: row pitch in bytes and the
    // offset of the first interior (non-ghost) node
    size_t pitch = m_nx[0] * sizeof(REAL);
    unsigned int dstOffset = (BORDER_WIDTH - 1) * m_nx[0] + (BORDER_WIDTH - 1);
    unsigned int srcOffset = 0;
   
    // center stencil: copy to the device, scatter into r1/r2/b1/b2 storage,
    // then drop the temporary full-grid copy
    m_dcc = common::initGpuVector<REAL,INIT>(m_nx[0] * m_ny[0], 1);
    common::fillGpuVector<REAL>(m_dcc, dstOffset, pitch, m_pcc, srcOffset);
    common::from1to4<REAL>(m_prec[1].cc, m_dcc, m_nx[0], m_ny[0], 
                                 m_prec[1]);
    common::freeGpuVector<REAL>(m_dcc);

    // west and east stencil: both come from the same host array; the east
    // coefficients are the west ones shifted by one node (m_dww + 1), i.e.
    // symmetric couplings.
    // NOTE(review): m_pcs ('s') feeding west/east looks swapped with m_pcw
    // below -- confirm the naming against the caller.
    m_dww = common::initGpuVector<REAL,INIT>(m_nx[0] * m_ny[0], 0);
    common::fillGpuVector<REAL>(m_dww, dstOffset, pitch, m_pcs, srcOffset);
    common::from1to4<REAL>(m_prec[1].ww, m_dww, m_nx[0], m_ny[0], 
                                 m_prec[1]);
    common::from1to4<REAL>(m_prec[1].ee, m_dww + 1, m_nx[0], m_ny[0], 
                                 m_prec[1]);
    common::freeGpuVector<REAL>(m_dww);

    // south and north stencil: likewise, the north coefficients are the
    // south ones shifted by one row (m_dss + m_nx[0])
    m_dss = common::initGpuVector <REAL,INIT>(m_nx[0] * m_ny[0], 0);
    common::fillGpuVector<REAL>(m_dss, dstOffset, pitch, m_pcw, srcOffset);
    common::from1to4<REAL>(m_prec[1].ss, m_dss, m_nx[0], m_ny[0], 
                                 m_prec[1]);
    common::from1to4<REAL>(m_prec[1].nn, m_dss + m_nx[0], m_nx[0], 
                                 m_ny[0], m_prec[1]);
    common::freeGpuVector<REAL>(m_dss);

    // keep an unmodified copy of the level-1 stencils before the
    // factorization below overwrites m_prec[1]
    common::memcpy<REAL>(m_orig.cc, m_prec[1].cc, m_orig.nx * m_orig.ny);
    common::memcpy<REAL>(m_orig.nn, m_prec[1].nn, m_orig.nx * m_orig.ny);
    common::memcpy<REAL>(m_orig.ee, m_prec[1].ee, m_orig.nx * m_orig.ny);
    common::memcpy<REAL>(m_orig.ss, m_prec[1].ss, m_orig.nx * m_orig.ny);
    common::memcpy<REAL>(m_orig.ww, m_prec[1].ww, m_orig.nx * m_orig.ny);

    // forward sweep: from fine (many nodes) to coarse (few nodes)
    for (unsigned int j = 1; j <= m_numLevels; ++j) 
    {
        // phase 1 builds the level from the finer one below it; level 1 was
        // filled directly from the host arrays above
        if (j > 1) 
        {
            // preconditioner phase 1
            rrbsolver::prec_phase1<REAL>(m_prec[j]);
        }

        // preconditioner phase 2
        rrbsolver::prec_phase2<REAL>(m_prec[j]);

        // preconditioner phase 3
        rrbsolver::prec_phase3<REAL>(m_prec[j]);

        // for all levels except the final (coarsest) one: split the reduced
        // system into the four colors of the next level
        if (j < m_numLevels) 
        {
            // 1 to 4 for all stencils:
            common::divideR2in4<REAL>(m_prec[j+1], m_prec[j]);
        }
    }

    // final (coarsest) level
    rrbsolver::prec_final<REAL>(m_prec[m_numLevels]);

    // Backward sweep: from coarse (few nodes) to fine (many nodes).
    // NOTE(review): the unsigned loop start m_numLevels-1 wraps around when
    // m_numLevels == 0 -- confirm determineGrids() always yields >= 1 level.
    for (unsigned int j = m_numLevels-1; j >= 2; --j)
    {
        common::compress4toR2<REAL> (m_prec[j-1], m_prec[j]);
    }

    for (unsigned int j = m_numLevels; j >= 1; --j)
    {
        // divide all stencils by center stencil
        rrbsolver::prec_phase4<REAL>(m_prec[j]);
    }

    // After making the preconditioner, the NE, SE, SW, and NW 
    // stencils can be deleted as they are not used later on
    for (unsigned int k = 1; k <= m_numLevels; ++k) 
    {
        common::freeGpuVector<REAL>(m_prec[k].ne);
        common::freeGpuVector<REAL>(m_prec[k].se);
        common::freeGpuVector<REAL>(m_prec[k].sw);
        common::freeGpuVector<REAL>(m_prec[k].nw);
    }

    // saving 1/center for black nodes in original stencils 
    // which is needed for the matrix-vector product
    rrbsolver::prec_phase5<REAL>(m_orig);
} // preconditioner


/// Preconditioned Conjugate Gradient iteration on the first Schur
/// complement S1 (the system reduced to the red nodes). The right-hand
/// side is expected in m_dRr1r2b1b2; the solution ends up in m_dXr1r2b1b2.
void CudaRrbSolver::cg()
{
    // eliminate the black unknowns from the right-hand side
    rrbsolver::subs_forward<REAL>(m_dRr1r2b1b2, m_orig);

    // initial residual r = r - S1 * x (supports a nonzero start vector x)
    matvec(m_dYr1r2b1b2, m_dXr1r2b1b2);
    axpy(m_dRr1r2b1b2, m_dYr1r2b1b2, 1, -1);

    // apply the preconditioner: solve M z = r for z
    solver();

    // rho = <z, r>
    REAL rhoPrev = dot(m_dZr1r2b1b2[1], m_dRr1r2b1b2);
    REAL rhoCur  = rhoPrev;

    // CG scalars
    REAL alpha = 0.0;
    REAL beta  = 0.0;
    REAL sigma = 0.0;

    // stopping bound: iterate until |rho| drops below (rho0+1)*tolerance^2
    REAL stopBound = (rhoCur + 1) * FloatUtils::square(m_tolerance);

    // iteration counter, capped at MAXITER
    unsigned int it = 0;

    while ((fabs(rhoCur) > fabs(stopBound)) && (it < MAXITER))
    {
        ++it;

        if (it > 1)
        {
            // p = z + beta * p
            axpy(m_dPr1r2b1b2, m_dZr1r2b1b2[1], beta, 1);
        }
        else
        {
            // first iteration has no previous search direction: p = z
            common::memcpy<REAL>(m_dPr1r2b1b2,
                                 m_dZr1r2b1b2[1],
                                 m_prec[1].nx * m_prec[1].ny);
        }

        // q = S1 * p
        matvec(m_dQr1r2b1b2, m_dPr1r2b1b2);

        // step length alpha = rho / <p, q>
        sigma = dot(m_dPr1r2b1b2, m_dQr1r2b1b2);
        alpha = rhoPrev / sigma;

        // x = x + alpha * p
        axpy(m_dXr1r2b1b2, m_dPr1r2b1b2, 1,  alpha);

        // r = r - alpha * q
        axpy(m_dRr1r2b1b2, m_dQr1r2b1b2, 1, -alpha);

        // re-apply the preconditioner: solve M z = r for z
        solver();

        // rho update: rho = <r, z>
        rhoCur = dot(m_dRr1r2b1b2, m_dZr1r2b1b2[1]);

        beta    = rhoCur / rhoPrev;
        rhoPrev = rhoCur;
    }

    // backward substitution: recover the black unknowns from the red ones
    rrbsolver::subs_backward<REAL>(m_dXr1r2b1b2, m_dRr1r2b1b2, m_orig);
}


/// standard base-class function for solving S1*psi = b, see "Solver.h"
///
/// @param pB  right-hand side b on the host
/// @param pX  receives the solution psi on the host
void CudaRrbSolver::solve(Array2D<REAL> *pB,
                          Array2D<REAL> *pX)
{
    // The user grid is embedded in a somewhat larger padded device grid,
    // which allows coalesced memory reads; rowPitch and innerOffset
    // describe where the interior starts inside that padded grid.
    size_t rowPitch = m_nx[0] * sizeof(REAL);
    unsigned int innerOffset = (BORDER_WIDTH - 1) * m_nx[0] + (BORDER_WIDTH - 1);

    // host -> device copy of the right-hand side
    common::fillGpuVector<REAL>(m_dB, innerOffset, rowPitch, pB, 0);

    // reorder b into the r1/r2/b1/b2 storage scheme
    common::from1to4<REAL>(m_dRr1r2b1b2, m_dB,
                           m_nx[0], m_ny[0], m_prec[1]);

    // run the preconditioned CG iteration
    cg();

    // back from r1/r2/b1/b2 to the original storage format
    common::from4to1<REAL>(m_dX, m_dXr1r2b1b2,
                           m_nx[0], m_ny[0], m_prec[1]);

    // device -> host copy of the solution psi
    common::getGpuVector<REAL>(pX, m_dX, innerOffset, rowPitch);

} // solve


/// Device-to-device variant of solve(): avoids host<->device transfers by
/// taking the right-hand side and solution as device vectors (useful when
/// WeaklyReflective() also runs on the GPU).
///
/// @param dbpsi  device vector holding the right-hand side b
/// @param dpsi   device vector receiving the solution psi
/// @param pB     unused host-side rhs, kept for interface symmetry with solve()
/// @param pX     unused host-side solution, kept for interface symmetry
void CudaRrbSolver::cudaSolve(REAL *dbpsi,
                              REAL *dpsi,
                              Array2D<REAL> *pB,
                              Array2D<REAL> *pX)
{
    // fix: the host arrays are intentionally ignored in this device-only
    // path; cast to void to document that and silence unused-parameter
    // warnings
    (void)pB;
    (void)pX;

    // divide the vector b according to the r1/r2/b1/b2 storage
    // note: device vector dbpsi is used rather than host vector pB
    common::from1to4<REAL>(m_dRr1r2b1b2, dbpsi, 
                         m_nx[0], m_ny[0], m_prec[1]);

    // run Conjugate Gradients Method
    cg();

    // from r1/r2/b1/b2 format back to the original storage format
    common::from4to1<REAL>(dpsi, m_dXr1r2b1b2, 
                         m_nx[0], m_ny[0], m_prec[1]);
} // cudaSolve 


/// Applies the RRB preconditioner: solves M z = r, with r in m_dRr1r2b1b2
/// and z ending up in m_dZr1r2b1b2[1]. One sweep from fine to coarse, a
/// direct solve on the coarsest level, then back from coarse to fine.
///
/// NOTE(review): solv_phase1v1 references level 2 unconditionally, so this
/// assumes m_numLevels >= 2 -- confirm determineGrids() guarantees that for
/// all supported problem sizes.
void CudaRrbSolver::solver()
{
    // forward loop: from fine (many nodes) to coarse (few nodes)
    rrbsolver::solv_phase1v1<REAL>(m_dZr1r2b1b2[2], m_dZr1r2b1b2[1], m_dRr1r2b1b2, 
                           m_prec[2], m_prec[1]);

    for (unsigned int j = 2; j <= m_numLevels; ++j) 
    {
        rrbsolver::solv_phase2<REAL>(m_dZr1r2b1b2[j], m_prec[j]);

        rrbsolver::solv_phase5<REAL>(m_dZr1r2b1b2[j], m_prec[j]);

        // restrict to the next coarser level, except on the coarsest one
        if (j < m_numLevels) 
        {
            rrbsolver::solv_phase1v2<REAL>(m_dZr1r2b1b2[j+1], m_dZr1r2b1b2[j], 
                                           m_prec[j+1], m_prec[j]);
        }
        else 
        {
            rrbsolver::solv_phase1v3<REAL>(m_dZr1r2b1b2[j], m_prec[j]);
        }
        rrbsolver::solv_phase6<REAL>(m_dZr1r2b1b2[j], m_prec[j]);
    }

    // final level, coarsest grid (16 x 16 or 32 x 32 nodes)
    rrbsolver::solv_final<REAL>(m_dZr1r2b1b2[m_numLevels], m_prec[m_numLevels]);

    // backward loop: from coarse (few nodes) to fine (many nodes)
    for (unsigned int j = m_numLevels; j >= 2; --j) 
    {
        rrbsolver::solv_phase4<REAL>(m_dZr1r2b1b2[j], m_prec[j]);

        rrbsolver::solv_phase3<REAL>(m_dZr1r2b1b2[j-1], m_dZr1r2b1b2[j], 
                                     m_prec[j-1], m_prec[j]);
    }

    // finish on the finest level
    rrbsolver::solv_phase4<REAL>(m_dZr1r2b1b2[1], m_prec[1]);
} // solver


/// matrix vector product: y = S1 * x, where S1 is the first
/// Schur-complement; uses the unmodified level-1 stencils kept in m_orig
///
/// @param dY  device vector receiving the product (r1/r2/b1/b2 storage)
/// @param dX  device input vector (r1/r2/b1/b2 storage)
void CudaRrbSolver::matvec(REAL *dY, REAL *dX)
{
    // the product is computed in two kernel phases over the same output
    rrbsolver::matv_phase1<REAL>(dY, dX, m_orig); 

    rrbsolver::matv_phase2<REAL>(dY, dX, m_orig);
} // matvec


/// dot product <y,x>
///
/// Stage 1 reduces on the GPU into per-block partial sums (delivered in the
/// host vector m_hrem); stage 2 combines those partial sums on the CPU with
/// Kahan (compensated) summation to limit round-off error.
///
/// @param dY  first device vector
/// @param dX  second device vector
/// @return    the inner product <y,x>
REAL CudaRrbSolver::dot(REAL *dY, REAL *dX)
{
    // GPU-side mass reduction; m_hrem holds m_remSize partial sums afterwards
    rrbsolver::dotp_phase1<REAL>(m_hrem, m_drem, dY, dX, m_remSize, m_prec[1]);

    // CPU-side Kahan summation of the partial sums
    REAL sum  = 0.0;
    REAL comp = 0.0; // running compensation for lost low-order bits
    for (unsigned int i = 0; i < m_remSize; i++)
    {
        REAL corrected = m_hrem[i] - comp;
        REAL total     = sum + corrected;
        comp = (total - sum) - corrected;
        sum  = total;
    }

    return sum;
} // dot


/// vector update: y = a*y + b*x
/// thin wrapper around the GPU axpy kernel; operates on vectors sized by
/// the level-1 grid (m_prec[1])
void CudaRrbSolver::axpy(REAL *dY, REAL *dX, REAL a, REAL b)
{
    rrbsolver::axpy<REAL>(dY, dX, a, b, m_prec[1]);
} // vector update

