#include "sgPetsc.h"
#include "ArgvKeeper.h"

// The PETSC_FUNCTION_NAME_CXX will be defined only if the '--with-cxx' is enabled.
// However, it seems that the PETSc installed by pacman was built without this
// configuration.
// see https://petsc.org/release/install/install/
// see petscconfig.h

// #define PETSC_FUNCTION_NAME_CXX __func__
// #define PETSC_FUNCTION_NAME_C __func__

// #include "petscksp.h"
// #include "petscpc.h"
// #include "petscsys.h"
#include <petsc.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
// #include "omp.h"

/**
 * KNOWLEDGES ABOUT PETSc.
 *
 * PETSc/TAO is a suite of data structures and routines that provide the building blocks for
 * implementing large-scale application codes on parallel (and serial) computers. PETSc uses the MPI
 * standard for all distributed memory communication.
 *
 * PETSc can be used to provide a “MPI parallel linear solver” in an otherwise sequential or OpenMP
 * parallel code. This approach can provide modest improvements in the application time by utilizing
 * modest numbers of MPI processes. See PCMPI for details on how to utilize the PETSc MPI linear
 * solver server.
 *
 * The PETSc infrastructure creates a foundation for building large-scale applications.
 *
 * Note that PETSc users need not program much message passing directly with MPI, but they must be
 * familiar with the basic concepts of message passing and distributed memory computing.
 *
 * Numerical computing today has multiple levels of parallelism (concurrency).
 * - Low-level, single instruction multiple data (SIMD) parallelism or, somewhat similar,
 *  on-GPU parallelism,
 * - medium-level, multiple instruction multiple data shared memory parallelism (thread parallelism),
 *  and
 * - high-level, distributed memory parallelism.
 *
 * PETSc supports all these parallelism levels, but its strongest support is for MPI-based
 * distributed memory parallelism.
 *
 * Since PETSc uses the message-passing model for parallel programming and employs MPI for all
 * interprocessor communication, the user is free to employ MPI routines as needed throughout an
 * application code. However, by default the user is shielded from many of the details of message
 * passing within PETSc, since these are hidden within parallel objects, such as vectors, matrices,
 * and solvers. In addition, PETSc provides tools such as generalized vector scatters/gathers and
 * distributed arrays to assist in the management of parallel data.
 *
 * The primary difference between the sequential and parallel cases is that each processor forms
 * only its local part of the matrix and vectors in the parallel case.
 *
 * In the GPU programming model that PETSc uses, the GPU memory is distinct from the CPU memory.
 * This means that data that resides on the CPU memory must be copied to the GPU (often, this copy
 * is done automatically by the libraries, and the user does not need to manage it) if one wishes
 * to use the GPU computational power on it. This memory copy is slow compared to the GPU speed;
 * hence, it is crucial to minimize these copies. This often translates to trying to do almost
 * all the computation on the GPU and not constantly switching between computations on the CPU
 * and the GPU on the same data.
 *
 * When using any of the complex numbers versions of PETSc, all vector and matrix elements are
 * treated as complex, even if their imaginary components are zero. Of course, one can elect to
 * use only the real parts of the complex numbers when using the complex versions of the PETSc
 * libraries; however, when working only with real numbers in a code, one should use a version
 * of PETSc for real numbers for best efficiency.
 *
 * There are three abstract basic data objects (classes): index sets, IS, vectors, Vec, and
 * matrices, Mat. Plus, a larger number of abstract algorithm objects (classes) starting
 * with: preconditioners, PC, Krylov solvers, KSP, and so forth.
 *
 * We highly recommend most PETSc users work directly with SNES, rather than using PETSc for
 * the linear problem and writing their own nonlinear solver. Similarly, users should use TS
 * rather than rolling their own time integrators.
 *
 * - Vectors
 *
 * Vectors (denoted by Vec) are used to store discrete PDE solutions, right-hand sides for
 * linear systems, etc. Users can create and manipulate entries in vectors directly with a basic,
 * low-level interface or they can use the PETSc DM objects to connect actions on vectors to the
 * type of discretization and grid that they are working with. These higher-level interfaces
 * handle much of the details of the interactions with vectors and hence, are preferred in
 * most situations.
 *
 * PETSc provides many ways to create vectors. The most basic, where the user is responsible
 * for managing the parallel distribution of the vector entries, and a variety of higher-level
 * approaches, based on DM, for classes of problems such as structured grids, staggered grids,
 * unstructured grids, networks, and particles.
 *
 * For applications running in parallel that involve multi-dimensional structured grids,
 * unstructured grids, networks, etc., it is cumbersome for users to explicitly manage the needed
 * local and global sizes of the vectors. Hence, PETSc provides a powerful abstract object called
 * the DM to help manage the vectors and matrices needed for such applications.
 *
 * DMDA are intended for use with logically structured rectangular grids when communication of
 * nonlocal data is needed before certain local computations can occur.
 *
 * The DMDA object contains parallel data layout information and communication information and
 * is used to create vectors and matrices with the proper layout.
 *
 * Note that addition and insertion calls to VecSetValues() cannot be mixed. Instead, one must
 * add and insert vector elements in phases, with intervening calls to the assembly routines.
 *
 * You can call VecGetValues() to pull local values from a vector (but not off-process values).
 *
 * It is important to note that VecGetArray() and VecRestoreArray() do not copy the vector elements;
 * they merely give users direct access to the vector elements. Thus, these routines require
 * essentially no time to call and can be used efficiently.
 *
 * vec-stash is used during the assembly process to store values that belong to other processors.
 *
 * - Index sets (IS)
 *
 * To facilitate creating general vector scatters and gathers used, for example, in updating
 * ghost points for problems for which no DM currently exists, PETSc employs the concept of an
 * index set, via the IS class. An index set, a generalization of a set of integer indices,
 * is used to define scatters, gathers, and similar operations on vectors and matrices.
 *
 * - Matrices
 *
 * Currently, all PETSc parallel matrix formats are partitioned by contiguous chunks of rows
 * across the processors.
 *
 * PETSc automatically generates appropriately partitioned matrices and vectors when MatCreate()
 * and VecCreate() are used with the same communicator.
 * The user can alternatively specify the local vector and matrix dimensions when more sophisticated
 * partitioning is needed (replacing the PETSC_DECIDE argument in the VecSetSizes() statement below).
 *
 * For almost all unstructured grid computation, the distribution of portions of the grid across
 * the process’s work load and memory can have a very large impact on performance. In most PDE
 * calculations the grid partitioning and distribution across the processes can (and should) be
 * done in a “pre-processing” step before the numerical computations.
 *
 * Performance tuning note: For problems of substantial size, preallocation of matrix memory is
 * crucial for attaining good performance.
 *
 * The stash is where elements of a matrix destined to be stored on other processors are kept
 * until matrix assembly is done.
 *
 * To develop an application program that uses PETSc, start to modify the program to develop your
 * application. For adding PETSc to an existing application, slowly start utilizing PETSc functionality
 * in your code, and ensure that your code continues to build and run correctly
 *
 * @see https://petsc.org/release/
 * @see Balay S. PETSc/TAO Users Manual, Revision 3.22. Argonne National Laboratory, 2025.
 * @see Bueler E. PETSc for Partial Differential Equations: Numerical Solutions in C and Python. 2020.
 */

// the following codes are re-formed from a simple uniprocessor example.
// see https://petsc.org/release/manual/getting_started/

using namespace SG::Algebra;

namespace  // Anonymous
{
    // Conversion helpers between PetscScalar and the library's scalar types.
    // PETSc is configured with either real or complex scalars; the
    // PETSC_HAVE_COMPLEX guards select the matching conversion at compile time.

    // PetscScalar -> C++ scalar.
    template <typename TDataType>
    TDataType P2C (const PetscScalar& petsc_value);

    template <>
    Real_t P2C<Real_t> (const PetscScalar& petsc_value)
    {
#if defined(PETSC_HAVE_COMPLEX)
        // Complex build: keep only the real component.
        return PetscRealPart (petsc_value);
#else
        return petsc_value;
#endif
    }

    template <>
    Complex_t P2C<Complex_t> (const PetscScalar& petsc_value)
    {
#if defined(PETSC_HAVE_COMPLEX)
        return Complex_t (PetscRealPart (petsc_value), PetscImaginaryPart (petsc_value));
#else
        // Real build: PetscScalar carries no imaginary part, so it is zero here.
        return Complex_t (petsc_value, 0.0);
#endif
    }

    // C++ scalar -> PetscScalar.
    template <typename TDataType>
    PetscScalar C2P (const TDataType& value);

    template <>
    PetscScalar C2P<Real_t> (const Real_t& value)
    {
#if defined(PETSC_HAVE_COMPLEX)
        return PetscCMPLX (value, 0.0);
#else
        return value;
#endif
    }

    template <>
    PetscScalar C2P<Complex_t> (const Complex_t& value)
    {
#if defined(PETSC_HAVE_COMPLEX)
        return PetscCMPLX (value.real (), value.imag ());
#else
        // Real build: the imaginary component cannot be represented and is dropped.
        return value.real ();
#endif
    }
}  // namespace

namespace SG 
{
    namespace Algebra
    {
        /// Private PETSc state owned by TPetscKsp (pimpl).
        /// Handles are default-initialized to null so that destruction is safe
        /// even if the solver was never fully set up (PETSc *_Destroy routines
        /// accept null handles).
        struct PetscKspPrivate
        {
            KSP ksp = nullptr; /* linear solver context */
            PC  pc  = nullptr; /* preconditioner context (borrowed from ksp via KSPGetPC) */
            Mat A   = nullptr; /* linear system matrix */
        };
    }  // namespace SG::Algebra
}


//=============================================================================
// PetscEnvironment

PetscEnvironment::PetscEnvironment (int argc, char** argv)
    : MpiEnvironment(argc, argv)
{
    // Bring up the PETSc runtime for the lifetime of this environment object.
    // PetscInitialize() automatically calls MPI_Init() if MPI has not been previously initialized.
    // In certain circumstances in which MPI needs to be initialized directly (or is initialized by
    // some other library), the user can first call MPI_Init() (or have the other library do it),
    // and then call PetscInitialize(). By default, PetscInitialize() sets the PETSc "world"
    // communicator, given by PETSC_COMM_WORLD, to MPI_COMM_WORLD.
    // No options database file ("") and no help string (nullptr) are supplied here.
    PetscInitialize (&argc, &argv, "", nullptr);
}

PetscEnvironment::~PetscEnvironment ()
{
    // Tear down the PETSc runtime. PetscFinalize() calls MPI_Finalize() if
    // PetscInitialize() began MPI. If MPI was initiated externally from PETSc
    // (by either the user or another software package), the user is responsible
    // for calling MPI_Finalize().
    PetscFinalize ();
}

bool PetscEnvironment::initialized ()
{
    PetscBool      is_initialized = PETSC_FALSE;
    PetscErrorCode err            = PetscInitialized (&is_initialized);
    return (err == PETSC_SUCCESS) && is_initialized;
}

bool PetscEnvironment::finalized ()
{
    PetscBool      is_finalized = PETSC_FALSE;
    PetscErrorCode err          = PetscFinalized (&is_finalized);
    return (err == PETSC_SUCCESS) && is_finalized;
}

//=============================================================================
// PetscKsp

template <typename TDataType>
TPetscKsp<TDataType>::TPetscKsp ()
{
    // Private PETSc state; value-initialization zeroes the opaque handles.
    d = new PetscKspPrivate ();

    // All PETSc programs use MPI for message-passing communication, so the MPI
    // context must exist before the linear solver is created. The runtime is
    // expected to be brought up externally (see PetscEnvironment, which wraps
    // PetscInitialize()/PetscFinalize()); this class deliberately does not
    // initialize PETSc itself.
    // see https://github.com/SalomePlatform/kernel/tree/master/src/ArgvKeeper

    // Create the Krylov solver context on the PETSc world communicator.
    KSPCreate (PETSC_COMM_WORLD, &d->ksp);

    // Honor runtime options such as:
    //     -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
    // These options override hard-coded settings as long as
    // KSPSetFromOptions() is called after the other customization routines.
    KSPSetFromOptions (d->ksp);
}

template <typename TDataType>
TPetscKsp<TDataType>::~TPetscKsp ()
{
    /*
     Free work space. All PETSc objects should be destroyed when they are no
     longer needed. The PETSc *_Destroy routines accept null handles, so this
     is safe even if compute() was never called.

     Note: PetscFinalize() is intentionally NOT called here; the lifetime of
     the PETSc runtime is managed by PetscEnvironment.
    */
    if (d)
    {
        // Fix: the system matrix created in compute() was previously never
        // destroyed, leaking a Mat per solver instance.
        MatDestroy (&d->A);

        // d->pc is borrowed from the KSP (via KSPGetPC) and is destroyed
        // together with it; no separate PCDestroy() is needed.
        KSPDestroy (&d->ksp);

        delete d;
        d = nullptr;
    }
}

template <typename TDataType>
void TPetscKsp<TDataType>::compute (const SparseMatrixType& matrix)
{
    // The accessors used below are not const-qualified on SparseMatrixType,
    // hence the const_cast; the matrix is only read here.
    SparseMatrixType& mat = const_cast<SparseMatrixType&> (matrix);

    auto rows = mat.rows ();
    auto cols = mat.cols ();

    auto row_start = mat.row_start ();
    auto row_end   = mat.row_end ();

    PetscInt M = rows;
    PetscInt N = cols;

    // Fix: if compute() was called before, release the previous system matrix
    // first (MatDestroy is a no-op on a null handle); otherwise it would leak.
    MatDestroy (&d->A);

    /*
     Create matrix.  When using MatCreate(), the matrix format can
     be specified at runtime.

     Performance tuning note:  For problems of substantial size,
     preallocation of matrix memory is crucial for attaining good
     performance. See the matrix chapter of the users manual for details.
    */
    MatCreate (PETSC_COMM_WORLD, &d->A);
    MatSetSizes (d->A, PETSC_DECIDE, PETSC_DECIDE, M, N);
    // PetscCall(MatSetFromOptions(d->A));
    MatSetUp (d->A);

    // Dump the locally owned rows of the CSR representation into the PETSc Mat.
    // The default matrix representation within PETSc is the general sparse AIJ
    // format (also called compressed sparse row, CSR).
    // see https://petsc.org/release/manual/mat/#ch-matrices
    std::vector<Index_t>   outerIndex;
    std::vector<Index_t>   innerIndex;
    std::vector<TDataType> values;

    mat.get_csr (outerIndex, innerIndex, values);

    for (auto i = row_start; i < row_end; ++i)
    {
        PetscInt r       = i;
        PetscInt c_start = outerIndex[r - row_start];
        PetscInt c_end   = outerIndex[r - row_start + 1];

        for (auto j = c_start; j < c_end; ++j)
        {
            PetscInt c = innerIndex[j];
            // Fix: route the element through the same scalar-conversion helper
            // used by solve(), so real/complex PETSc builds are handled
            // uniformly instead of relying on an implicit conversion.
            PetscScalar value = C2P<TDataType> (values[j]);

            MatSetValues (d->A, 1, &r, 1, &c, &value, INSERT_VALUES);
        }
    }
    MatAssemblyBegin (d->A, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd (d->A, MAT_FINAL_ASSEMBLY);

    /*
     Set operators. Here the matrix that defines the linear system
     also serves as the matrix that defines the preconditioner.
    */
    KSPSetOperators (d->ksp, d->A, d->A);

    /*
     Set linear solver defaults for this problem (optional).
     - By extracting the PC context from the KSP context, we can directly
       call any PC routines to set various options.
     - These parameters could alternatively be specified at runtime via
       KSPSetFromOptions().
    */
    KSPGetPC (d->ksp, &d->pc);
    PCSetType (d->pc, PCJACOBI);
    KSPSetTolerances (d->ksp, 1.e-5, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT);
}

template <typename TDataType>
void TPetscKsp<TDataType>::solve (const VectorType& B, VectorType& X) const
{
    auto rows      = B.rows ();
    auto row_start = B.row_start ();
    auto row_end   = B.row_end ();

    PetscInt M = rows;

    // Size the output to match the RHS.
    X.resize (M);

    Vec x, b; /* approx solution, RHS */

    /*
     Create the solution vector from scratch, then duplicate its layout for
     the RHS. Note that a Vec must be given a type (here via VecSetUp())
     before it can be used.
     see https://petsc.org/release/manualpages/Vec/VecCreate/
    */
    VecCreate (PETSC_COMM_WORLD, &x);
    VecSetSizes (x, PETSC_DECIDE, M);
    VecSetUp (x);
    VecDuplicate (x, &b);

    // Scatter the locally owned entries of B into the PETSc RHS vector.
    for (auto i = row_start; i < row_end; ++i)
    {
        TDataType   y     = B[i];
        PetscScalar value = C2P<TDataType> (y);
        VecSetValue (b, static_cast<PetscInt> (i), value, INSERT_VALUES);
    }
    VecAssemblyBegin (b);
    VecAssemblyEnd (b);

    /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
    KSPSolve (d->ksp, b, x);

    // Fetch the numerical solution. VecGetValues() can only read locally
    // owned entries, which is exactly the [row_start, row_end) range.
    for (auto i = row_start; i < row_end; ++i)
    {
        // Fix: VecGetValues() requires a PetscInt index; the previous code
        // passed the address of a loop variable of deduced (possibly
        // different) integer type.
        PetscInt    ix = i;
        PetscScalar y;
        VecGetValues (x, 1, &ix, &y);
        TDataType value = P2C<TDataType> (y);
        X.set (i, value);
    }
    X.assemble ();

    // Release the work vectors.
    VecDestroy (&x);
    VecDestroy (&b);
}

template <typename TDataType>
void TPetscKsp<TDataType>::solve (const SparseMatrixType& B, SparseMatrixType& X) const
{
    // Sparse right-hand-side solves are not implemented for the PETSc backend.
    // Fix: the old message named 'computeInverse' although this function is
    // 'solve', and the diagnostic went to stdout instead of stderr.
    fprintf (stderr, "\nERROR Executed a null function 'void TPetscKsp<TDataType>::solve'! \n");
    exit (4);
}

template <typename TDataType>
Info_t TPetscKsp<TDataType>::info () const
{
    // No solver status is tracked for the PETSc backend yet; always report 0.
    // NOTE(review): consider mapping KSPGetConvergedReason() onto Info_t.
    // Use a named cast instead of the previous C-style cast.
    return static_cast<Info_t> (0);
}

//=============================================================================
// Explicit instantiations to hide the underlying implementation.
namespace SG
{
    namespace Algebra
    {
        // Explicit instantiations for the two supported scalar types, so the
        // template member definitions in this translation unit need not be
        // exposed in the public header.
        template class ALGEBRA_EXPORT TPetscKsp<Real_t>;
        template class ALGEBRA_EXPORT TPetscKsp<Complex_t>;
    }  // namespace Algebra
}  // namespace SG

//=============================================================================
// Explicit specializations

//------------------------------------------------------------------
// Real_t

template <>
const char* ALGEBRA_EXPORT TPetscKsp<Real_t>::getClassName ()
{
    // Human-readable class name for the real-valued solver instantiation.
    return "TPetscKsp<Real_t>";
}

//------------------------------------------------------------------
// Complex_t

template <>
const char* ALGEBRA_EXPORT TPetscKsp<Complex_t>::getClassName ()
{
    // Human-readable class name for the complex-valued solver instantiation.
    return "TPetscKsp<Complex_t>";
}
