#include "sgCPardiso.h"
#include "mpi.h"
#include "mkl_cluster_sparse_solver.h"

/**
 * NOTES ON the Parallel Direct Sparse Solver for Clusters interface.
 *
 * The Parallel Direct Sparse Solver for Clusters interface solves large linear systems
 * of equations with sparse matrices on clusters.
 *
 * @see https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2025-0/parallel-direct-sp-solver-for-clusters-iface.html
 */

// Define the format to printf MKL_INT values
#if !defined(MKL_ILP64)
#define IFORMAT "%i"
#else
#define IFORMAT "%lli"
#endif

using namespace SG::Algebra;

namespace SG::Algebra
{
    /**
     * Private implementation (pimpl) holding all state shared by the MKL
     * Cluster Sparse Solver across its analyze / factorize / solve phases.
     */
    template <typename TDataType>
    struct CPardisoPrivate
    {
        /*
         * MPI communicator in Fortran form. The cluster solver uses the
         * Fortran MPI communicator internally, so the C communicator is
         * converted with MPI_Comm_c2f(). See the examples in the
         * <install_dir>/examples directory.
         */
        MPI_Fint comm; 

        /* Local part of the sparse matrix in three-array format
         * (values / row pointers / column indices), converted to MKL_INT. */
        std::vector<TDataType> a;
        std::vector<MKL_INT>   ia;
        std::vector<MKL_INT>   ja;

        MKL_INT mtype = 11; /* Matrix type: 11 = real unsymmetric, 13 = complex unsymmetric. */
        MKL_INT nrhs = 1; /* Number of right-hand sides per solve. */
        /* Opaque internal solver memory handle. Must be zeroed before the */
        /* first solver call and never modified afterwards. */
        /* 32-bit: int pt[64]; 64-bit: long int pt[64] */
        /* or void *pt[64] should be OK on both architectures */
        void* pt[64];

        /* PARDISO control parameters (see the iparm documentation). */
        MKL_INT iparm[64];
        MKL_INT maxfct, mnum, error, msglvl;
        /* Dummies passed where the solver ignores an argument. */
        TDataType ddum; /* Scalar dummy. */
        MKL_INT   idum; /* Integer dummy. */
    };
}  // namespace SG::Algebra

//=============================================================================
// CPardiso

template <typename TDataType>
CPardiso<TDataType>::CPardiso ()
{
    // Allocate the private solver state, then reset it to known defaults.
    d = new CPardisoPrivate<TDataType> {};
    init ();
}

template <typename TDataType>
CPardiso<TDataType>::~CPardiso ()
{
    // Release any PARDISO internal memory before destroying the pimpl.
    clearFactors ();

    // 'delete' on a null pointer is a no-op, so no explicit check is needed.
    delete d;
}

template <typename TDataType>
void CPardiso<TDataType>::init ()
{
    // Re-initialising invalidates any factorization held so far.
    m_factorizationIsOk = false;

    // The cluster solver works with the Fortran representation of the MPI
    // communicator, hence the MPI_Comm_c2f() conversion.
    d->comm = MPI_Comm_c2f (MPI_COMM_WORLD);

    // Select the PARDISO matrix type from the instantiated scalar type:
    // 11 = real unsymmetric, 13 = complex unsymmetric.
    if (typeid (Complex_t) == typeid (TDataType))
    {
        d->mtype = 13;
    }
    else if (typeid (double) == typeid (TDataType))
    {
        d->mtype = 11;
    }

    d->nrhs = 1; /* Single right-hand side per solve. */

    /* -------------------------------------------------------------------- */
    /* .. Setup Pardiso control parameters (iparm).                         */
    /* -------------------------------------------------------------------- */
    for (auto& param : d->iparm)
    {
        param = 0;
    }
    d->iparm[0]  = 1;  /* Do not rely on solver defaults */
    d->iparm[1]  = 2;  /* METIS fill-in reordering */
    d->iparm[3]  = 0;  /* Pure direct algorithm (no iterative-direct) */
    d->iparm[4]  = 0;  /* No user fill-in reducing permutation */
    d->iparm[5]  = 0;  /* Solution is written into x, not back into b */
    d->iparm[6]  = 0;  /* (not in use) */
    d->iparm[7]  = 2;  /* Up to two iterative refinement steps */
    d->iparm[8]  = 0;  /* (not in use) */
    d->iparm[9]  = 13; /* Perturb pivot elements with 1E-13 */
    d->iparm[10] = 1;  /* Nonsymmetric permutation and scaling (MPS) enabled */
    d->iparm[11] = 0;  /* Plain A*X = B solve (no conjugate/transpose) */
    d->iparm[12] = 1;  /* Weighted matching on (default for non-symmetric) */
    d->iparm[13] = 0;  /* Output: number of perturbed pivots */
    d->iparm[14] = 0;  /* (not in use) */
    d->iparm[15] = 0;  /* (not in use) */
    d->iparm[16] = 0;  /* (not in use) */
    d->iparm[17] = -1; /* Output: number of nonzeros in the LU factors */
    d->iparm[18] = -1; /* Output: Mflops for the LU factorization */
    d->iparm[19] = 0;  /* Output: number of CG iterations */
    d->iparm[34] = 1;  /* Zero-based (C-style) indexing of ia, ja, and perm */
    d->iparm[39] = 2;  /* Matrix, RHS and solution are all MPI-distributed */
    /* iparm[40]/iparm[41]: first/last row of this rank's input domain for
     * the distributed assembled format (iparm[39] > 0). Placeholders here;
     * the real range is filled in by compute(). */
    d->iparm[40] = 0;
    d->iparm[41] = 0;

    d->maxfct = 1; /* Keep at most one factorization in memory... */
    d->mnum   = 1; /* ...and use that one. */
    d->msglvl = 0; /* Quiet: no statistical output. */
    d->error  = 0; /* Clear the error flag. */

    /* -------------------------------------------------------------------- */
    /* The opaque solver handle must be zeroed before the FIRST call to the */
    /* solver and must not be touched afterwards.                           */
    /* -------------------------------------------------------------------- */
    for (auto& handle : d->pt)
    {
        handle = nullptr;
    }

#ifdef SG_VERBOSE
    d->msglvl    = 1;  /* Print statistical information */
    d->iparm[26] = 1;  /* Enable the input-matrix checker */
#endif
}

template <typename TDataType>
void CPardiso<TDataType>::clearFactors ()
{
    // Nothing to release unless a factorization was actually computed.
    if (!m_factorizationIsOk)  // has a factorization been computed?
    {
        return;
    }
    // Matrix order: ia stores n + 1 row pointers.
    MKL_INT n = d->ia.size () - 1;

    /* -------------------------------------------------------------------- */
    /* .. Termination and release of memory. */
    /* -------------------------------------------------------------------- */
    MKL_INT phase = -1; /* Phase -1: release all internal solver memory. */
    CLUSTER_SPARSE_SOLVER (d->pt,
                           &d->maxfct,
                           &d->mnum,
                           &d->mtype,
                           &phase,
                           &n,
                           &d->ddum,
                           d->ia.data (),
                           d->ja.data (),
                           &d->idum,
                           &d->nrhs,
                           d->iparm,
                           &d->msglvl,
                           &d->ddum,
                           &d->ddum,
                           &d->comm,
                           &d->error);

    // Flag that no LU factorization is available any more.
    m_factorizationIsOk = false;
}

/**
 * Analyze and factorize the given distributed sparse matrix (phases 11 + 22).
 * On success the factorization is kept for subsequent solve() calls.
 * Calls exit() on any solver error.
 */
template <typename TDataType>
void CPardiso<TDataType>::compute (const SparseMatrixType& matrix)
{
    // The solver can be re-used for another matrix equation:
    // drop the old LU factorization and reset all control parameters.
    clearFactors ();

    init ();

    // get_csr()/row_start()/row_end() are not const-qualified on the matrix
    // type, hence the const_cast; the matrix data itself is not modified.
    SparseMatrixType& mat = const_cast<SparseMatrixType&> (matrix);

    MKL_INT M = mat.rows ();
    MKL_INT N = mat.cols ();

    assert (M == N && "A Square sparse matrix should be specified!");

    MKL_INT row_start = mat.row_start ();
    MKL_INT row_end   = mat.row_end ();

    /*
     * Distributed assembled input (iparm[39] = 2): iparm[40]/iparm[41] give
     * the inclusive [begin, end] row range of the matrix A, RHS, and solution
     * vector owned by this MPI process.
     */
    d->iparm[40] = row_start;
    d->iparm[41] = row_end - 1;

    std::vector<Index_t>   outerIndex;
    std::vector<Index_t>   innerIndex;
    std::vector<TDataType> values;

    mat.get_csr (outerIndex, innerIndex, values);

    // Copy the CSR arrays into MKL_INT/TDataType storage. assign() performs
    // the element-wise conversion in one pass and avoids the signed/unsigned
    // comparison of manual index loops.
    d->a.assign (values.begin (), values.end ());
    d->ia.assign (outerIndex.begin (), outerIndex.end ());
    d->ja.assign (innerIndex.begin (), innerIndex.end ());

    /* -------------------------------------------------------------------- */
    /* .. Reordering and Symbolic Factorization (phase 11). This step also */
    /* allocates all memory that is necessary for the factorization. */
    /* -------------------------------------------------------------------- */
    MKL_INT phase = 11;
    CLUSTER_SPARSE_SOLVER (d->pt,
                           &d->maxfct,
                           &d->mnum,
                           &d->mtype,
                           &phase,
                           &M,
                           d->a.data (),   // .data() is well-defined even for empty vectors
                           d->ia.data (),
                           d->ja.data (),
                           &d->idum,
                           &d->nrhs,
                           d->iparm,
                           &d->msglvl,
                           &d->ddum,
                           &d->ddum,
                           &d->comm,
                           &d->error);
    if (d->error != 0)
    {
        printf ("\nERROR during symbolic factorization: \n" IFORMAT, d->error);
        exit (1);
    }

    /* -------------------------------------------------------------------- */
    /* .. Numerical factorization (phase 22). */
    /* -------------------------------------------------------------------- */
    phase = 22;
    CLUSTER_SPARSE_SOLVER (d->pt,
                           &d->maxfct,
                           &d->mnum,
                           &d->mtype,
                           &phase,
                           &M,
                           d->a.data (),
                           d->ia.data (),
                           d->ja.data (),
                           &d->idum,
                           &d->nrhs,
                           d->iparm,
                           &d->msglvl,
                           &d->ddum,
                           &d->ddum,
                           &d->comm,
                           &d->error);
    if (d->error != 0)
    {
        printf ("\nERROR during numerical factorization: \n" IFORMAT, d->error);
        exit (2);
    }

    // The LU factorization is now available for solve().
    m_factorizationIsOk = true;
}

/**
 * Solve A*X = B (phase 33) using the factorization produced by compute().
 * B and X are distributed vectors; only rows [row_start, row_end) are owned
 * by this MPI process. Calls exit() on any solver error.
 */
template <typename TDataType>
void CPardiso<TDataType>::solve (const VectorType& B, VectorType& X) const
{
    // Global system size.
    MKL_INT M = B.rows ();

    // Local row range owned by this rank (distributed input, iparm[39] = 2).
    MKL_INT row_start = B.row_start ();
    MKL_INT row_end   = B.row_end ();

    // On entry, contains the right-hand side vector/matrix B, which is placed in memory contiguously.
    // The b[+k*nrhs] element must hold the i-th component of k-th right-hand side vector.
    std::vector<TDataType> b (row_end - row_start);
    for (auto i = row_start; i < row_end; ++i)
    {
        b[i - row_start] = B (i);
    }

    // If iparm[5]=0 it contains solution vector/matrix X, which is placed contiguously in memory.
    // The x[i + k*n] element must hold the i-th component of the k-th solution vector.
    std::vector<TDataType> x (row_end - row_start);

    /* -------------------------------------------------------------------- */
    /* .. Back substitution and iterative refinement (phase 33). */
    /* -------------------------------------------------------------------- */
    MKL_INT phase = 33;

    // NOTE(review): iparm[11] = 2 requests a (conjugate) transpose solve,
    // presumably because the stored arrays describe the transposed system
    // (CSC vs. CSR storage) -- confirm against how compute() fills a/ia/ja.
    // Also note this const method mutates solver state through the pimpl.
    d->iparm[11] = 2;  // Transpose solve is used for systems in CSC format

    CLUSTER_SPARSE_SOLVER (d->pt,
                           &d->maxfct,
                           &d->mnum,
                           &d->mtype,
                           &phase,
                           &M,
                           &d->a[0],
                           &d->ia[0],
                           &d->ja[0],
                           &d->idum,
                           &d->nrhs,
                           d->iparm,
                           &d->msglvl,
                           b.data (),
                           x.data (),
                           &d->comm,
                           &d->error);
    if (d->error != 0)
    {
        printf ("\nERROR during solution: \n" IFORMAT, d->error);
        exit (3);
    }

    // Scatter the local solution back into the distributed result vector.
    X.resize (M);
    for (auto i = row_start; i < row_end; ++i)
    {
        X.set (i, x[i-row_start]);
    }
    X.assemble ();
}


/**
 * Sparse right-hand-side / sparse solution solve.
 * Not implemented by this wrapper: reports the error and terminates.
 */
template <typename TDataType>
void CPardiso<TDataType>::solve (const SparseMatrixType& B, SparseMatrixType& X) const
{
    (void) B;
    (void) X;
    // The original message referenced 'computeInverse'; report the actual
    // function so the failure is traceable.
    printf ("\nERROR Executed a null function 'void CPardiso<TDataType>::solve (const SparseMatrixType&, SparseMatrixType&)'! \n");
    exit (4);
}

/**
 * @return the last MKL error code reported by CLUSTER_SPARSE_SOLVER
 *         (0 means success).
 */
template <typename TDataType>
Info_t CPardiso<TDataType>::info () const
{
    // Named cast instead of C-style cast: greppable and intent-revealing.
    return static_cast<Info_t> (d->error);
}

//=============================================================================
// Explicit instantiations to hide the underlying implementation.
namespace SG
{
    namespace Algebra
    {
        // Explicit instantiations: the template definitions live only in this
        // translation unit, so instantiate and export the supported scalar
        // types here to hide the underlying implementation.
        template class ALGEBRA_EXPORT CPardiso<Real_t>;
        template class ALGEBRA_EXPORT CPardiso<Complex_t>;
    }  // namespace Algebra
}  // namespace SG

//=============================================================================
// Explicit specilizations

//------------------------------------------------------------------
// Real_t

template <>
const char* ALGEBRA_EXPORT CPardiso<Real_t>::getClassName ()
{
    // Human-readable type tag for the real-valued instantiation.
    static constexpr const char* kClassName = "CPardiso<Real_t>";
    return kClassName;
}

//------------------------------------------------------------------
// Complex_t
template <>
const char* ALGEBRA_EXPORT CPardiso<Complex_t>::getClassName ()
{
    // Human-readable type tag for the complex-valued instantiation.
    static constexpr const char* kClassName = "CPardiso<Complex_t>";
    return kClassName;
}