#include "femSolids.h"
#include "MyTime.h"
#include "TimeFunction.h"
#include "ElementBase.h"
#include "SolutionDataSolid.h"
#include <chrono>
#include <string>
#include "log.h"
#include "mpi.h"
#include "util.h"
#include "NewMaterial.h"
#include "SolverPetsc.h"


extern  std::vector<unique_ptr<TimeFunction> > timeFunction;
extern  MyTime myTime;
extern  bool debug;


// Sets up the linear solver for the solid problem.
//
// NOTE(review): the 'slv' argument is currently unused -- a PETSc solver is
// always created regardless of its value. Kept for interface compatibility.
//
// Returns 0 on success.
int femSolids::setSolver(int slv)
{
    // create the PETSc solver wrapper owned by this object
    solverPetsc = make_unique<SolverPetsc>();

    // build the DOF numbering and the sparse-matrix nonzero pattern
    // (must run before solver parameters are set)
    prepareMatrixPattern();

    solverPetsc->setSolverAndParameters();

    // currently a no-op stub (its body is commented out elsewhere in this file)
    setSolverDataForFullyImplicit();

    // external force vector sized for ALL global DOFs, including constrained ones
    solverPetsc->Fext.resize(nNode_global*ndof);

    solverOK = true;

    // initial conditions are only needed for transient (non-static) schemes
    if( ( convert2TISSOLID(SolnData.timeIntegrationScheme) != TISSOLID::STATIC) )
      setInitialConditions();

    return 0;
}



// TODO(review): clarify this note -- it appears to mark prepareMatrixPattern()
// as the location of the first fix applied to this file.
// Builds the global DOF numbering, the per-element assembly index arrays and
// the PETSc sparse-matrix nonzero pattern, then pre-inserts zeros so the
// matrix structure is fixed before the first real assembly.
//
// Produces (member data):
//   - ntotdofs_global/ntotdofs_local and the owned row range [row_start, row_end];
//   - forAssyVecAll / globalDOFnumsAll and elems[ee]->forAssyVec
//     (global equation number per element DOF, or -1 for constrained DOFs);
//   - assyForSoln (solver DOF index -> global node*ndof+dof index);
//   - an initialised solverPetsc matrix with preallocated pattern, and reacVec.
//
// Returns 0 on success; PETSc errors abort via CHKERRQ.
int femSolids::prepareMatrixPattern()
{
    PetscPrintf(MPI_COMM_WORLD, "\n\n     femSolids::prepareMatrixPattern()  .... STARTED ...\n\n");

    // NOTE(review): most of these locals are unused leftovers (r1, c1, iii, e,
    // val1, val2, a, b, ll, pp, nnz, e1, e2, nn, side, start1, start2, nr1,
    // nr2, ...) and are candidates for removal.
    int  r, c, r1, c1, count=0, count1=0, count2=0, iii, e, ind, nsize;
    int  npElem, val1, val2, n1, n2, a, b, ll, pp, nnz;
    int  ind1, ind2, ee, ii, jj, kk, e1, e2, nn, dof, size1, size2;
    int  side, start1, start2, nr1, nr2, count_diag, count_offdiag, tempInt;

    /////////////////////////////////////////////////////////////
    //
    // prepare the matrix pattern
    /////////////////////////////////////////////////////////////


    // NodeDofArray[node][dof] : global equation number of a free DOF, -1 if constrained.
    // NodeType[node][dof]     : true if the DOF carries a Dirichlet BC.
    vector<vector<int> >     NodeDofArray;
    vector<vector<bool> >    NodeType;

    // set sizes of some data arrays
    vector<bool>  vecBoolTempFalse(ndof, false);
    NodeType.resize(nNode_global, vecBoolTempFalse);

    vector<int>  vecIntTempM1(ndof, -1);
    NodeDofArray.resize(nNode_global, vecIntTempM1);

    // fix the specified Dirichlet BCs
    setSpecifiedDOFs(NodeType);


    // number the free DOFs sequentially, node by node, dof by dof
    ntotdofs_global = 0;
    for(ii=0;ii<nNode_global;++ii)
    {
      for(jj=0;jj<ndof;++jj)
      {
        if(!NodeType[ii][jj])
        {
          NodeDofArray[ii][jj] = ntotdofs_global++;
        }
      }
    }
    MPI_Barrier(MPI_COMM_WORLD);

    // serial defaults: this process owns every row
    ntotdofs_local = ntotdofs_global;
    row_start      =  0;
    row_end        = ntotdofs_global-1;

    dispDOF = ntotdofs_global;

    if(n_mpi_procs > 1)
    {
      // compute first and last row indices of the rows owned by the local processor
      // (1e9 / -1e9 act as +/- infinity sentinels for the min/max scan)
      row_start  =  1e9;
      row_end    = -1e9;
      ntotdofs_local = 0;
      for(ii=node_start; ii<=node_end; ii++)
      {
        for(jj=0; jj<ndof; jj++)
        {
          if(NodeType[ii][jj] == false)
          {
            ind = NodeDofArray[ii][jj];
            row_start  = min(row_start, ind);
            row_end    = max(row_end,   ind);
            ntotdofs_local++;
          }
        }
      }

      LOG_INFO("pid: "        + to_string(this_mpi_proc) +
        "\tntotdofs_local: "  + to_string(ntotdofs_local) +
        "\tntotdofs_global: " + to_string(ntotdofs_global) +
        "\trow_start: "       + to_string(row_start) +
        "\trow_end: "         + to_string(row_end) +
        string(__FILE__) + ":" + to_string(__LINE__));

    MPI_Barrier(MPI_COMM_WORLD);

      // check if the sum of local problem sizes is equal to that of global problem size
      ind=0;
      errpetsc = MPI_Allreduce(&ntotdofs_local, &ind, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

      // NOTE(review): a mismatch here is only reported, not treated as fatal
      if(ind != ntotdofs_global)
      {
        cerr << "Sum of local problem sizes is not equal to global size" << endl;
        cout << this_mpi_proc << '\t' << "ind = " << ind << '\t' << ntotdofs_global << endl;
      }
    }
    errpetsc = MPI_Barrier(MPI_COMM_WORLD);
    PetscPrintf(MPI_COMM_WORLD, "\n\n Calculating forAssyVec arrays \n\n");

    forAssyVecAll.resize(nElem_local);
    globalDOFnumsAll.resize(nElem_local);

    //printVector(elem_proc_id);

    // for every locally owned element, record
    //   globalDOFnums : global (node*ndof+dof) index per element DOF
    //   forAssyVec    : global equation number per element DOF (-1 = constrained)
    vector<int> nodeNums, globalDOFnums;
    vector<PetscInt> forAssyVec;
    for(ee=0; ee<nElem_local; ee++)
    {
        nodeNums = elems[ee]->nodeNums;
        npElem = nodeNums.size();

        nsize = ndof*npElem;

        forAssyVec.resize(nsize);
        globalDOFnums.resize(nsize);

        for(ii=0; ii<npElem; ii++)
        {
          n1 = ndof*ii;
          n2 = ndof*nodeNums[ii];

          kk = nodeNums[ii];

          for(dof=0; dof<ndof; dof++)
          {
            globalDOFnums[n1+dof] = n2+dof;

            forAssyVec[n1+dof] = NodeDofArray[kk][dof];
          }
        }
        //printVector(forAssyVec);
        //printVector(globalDOFnums);

        forAssyVecAll[ee]    = forAssyVec;
        globalDOFnumsAll[ee] = globalDOFnums;
    }
    errpetsc = MPI_Barrier(MPI_COMM_WORLD);
    PetscPrintf(MPI_COMM_WORLD, "\n\n Prepared forAssyVec arrays \n\n");


    // assyForSoln maps a solver DOF index back to the global node*ndof+dof index
    assyForSoln.resize(dispDOF);
    count = 0;
    for(ii=0;ii<nNode_global;ii++)
    {
      for(jj=0;jj<ndof;jj++)
      {
        if( NodeDofArray[ii][jj] != -1)
          assyForSoln[count++] = ii*ndof + jj;
      }
    }

    PetscPrintf(MPI_COMM_WORLD, "\n\n Element DOF values initialised \n\n");
    errpetsc = MPI_Barrier(MPI_COMM_WORLD);

    PetscPrintf(MPI_COMM_WORLD, " Total DOF   = %d \t %d \n", ntotdofs_local, ntotdofs_global);


    // process DOF data for pressure
    //
    // NOTE(review): NodeTypePres/IDpres are built but never used below
    ind = nNode_global;
    vector<bool>  NodeTypePres(ind,  false);
    vector<int>   IDpres(ind,  -1);


    // NOTE(review): this loop recomputes the same forAssyVec as the loop above,
    // this time storing it on the element itself; the two loops could be merged.
    for(ee=0;ee<nElem_local;++ee)
    {
      nodeNums = elems[ee]->nodeNums;
      npElem   = nodeNums.size();

      ind = ndof*npElem;

      forAssyVec.resize(ind);

      //printVector(nodeNums);

      for(ii=0; ii<npElem; ++ii)
      {
        ind = ndof*ii;

        kk = nodeNums[ii];

        for(jj=0;jj<ndof;++jj)
        {
          forAssyVec[ind+jj] = NodeDofArray[kk][jj];
        }
      }
      elems[ee]->forAssyVec = forAssyVec;

      //printVector(forAssyVec);
    }

    //prepareDataForPressure();

    totalDOF = dispDOF + presDOF;

    // the arc-length method adds the load factor as one extra unknown
    if(ARC_LENGTH)  totalDOF += 1;


    PetscPrintf(MPI_COMM_WORLD, " Mesh statistics ...\n ");
    PetscPrintf(MPI_COMM_WORLD, " nElem_global     = %d \n", nElem_global);
    PetscPrintf(MPI_COMM_WORLD, " nNode_global     = %d \n", nNode_global);
    PetscPrintf(MPI_COMM_WORLD, " ndof             = %d \n", ndof);
    PetscPrintf(MPI_COMM_WORLD, " dispDOF          = %d \n", dispDOF);
    PetscPrintf(MPI_COMM_WORLD, " presDOF          = %d \n", presDOF);
    PetscPrintf(MPI_COMM_WORLD, " ntotdofs_global  = %d \n", ntotdofs_global);

    PetscPrintf(MPI_COMM_WORLD, "\n element DOF values initialised \n\n");
    PetscPrintf(MPI_COMM_WORLD, "\n Preparing matrix pattern \n\n");

    // matrix pattern needs to be prepared only for the implicit solver

    // forAssyMat[row] collects (with duplicates, pruned later by findUnique)
    // the column indices coupled to each locally owned row
    vector<vector<PetscInt> >  forAssyMat;
    vector<PetscInt>::iterator  it;

    forAssyMat.resize(totalDOF);
    //for(ii=row_start; ii<=row_end; ii++)
      //forAssyMat[ii].reserve(500);

    for(ee=0; ee<nElem_local; ee++)
    {
        forAssyVec = elems[ee]->forAssyVec;
        nsize = forAssyVec.size();

        for(ii=0;ii<nsize;ii++)
        {
          r = forAssyVec[ii];

          if(r != -1)
          {
            // only rows owned by this process are collected
            if(r >= row_start && r <= row_end)
            {
              for(jj=0;jj<nsize;jj++)
              {
                if(forAssyVec[jj] != -1)
                {
                  forAssyMat[r].push_back(forAssyVec[jj]);
                }
              }
            }
          }
        }
    }
    errpetsc = MPI_Barrier(MPI_COMM_WORLD);
    PetscPrintf(MPI_COMM_WORLD, "\n\n Preparing matrix pattern DONE \n\n");


    // per-owned-row nonzero counts split into the diagonal block (columns
    // owned by this process) and the off-diagonal block, as required by
    // PETSc MPIAIJ preallocation
    PetscInt  *diag_nnz, *offdiag_nnz;

    errpetsc  = PetscMalloc1(ntotdofs_local,  &diag_nnz);CHKERRQ(errpetsc);
    errpetsc  = PetscMalloc1(ntotdofs_local,  &offdiag_nnz);CHKERRQ(errpetsc);

    // TODO:: use PetscInt or int
    int  nnz_max_row = 0;
    kk = 0;
    for(ii=row_start; ii<=row_end; ii++)
    {
      // presumably sorts and removes duplicate column indices -- confirm in util.h
      findUnique(forAssyMat[ii]);
      size1 = forAssyMat[ii].size();

      nnz_max_row = max(nnz_max_row, size1);


      count_diag=0, count_offdiag=0;
      for(it=forAssyMat[ii].begin(); it!=forAssyMat[ii].end(); ++it)
      {
        tempInt = *it;

        if(tempInt >= row_start && tempInt <= row_end)
          count_diag++;
        else
          count_offdiag++;
      }

      //cout << " count_diag ..." << ii << '\t' << count_diag << '\t' << count_offdiag << endl;

      diag_nnz[kk]    = count_diag;
      offdiag_nnz[kk] = count_offdiag;
      kk++;
    }

    errpetsc = MPI_Barrier(MPI_COMM_WORLD);
    PetscPrintf(MPI_COMM_WORLD, "\n\n Initialising petsc solver \n\n");

    // Initialize the petsc solver
    solverPetsc->initialise(ntotdofs_local, ntotdofs_global, diag_nnz, offdiag_nnz);
    errpetsc = MPI_Barrier(MPI_COMM_WORLD);


    // NOTE(review): 'errpetsc' is passed with no matching format specifier
    PetscPrintf(MPI_COMM_WORLD, " Initialise the Matrix pattern \n", errpetsc);

    // insert explicit zeros to lock in the nonzero pattern
    // NOTE(review): this is a variable-length array, a compiler extension,
    // not standard C++ -- consider std::vector<PetscScalar> instead
    PetscScalar  Klocal[nnz_max_row];
    for(ii=0; ii<nnz_max_row; ii++)  Klocal[ii] = 0.0;

    PetscInt  rows[1];
    size1 = 1;
    for(ii=row_start; ii<=row_end; ii++)
    {
      rows[0] = ii;

      forAssyVec = forAssyMat[ii];
      size2 = forAssyVec.size();

      // NOTE(review): &forAssyVec[0] is undefined for an empty row -- confirm
      // every owned row has at least one entry
      errpetsc = MatSetValues(solverPetsc->mtx, size1, rows, size2, &forAssyVec[0], Klocal, INSERT_VALUES);
    }

    // flush (not final) assembly: pattern fixed, further insertions still allowed
    solverPetsc->ierr = MatAssemblyBegin(solverPetsc->mtx, MAT_FLUSH_ASSEMBLY);CHKERRQ(solverPetsc->ierr);
    solverPetsc->ierr = MatAssemblyEnd(solverPetsc->mtx, MAT_FLUSH_ASSEMBLY);CHKERRQ(solverPetsc->ierr);

    errpetsc = MPI_Barrier(MPI_COMM_WORLD);

    // Create reaction vector
    errpetsc = VecCreate(PETSC_COMM_WORLD, &(solverPetsc->reacVec));
    CHKERRQ(errpetsc);

    // reaction vector spans ALL nodal DOFs (not just the free ones)
    ind1 = nNode_owned*ndof;
    ind2 = nNode_global*ndof;

    LOG_INFO("Initialise Matrix VecSetSizes: local " + 
        to_string(ind1) + " global " + to_string(ind2));
    errpetsc = VecSetSizes(solverPetsc->reacVec, ind1, ind2);
    CHKERRQ(errpetsc);

    errpetsc = VecSetFromOptions(solverPetsc->reacVec);
    CHKERRQ(errpetsc);

    errpetsc = MPI_Barrier(MPI_COMM_WORLD);


    solverPetsc->currentStatus = PATTERN_OK;


    errpetsc  = PetscFree(diag_nnz);   CHKERRQ(errpetsc);
    errpetsc  = PetscFree(offdiag_nnz);   CHKERRQ(errpetsc);

    PetscPrintf(MPI_COMM_WORLD, "\n     femSolids::prepareMatrixPattern()  .... FINISHED ...\n\n");

    return 0;
}






// set the off-diagonal terms for the solver
int femSolids::setSolverDataForFullyImplicit()
{
    PetscPrintf(MPI_COMM_WORLD, " femSolids::setSolverDataForFullyImplicit() ... STARTED \n");
/*
    int  ee, ii, jj, size1, size2, row, col;
    vector<int>  vecIntTemp(10);

    cout << " Total DOF   = " << '\t' << totalDOF << endl;

    rhsVec.resize(totalDOF);


    matK.setZero();
    matK.resize(totalDOF, totalDOF);

    VectorXi  nnzVec(totalDOF);

    jj = ceil(totalDOF*0.1);
    jj = 1000;

    for(ii=0; ii<totalDOF; ii++)
      nnzVec(ii) = jj;

    matK.reserve(nnzVec);

    for(ee=0; ee<nElem_global; ee++)
    {
        size1 = elems[ee]->forAssyVec.size();

        //printVector(elems[ee]->forAssyVec);

        for(ii=0; ii<size1; ii++)
        {
          row = elems[ee]->forAssyVec[ii];

          if(row != -1)
          {
            for(jj=0; jj<size1; jj++)
            {
              col = elems[ee]->forAssyVec[jj];

              //cout << ii << '\t' << jj << '\t' << row << '\t' << col << endl;

              if(col != -1)
              {
                matK.coeffRef(row, col) = 0.0;
              }
            }
          }//if(row != -1)
        } //for(ii=0;)
    } //for(ee=0;)

    matK.makeCompressed();

    //initialise_pardiso();
    solver.analyzePattern(matK);
    solver.factorize(matK);
    //solver.compute(matK);
*/
    PetscPrintf(MPI_COMM_WORLD, " femSolids::setSolverDataForFullyImplicit() ... ENDED \n");

    return 0;
}





// Entry point for the fully implicit nonlinear solve.
//
// Does nothing when the system has no unknowns; otherwise hands control
// to the strategy selected by NONLINEAR_SOLVER_TYPE (Newton-Raphson or
// arc-length). An unrecognised solver type only produces a diagnostic
// message.
//
// Returns 0 always.
int femSolids::solveFullyImplicit()
{
    // no unknowns -> nothing to solve
    if(totalDOF == 0)
      return 0;

    cout << " NONLINEAR_SOLVER_TYPE = " << NONLINEAR_SOLVER_TYPE << endl;

    if( NONLINEAR_SOLVER_TYPE == SOLVER_TYPE_NEWTONRAPHSON )
    {
        solveWithNewtonRaphson();
        return 0;
    }

    if( NONLINEAR_SOLVER_TYPE == SOLVER_TYPE_ARCLENGTH )
    {
        solveWithArclength();
        return 0;
    }

    // unsupported solver type: report and carry on
    PetscPrintf(MPI_COMM_WORLD, "\n\n femSolids::solve() ... Solver type is not available \n\n");

    return 0;
}







// Time-stepping driver using Newton-Raphson iterations per step.
//
// Each step: timeUpdate() predicts the fields, then up to iterationsMax
// iterations assemble the tangent/residual, add external loads into the
// PETSc rhs vector, check convergence/divergence and solve+update.
// Converged steps are post-processed and saved; non-converged steps cut
// the time step and reset the fields.
//
// NOTE(review): stepsCompleted is incremented only on convergence, so a
// step that repeatedly fails to converge loops forever (the time-limit
// condition in the while is commented out) -- confirm this is intended.
//
// Returns 'err', which is never modified after initialisation (always 0).
int femSolids::solveWithNewtonRaphson()
{
    PetscPrintf(MPI_COMM_WORLD, "\n\n femSolids::solveWithNewtonRaphson \n\n");

    int stepsCompleted=1, err = 0;

    setInitialConditions();
    postProcess();
    writeNodalData();

    convergedFlagPrev = convergedFlag = false;

    //Time loop
    // (myTime.cur <= (timeFinal-EPSILON)) && 
    while( (stepsCompleted <= stepsMax) )
    {
        // do a time update: reset variables and flags, and prediction step of fields
        timeUpdate();

        PetscPrintf(MPI_COMM_WORLD, " ==================================================================== \n");
        PetscPrintf(MPI_COMM_WORLD, " Time step number     =  %d  \n", stepsCompleted);
        PetscPrintf(MPI_COMM_WORLD, " Time step size       =  %f  \n", myTime.dt);
        PetscPrintf(MPI_COMM_WORLD, " Current time         =  %f  \n", myTime.cur);
        PetscPrintf(MPI_COMM_WORLD, " ==================================================================== \n");

        convergedFlagPrev = convergedFlag;
        convergedFlag = false;

        // -1.0 sentinels: no residual norm computed yet this step
        rhsNormPrev = rhsNorm = -1.0;

        PetscPrintf(MPI_COMM_WORLD, "\n\n");
        
        for(int iter=1; iter <= iterationsMax; iter++)
        {

            // first iteration applies the specified Dirichlet increments
            firstIteration = (iter == 1);

            // Compute the velocity and acceleration and the respective values at n+af, n+am
            updateIterStep();
            errpetsc = MPI_Barrier(MPI_COMM_WORLD);

            // compute the global stiffness matrix and residual
            try
            {
                calcStiffnessAndResidual();
            }
            // NOTE(review): this 'err' shadows the outer 'int err'; the outer
            // value stays 0, so the failure is not reported to the caller
            catch(runtime_error& err)
            {
                cerr << err.what() << endl;
                break;
            }
            errpetsc = MPI_Barrier(MPI_COMM_WORLD);

            // compute contributions from the external loads
            addExternalForces();
            errpetsc = MPI_Barrier(MPI_COMM_WORLD);

            //cout << " RHS " << endl;    printVector(solverPetsc->Fext);

            // TODO: update this loop and the relevant code to improve performance
            // for(int ii=0; ii<dispDOF; ii++)
            // //for(int ii=row_start; ii<=row_end; ii++)
            // {
            //   VecSetValue(solverPetsc->rhsVec, ii, solverPetsc->Fext[assyForSoln[ii]], ADD_VALUES);
            // }

            // TODO::profiling
            // two-pass gather of the nonzero external-force entries so that
            // a single VecSetValues call replaces dispDOF VecSetValue calls
            PetscInt    nonzero_count = 0;
            PetscInt    *nonzero_indices;
            PetscScalar *nonzero_values;

            // pass 1: count nonzeros to size the scratch arrays
            for (int ii = 0; ii < dispDOF; ii++) {
                if (solverPetsc->Fext[assyForSoln[ii]] != 0.0) {
                    nonzero_count++;
                }
            }

            errpetsc = PetscMalloc1(nonzero_count, &nonzero_indices); CHKERRQ(errpetsc);
            errpetsc = PetscMalloc1(nonzero_count, &nonzero_values); CHKERRQ(errpetsc);

            // pass 2: fill indices and values
            nonzero_count = 0;
            for (int ii = 0; ii < dispDOF; ii++) {
                if (solverPetsc->Fext[assyForSoln[ii]] != 0.0) {
                    nonzero_indices[nonzero_count] = ii;
                    nonzero_values[nonzero_count] = solverPetsc->Fext[assyForSoln[ii]];
                    nonzero_count++;
                }
            }

            errpetsc = VecSetValues(solverPetsc->rhsVec, nonzero_count, nonzero_indices, nonzero_values, ADD_VALUES); CHKERRQ(errpetsc);

            PetscPrintf(MPI_COMM_WORLD, " VecSetValue end start MPI_Barrier \n");
            /*** MPI_Barrier ***/
            errpetsc = MPI_Barrier(MPI_COMM_WORLD);
            /*** MPI_Barrier ***/

            rhsNormPrev = rhsNorm;
            PetscPrintf(MPI_COMM_WORLD, " reach before VecAssemblyBegin \n");

            VecAssemblyBegin(solverPetsc->rhsVec);
            VecAssemblyEnd(solverPetsc->rhsVec);

            // scratch arrays freed before any break below -- no leak
            errpetsc = PetscFree(nonzero_indices); CHKERRQ(errpetsc);
            errpetsc = PetscFree(nonzero_values); CHKERRQ(errpetsc);

            VecNorm(solverPetsc->rhsVec, NORM_2, &rhsNorm);

            // log this process's resident memory by parsing /proc/<pid>/statm
            // NOTE(review): /proc/<pid>/statm field order is
            //   size resident shared text lib data dt
            // so 'rss' here actually receives the 6th field (data) and the
            // true resident-set size lands in 'data' -- confirm and fix.
            // The fscanf return value is also unchecked.
            FILE *file;
            char path[50];
            long pages, dummy, data, stack, text, total_vm, rss;
            sprintf(path, "/proc/%d/statm", getpid());
            file = fopen(path, "r");
            if (file != NULL) {
                fscanf(file, "%ld %ld %ld %ld %ld %ld",
                   &total_vm, &data, &stack, &text, &dummy, &rss);
                fclose(file);

                pages = sysconf(_SC_PAGESIZE) / 1024;
                double rss_mb = rss * pages / 1024.0;

                LOG_INFO("Iter: " + to_string(iter) + " RSS in usage: " + to_string(rss_mb * 1e-3) + " Gbytes");
            }

            PetscPrintf(MPI_COMM_WORLD, " femSolids ...  %3d \t %11.4e \n", (iter), rhsNorm);

            // check for convergence and divergence of the iterations
            if( converged() )
            {
              PetscPrintf(MPI_COMM_WORLD, "\n femSolids ...  Iterations CONVERGED \n\n\n");

              convergedFlag = true;

              break;
            }
            // divergence is only checked after a few iterations have run
            else if( (iter > 3) && diverging(1.0e7) )
            {
              PetscPrintf(MPI_COMM_WORLD, " femSolids ...  Iterations are diverging. NR loop is terminated. \n\n\n");
              break;
            }

            // solve the matrix system and update the unknown DOFs
            factoriseSolveAndUpdate();
        }

        // if the residual is converged, then save the DOFs vectors
        if( convergedFlag )
        {
            stepsCompleted++;

            postProcess();

            writeNodalData();

            saveSolution();

            myTime.stck();
        }
        else
        {
            // cut the time step and restore the previous solution state
            myTime.cut();

            reset();
        }
    }

    PetscPrintf(MPI_COMM_WORLD, "\n\n\n Simulation reached the specified final time or maximum steps specified ... \n\n\n");

    return err;
}





// Arc-length (path-following) continuation driver.
//
// Each load step predicts (disp, loadFactor) by linear extrapolation from
// the two previous converged states, then iterates: assemble tangent and
// residual, add loadFactor-scaled external forces, solve the augmented
// arc-length system for the displacement increment and load increment dl,
// and update. The arc-length increment is adapted up/down depending on
// whether consecutive steps converge.
//
// NOTE(review): DsFactor is printed (and used in the prediction) before it
// is ever assigned when stepsCompleted == 1, i.e. the first pass prints an
// uninitialised value. 'du' and 'resln' are unused. Returns 0 always.
int femSolids::solveWithArclength()
{
    PetscPrintf(MPI_COMM_WORLD, "\n\n femSolids::solveWithArclength \n\n");


    int stepsCompleted=1, err = 0;
    VectorXd  DuFull(SolnData.disp.rows()), du(SolnData.disp.rows());
    double  Dl, dl, DsFactor, value;
    int resln[3];

    postProcess();
    writeNodalData();

    // compute contributions from the external loads
    addExternalForces();

    solverPetsc->dispDOF = dispDOF;
    solverPetsc->presDOF = presDOF;
    solverPetsc->assyForSoln = assyForSoln;

    convergedFlagPrev = convergedFlag = false;

    // the initial load factor is taken from the (pseudo-)time step size
    loadFactor = myTime.dt;

    //Time loop
    while( (myTime.cur <= (timeFinal-EPSILON)) && (stepsCompleted <= stepsMax) )
    {
        // do a time update: reset variables and flags, and prediction step of fields
        timeUpdate();

        PetscPrintf(MPI_COMM_WORLD, " ==================================================================== \n");
        PetscPrintf(MPI_COMM_WORLD, " Time step number     =  %d  \n", stepsCompleted);
        PetscPrintf(MPI_COMM_WORLD, " Load factor          =  %f  \n", loadFactor);
        PetscPrintf(MPI_COMM_WORLD, " ==================================================================== \n\n\n");


        convergedFlagPrev = convergedFlag;
        convergedFlag = false;

        rhsNormPrev = rhsNorm = -1.0;


        SolnData.dispIncr.setZero();


        // predictor: linear extrapolation of displacement and load factor
        // from the two previous converged states, scaled by the ratio of
        // arc-length increments
        if(stepsCompleted > 1)
        {
          DsFactor = arclenIncr/arclenIncrPrev;

          SolnData.disp  = (1.0+DsFactor)*SolnData.dispPrev - DsFactor*SolnData.dispPrev2;
          //SolnData.pres  = (1.0+DsFactor)*SolnData.presPrev - DsFactor*SolnData.presPrev2;
          loadFactor     = (1.0+DsFactor)*loadFactorPrev - DsFactor*loadFactorPrev2;
        }

        // NOTE(review): on the first step DsFactor is still uninitialised here
        PetscPrintf(MPI_COMM_WORLD, "arclenIncr = %14.10f \t %14.10f \t %14.10f \n", arclenIncr, arclenIncrPrev, DsFactor);
        PetscPrintf(MPI_COMM_WORLD, "loadFactor = %14.10f \t %14.10f \t %14.10f \n", loadFactor, loadFactorPrev, DsFactor);

        // total increments over this load step
        DuFull = SolnData.disp - SolnData.dispPrev;
        Dl = loadFactor - loadFactorPrev;

        convergedFlagPrev = convergedFlag;
        convergedFlag = false;


        // NOTE(review): '<' here gives one fewer iteration than the
        // Newton-Raphson driver's '<=' -- confirm intended
        for(int iter=1; iter<iterationsMax; iter++)
        {
            firstIteration = (iter == 1);

            updateIterStep();

            try
            {
                calcStiffnessAndResidual();
            }
            catch(runtime_error& err)
            {
                cout << err.what() << endl;
                break;
            }


            // add the load-factor-scaled external forces to the residual
            for(int ii=0; ii<dispDOF; ii++)
            {
              //solverEigen->rhsVec[ii] += (loadFactor*solverEigen->Fext[assyForSoln[ii]]);
              value = loadFactor*solverPetsc->Fext[assyForSoln[ii]];
              VecSetValue(solverPetsc->rhsVec, ii, value, ADD_VALUES);
            }

            //printVector(solverEigen->rhsVec);

            rhsNormPrev = rhsNorm;

            VecAssemblyBegin(solverPetsc->rhsVec);
            VecAssemblyEnd(solverPetsc->rhsVec);

            VecNorm(solverPetsc->rhsVec, NORM_2, &rhsNorm);

            PetscPrintf(MPI_COMM_WORLD, "\t femSolids ... %d \t %11.4E \n", iter, rhsNorm);

            // NOTE(review): convergence test uses a raw norm check, not the
            // converged() helper used by the NR driver
            if( rhsNorm < conv_tol )
            {
              convergedFlag = true;
              break;
            }

            //solverEigen->currentStatus = ASSEMBLY_OK;
            // solves the augmented system; returns the load increment in dl
            solverPetsc->solveArclengthSystem(stepsCompleted, DuFull, Dl, arclenIncr, dl);

            PetscScalar *arrayTemp;

            VecGetArray(solverPetsc->solnVec, &arrayTemp);

            // scatter the solver solution back to the full DOF vectors
            for(int ii=0; ii<dispDOF; ii++)
            {
              SolnData.dispIncr[assyForSoln[ii]] = arrayTemp[ii];
              DuFull[assyForSoln[ii]] += arrayTemp[ii];
            }

            VecRestoreArray(solverPetsc->solnVec, &arrayTemp);

            SolnData.disp += SolnData.dispIncr;

            // NOTE(review): the pressure update loop below is entirely
            // commented out -- mixed elements get no pressure update here
            if(MIXED_ELEMENT)
            {
              //printVector(assyForSolnPres);
              for(int ii=0; ii<presDOF; ii++)
              {
                //cout << ii << '\t' << assyForSolnPres[ii] << '\t' << solverEigen->soln[dispDOF+ii] << endl;
                //SolnData.var2[assyForSolnPres[ii]] += solverEigen->soln[dispDOF+ii];
                //SolnData.pres[ii] += solverEigen->soln[dispDOF+ii];
              }
              //printVector(SolnData.var2);
            }

            loadFactor += dl;
            Dl += dl;
        }

        if(convergedFlag)
        {
          //printVector(SolnData.var1);

          // first converged step defines the reference arc-length increment
          // and its admissible range
          if(stepsCompleted == 1)
          {
            arclenIncr = sqrt(DuFull.dot(DuFull) + loadFactor*loadFactor*solverPetsc->Fext.dot(solverPetsc->Fext));

            arclenIncrMax = arclenIncr;
            arclenIncrMin = arclenIncr/1024.0;
          }

          // rotate the converged states for the next prediction
          loadFactorPrev2 = loadFactorPrev;
          loadFactorPrev  = loadFactor;

          SolnData.dispPrev2 = SolnData.dispPrev;
          SolnData.dispPrev  = SolnData.disp;

          SolnData.presPrev2 = SolnData.presPrev;
          SolnData.presPrev  = SolnData.pres;

          arclenIncrPrev = arclenIncr;
          // grow the increment (clamped) after two consecutive converged steps
          if(convergedFlagPrev)
            arclenIncr = min(max(2.0*arclenIncr, arclenIncrMin), arclenIncrMax);

          loadfactorVec.push_back(loadFactor);

          loadStepConverged = loadStepConverged + 1;

          postProcess();

          writeNodalData();

          stepsCompleted++;
        }
        else
        {
          // shrink the increment; shrink harder after two failures in a row
          if(convergedFlagPrev)
            arclenIncr = max(arclenIncr*0.5, arclenIncrMin);
          else
            arclenIncr = max(arclenIncr*0.25, arclenIncrMin);
        }

        PetscPrintf(MPI_COMM_WORLD, " arclenIncr = %f \n", arclenIncr);
    }

    return 0;
}




// Assembles the global tangent stiffness matrix and residual vector by
// looping over the locally owned elements, and accumulates nodal reaction
// forces into SolnData.reac.
//
// Throws runtime_error("Negative Jacobian encountered") if any element
// fails its local computation; callers (the NR/arc-length drivers) catch
// this and abort the iteration loop. Returns 0 on success.
int femSolids::calcStiffnessAndResidual()
{
    if(debug) {PetscPrintf(MPI_COMM_WORLD, "     femSolids::calcStiffnessAndResidual ...STARTED \n\n");}

    int  ee, ii, jj, nsize;

    // element-level blocks: Kuu (disp-disp), Kup/Kpu (coupling), Kpp
    // (pres-pres) for mixed elements; FlocalU/FlocalP residuals
    MatrixXd  Kuu, Kup, Kpu, Kpp;
    VectorXd  FlocalU, FlocalP;

    vector<int>  vecTemp;

    SolnData.reac.setZero();
    solverPetsc->zeroMtx();

    // int sync_count = 0;
    // int sync_num   = 0;

    // NOTE(review): the loop runs to nElem_local_max (presumably the maximum
    // element count over all processes, so every rank makes the same number
    // of passes) but only ee < nElem_local does real work -- confirm whether
    // any collective call inside still relies on this padding.
    for(ee=0;ee<nElem_local_max;ee++)  // loop over all the elements
    {
      if(ee < nElem_local) {
        if(MIXED_ELEMENT)
        {
          try
          {
            elems[ee]->calcStiffnessAndResidualMixed(Kuu, Kup, Kpu, Kpp, FlocalU, FlocalP);
          }
          catch(runtime_error& err)
          {
            cerr << err.what() << endl;
            throw runtime_error("Negative Jacobian encountered");
          }

          // specified Dirichlet values enter the residual only on the first
          // iteration of each step
          if(firstIteration)
            elems[ee]->applyDirichletBCs2field(1, Kuu, Kup, Kpu, Kpp, FlocalU, FlocalP, elems[ee]->forAssyVec, elems[ee]->forAssyVecPres, SolnData.dispApplied, SolnData.presApplied);
              //elems[ee]->applyDirichletBCsMixed(1, Kuu, Kup, Kpu, Kpp, FlocalU, FlocalP);
            //cout << " BBBBBBBBBBB " << endl;

          solverPetsc->assembleMatrixAndVector2field(dispDOF, Kuu, Kup, Kpu, Kpp, FlocalU, FlocalP, elems[ee]->forAssyVec, elems[ee]->forAssyVecPres);
        }
        else
        {
          try
          {
            elems[ee]->calcStiffnessAndResidual(Kuu, FlocalU);
          }
          catch(runtime_error& err)
          {
            cerr << err.what() << endl;
            throw runtime_error("Negative Jacobian encountered");
          }

          if(firstIteration)
            elems[ee]->applyDirichletBCs(Kuu, FlocalU);

            //printMatrix(Kuu);
            //printVector(FlocalU);
            //printVector(elems[ee]->forAssyVec);
          solverPetsc->assembleMatrixAndVector(0, 0, elems[ee]->forAssyVec, elems[ee]->forAssyVec, Kuu, FlocalU);
        }
          // add up reaction forces
        // (FlocalU at this point already includes any Dirichlet modification)
        vecTemp = elems[ee]->globalDOFnums;
        nsize = vecTemp.size();
        for(ii=0;ii<nsize;ii++)
        {
          SolnData.reac[vecTemp[ii]] += FlocalU[ii];
        }
      }
    }

    solverPetsc->currentStatus = ASSEMBLY_OK;

    if(debug) {PetscPrintf(MPI_COMM_WORLD, "     femSolids::calcStiffnessAndResidual ... ENDED \n\n");}

    return 0;
}




// Solves the assembled linear system and updates the displacement (and,
// if present, pressure) fields with the computed increments.
//
// On the first iteration of a step, the applied Dirichlet values are added
// to the increment/pressure fields before solving. The distributed PETSc
// solution vector is scattered to every rank so each process can update
// its full local copy of the solution.
//
// Returns 0 on success, -1 if the PETSc solver did not converge.
int femSolids::factoriseSolveAndUpdate()
{
    if(debug) {PetscPrintf(MPI_COMM_WORLD, "     femSolids::factoriseSolveAndUpdate ... STARTED \n\n");}

    time_t tstart, tend;

    //cout << " RHS " << endl;        printVector(solverEigen->rhsVec); printf("\n\n\n");
    //for(int ii=dispDOF-100; ii<totalDOF; ii++)
      //cout << ii << '\t' << solverEigen->rhsVec[ii] << endl;

    SolnData.dispIncr.setZero();

    // add specified Dirichlet boundary conditions if first iteration
    if(firstIteration)
    {
        int ii, dof;
        for(ii=0; ii<dofs_specified.size(); ii++)
        {
            dof = dofs_specified[ii];
            SolnData.dispIncr[dof] += SolnData.dispApplied[dof] ;
        }

        for(ii=0; ii<dofs_specified_pres.size(); ii++)
        {
            dof = dofs_specified_pres[ii];
            SolnData.pres[dof] += SolnData.presApplied[dof] ;
        }
    }

    if(debug) {PetscPrintf(MPI_COMM_WORLD, "     matrix solution STARTED \n\n");}

    tstart = time(0);
    // nonzero return indicates solver failure; bail out before touching fields
    if( solverPetsc->factoriseAndSolve() )
    {
        PetscPrintf(MPI_COMM_WORLD, " PETSc solver not converged. \n\n");
        return -1;
    }

    if(debug) {PetscPrintf(MPI_COMM_WORLD, "     matrix solution DONE \n\n");}

    tend = time(0);
    PetscPrintf(MPI_COMM_WORLD, "It took %8.4f second(s) \n ", difftime(tend, tstart) );

    //printVector(solverPetsc->soln);

    // gather the full distributed solution onto every rank
    PetscScalar *arrayTempSoln;
    Vec            vecseq;
    VecScatter     ctx;

    VecScatterCreateToAll(solverPetsc->solnVec, &ctx, &vecseq);


    VecScatterBegin(ctx, solverPetsc->solnVec, vecseq, INSERT_VALUES, SCATTER_FORWARD);
    VecScatterEnd(ctx,   solverPetsc->solnVec, vecseq, INSERT_VALUES, SCATTER_FORWARD);

    VecGetArray(vecseq, &arrayTempSoln);

    // update solution vector
    for(int ii=0; ii<dispDOF; ii++)
    {
        SolnData.dispIncr[assyForSoln[ii]] = arrayTempSoln[ii];
    }
    SolnData.disp += SolnData.dispIncr;


    //if(debug) printVector(SolnData.disp);

    
    // NOTE(review): this assumes solnVec stores pressure DOFs contiguously
    // after the dispDOF displacement entries -- confirm against SolverPetsc;
    // out-of-range if solnVec only holds dispDOF entries when presDOF > 0.
    for(int ii=0; ii<presDOF; ii++)
    {
        //SolnData.var2[assyForSolnPres[ii]] += arrayTemp[dispDOF+ii];
        SolnData.pres[ii] += arrayTempSoln[dispDOF+ii];
    }

    VecRestoreArray(vecseq, &arrayTempSoln);

    VecScatterDestroy(&ctx);
    VecDestroy(&vecseq);

    // solve pressure variable in the mixed formulation
    // only for the constant pressure elements, Quad4/1, Hex8/1, TRIA6/1, TET10/1
    // NOTE(review): idd is hard-coded to 0 (original read SolnData.ElemProp[0]->id),
    // so this branch is currently dead code.

    int  idd = 0;//SolnData.ElemProp[0]->id;
    if( (idd == 6002) || (idd == 6052) )
    {
      for(int ee=0;ee<nElem_local;ee++)  // loop over all the elements
      {
        elems[ee]->solveForPressure();
      }
    }

    if(debug) {PetscPrintf(MPI_COMM_WORLD, "     femSolids::factoriseSolveAndUpdate ... ENDED \n\n");}

    return 0;
}






// Placeholder for computing discretisation error norms of the current
// solution (L2 errors in velocity components and pressure, H1 error in
// velocity) by summing per-element contributions.
//
// The original implementation -- building a combined velocity/pressure
// nodal vector and accumulating elems[ee]->CalculateError over all
// elements for four error indices -- was disabled (commented out) and has
// been removed; see version control history for the full code. The 'ind'
// argument is currently unused.
//
// Returns 0 always.
int femSolids::computeElementErrors(int ind)
{
    // no-op stub: error computation is currently disabled
    return 0;
}





