#include "MeshData.h"
namespace PRS{

	MeshData::MeshData(){
		// Put every owned pointer/counter into a well-defined state: the
		// destructor unconditionally deletes these members, so a default-
		// constructed object must not leave them indeterminate.
		pSimPar = 0;
		pMS = 0;
		ao = 0;
		pos = 0;
		idxn = 0;
		idxFreecols = 0;
		FP_Array = 0;
		localIDs = 0;
		rowToImport = 0;
		F_rows = 0;
		F_cols = 0;
		numGN = numGF = numGP = 0;
		numLocalIDs = 0;
		structsCreation = false;
	}

	MeshData::~MeshData(){
		// Release the PETSc application ordering created in reorderVerticesIds().
		// NOTE(review): if initialize() was never called, 'ao' may not have been
		// created -- confirm the ctor leaves it in a destroyable state.
		AODestroy(ao);

		// Auxiliary index arrays (allocated in createVectorsForRHS(),
		// mappingUnknowns() and rowsToImport()). delete[] on null is a no-op.
		delete[] pos;         pos = 0;
		delete[] idxn;        idxn = 0;
		delete[] idxFreecols; idxFreecols = 0;
		delete[] FP_Array;    FP_Array = 0;
		delete[] localIDs;    localIDs = 0;

		// These matrices are built only by the (currently disabled) node
		// unification path, which also sets rowToImport.
		if (rowToImport){
			delete[] rowToImport; rowToImport = 0;
			MatDestroy(joinNodes);
			MatDestroy(updateValues);
		}

		// pMS is allocated unconditionally in MeshData(SimulatorParameters*),
		// so it must be freed even when rowToImport was never set (the
		// original only deleted it inside the guard above, leaking it).
		delete pMS; pMS = 0;
	}

	MeshData::MeshData(SimulatorParameters *sp){
		pSimPar = sp;
		//theMesh = mesh;
		structsCreation = true;
		pMS = new UVMN_Struct;
		// Defensive initialization: the destructor unconditionally releases
		// these members, so they must not be indeterminate if the object is
		// destroyed before initialize()/createVectorsForRHS() ever ran.
		ao = 0;
		pos = 0;
		idxn = 0;
		idxFreecols = 0;
		FP_Array = 0;
		localIDs = 0;
		rowToImport = 0;
		F_rows = 0;
		F_cols = 0;
		numGN = numGF = numGP = 0;
		numLocalIDs = 0;
	}

	void MeshData::initialize(TMesh* pTMesh_data){
		// Ordering matters here: the PETSc numbering produced by
		// reorderVerticesIds() is required both to classify free/prescribed
		// nodes and to size the RHS auxiliary vectors.
		reorderVerticesIds(pTMesh_data);
		settingFreeAndPrescribedNodes(pTMesh_data);
		createVectorsForRHS(pTMesh_data->getMeshDim());
	}

	int MeshData::get_AppToPETSc_Ordering(int n) const {
		// Translate one application-side index into PETSc ordering.
		// AOApplicationToPetsc rewrites the value in place, so work on a copy.
		int mapped = n;
		AOApplicationToPetsc(ao, 1, &mapped);
		return mapped;
	}

	int MeshData::get_PETScToApp_Ordering(int n) const{
		// Inverse of get_AppToPETSc_Ordering: PETSc index -> application index.
		int mapped = n;
		AOPetscToApplication(ao, 1, &mapped);
		return mapped;
	}

	void MeshData::reorderVerticesIds(TMesh* pTMesh_data){
		// Builds a PETSc Application Ordering (AO) that maps the mesh's
		// application vertex IDs onto a contiguous PETSc numbering starting
		// at 1, and stores in this->numGN the number of nodes this rank maps.
		//
		// NOTE(review): every loop below bounded by the literals 1 or 0
		// (e.g. "for (i=0; i<1; i++)", "for (i=0; i<0; i++)") reads like a
		// parallel algorithm hard-wired for a single process (nproc==1,
		// rank==0) -- confirm before re-enabling a true parallel build.
		//if(!0) std::cout << "\tStart reordering vertices IDs... ";

		// get an array of all nodes ID and its numRC (nodes on domains boundary are not replicated)
		std::map<int,int> mapID_numRC;
		VertexData *vdata;
		int nrows, ncols;
		int ndom = pTMesh_data->getNumDomains();
		for (int i=0; i<ndom; i++){
			pTMesh_data->vertices_list[i].getSize(nrows,ncols);
			for (int j=0; j<nrows; j++){
				vdata = pTMesh_data->vertices_list[i].getValue(j,0);
				// map collapses vertices replicated across sub-domains: one
				// entry per unique application ID, value = remote-copy count.
				mapID_numRC[vdata->ID] = vdata->numRC;
			}
		}


		// rank p must take all remote nodes
		// Split unique IDs into: nodes with remote copies (shared with other
		// ranks) vs. nodes owned exclusively by this rank.
		set<int> remoteNodesSet;
		int numUniqueNodes = 0;
		std::map<int,int>::iterator mapID_numRC__Iter = mapID_numRC.begin();
		for(;mapID_numRC__Iter != mapID_numRC.end();mapID_numRC__Iter++){
			int ID = mapID_numRC__Iter->first;
			int numRC = mapID_numRC__Iter->second;
			if ( numRC ){
				remoteNodesSet.insert(ID);
			}
			else{
				numUniqueNodes++;
			}
		}

		int i = 0;
		int numRemoteNodes = remoteNodesSet.size();
		int *remoteNodes = new int[numRemoteNodes];

		// transfer nodes from set to an array
		set<int>::iterator Iter=remoteNodesSet.begin();
		for (; Iter != remoteNodesSet.end(); Iter++){
			remoteNodes[i++] = *Iter;
		}

		// rank must know how many nodes will receive from all other ranks
		// (buffer sized 1 = single-process world; see NOTE(review) above)
		int *numAllRemoteNodes = new int[1];
		MPI_Allgather(&numRemoteNodes,1,MPI_INT,numAllRemoteNodes,1,MPI_INT,MPI_COMM_WORLD);

		int total = 0;
		for (i=0; i<1; i++){
			total += numAllRemoteNodes[i];
		}
		int *allRemoteNodes = new int[total];

		int *displs = new int[1];
		displs[0] = 0;
		// loop body never executes with an upper bound of 1 (serial build)
		for (i=1; i<1; i++){
			displs[i] = displs[i-1] + numAllRemoteNodes[i-1];
		}

		// receive remote nodes from all other ranks
		MPI_Allgatherv (remoteNodes,numRemoteNodes,MPI_INT,allRemoteNodes,numAllRemoteNodes,displs,MPI_INT,MPI_COMM_WORLD);

		delete[] displs;
		remoteNodesSet.clear();

		// how many nodes from allRemoteNodes will be checked by rank p
		total = 0;
		// todo: 0
		//for (i=0;i<0; i++){
		// NOTE(review): bound of 0 means 'total' stays 0 here -- originally
		// this accumulated counts from ranks lower than this rank.
		for (i=0;i<0; i++){
			total += numAllRemoteNodes[i];
		}
		delete[] numAllRemoteNodes;

		// transfer to a set container only those remote nodes from allRemoteNodes from ranks
		// lower than rank p
		for (i=0;i<total;i++){
			remoteNodesSet.insert(allRemoteNodes[i]);
		}
		delete[] allRemoteNodes;

		// duplicated numbers in different ranks must be avoided to no disturb PETSc!
		// each rank will check which remote nodes from rank p belong also to ranks lower than it
		// store on another set container those nodes that do not belong to ranks lower that rank p
		set<int> remoteNodesLowSet;
		for (i=0;i<numRemoteNodes;i++){
			Iter = remoteNodesSet.find(remoteNodes[i]);
			if ( Iter == remoteNodesSet.end()) remoteNodesLowSet.insert(remoteNodes[i]);
		}
		remoteNodesSet.clear();
		delete[] remoteNodes;

		// N means how many nodes rank p should map.
		int N = numUniqueNodes + remoteNodesLowSet.size();
		// rank 0: mapping = 1,2,3,...,N0
		// rank 1: mapping = N0+1,N0+2,N0+3,...,N0+N1
		// rank 2: mapping = N0+N1+1,NO+N1+2,NO+N1+3,...,N0+N1+N2

		this->numGN = N; //P_getSumInt(N);

		// each rank should know all Ni's
		int *recvNs = new int[1];
		MPI_Allgather(&N,1,MPI_INT,recvNs,1,MPI_INT,MPI_COMM_WORLD);

		// start mapping
		int *apOrdering = new int[N];
		int *petscOrdering = new int[N];

		// 'from' is this rank's first PETSc index (1-based); the dead loop
		// below would add the N of every lower rank in a parallel run.
		int from=1, j=0;
		//for (i=0; i<0; i++){
		for (i=0; i<0; i++){
			from += recvNs[i];
		}

		// Assign consecutive PETSc indices to: (a) every exclusively-owned
		// node, and (b) every shared node this rank is responsible for
		// (i.e. present in remoteNodesLowSet).
		i = from;
		mapID_numRC__Iter = mapID_numRC.begin();
		for(;mapID_numRC__Iter != mapID_numRC.end();mapID_numRC__Iter++){
			int ID = mapID_numRC__Iter->first;
			int numRC = mapID_numRC__Iter->second;
			if ( numRC ){
				Iter = remoteNodesLowSet.find( ID );
				if ( Iter != remoteNodesLowSet.end() ){
					apOrdering[j] = ID;
					petscOrdering[j] = i++;
					j++;
				}
			}
			else{
				apOrdering[j] = ID;
				petscOrdering[j] = i++;
				j++;
			}
		}
		mapID_numRC.clear();
		remoteNodesLowSet.clear();
		delete[] recvNs;

		// Petsc will be used to make the parallel job
		AOCreateMapping(PETSC_COMM_WORLD,N,apOrdering,petscOrdering,&ao);

		delete[] apOrdering;
		delete[] petscOrdering;
		//if(!0) std::cout << "done.\n";
	}

	void MeshData::settingFreeAndPrescribedNodes(TMesh* pTMesh_data){
		// numGN is produced by reorderVerticesIds(); refuse to run before it.
		if (numGN == 0){
			throw Exception(__LINE__,__FILE__,"Number of global nodes is unknown. Did you call reorderVerticesIds before?\n");
		}
		// First classify nodes (fills the dirichlet map and counters),
		// then build the free/prescribed index mapping.
		FreePrescribedNodes(pTMesh_data);
		mappingUnknowns();
	}

	int MeshData::FreePrescribedNodes(TMesh* pTMesh_data){
		// Populate the 'dirichlet' map with every node carrying a prescribed
		// (boundary-condition) value, then derive the global counters from it.
		// Every processor holds all prescribed nodes -- that is only valid
		// for the EBFV1 elliptic equation.
		getNodesWithKnownValues(pTMesh_data);

		numGP = (int)dirichlet.size();	// global prescribed nodes
		numGF = numGN - numGP;			// global free (unknown) nodes

		MPI_Barrier(MPI_COMM_WORLD);
		return 0;
	}

	int MeshData::getNodesWithKnownValues(TMesh* pTMesh_data){
		// Scans every vertex of every sub-domain, caches its PETSc index on
		// the vertex (ID_PETScOrdering) and, for non-free (flagged) vertices,
		// records its boundary-condition value in the 'dirichlet' map and on
		// the vertex itself (vdata->p).
		//
		// NOTE(review): 'i' here is shadowed by the loop-local 'int i' below
		// and is only used by the dead MPI section after the return.
		int i;

		int nrows, ncols;
		int ndom = pTMesh_data->getNumDomains();
//		EdgeData *edata;
//		for (int i=0; i<ndom; i++){
//			pTMesh_data->edge_list[i].getSize(nrows,ncols);
//			for (int j=0; j<nrows; j++){
//				edata = pTMesh_data->edge_list[i].getValue(j,0);
//				if ( !pSimPar->isNodeFree(edata->flag) ){
//					int ID = get_AppToPETSc_Ordering(edata->vertex_0->ID);
//					dirichlet[ID] = pSimPar->getBC_Value(edata->vertex_0->flag);
//					ID = get_AppToPETSc_Ordering(edata->vertex_1->ID);
//					dirichlet[ID] = pSimPar->getBC_Value(edata->vertex_1->flag);
//				}
//
//			}
//		}

		VertexData *vdata;
		for (int i=0; i<ndom; i++){
			pTMesh_data->vertices_list[i].getSize(nrows,ncols);
			for (int j=0; j<nrows; j++){
				vdata = pTMesh_data->vertices_list[i].getValue(j,0);
				int ID = get_AppToPETSc_Ordering(vdata->ID);
				// cache the PETSc index so later passes need not re-map it
				vdata->ID_PETScOrdering = ID;
//				cout << "\nID = " << vdata->ID << "\tID_petsc:" << ID << "\tflag" << vdata->flag << "\tCC: " << !pSimPar->isNodeFree(vdata->flag);
				if ( !pSimPar->isNodeFree(vdata->flag) ){
					dirichlet[ID] = pSimPar->getBC_Value(vdata->flag);
					vdata->p = dirichlet[ID];
				}

			}
		}



//		for (MIter iter = dirichletBegin(); iter!=dirichletEnd(); iter++){
//			printf("\nid[%d] - %.6f",iter->first,iter->second);
//		}
//		throw 1;


//		if (!  P_getSumInt(dirichlet.size()) ){
//			throw Exception(__LINE__,__FILE__,"Prescribed (dirichlet) nodes were not found. Simulation cannot proceed.\n");
//		}

		// go ahead only if parallel
		//if (1==1)
		// NOTE(review): unconditional return -- everything below is dead code
		// in this serial build. It exchanged prescribed-node IDs/values among
		// ranks via MPI_Allgather(v); keep for reference until parallel runs
		// are re-enabled.
		return 0;
		// now, all partitions must know all prescribed nodes and their flags
		// first of all, let partitions aware of how many prescribed nodes exist on each one
		// if processor p does not have any prescribed node let nLPN=1 because p cannot send 0 element
		int nLPN = dirichlet.size();
		int *recvLP = new int[1];
		MPI_Allgather ( &nLPN, 1, MPI_INT, recvLP, 1, MPI_INT, MPI_COMM_WORLD );
		// number of global prescribed nodes
		// Note that nGPN value is not necessary the real global prescribed nodes
		// Nodes on partition boundary can be counted twice or more
		int nGPN=0;
		for (i=0; i<1; i++) nGPN += recvLP[i];
		// sPIDs = send prescribed IDs    sPFlags = send prescribed flags

		i=0;
		int *sPIDs = new int[nLPN];
		double *sPFlags = new double[nLPN];
		for(MIter mit = dirichlet.begin(); mit != dirichlet.end(); mit++){
			sPIDs[i] = mit->first;
			sPFlags[i] = mit->second;
			i++;
		}

		// rcount says how many values each rank will send
		int *rcounts = recvLP;
		// displs says where to start to read in recv buffer
		int *displs = new int[1];
		displs[0] = 0;
		for (i=1; i<1; i++) displs[i] = displs[i-1]+recvLP[i-1];
		int *rPIDs = new int[nGPN];
		// get all prescribed nodes
		MPI_Allgatherv(sPIDs,nLPN,MPI_INT,rPIDs,rcounts,displs,MPI_INT,MPI_COMM_WORLD);
		double *rPFlags = new double[nGPN];
		// get flags from all prescribed nodes
		MPI_Allgatherv(sPFlags,nLPN,MPI_DOUBLE,rPFlags,rcounts,displs,MPI_DOUBLE,MPI_COMM_WORLD);
		for (i=0; i<nGPN; i++)  dirichlet.insert( pair<int,double>(rPIDs[i],rPFlags[i]) );
		delete[] sPIDs; sPIDs=0;
		delete[] sPFlags; sPFlags=0;
		delete[] rPIDs; rPIDs=0;
		delete[] rcounts; rcounts=0;
		delete[] displs; displs=0;
		return 0;
	}

	bool MeshData::getDirichletValue(int ID, double *val=0){
		bool key = false;
		MIter mit = dirichlet.find( ID );
		if ( mit != dirichlet.end() ){
			if (val) *val = mit->second;
			key = true;
		}
		//std::cout<<"Node "<<ID<<" is: "<<key<<endl;
		return key;
	}


	/*
	 * It's supposed the global matrix is assembled for all (free and dirichlet)
	 * nodes. For the current FVM used, where system of equation is assembled in
	 * a sub-domain by sub-domain approach, work becomes easier using all nodes.
	 * After that, it's desired to solve the system of equation only for
	 * the free ones as expected. Each rank needs to import some rows from the
	 * assembled matrix to a new matrix. The function below does this job.
	 *
	 * - nrows: number of rows that will be imported and will be local to this rank.
	 * - rows: array of row indices, each corresponding to a free node.
	 * */
	int MeshData::rowsToImport(TMesh* pTMesh_data,int &nrows, int *&rows){
		// Decides which rows of the globally-assembled matrix this rank must
		// import to build its local LHS (free nodes only). Outputs:
		//   nrows / rows  - count and 0-based indices of imported rows
		//   localIDs      - 0-based PETSc indices of this rank's free nodes
		//
		// A throw-away numGF x numGF matrix is created solely to ask PETSc
		// how it would distribute the rows among processes.
		Mat temp;
		PetscErrorCode ierr = MatCreateMPIAIJ(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,numGF,numGF,0,PETSC_NULL,0,PETSC_NULL,&temp);

		// get range of local owned rows of each process
		const PetscInt *ranges[1];
		ierr = MatGetOwnershipRanges(temp,ranges);
		//printf("[%d] - ranges[0] %d, ranges[1] %d, ranges[2] %d, ranges[3] %d\n",0,ranges[0][0],ranges[0][1],ranges[0][2],ranges[0][3]);

		// each rank should take from matrix A row indices associated to free nodes
		// that fill exactly the number of local rows of LHS, i.e,
		// - rank 0 takes the first n1 rows from A, even if it is not local to rank 0.
		// - rank 1 takes from the first n1 rows to n1+n2
		// - rank 2 takes from the first n1+n2 rows to n1+n2+n3
		// - (...)
		// - rank p takes from the first sum(ni),i=1:p rows to sum(ni),i=i:p+1

		// NOTE(review): loop bound of 1 = single-process specialization, like
		// the rest of this file; RANGE is only used by the disabled branch's
		// debug print in a parallel build.
		int i,k;
		int RANGE[1];
		for (i=0; i<1; i++) RANGE[i] = ranges[0][i+1]-ranges[0][i];

		//printf("[%d] - RANGE %d %d %d\n",0,RANGE[0],RANGE[1],RANGE[2]);

		const int from = ranges[0][0];
		const int to = ranges[0][0+1];
		//printf("[%d] - from %d  to %d\n",0,from,to); exit(1);
		ierr = MatDestroy(temp); CHKERRQ(ierr);

		/*
		 * Inform which ROWS from A, on processor p, must be copied to assembly LHS matrix (free nodes). For each rank, the number of rows associated
		 * to free nodes cannot be forecast so a list is used.
		 */
		list<int> freerowsList;
		k = 0;
		// NOTE(review): 0==0 is always true -- presumably this was
		// "rank==0", making the else-branch (window [from,to)) dead here.
		if (0==0){
			// rank 0: take the first 'to' free nodes (IDs are 1-based,
			// stored rows are 0-based, hence i-1).
			for (i=1; i<=numGN; i++){
				if ( !getDirichletValue(i,0) && k<to ){
					freerowsList.push_back(i-1);
					k++;
				}
			}
		}
		else{
			// other ranks: take free nodes whose running index falls in
			// this rank's ownership window [from,to).
			for (i=1; i<=numGN; i++){
				if ( !getDirichletValue(i,0)  ){
					if ( k>=from && k<to )
						freerowsList.push_back(i-1);
					k++;
				}
			}
		}

		// transfer row indices from list to array
		nrows = freerowsList.size();
		if ( !nrows ) throw Exception(__LINE__,__FILE__,"numLocalRows = 0\n");
		rows = new int[nrows];

		i=0;
		for(list<int>::iterator lit=freerowsList.begin(); lit!=freerowsList.end(); lit++){
			rows[i++] = *lit;
		}
		freerowsList.clear();
		/*
		 * Create a local array consisting of only local free IDs
		 * */
		set<int> setLFNodes;
//		VIter vit = M_vertexIter(theMesh);
//		while (pEntity node = VIter_next(vit)){
//			int ID = get_AppToPETSc_Ordering(EN_id(node));
//			if ( !getDirichletValue(ID,0)  ) setLFNodes.insert(ID);
//		}
		getLocalFreeNodes(pTMesh_data,setLFNodes);
		if (setLFNodes.size()==0){
			throw Exception(__LINE__,__FILE__,"No local free nodes.\n");
		}
		// copy the set into localIDs, shifting to 0-based indices
		set<int>::iterator setLFN_Iter = setLFNodes.begin();
		numLocalIDs = setLFNodes.size();
		localIDs = new int[numLocalIDs];
		for (i=0; i<numLocalIDs; i++){
			localIDs[i] = *setLFN_Iter-1;
			setLFN_Iter++;
		}
		setLFNodes.clear();
		return 0;
	}

	void MeshData::getLocalFreeNodes(TMesh* pTMesh_data, std::set<int> &setLFNodes){
		// Collect (into setLFNodes) the PETSc index of every vertex that has
		// no prescribed value, across all sub-domains. Duplicates collapse
		// naturally because a set is used.
		const int ndom = pTMesh_data->getNumDomains();
		for (int dom = 0; dom < ndom; dom++){
			int nrows = 0, ncols = 0;
			pTMesh_data->vertices_list[dom].getSize(nrows,ncols);
			for (int row = 0; row < nrows; row++){
				VertexData* vertex = pTMesh_data->vertices_list[dom].getValue(row,0);
				const int petscID = vertex->ID_PETScOrdering;
				if ( !getDirichletValue(petscID,0) ){
					setLFNodes.insert(petscID);
				}
			}
		}
	}

	void MeshData::getRemoteIDs(int &nLIDs, int** IDs_ptr){
		// Hand out a non-owning view of the local free-node index array
		// built by rowsToImport(); the caller must not free it.
		nLIDs = numLocalIDs;
		*IDs_ptr = localIDs;
	}

	void MeshData::mappingUnknowns(){
		// For each global node i (1-based), FP_Array[i-1] stores the node's
		// position within its own group: free nodes receive consecutive
		// free-equation indices, prescribed (dirichlet) nodes receive
		// consecutive prescribed indices.
		std::cout << "\tMapping unknowns...";
		FP_Array = new int[numGN];
		int freeIdx = 0;
		int presIdx = 0;
		for (int node = 0; node < numGN; node++){
			const bool prescribed = getDirichletValue(node+1,0);
			FP_Array[node] = prescribed ? presIdx++ : freeIdx++;
		}
		std::cout << "done.\n";
		MPI_Barrier(MPI_COMM_WORLD);
	}

	void MeshData::createVectorsForRHS(int dim){
		/*
		 * Allocate memory for auxiliary vectors:
		 *  - idxFreecols: columns of A to copy into the LHS matrix (free nodes);
		 *    same on all processors.
		 *  - idxn: columns of A to copy into the RHS vector (prescribed nodes);
		 *    same on all processors.
		 *  - pos: identity index vector of length numGN*dim.
		 * */
		try{
			idxFreecols = new PetscInt[getNum_GF_Nodes()];
			idxn = new PetscInt[getNum_GP_Nodes()];
			pos = new PetscInt[getNum_GNodes()*dim];
		}
		catch(const std::exception &error){
			std::cerr << "An exception has been caught: " << error.what() << std::endl;
			// These arrays are mandatory below: continuing with unallocated
			// buffers would be undefined behavior, so propagate the failure
			// (the original swallowed it and then wrote through the pointers).
			throw;
		}

		// Partition the 1-based global node IDs into free vs prescribed,
		// storing 0-based indices.
		int i,j=0,k=0;
		int np = getNum_GNodes();
		for (i=1; i<=np; i++){
			if ( !getDirichletValue(i,0) )
				idxFreecols[k++] = i-1;
			else
				idxn[j++] = i-1;
		}
		// pos is simply 0,1,2,...,np*dim-1
		for (i=0; i<np*dim; i++) pos[i] = i;
	}

	int MeshData::createVectorsForMatrixF(Mat &mat){
		// Gets all F[i] local rows. Varies for all processors.
		// F_m/F_n are the first and one-past-last locally owned rows of 'mat'.
		int i, k, F_m, F_n;
		PetscErrorCode ierr = MatGetOwnershipRange(mat,&F_m,&F_n);CHKERRQ(ierr);
		set_F_nrows(F_n - F_m);
		/*
		 * Allocate memory for auxiliary vectors:
		 *  - F_rows: this rank's local row indices of F.
		 *  - F_cols: columns of F associated to free nodes (global).
		 * */
		try{
			F_rows = new PetscInt[get_F_nrows()];
			F_cols = new PetscInt[getNum_GF_Nodes()];
		}
		catch(const std::exception &error){
			std::cerr << "An exception has been caught: " << error.what() << std::endl;
			// The loops below write through these pointers; continuing after
			// a failed allocation would be undefined behavior, so propagate
			// (the original swallowed the exception and carried on).
			throw;
		}
		// Local row indices are just the contiguous range [F_m, F_n).
		for (i=F_m,k=0; i<F_n; i++) F_rows[k++] = i;
		// Gets F[i] columns related to free nodes (0-based indices).
		int np = getNum_GNodes();
		for (i=1,k=0; i<=np; i++){
			if ( !getDirichletValue(i,0) ){
				F_cols[k++] = i-1;
			}
		}
		return 0;
	}

//	double getScalar(pEntity, UVMN_Struct*);
//	void setScalar(pEntity, double, UVMN_Struct*);
//
//	double getScalar(pEntity ent, UVMN_Struct *pMS){
//		dblarray vec(pMS->dim);
//		pMS->pFunc_getVector(ent,pMS->dom,vec);
//		return vec[pMS->coord_xyz];
//	}
//
//	void setScalar(pEntity ent, double val, UVMN_Struct *pMS){
//		dblarray vec(pMS->dim);
//		pMS->pFunc_getVector(ent,pMS->dom,vec);
//		vec[pMS->coord_xyz] = val;
//		pMS->pFunc_setVector(ent,pMS->dom,vec);
//	}

//	int MeshData::unifyVectorsOnMeshNodes(void (*pFunc_getVector)(pEntity,int,dblarray&),
//			void (*pFunc_setVector)(pEntity,int,dblarray),
//			GeomData* pGCData, int dom, bool onlyRemoteNodesOnBoundaries){
//
//		if (1==1) return 0;
//		pMS->onlyRNOB = onlyRemoteNodesOnBoundaries;
//		pMS->dom = dom;
//		pMS->dim = pGCData->getMeshDim();
//		pMS->pFunc_getVector = pFunc_getVector;
//		pMS->pFunc_setVector = pFunc_setVector;
//
//		// unify for each vector coordinate
//		for (int i = 0; i < pMS->dim; i++){
//			pMS->coord_xyz = i;
//			unifyScalarsOnMeshNodes(0,0,pGCData,pMS);
//		}
//		return 0;
//	}

//	int MeshData::unifyScalarsOnMeshNodes(double(*pFunc_getScalar)(pEntity),void (*pFunc_setScalar)(pEntity,double),GeomData* pGCData, void *ptr){
//		if (1==1) return 0;
//		int ID,i,m,n,k;
//
//		// STEP 1
//		// use a map to store only nodes with remote copies (this number is unknown)
//		map<int,double>::iterator mit;
//		pEntity node,face;
//
//		if (structsCreation){
//
//			/*
//			 * pMS->onlyRNOB means one desires to unify vectors only nodes on boundaries
//			 */
//			if (ptr && pMS->onlyRNOB){
//				FIter fit = M_faceIter(theMesh);
//				while ( (face = FIter_next(fit)) ){
//					for (i=0;i<3;i++){
//						node = (pEntity)face->get(0,i);
//						if (pGCData->getNumRemoteCopies(node)){
//							ID = get_AppToPETSc_Ordering(EN_id(node));					// node ID
//							mapPB_nodes[ID] = (!ptr)?pFunc_getScalar(node):getScalar(node,pMS);
//						}
//					}
//				}
//				FIter_delete(fit);
//			}
//			else{
//				VIter vit = M_vertexIter(theMesh);
//				while ( (node = VIter_next(vit)) ){
//					if (pGCData->getNumRemoteCopies(node)){
//						ID = get_AppToPETSc_Ordering(EN_id(node));					// node ID
//						mapPB_nodes[ID] = (!ptr)?pFunc_getScalar(node):getScalar(node,pMS);
//					}
//				}
//				VIter_delete(vit);
//			}
//		}
//		else{
//			for(mit = mapPB_nodes.begin(); mit != mapPB_nodes.end(); mit++){
//				ID = get_PETScToApp_Ordering(mit->first);
//				node = (pEntity)theMesh->getVertex(ID);
//				if (!node) throw Exception(__LINE__,__FILE__,"Null vertex!\n");
//				mit->second = (!ptr)?pFunc_getScalar(node):getScalar(node,pMS);
//			}
//		}
//
//
//		// STEP 2
//		// number of nodes on partition bdry
//		int numPB_Nodes = mapPB_nodes.size();
//		// nodes on partition bdry are now known. Let's transfer them to a PETSc column
//		// matrix to sum the contribution from all processor that share the same node.
//
//		int np = getNum_GNodes();
//		if (structsCreation){
//			ierr = MatCreateMPIAIJ(PETSC_COMM_WORLD,PETSC_DECIDE,
//					PETSC_DECIDE,np,1,0,PETSC_NULL,0,PETSC_NULL,&joinNodes);CHKERRQ(ierr);
//		}
//		else{
//			ierr = MatZeroEntries(joinNodes);CHKERRQ(ierr);
//		}
//
//		int col = 0;
//		double data;
//		for(mit = mapPB_nodes.begin(); mit != mapPB_nodes.end(); mit++){
//			int row = mit->first-1;				// -1 to satisfy C/C++ index style
//			data = mit->second;
//			ierr = MatSetValues(joinNodes,1,&row,1,&col,&data,ADD_VALUES); CHKERRQ(ierr);
//		}
//		ierr = MatAssemblyBegin(joinNodes,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
//		ierr = MatAssemblyEnd(joinNodes,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
//
//
//		// STEP 3
//		// which rows from global matrix each rank must import
//		i = 0;
//		if (structsCreation){
//			rowToImport = new int[numPB_Nodes];
//			for (mit=mapPB_nodes.begin();mit!=mapPB_nodes.end();mit++){
//				rowToImport[i++] = mit->first-1;
//			}
//		}
//
//		if (structsCreation){
//			ierr = MatGetSubMatrixRaw(joinNodes,numPB_Nodes,rowToImport,1,&col,PETSC_DECIDE,MAT_INITIAL_MATRIX,&updateValues); CHKERRQ(ierr);
//		}
//		else{
//			ierr = MatGetSubMatrixRaw(joinNodes,numPB_Nodes,rowToImport,1,&col,PETSC_DECIDE,MAT_REUSE_MATRIX,&updateValues); CHKERRQ(ierr);
//		}
//
//		ierr = MatGetOwnershipRange(updateValues,&m,&n); CHKERRQ(ierr);
//		k = m;
//
//		for (i=0; i<numPB_Nodes; i++){
//			ierr = MatGetValues(updateValues,1,&k,1,&col,&data); CHKERRQ(ierr);
//			ID = rowToImport[i] + 1;
//			node = theMesh->getVertex( get_PETScToApp_Ordering(ID) );
//			if (!ptr){
//				pFunc_setScalar(node,data);
//			}
//			else{
//				setScalar(node,data,pMS);
//			}
//			k++;
//		}
//		structsCreation = false;
//		return 0;
//	}
}
