#include <math.h>
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "arralloc.h"


//**********************************************************//
//*****EDIT THIS REGION OF THE CODE BEFORE COMPILING********//
//**********************************************************//
#define M 192
#define N 360
#define P 8
#define DELTA 0.01
#define FILEIN "edge600x840.dat"
#define FILEOUT "image600x840.pgm"
#define DEL_CHECK 500
//#define DELTA_VAL_PRINT  			//remove comment on this to print out delta values at each iteration

//**********************************************************//
//**********************************************************//

int tag=0;

int main(int argc, char **argv)
{
//declaring variables
//i,j,k are counter variables, maxfactor is stores a factor of value P, where P=number of processors.
//nrows and ncols : the number of rows and columns after decomposition.

int i,j,k,prime,maxfactor,nrows,ncols;
float delta,localdelta, deltapartial,sumdiff;
int breakflag=1;
double itime,ftime,totaltime,partstart,partend,parttotal,periteration;
double checkroot;
long integer;
double fraction;
//declaring the MPI variables
int rank,rank2D,size,comm,comm2D,right,left,up,down;
int displ=-1;
int index=0;
int coords[2];
int dims[2];
int periods[2];
periods[0]=0;	//non cyclic
periods[1]=0;	//non cyclic
int ndim=2; //number of dimensions
int reorder=0; //no reordering of ranks
//old communication area
comm=MPI_COMM_WORLD;
MPI_Init(NULL,NULL);
MPI_Comm_rank( comm,&rank);
MPI_Comm_size( comm,&size);

//checks to find if the number of procs specified is a perfect square
checkroot=sqrt(P);
integer=(long)checkroot;
fraction=checkroot-integer;
if (fraction==0)
{
	dims[0]=integer;
	dims[1]=integer;
}	//checks for prime number of 
	
else{
	for(i=2; i<P;i++)
	{
		if(P%i==0)
		{prime=0;
		break;}	
	prime=1;
	}
	if ((P==2)||(P==3))
	prime=1;
	if(prime==1){
		dims[0]=P;
		dims[1]=1;
	}
	else 	
	{
		for(i=P/2-1;i>=2;i--)
		{
		if (P%i==0)
		{maxfactor=i;
		break;
		}
		}
	dims[0]=maxfactor;
	dims[1]=P/maxfactor;
	}
}

//prints out decomposition information
if(rank==0)
{
printf("THE IMAGE IS DECOMPOSED INTO  %d %d DIMENSIONS\n",dims[0],dims[1]);
}
 nrows=dims[0];
 ncols=dims[1];

//new cartesian topology

MPI_Cart_create(comm, ndim, dims, periods, reorder, &comm2D);
MPI_Comm_rank(comm2D, &rank2D);
MPI_Cart_coords(comm2D, rank2D, ndim, coords);
index=0;
MPI_Cart_shift(comm2D, index, displ,&right,&left);
index=1;
MPI_Cart_shift(comm2D, index, displ,&up,&down);
//declaring the arrays for image storage
 float **edge;
 float **old;
 float **buff;
 float **new;
 float masterbuff[M][N];
 float resultbuff[M][N];
//calculation of MP and NP

 int MP=M/nrows;
 int NP=N/ncols;

 if (M%nrows!=0)
 {
	if ((ncols==1)&&(rank==size-1)) 		//if 1D decomposition and M doesn't divide
	{
		MP=M/nrows+M%nrows;
	}
	
	else if(ncols!=1)				//if 2D decompostition and M doesn't divide
	{
	if(coords[0]==nrows-1)
		MP=M/nrows+M%nrows;
	}
 }

if(N%ncols!=0)						//this can occur only for 2D decomposition
{
	if (coords[1]==ncols-1)
	NP=N/ncols+N%ncols;
}
							//calling arralloc.c to dynamically declare the arrays at run tine.
							//this makes sure the arrays are contiguously allocated.
 edge=arralloc(sizeof(float),2,MP+2,NP+2);
 old=arralloc(sizeof(float),2,MP+2,NP+2);
 buff=arralloc(sizeof(float),2,MP,NP);
 new=arralloc(sizeof(float),2,MP+2,NP+2);

//start the time clock
itime=MPI_Wtime();

if (rank==0)
{
printf("	THIS PROGRAM RUN ON %d THREADS\n",size);
//checking if size matches
if(size!=P)
{	
	printf("mismatch in threads and decomposition!!!");
}
}

//initializing the old,new and edge arrays
for(i=0; i<MP+2; i++)
{
	for(j=0; j<=NP+2; j++)
	{	
		edge[i][j]=0;
		new[i][j]=0;
		old[i][j]=0;
	}
}

int rowlowerindex,rowupperindex,collowerindex,colupperindex;
	datread(FILEIN,masterbuff,M,N);

//calculating the upper and lower boundaries using co-ords. The co-ords returns the cartesian co-ordinated of the processor
//using this the upper and lower index of the processors arrays are mapped into the larger image.
collowerindex=coords[1]*NP;
colupperindex=(coords[1]+1)*NP;
rowlowerindex=coords[0]*MP;
rowupperindex=(coords[0]+1)*MP;
if((ncols==1)&&(M%nrows!=0))
	{
	if (rank==size-1)
		{	
		rowupperindex=M;
		rowlowerindex=M-MP;
		}
		
	}
if((ncols!=1)&&(M%nrows!=0))
	{
	if (coords[0]==nrows-1)
		{
		rowupperindex=M;
		rowlowerindex=M-MP;
		}	
	}
if(N%ncols!=0)          //this can occur only for 2D decomposition
{
        if (coords[1]==ncols-1)
        {
	colupperindex=N;
	collowerindex=N-NP;
	}
}

int m,n;

//reading the masterbuff into smaller local arrays using upper and lower limits.
for(i=rowlowerindex,m=0; i<rowupperindex;i++,m++)
{
for(j=collowerindex,n=0; j<colupperindex;j++,n++)
{
buff[m][n]=masterbuff[i][j];
}
}

//setting edge=buffer
for(i=1; i<=MP; i++)
{
	for(j=1; j<=NP; j++)
	{
		edge[i][j]=buff[i-1][j-1];
	}	
}

//setting old=edge
for(i=1; i<=MP; i++)
{
	for(j=1; j<=NP; j++)
	{
		old[i][j] = edge[i][j];
	}
}

//setting up vars for communication and cartesian topology
MPI_Status status;
MPI_Request request1;
MPI_Request request2;
MPI_Request request3;
MPI_Request request4;
MPI_Request request5;
MPI_Request request6;
MPI_Request request7;
MPI_Request request8;

//definition of contiguous derived datatype. This is used to simplify the communication between the MP rows.
MPI_Datatype MPI_FLOAT_ARRAY;
MPI_Type_contiguous(NP, MPI_FLOAT, &MPI_FLOAT_ARRAY);
MPI_Type_commit(&MPI_FLOAT_ARRAY);

//defintion of the vector datatype. This is used to send the non-contiguous regions of the array to the columns.
MPI_Datatype MPI_VECTOR;
MPI_Type_vector(MP,1,NP+2,MPI_FLOAT,&MPI_VECTOR);
MPI_Type_commit(&MPI_VECTOR);


partstart=MPI_Wtime();

do														
	{	k++; 	//k is a counter for calculating delta value here	

			// using cartesian nonperiodic boundary
		
		MPI_Issend( &old[MP][1], 1, MPI_FLOAT_ARRAY, right, tag,comm,&request1);
		MPI_Irecv( &old[0][1], 1, MPI_FLOAT_ARRAY, left, tag,comm, &request2);

		MPI_Issend( &old[1][1], 1, MPI_FLOAT_ARRAY, left, tag,comm,&request3);
		MPI_Irecv( &old[MP+1][1], 1, MPI_FLOAT_ARRAY, right, tag,comm, &request4);

                MPI_Issend( &old[1][NP], 1, MPI_VECTOR, up, tag,comm,&request5);
                MPI_Irecv( &old[1][0], 1, MPI_VECTOR, down, tag,comm, &request6);

                MPI_Issend( &old[1][1], 1, MPI_VECTOR, down, tag,comm,&request7);
                MPI_Irecv( &old[1][NP+1], MP, MPI_VECTOR, up, tag,comm, &request8);
		MPI_Wait(&request2,&status);
		MPI_Wait(&request1,&status);
		MPI_Wait(&request4,&status);
		MPI_Wait(&request3,&status);
                MPI_Wait(&request6,&status);
                MPI_Wait(&request5,&status);
                MPI_Wait(&request8,&status);
                MPI_Wait(&request7,&status);
//	updating the image.
		for(i=1; i<=MP; i++)
		{
			for(j=1; j<=NP; j++)
			{	
			new[i][j] = (0.25000)*(old[i-1][j] + old[i+1][j] + old[i][j-1] + old[i][j+1] - edge[i][j]);
			}
		}

//	 checking delta value 
		if(k%DEL_CHECK==0)
		{
			sumdiff = 0;
			for (i=1; i<=MP; i++)
			{
				for (j=1; j<=NP;j++)
				{
					sumdiff=sumdiff+ ((new[i][j] - old[i][j])*(new[i][j] - old[i][j]));
				}
			}
			MPI_Allreduce(&sumdiff,&deltapartial,1,MPI_FLOAT,MPI_SUM,comm);
			localdelta=deltapartial/(M*N);
			delta=sqrt(localdelta);
			if(delta<=DELTA)
				{
				if (rank==0){
				printf("Final value of delta= %f at k =%d\n",delta,k);
				}
				fflush(stdout);
				breakflag=0;
				}
#ifdef DELTA_VAL_PRINT

			else { 
			if (rank==0){
			printf("Value of delta= %f at k=%d \n",delta,k);
			fflush(stdout);
			}
			}
#endif
		}					
	//equating old=new

		for (i=1; i<=MP;i++)
		{
			for (j=1; j<=NP;j++)
			{
			old[i][j]=new[i][j];
			}	
		}		

} while(breakflag);	

partend=MPI_Wtime();

parttotal=partend-partstart;
periteration=parttotal/k; 		//periteration= time taken per iteration. This is used to analyse performance data


//array old is copied into buff without the halo's.
for (i=1; i<=MP;i++)
	{
		for (j=1; j<=NP;j++)
		{
		buff[i-1][j-1]=old[i][j];
		}
	}


for(i=rowlowerindex,m=0; i<rowupperindex;i++,m++)
	{
		for(j=collowerindex,n=0; j<colupperindex;j++,n++)
		{
		masterbuff[i][j]=buff[m][n];
		}
	}

//The final image is reduced with MPI_SUM to add up the data in the smaller images.
size=M*N;
MPI_Allreduce(masterbuff,resultbuff,size,MPI_FLOAT,MPI_SUM,MPI_COMM_WORLD);

if(rank==0)
	{
	pgmwrite(FILEOUT,resultbuff,M,N);
	}

//stop the time clock
ftime=MPI_Wtime();
totaltime=ftime-itime;
if (rank==0){
	printf("***Total time taken to run the program is %f seconds ***\n",totaltime);

	printf("***Time taken per iteration is %f seconds ***\n",periteration);
	}

MPI_Finalize();
free(edge);
free(old);
free(buff);
free(new);
}

