#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <mpi.h>


/* Lookup table of single-bit masks: bits[i] == 1 << i.
 * Must be unsigned char: 128 does not fit in a (possibly signed) plain
 * char, and a negative mask would sign-extend when promoted to int inside
 * the macros below, setting stray high bits on |= . */
unsigned char bits[8] = {1,2,4,8,16,32,64,128};

/* Single-variable bit helpers (currently unused by this program). */
#define GET_BIT(var,pos) ((var) & (1<<(pos)))   
#define SET_BIT(var,pos) ((var) |= (1<<(pos)))

/* Bit-array helpers: var is treated as 8 usable bits per element.
 * GET_ARRAY_BIT evaluates to 1 if bit `pos` is set, else 0.
 * NOTE: pos is evaluated twice and is not parenthesised — pass a simple
 * expression or wrap the argument in parentheses at the call site. */
#define GET_ARRAY_BIT(var,pos) ((var[pos/8] & bits[pos % 8]) > 0 ? 1 : 0)
#define SET_ARRAY_BIT(var,pos) (var[pos/8] |= bits[pos % 8])


#define TAG 123 /* spare message tag (unused: MPI_Bcast takes no tag) */

/* Globals */
int rank, size; /* MPI rank of this process / total process count */

/* Functions */
int initMPI(int *pargc, char *pargv[]);

int main(int argc, char *argv[])
{
	clock_t c0, c1;
	unsigned short *eratosthenes;
	unsigned long k, l, offset;
	unsigned long start, end, gridpart, gridsize;

	c0=clock();

	initMPI(&argc, argv); /* setup MPI, size and rank */

	/* set up range parameters */
	gridsize = atol(argv[1]); /* total size */
	
	// GILES TODO - we are assuming that the number of procs is a factor of gridsize here
	// in fact unless this is the case then the last rank should be smaller (or the rank
	// size should be distributed in a more even fashion) -- HOWEVER the symmetry of making this assumption
	// makes the code very simple so perhaps the extra crossing off up to the next factor of gridpart in the 
	// higest rank process is worth this simplification (and hence speed) -- need to do the math on the speedup
	start = rank * gridsize / size;
	end = (rank + 1) * gridsize / size;
	gridpart = end - start;
	 
	/* gridpart = gridsize / size; */
	/* changing size of the parts may optimise the algorithmn */
	/* start = (rank == 0) ? 0 : (rank * gridpart) + 1; */
	/* end = (rank == (size - 1)) ? gridsize : (rank + 1) * gridpart; */
	/* gridpart = end - start; */ /* Last rank may have a different size */

	printf("Rank: %d, Gridsize: %ld, Gridpart: %ld, Start: %ld, End: %ld\n", rank, gridsize, gridpart, start, end);

	/* Need to use calloc to initialise values */
	eratosthenes = (unsigned short *) (calloc(ceil(gridpart/8.0), sizeof(unsigned short)));

	// k is the current prime that we cross of the multiples of, start with k = 2;
	k = 2;

	if (rank == 0)
	{
        SET_ARRAY_BIT(eratosthenes, 1);
	}

	while(pow(k,2) < gridsize)
	{
		if(rank == 0)
		{
			// rank zero chooses the primes to multiply (k)
			// skip any bits already set
			while(GET_ARRAY_BIT(eratosthenes, k) && k < end)
				k++;
		}

		/* broadcast k to everyone */
		MPI_Bcast(&k, 1, MPI_LONG, 0, MPI_COMM_WORLD);

		// everyone marks off their multiples of k (starting from an offset of the
		// first factor of k in their block
		offset = k - (start % k);

		for(l=offset; l<=gridpart; l+=k)
		{
			SET_ARRAY_BIT(eratosthenes, l);	
		}

		k++; // we just marked off the current k so this saves a GET_BIT_ARRAY
	} 

	// GILES TODO - in theory we should agregate the results back to rank zero so it can create the list of
	// of primes (or transmit the highest prime found from top rank if that was our goal)
	// NOTE that the overhead of collecting all the primes would be noticable in your speedup calculations
	// ---> MPI_REDUCE goes here

	c1=clock();

	// find the highest prime in our block
	k=gridpart;
	while(GET_ARRAY_BIT(eratosthenes, k))
		k--;

	free(eratosthenes);
	printf ("Rank: %d, Done, largest prime %ld, %0.2f seconds\r\n", rank, k+start, (float)(c1 - c0)/CLOCKS_PER_SEC);

	MPI_Finalize(); /* Finalise MPI */

	return 0;
}

/* Bring up MPI and cache this process's rank and the world size in the
 * file-scope globals.  Aborts the whole job if initialisation fails.
 * Always returns 0. */
int initMPI(int *pargc, char *pargv[])
{
	int rc = MPI_Init(pargc, &pargv);

	if (rc != MPI_SUCCESS) {
		fprintf(stderr, "Error initialising MPI.");
		fflush(stderr);
		MPI_Abort(MPI_COMM_WORLD, rc);
	}

	MPI_Comm_rank(MPI_COMM_WORLD, &rank); /* who am I? */
	MPI_Comm_size(MPI_COMM_WORLD, &size); /* how many of us? */

	return 0;
}
