#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

#include <mpi.h>

#define send_data_tag 2001
#define return_data_tag 2002
#define uc unsigned char 

// One RGB pixel, one unsigned byte per channel (plain-PPM samples 0-255).
typedef struct {
	uc r, g, b;
} pixel;

// function to apply the smooth 
void smooth(pixel ** in, pixel ** out, int start_row, int end_row, int nrow, int ncol, int id) 
{
	int i, j, k, l;
	// for which pixel (i,j) calculates the smooth
	for(i = start_row; i <= end_row; i++) {
		for(j = 0; j < ncol; j++) {

			// sums for each component of RGB 
			int sumr, sumg, sumb;
			sumr = sumg = sumb = 0;

			// considers 5x5 square 
			for(k = i - 2; k <= i + 2; k++) {

				for(l = j - 2; l <= j + 2; l++) {

					// assumes zero for border pixels
					if( k < 0 || k >= nrow || l < 0 || l >= ncol)
						continue;

					// acumulate
					sumr += in[k][l].r;
					sumg += in[k][l].g;
					sumb += in[k][l].b;
				}
			}

			// calculate the mean
			int sub = (id ? start_row : 0);
			out[i - sub][j].r = sumr/25;
			out[i - sub][j].g = sumg/25;
			out[i - sub][j].b = sumb/25;
		}
	}
}

/*
 * First global row worker `pid` must receive: the top of its assigned band
 * minus a 2-row halo for the 5x5 window, clamped to the image's first row.
 */
int start_mat(int pid, int rows_per_process) 
{
	int first = (pid - 1) * rows_per_process - 2;
	return (first < 0) ? 0 : first;
}

/*
 * Last global row worker `pid` must receive: the bottom of its assigned
 * band plus a 2-row halo, clamped to the image's last row.
 */
int end_mat(int pid, int rows_per_process, int nrow)
{
	int last = pid * rows_per_process + 1;
	return (last >= nrow) ? nrow - 1 : last;
}

/*
 * Offset of worker `pid`'s first assigned row inside its received
 * (halo-extended) buffer: band start minus the clamped halo start.
 * This is 2 for interior workers and less when the halo hit row 0.
 */
int start_work(int pid, int rows_per_process) 
{
	int band_start = (pid - 1) * rows_per_process;
	int halo_start = band_start - 2;	/* same value start_mat() returns */
	if (halo_start < 0)
		halo_start = 0;
	return band_start - halo_start;
}

int main (int argc, char ** argv)
{ 
	// require arguments to run
	if( argc != 3 ) {
		printf("usage: ./build/mpi <file_input_name.ppm> <file_output_name.ppm>\n");
		exit(1);
	}

	int my_id, num_procs;
	int ncol, nrow, rows_per_process;
	int i, j, pid;
	char name_procs[63];
	int  size_name_procs;

	/*
	 * BEGIN - Init Parallel program 
	 */
	MPI_Status status;
	MPI_Init(&argc, &argv);

	MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
	MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
	MPI_Get_processor_name(name_procs, &size_name_procs);

	if( my_id == 0 ) {

		// open file
		FILE * fp = fopen(argv[1], "r");
		FILE * fout = fopen(argv[2], "w");
		char encoding[4];
		uc maxValue;

		// read file header
		fscanf(fp, "%s", encoding);
		fscanf(fp, "%d %d", &ncol, &nrow);
		fscanf(fp, "%hhu", &maxValue);

		rows_per_process = nrow / num_procs;

		// alloc memory for both matrices
		pixel ** in = (pixel **) malloc ( nrow * sizeof (pixel *) );
		pixel ** out = (pixel **) malloc ( nrow * sizeof (pixel *) );
		for(i = 0; i < nrow; i++) {
			in[i] = (pixel *) malloc ( ncol * sizeof (pixel ) );
			out[i] = (pixel *) malloc ( ncol * sizeof (pixel ) );
		}

		// read image
		for(i = 0; i < nrow; i++) {
			for(j = 0; j < ncol; j++) 
				fscanf(fp, "%hhu %hhu %hhu", &in[i][j].r, &in[i][j].g, &in[i][j].b);
		}

		/*****************
		 * Begins new and different paralell program.
		 *	ROOT 
		 */

		struct timeval start_time, end_time, end_send;
		gettimeofday(&start_time, NULL);
		for(pid = 1; pid < num_procs; pid++) {
			int start_at_work = start_work(pid, rows_per_process);
			int nrows_send = end_mat(pid, rows_per_process, nrow) - start_mat(pid, rows_per_process) + 1;
			int start_at = start_mat(pid, rows_per_process);

			MPI_Send(&nrow, 1, MPI_INT,  pid, send_data_tag, MPI_COMM_WORLD);
			MPI_Send(&ncol, 1, MPI_INT,  pid, send_data_tag, MPI_COMM_WORLD);

			for(i = 0; i < nrows_send ; i++) {
				MPI_Send(in[start_at + i], ncol * sizeof (pixel), 
							MPI_BYTE,  pid, send_data_tag, MPI_COMM_WORLD);
			}
		}
		gettimeofday(&end_send, NULL);
		double result_time_send = end_send.tv_sec - start_time.tv_sec + (end_send.tv_usec - start_time.tv_usec)/1000000.0;
		//printf("SEND_time: %lf\n", result_time_send);

		/*	 work assigned to the root process 	*/
		// smooth
		smooth(in, out, (num_procs - 2) * rows_per_process + 1, nrow - 1, nrow, ncol, my_id);

		/* collet the partial matrix */
		pixel * partial = (pixel *) malloc ( ncol * sizeof (pixel) );

		for(pid = 1; pid < num_procs; pid ++) {

			for(i = 0; i < rows_per_process; i++) {
				MPI_Recv(partial, ncol * sizeof(pixel), MPI_BYTE, 
					pid, return_data_tag, MPI_COMM_WORLD, &status);

				memcpy(out[(pid - 1) * rows_per_process + i], 
					partial, ncol * sizeof(pixel));
			}
		}
		gettimeofday(&end_time, NULL);
		double result_time = end_time.tv_sec - start_time.tv_sec + (end_time.tv_usec - start_time.tv_usec)/1000000.0;
		printf("%lf\n", result_time);

		free(partial);

		// write the results in output file
		fprintf(fout, "%s\n", encoding);
		fprintf(fout, "%d %d\n", ncol, nrow);
		fprintf(fout, "%hhu\n", maxValue);
		for(i = 0; i < nrow; i++) {
			for(j = 0; j < ncol; j++) 
				fprintf(fout, "%hhu %hhu %hhu ", out[i][j].r, out[i][j].g, out[i][j].b);
			fprintf(fout, "\n");
		}

		// free memory
		for(i = 0; i < nrow; i++)
			free(in[i]), free(out[i]);
		free(in);
		free(out);
		fclose(fp);
		fclose(fout);
	}
	else {
		MPI_Recv(&nrow, 1, MPI_INT, 0, send_data_tag, MPI_COMM_WORLD, &status);
		MPI_Recv(&ncol, 1, MPI_INT, 0, send_data_tag, MPI_COMM_WORLD, &status);

		rows_per_process = nrow / num_procs;

		int nrows_recv = end_mat(my_id, rows_per_process, nrow) - start_mat(my_id, rows_per_process) + 1;

		pixel ** local_in = (pixel **) malloc ( nrows_recv * sizeof (pixel *) );
		for(i = 0; i < nrows_recv; i++) 
			local_in[i] = (pixel *) malloc ( ncol * sizeof (pixel ) );

		pixel ** local_out = (pixel **) malloc ( rows_per_process * sizeof (pixel *) );
		for(i = 0; i < rows_per_process; i++) 
			local_out[i] = (pixel *) malloc ( ncol * sizeof (pixel ) );
			
		for(i = 0; i < nrows_recv; i++) {
			MPI_Recv(local_in[i], ncol * sizeof(pixel), MPI_BYTE, 
				0, send_data_tag, MPI_COMM_WORLD, &status);
		}

		/**
		 * Process using smooth
		 */

		smooth(local_in, local_out, start_work(my_id, rows_per_process), 
			start_work(my_id, rows_per_process) + rows_per_process - 1, nrow, ncol, my_id);
	
		for(i =0; i < rows_per_process; i++) {
			MPI_Send(local_out[i], ncol * sizeof (pixel), MPI_BYTE, 
				0, return_data_tag, MPI_COMM_WORLD);
		}

		for(i = 0; i < nrows_recv; i++)
			free(local_in[i]);
		for(i = 0; i < rows_per_process; i++)
			free(local_out[i]);
		free(local_in);
		free(local_out);
	}

	MPI_Finalize();

	return 0;
}
