#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>
#include <time.h>
#define	NDEBUG
#include <assert.h>

#define	CREATE_SAME_SEQ

#define RANDOM(min, max)	(rand() % ((max) - (min) + 1) + (min))
#define SEQUENCE_MIN				1
#define SEQUENCE_MAX				10000

#define ORIGINAL_SEQ				"original_seq.txt"
#define SORTED_SEQ					"sorted_seq.txt"

/*
 * qsort() comparator for ints, ascending order.
 *
 * Uses comparisons instead of subtraction: `a - b` overflows (undefined
 * behavior) when the operands are far apart, e.g. INT_MIN vs INT_MAX.
 * Returns <0, 0 or >0 as qsort() requires.
 */
int compare_int(const void *a, const void *b)
{
	int x = *(const int *)a;
	int y = *(const int *)b;

	return (x > y) - (x < y);
}

// split the sequence into two parts around the pivot
/*
 * Partition src[0..len-1] around `pivot` into dst: elements <= pivot come
 * first, elements > pivot follow.  The relative order inside each part is
 * preserved (stable partition).  dst must have room for len ints.
 *
 * On return *len_s holds the count of elements <= pivot and *len_l the
 * count of elements > pivot.
 *
 * Returns 0 on success, -1 if the scratch allocation fails (the original
 * code dereferenced the unchecked malloc result).
 */
int list_partion(int pivot, int *src, int len, int *dst,  \
						int *len_s, int *len_l)
{
	int i;
	int cnt_s = 0, cnt_l = 0;
	int *temp;

	// scratch space for the "larger than pivot" elements
	temp = malloc((size_t)len * sizeof(int));
	if (temp == NULL && len > 0)
		return -1;

	for (i = 0; i < len; i++) {
		if (src[i] <= pivot)
			dst[cnt_s++] = src[i];
		else
			temp[cnt_l++] = src[i];
	}

	// append the larger part after the smaller part
	memcpy(dst + cnt_s, temp, (size_t)cnt_l * sizeof(int));
	free(temp);
	*len_s = cnt_s;
	*len_l = cnt_l;

	return 0;
}

// Debug helper: print len ints from src, space separated, framed by
// blank lines on stdout.
void printf_val(int *src, int len)
{
	int idx;

	printf("\n");
	for (idx = 0; idx < len; idx++)
		printf("%d ", src[idx]);
	printf("\n");
}


// all_len is the total number of elements in the communicator
// Recursive parallel quicksort step over communicator `comm`.
//
// Each rank holds *list_len elements in `list`; all_len is the total
// element count across every rank in the communicator.  A shared pivot is
// chosen (every rank draws the same pseudo-random position because all
// ranks share the same srand() seed — set in parallel_qsort), each rank
// partitions locally, the two halves are redistributed with
// MPI_Alltoallv, the communicator is split in two and each half recurses
// until a sub-communicator has a single rank, which sorts locally.
//
// On return *list_len holds this rank's (possibly changed) element count.
// Returns 0.
int qsort_split(MPI_Comm comm, int *list, int *list_len, int all_len)
{
	int i, err, rank, comm_size;
	int *recv_cnt_s, *recv_cnt_l;
	int *send_cnt, *recv_cnt;
	int *send_displs, *recv_displs;

	int *temp_val;
	int len_s, len_l;
	int total_len_l, total_len_s; // totals over the whole communicator
	int local_len;
	int comm_size_s, comm_size_l;
	int pivot, pivot_val, pivot_rank, pivot_index, temp_cnt = 0;

	MPI_Comm comm_new;
	int color;

	err = MPI_Comm_rank(comm, &rank);
	err = MPI_Comm_size(comm, &comm_size);
	(void)err;

	if (comm_size == 1) {
		// base case: sort this rank's chunk locally
		qsort(list, *list_len, sizeof(int), compare_int);

	} else {
		// comm_size > 1 with all_len == 0 is an error
		assert(all_len != 0);

		temp_val = (int *)malloc(sizeof(int) * (all_len + 1));
		recv_cnt_s = (int *)malloc(sizeof(int) * comm_size);
		recv_cnt_l = (int *)malloc(sizeof(int) * comm_size);
		send_cnt = (int *)malloc(sizeof(int) * comm_size);
		recv_cnt = (int *)malloc(sizeof(int) * comm_size);
		send_displs = (int *)malloc(sizeof(int) * comm_size);
		recv_displs = (int *)malloc(sizeof(int) * comm_size);

		// all-gather the element count held by each rank
		MPI_Allgather(list_len, 1, MPI_INT, recv_cnt, 1, MPI_INT, comm);

		// choose a global pivot position; no communication needed because
		// every rank's rand() stream is identical
		pivot = RANDOM(0, all_len - 1);
		temp_cnt = 0;
		pivot_rank = 0;  // defensive defaults; the loop below always breaks
		pivot_index = 0; // since the counts sum to all_len > pivot
		for (i = 0; i < comm_size; i++) {
			temp_cnt += recv_cnt[i];
			if (temp_cnt > pivot) {
				pivot_rank = i;
				pivot_index = pivot - (temp_cnt - recv_cnt[i]);
				break;
			}
		}

		// only the owning rank may read list[pivot_index]: on other ranks
		// the index can exceed the local array length (the original code
		// performed this out-of-bounds read on every rank)
		if (rank == pivot_rank)
			pivot_val = list[pivot_index];

		// broadcast the pivot value to the other ranks
		MPI_Bcast(&pivot_val, 1, MPI_INT, pivot_rank, comm);

		// partition the local array: <= pivot first, > pivot after
		list_partion(pivot_val, list, *list_len, temp_val, &len_s, &len_l);

		// gather the small/large part lengths from every rank
		MPI_Allgather(&len_s, 1, MPI_INT, recv_cnt_s, 1, MPI_INT, comm);
		MPI_Allgather(&len_l, 1, MPI_INT, recv_cnt_l, 1, MPI_INT, comm);

		total_len_s = 0;
		total_len_l = 0;
		for (i = 0; i < comm_size; i++) {
			total_len_s += recv_cnt_s[i];
			total_len_l += recv_cnt_l[i];
		}

		// size the two sub-communicators proportionally to the data,
		// keeping both non-empty
		comm_size_s = (total_len_s * comm_size) / all_len;
		if (comm_size_s == 0)
			comm_size_s = 1;
		if (comm_size_s == comm_size)
			comm_size_s--;

		comm_size_l = comm_size - comm_size_s;

		// both communicator sizes must be positive
		assert((comm_size_s > 0) && (comm_size_l > 0));

		color = (rank < comm_size_s) ? 0 : 1;

		// compute send/recv counts and displacements for the all-to-all
		send_displs[0] = 0;
		recv_displs[0] = 0;
		local_len = 0;
		for (i = 0; i < comm_size; i++) {
			// send counts: spread our small part evenly over the first
			// comm_size_s ranks and our large part over the remaining ones
			if (i < comm_size_s) {
				send_cnt[i] = len_s / comm_size_s;
				if (i < len_s % comm_size_s)
					send_cnt[i] += 1;
			} else {
				send_cnt[i] = len_l / comm_size_l;
				if ((i < comm_size_s + len_l % comm_size_l) && \
							(i >= comm_size_s))
					send_cnt[i] += 1;
			}

			// recv counts: mirror of the send computation from rank i's
			// point of view
			if (rank < comm_size_s) { // we receive from the smaller parts
				recv_cnt[i] = recv_cnt_s[i] / comm_size_s;
				if (rank < recv_cnt_s[i] % comm_size_s)
					recv_cnt[i] += 1;
			} else {
				recv_cnt[i] = recv_cnt_l[i] / comm_size_l;
				if ((rank < comm_size_s + recv_cnt_l[i] % comm_size_l) && \
							(rank >= comm_size_s))
					recv_cnt[i] += 1;
			}

			// running displacements
			if (i >= 1) {
				send_displs[i] = send_displs[i-1] + send_cnt[i - 1];
				recv_displs[i] = recv_displs[i-1] + recv_cnt[i - 1];
			}
			local_len += recv_cnt[i];
		}

		// redistribute the numbers across the communicator
		MPI_Alltoallv(temp_val, send_cnt, send_displs, MPI_INT,
						list, recv_cnt, recv_displs, MPI_INT, comm);

		free(temp_val);
		free(recv_cnt_s);
		free(recv_cnt_l);
		free(send_cnt);
		free(recv_cnt);
		free(send_displs);
		free(recv_displs);

		// split the communicator and recurse into our half
		MPI_Comm_split(comm, color, rank, &comm_new);
		*list_len = local_len;
		if (color == 0)
			qsort_split(comm_new, list, list_len, total_len_s);
		else
			qsort_split(comm_new, list, list_len, total_len_l);

		// release the sub-communicator created by MPI_Comm_split (the
		// original code leaked one communicator per recursion level)
		MPI_Comm_free(&comm_new);
	}
	return 0;
}

/*
 * Top-level parallel quicksort driver, executed by every rank.
 *
 * Rank 0 holds the full sequence in list[0..len-1]; `len` must be the
 * same value on every rank.  The numbers are scattered evenly, sorted via
 * the recursive qsort_split(), then gathered back (sorted) into `list`
 * on rank 0.
 *
 * Returns 0 on success, -1 if a local allocation fails (note: an OOM on
 * one rank while others proceed would stall the collectives — TODO
 * consider an agreement step if that matters in practice).
 */
int parallel_qsort(int *list, int len)
{
	int i, rank, proc_cnt;
	int recv_size, *local_val;
	int local_len;
	int *recv_cnt, *recv_displs;
	int *send_cnt, *send_displs;

	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &proc_cnt);

	// local_val gets the full length: qsort_split() may leave this rank
	// holding more elements than its initial share
	local_val = malloc(sizeof(int) * len);
	recv_cnt = malloc(sizeof(int) * proc_cnt);
	recv_displs = malloc(sizeof(int) * proc_cnt);
	send_cnt = malloc(sizeof(int) * proc_cnt);
	send_displs = malloc(sizeof(int) * proc_cnt);

	// the original code never checked these allocations
	if (!local_val || !recv_cnt || !recv_displs || !send_cnt || !send_displs) {
		free(local_val);
		free(recv_cnt);
		free(recv_displs);
		free(send_cnt);
		free(send_displs);
		return -1;
	}

	// split len as evenly as possible: the first len % proc_cnt ranks
	// receive one extra element
	send_displs[0] = 0;
	for (i = 0; i < proc_cnt; i++) {
		send_cnt[i] = len / proc_cnt;
		if (i < len % proc_cnt)
			send_cnt[i] += 1;
		if (i > 0)
			send_displs[i] = send_displs[i - 1] + send_cnt[i - 1];
	}

	recv_size = send_cnt[rank];
	local_len = recv_size;

	// scatter the numbers from rank 0 to every rank
	MPI_Scatterv(list, send_cnt, send_displs, MPI_INT, \
				local_val, recv_size, MPI_INT, 0, MPI_COMM_WORLD);

	// identical seed on every rank so qsort_split() can draw the same
	// random pivot everywhere without extra communication
	srand(1);
	// do the parallel quick sort recursively
	qsort_split(MPI_COMM_WORLD, local_val, &local_len, len);

	// share the per-rank result sizes so displacements can be computed
	MPI_Allgather(&local_len, 1, MPI_INT, recv_cnt, 1, MPI_INT, MPI_COMM_WORLD);

	recv_displs[0] = 0;
	for (i = 1; i < proc_cnt; i++)
		recv_displs[i] = recv_displs[i - 1] + recv_cnt[i - 1];

	// gather all the sorted chunks back onto rank 0
	MPI_Gatherv(local_val, local_len, MPI_INT,	\
				list, recv_cnt, recv_displs, MPI_INT, 0, MPI_COMM_WORLD);

	free(local_val);
	free(recv_cnt);
	free(recv_displs);
	free(send_cnt);
	free(send_displs);

	return 0;
}

// initialize all the numbers	
// Write `len` random integers in [SEQUENCE_MIN, SEQUENCE_MAX] to the
// ORIGINAL_SEQ file, space separated.
// Returns 0 on success, -1 if the file cannot be opened (the original
// code dereferenced the unchecked fopen result).
int sequence_init(int len)
{
	FILE *fp;
	int i;

	fp = fopen(ORIGINAL_SEQ, "wb");
	if (fp == NULL) {
		fprintf(stderr, "Error: cannot open %s for writing\n", ORIGINAL_SEQ);
		return -1;
	}

#ifdef CREATE_SAME_SEQ
	srand(1); // same sequence on every run (reproducible)
#else
	srand(time(NULL)); // different sequence on every run
#endif
	for (i = 0; i < len; i++) {
		fprintf(fp, "%d ", RANDOM(SEQUENCE_MIN, SEQUENCE_MAX));
	}

	fclose(fp);
	return 0;
}

// Read whitespace-separated integers from ORIGINAL_SEQ into `list` and
// store the number of values read in *len.  The caller must ensure `list`
// has room for every value in the file.
// Returns 0 on success, -1 if the file cannot be opened (in which case
// *len is set to 0).
int read_sequence(int *list, int *len)
{
	FILE *fp;
	int *p = list;
	int cnt = 0;

	fp = fopen(ORIGINAL_SEQ, "r");
	if (fp == NULL) {
		*len = 0;
		return -1;
	}

	// stop on EOF *or* a non-numeric token: the original `!= EOF` test
	// spins forever when fscanf returns 0 (matching failure)
	while (fscanf(fp, "%d", p) == 1) {
		p++;
		cnt++;
	}
	*len = cnt;
	fclose(fp);
	return 0;
}

// Write `len` integers from `list` to the SORTED_SEQ file, space
// separated.
// Returns 0 on success, -1 if the file cannot be opened (the original
// code dereferenced the unchecked fopen result).
int print_sorted_sequence(int *list, int len)
{
	FILE *fp;
	int i;

	fp = fopen(SORTED_SEQ, "w");
	if (fp == NULL) {
		fprintf(stderr, "Error: cannot open %s for writing\n", SORTED_SEQ);
		return -1;
	}

	for (i = 0; i < len; i++) {
		fprintf(fp, "%d ", *list++);
	}

	fclose(fp);
	return 0;
}

// Program entry point.  Usage: mpirun -np N ./a.out <sequence_size>
// Rank 0 generates and reads the input sequence; all ranks take part in
// the parallel sort; rank 0 writes the sorted result and timing info.
int main(int argc, char *argv[])
{
	int err, rank, proc_size;
	int *list_val;
	int list_len;
	double t0, t1;

	err = MPI_Init(&argc, &argv);
	err = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	err = MPI_Comm_size(MPI_COMM_WORLD, &proc_size);
	(void)proc_size;

	if (argc == 1) {
		if (rank == 0)
			printf("Error: please pass the sequence size as the first argument.\r\n");
		err = MPI_Finalize();
		return err;
	}

	// every rank parses the same argv, so list_len agrees everywhere;
	// validate it — the original code accepted 0 or negative sizes
	list_len = atoi(argv[1]);
	if (list_len <= 0) {
		if (rank == 0)
			printf("Error: the sequence size must be a positive integer.\r\n");
		err = MPI_Finalize();
		return err;
	}

	list_val = malloc(sizeof(int) * list_len);
	if (list_val == NULL) {
		if (rank == 0)
			printf("Error: out of memory.\r\n");
		MPI_Finalize();
		return 1;
	}

	if (rank == 0) {
		sequence_init(list_len);
		// read sequence back; list_len becomes the count actually read
		read_sequence(list_val, &list_len);
		printf("Read %d numbers\r\n", list_len);
	}

	t0 = MPI_Wtime();
	// parallel quick sort
	parallel_qsort(list_val, list_len);
	t1 = MPI_Wtime();

	// print the result
	if (rank == 0) {
		print_sorted_sequence(list_val, list_len);
		printf("Finished sorting.\r\n");
		printf("total_len: %d\r\n", list_len);
		printf("###Time assuming: %f us\r\n", (t1 - t0) * 1000000);
	}

	free(list_val);

	err = MPI_Finalize();
	return err;
}
