#include <mpi.h>
#include <stdio.h>

#include "p2p.h"
#include "../utils/utils.h"

#define NUM_BUFFERS	2


int p2p_onesided(struct algorithm_data* info, int type)
{
	int serverpid, pserverpid;
	struct matrix_t* buf[NUM_BUFFERS];
	long bufsize;
	int bufindex=0, pbufindex;
	int i, err=0;
	MPI_Win win;
	MPI_Group group, group_ex;
	double t1, t2, tcomm=0, tcpu=0;

	for (i = 0; i < NUM_BUFFERS; ++i) {
		err |= alloc_matrix_b(&buf[i], info->b->n, info->b->m);
	}
	MPI_Comm_group(MPI_COMM_WORLD, &group);
	MPI_Group_excl(group, 1, &(info->info->proc_id), &group_ex);
	bufsize = info->b->m * info->b->n;
	err |= MPI_Win_create(
		info->b->mat,
		bufsize,
		sizeof(MPI_LONG),
		MPI_INFO_NULL, 
		MPI_COMM_WORLD,
		&win);

	serverpid = (info->info->proc_id + 1) % info->info->num_procs;

	t1 = MPI_Wtime();
	switch (type) {
		case ONESIDED_ACT:
			err |= MPI_Win_post(group_ex, 0, win);
			err |= MPI_Win_start(group_ex, 0, win);
			break;
		case ONESIDED_PAS:
			err |= MPI_Win_lock(MPI_LOCK_SHARED, serverpid, 0, win);
			break;
		case ONESIDED_FEN:
			err |= MPI_Win_fence(0, win);
			break;
	}
	err |= MPI_Get(
		buf[bufindex]->mat, bufsize, MPI_LONG,
		serverpid, 0, bufsize, MPI_LONG, win);
	tcomm += MPI_Wtime() - t1;

	t2 = MPI_Wtime();
	err |= diag_calc(info);
	tcpu += MPI_Wtime() - t2;
	do {
		t1 = MPI_Wtime();
		switch (type) {
			case ONESIDED_ACT:
				err |= MPI_Win_complete(win);
				err |= MPI_Win_wait(win);
				break;
			case ONESIDED_PAS:
				err |= MPI_Win_unlock(serverpid, win);
				break;
			case ONESIDED_FEN:
				err |= MPI_Win_fence(0, win);
				break;
		}
		tcomm += MPI_Wtime() - t1;

		pserverpid = serverpid;
		pbufindex = bufindex;
		bufindex = (bufindex + 1) % NUM_BUFFERS;
		serverpid = (serverpid + 1) % info->info->num_procs;
		if (serverpid == info->info->proc_id) break;

		t1 = MPI_Wtime();
		switch (type) {
			case ONESIDED_ACT:
				err |= MPI_Win_post(group_ex, 0, win);
				err |= MPI_Win_start(group_ex, 0, win);
				break;
			case ONESIDED_PAS:
				err |= MPI_Win_lock(MPI_LOCK_SHARED, serverpid, 0, win);
				break;
			case ONESIDED_FEN:
				err |= MPI_Win_fence(0, win);
				break;
		}
		err |= MPI_Get(
			buf[bufindex]->mat, bufsize, MPI_LONG, 
			serverpid, 0, bufsize, MPI_LONG, win);
		tcomm += MPI_Wtime() - t1;

		t2 = MPI_Wtime();
		err |= calc(info, buf[pbufindex], pserverpid);
		tcpu += MPI_Wtime() - t2;
	} while (1);
	t2 = MPI_Wtime();
	err |= calc(info, buf[pbufindex], pserverpid);
	tcpu += MPI_Wtime() - t2;

	if (type == ONESIDED_ACT)
		printf("acos.%d [tcomm=%f, tcpu=%f]\n", info->info->proc_id, tcomm, tcpu);
	else if (type == ONESIDED_PAS)
		printf("paos.%d [tcomm=%f, tcpu=%f]\n", info->info->proc_id, tcomm, tcpu);
	else
		printf("feos.%d [tcomm=%f, tcpu=%f]\n", info->info->proc_id, tcomm, tcpu);
	
	for (i = 0; i < NUM_BUFFERS; ++i) dealloc_matrix(buf[i]);
	MPI_Win_free(&win);
	return err;
}


int p2p_onesided_np(struct algorithm_data* info, int type)
{
	int serverpid, pserverpid;
	struct matrix_t* buf;
	long bufsize;
	int err=0;
	MPI_Win win;
	double t1, t2, tcomm=0, tcpu=0;
	MPI_Group group, group_ex;

	alloc_matrix_b(&buf, info->b->n, info->b->m);

	bufsize = info->b->m * info->b->n;
	err |= MPI_Win_create(
		info->b->mat,
		bufsize,
		sizeof(MPI_LONG),
		MPI_INFO_NULL, 
		MPI_COMM_WORLD,
		&win);

	MPI_Barrier(MPI_COMM_WORLD);
	t2 = MPI_Wtime();
	err |= diag_calc(info);
	tcpu += MPI_Wtime() - t2;

	MPI_Comm_group(MPI_COMM_WORLD, &group);
	MPI_Group_excl(group, 1, &(info->info->proc_id), &group_ex);

	serverpid = (info->info->proc_id + 1) % info->info->num_procs;

	t1 = MPI_Wtime();
	switch (type) {
		case ONESIDED_ACT:
			err |= MPI_Win_post(group_ex, 0, win);
			err |= MPI_Win_start(group_ex, 0, win);
			break;
		case ONESIDED_PAS:
			err |= MPI_Win_lock(MPI_LOCK_SHARED, serverpid, 0, win);
			break;
		case ONESIDED_FEN:
			err |= MPI_Win_fence(0, win);
			break;
	}

	err |= MPI_Get(
		buf->mat, bufsize, MPI_LONG,
		serverpid, 0, bufsize, MPI_LONG, win);
	do {
		switch (type) {
			case ONESIDED_ACT:
				err |= MPI_Win_complete(win);
				err |= MPI_Win_wait(win);
				break;
			case ONESIDED_PAS:
				err |= MPI_Win_unlock(serverpid, win);
				break;
			case ONESIDED_FEN:
				err |= MPI_Win_fence(0, win);
				break;
		}
		tcomm += MPI_Wtime() - t1;

		pserverpid = serverpid;
		serverpid = (serverpid + 1) % info->info->num_procs;
		if (serverpid == info->info->proc_id) break;
		
		t2 = MPI_Wtime();
		err |= calc(info, buf, pserverpid);
		tcpu += MPI_Wtime() - t2;

		t1 = MPI_Wtime();
		switch (type) {
			case ONESIDED_ACT:
				err |= MPI_Win_post(group_ex, 0, win);
				err |= MPI_Win_start(group_ex, 0, win);
				break;
			case ONESIDED_PAS:
				err |= MPI_Win_lock(MPI_LOCK_SHARED, serverpid, 0, win);
				break;
			case ONESIDED_FEN:
				t1 = MPI_Wtime();
				break;
		}
		err |= MPI_Get(
			buf->mat, bufsize, MPI_LONG, 
			serverpid, 0, bufsize, MPI_LONG, win);
	} while (1);
	t2 = MPI_Wtime();
	err |= calc(info, buf, pserverpid);
	tcpu += MPI_Wtime() - t2;

	if (type == ONESIDED_ACT) 
		printf("acosnp.%d [tcomm=%f, tcpu=%f]\n", info->info->proc_id, tcomm, tcpu);
	else if (type == ONESIDED_PAS)
		printf("paosnp.%d [tcomm=%f, tcpu=%f]\n", info->info->proc_id, tcomm, tcpu);
	else 
		printf("feosnp.%d [tcomm=%f, tcpu=%f]\n", info->info->proc_id, tcomm, tcpu);
	
	dealloc_matrix(buf);
	MPI_Win_free(&win);
	return err;
}


int p2p_twosided_nb(struct algorithm_data* info)
{
	int sender, receiver, psender;
	struct matrix_t* buf[NUM_BUFFERS];
	long bufsize;
	int bufindex=0, pbufindex;
	int i, err=0;
	MPI_Request sreq, rreq;
	MPI_Status status;
	double t1, t2, tcomm=0, tcpu=0;

	for (i = 0; i < NUM_BUFFERS; ++i) {
		err |= alloc_matrix_b(&buf[i], info->b->n, info->b->m);
	}

	bufsize = info->b->m * info->b->n;
	receiver = (info->info->proc_id + 1) % info->info->num_procs;
	sender = (info->info->proc_id - 1 + info->info->num_procs) % info->info->num_procs;

	if (info->info->proc_id % 2) {
		err |= MPI_Isend(info->b->mat, bufsize, MPI_LONG, receiver, 1, MPI_COMM_WORLD, &sreq);
		err |= MPI_Irecv(buf[bufindex]->mat, bufsize, MPI_LONG, receiver, 1, MPI_COMM_WORLD, &rreq);
	} else {
		err |= MPI_Isend(info->b->mat, bufsize, MPI_LONG, sender, 1, MPI_COMM_WORLD, &sreq);
		err |= MPI_Irecv(buf[bufindex]->mat, bufsize, MPI_LONG, sender, 1, MPI_COMM_WORLD, &rreq);
	}

	t2 = MPI_Wtime();
	err |= diag_calc(info);
	tcpu += MPI_Wtime() - t2;
	do {
		t1 = MPI_Wtime();
		MPI_Wait(&rreq, &status);
		tcomm += MPI_Wtime() - t1;
		psender = (info->info->proc_id % 2) ? receiver : sender;
		pbufindex = bufindex;
		bufindex = (bufindex + 1) % NUM_BUFFERS;
		receiver = (receiver + 1) % info->info->num_procs;
		sender = (sender - 1 + info->info->num_procs) % info->info->num_procs;
		if (receiver == info->info->proc_id) break;

		if (info->info->proc_id % 2) {
			err |= MPI_Isend(info->b->mat, bufsize, MPI_LONG, receiver, 1, MPI_COMM_WORLD, &sreq);
			err |= MPI_Irecv(buf[bufindex]->mat, bufsize, MPI_LONG, receiver, 1, MPI_COMM_WORLD, &rreq);
		} else {
			err |= MPI_Isend(info->b->mat, bufsize, MPI_LONG, sender, 1, MPI_COMM_WORLD, &sreq);
			err |= MPI_Irecv(buf[bufindex]->mat, bufsize, MPI_LONG, sender, 1, MPI_COMM_WORLD, &rreq);
		}
		t2 = MPI_Wtime();
		err |= calc(info, buf[pbufindex], psender);
		tcpu += MPI_Wtime() - t2;
	} while (1);
	t2 = MPI_Wtime();
	err |= calc(info, buf[pbufindex], psender);
	tcpu += MPI_Wtime() - t2;

	printf("tsnb.%d [tcomm=%f, tcpu=%f]\n", info->info->proc_id, tcomm, tcpu);
	for (i = 0; i < NUM_BUFFERS; ++i) dealloc_matrix(buf[i]);
	return err;
}


int p2p_twosided_nb_np(struct algorithm_data* info)
{
	int sender, receiver, psender;
	struct matrix_t* buf;
	long bufsize;
	int err=0;
	MPI_Request sreq, rreq;
	MPI_Status status;
	double t1, t2, tcomm=0, tcpu=0;

	alloc_matrix_b(&buf, info->b->n, info->b->m);

	bufsize = info->b->m * info->b->n;
	receiver = (info->info->proc_id + 1) % info->info->num_procs;
	sender = (info->info->proc_id - 1 + info->info->num_procs) % info->info->num_procs;
	
	t2 = MPI_Wtime();
	err |= diag_calc(info);
	tcpu += MPI_Wtime() - t2;
	
	if (info->info->proc_id % 2) {
		err |= MPI_Isend(info->b->mat, bufsize, MPI_LONG, receiver, 1, MPI_COMM_WORLD, &sreq);
		err |= MPI_Irecv(buf->mat, bufsize, MPI_LONG, receiver, 1, MPI_COMM_WORLD, &rreq);
	} else {
		err |= MPI_Isend(info->b->mat, bufsize, MPI_LONG, sender, 1, MPI_COMM_WORLD, &sreq);
		err |= MPI_Irecv(buf->mat, bufsize, MPI_LONG, sender, 1, MPI_COMM_WORLD, &rreq);
	}
	do {
		t1 = MPI_Wtime();
		MPI_Wait(&rreq, &status);
		tcomm += MPI_Wtime() - t1;

		psender = (info->info->proc_id % 2) ? receiver : sender;
		receiver = (receiver + 1) % info->info->num_procs;
		sender = (sender - 1 + info->info->num_procs) % info->info->num_procs;
		if (receiver == info->info->proc_id) break;

		t2 = MPI_Wtime();
		err |= calc(info, buf, psender);
		tcpu += MPI_Wtime() - t2;

		if (info->info->proc_id % 2) {
			err |= MPI_Isend(info->b->mat, bufsize, MPI_LONG, receiver, 1, MPI_COMM_WORLD, &sreq);
			err |= MPI_Irecv(buf->mat, bufsize, MPI_LONG, receiver, 1, MPI_COMM_WORLD, &rreq);
		} else {
			err |= MPI_Isend(info->b->mat, bufsize, MPI_LONG, sender, 1, MPI_COMM_WORLD, &sreq);
			err |= MPI_Irecv(buf->mat, bufsize, MPI_LONG, sender, 1, MPI_COMM_WORLD, &rreq);
		}
	} while (1);
	t2 = MPI_Wtime();
	err |= calc(info, buf, psender);
	tcpu += MPI_Wtime() - t2;

	printf("tsnbnp.%d [tcomm=%f, tcpu=%f]\n", info->info->proc_id, tcomm, tcpu);
	dealloc_matrix(buf);
	return err;
}


/*
 * Two-sided BLOCKING ring pipeline: each process forwards the last block it
 * received to 'receiver' and gets a new one from 'sender', so every block
 * circulates the full ring. Rank 0 sends first and all others receive first,
 * which avoids deadlock with blocking MPI_Send/MPI_Recv. 'origin' tracks the
 * rank that originally produced the block currently held in buf.
 *
 * Returns 0 on success; otherwise the OR of MPI/helper error codes.
 */
int p2p_twosided_b(struct algorithm_data* info)
{
	int sender, receiver, origin;
	struct matrix_t* buf[NUM_BUFFERS];
	long bufsize;
	int bufindex=0, pbufindex;
	int i, err=0;
	MPI_Status status;
	double t1, t2, tcomm=0, tcpu=0;

	for (i = 0; i < NUM_BUFFERS; ++i) {
		err |= alloc_matrix_b(&buf[i], info->b->n, info->b->m);
	}

	bufsize = info->b->m * info->b->n;
	receiver = (info->info->proc_id + 1) % info->info->num_procs;
	sender = (info->info->proc_id - 1 + info->info->num_procs) % info->info->num_procs;
	origin = sender;

	t2 = MPI_Wtime();
	err |= diag_calc(info);
	tcpu += MPI_Wtime() - t2;
	/* First hop: inject our own block into the ring. */
	if (info->info->proc_id == 0) {
		t1 = MPI_Wtime();
		err |= MPI_Send(info->b->mat, bufsize, MPI_LONG, receiver, 1, MPI_COMM_WORLD);
		err |= MPI_Recv(buf[bufindex]->mat, bufsize, MPI_LONG, sender, 1, MPI_COMM_WORLD, &status);
		tcomm += MPI_Wtime() - t1;
	} else {
		t1 = MPI_Wtime();
		err |= MPI_Recv(buf[bufindex]->mat, bufsize, MPI_LONG, sender, 1, MPI_COMM_WORLD, &status);
		err |= MPI_Send(info->b->mat, bufsize, MPI_LONG, receiver, 1, MPI_COMM_WORLD);
		tcomm += MPI_Wtime() - t1;
	}
	do {
		t2 = MPI_Wtime();
		/* OR the result into err like every other calc() call; the
		 * original dropped this return code. */
		err |= calc(info, buf[bufindex], origin);
		tcpu += MPI_Wtime() - t2;
		origin = (origin - 1 + info->info->num_procs) % info->info->num_procs;
		if (origin == info->info->proc_id) break;

		/* Forward the block just processed and receive the next one. */
		pbufindex = bufindex;
		bufindex = (bufindex + 1) % NUM_BUFFERS;
		if (info->info->proc_id == 0) {
			t1 = MPI_Wtime();
			err |= MPI_Send(buf[pbufindex]->mat, bufsize, MPI_LONG, receiver, 1, MPI_COMM_WORLD);
			err |= MPI_Recv(buf[bufindex]->mat, bufsize, MPI_LONG, sender, 1, MPI_COMM_WORLD, &status);
			tcomm += MPI_Wtime() - t1;
		} else {
			t1 = MPI_Wtime();
			err |= MPI_Recv(buf[bufindex]->mat, bufsize, MPI_LONG, sender, 1, MPI_COMM_WORLD, &status);
			err |= MPI_Send(buf[pbufindex]->mat, bufsize, MPI_LONG, receiver, 1, MPI_COMM_WORLD);
			tcomm += MPI_Wtime() - t1;
		}
	} while (1);

	printf("tsb.%d [tcomm=%f, tcpu=%f]\n", info->info->proc_id, tcomm, tcpu);
	for (i = 0; i < NUM_BUFFERS; ++i) dealloc_matrix(buf[i]);
	return err;
}
