/**
 * @file crop_ss_seq_par.cpp
 * @author Andreas Bok Andersen
 * @brief CRoP SpaceSaving with sequential read and parallel reduction
 */

#include "crop.cpp"
#include <stdio.h>
#include <string.h>
#include <mpi.h>
#define MPI_H
#include "fileparser.cpp"

/**
 * @brief CRoP SpaceSaving: sequential read on rank 0, parallel reduction.
 *
 * Rank 0 memory-maps the input matrix, broadcasts the per-row element
 * counts and then each row's raw bytes; every rank (including 0) feeds the
 * rows into its local Crop<T> summary. The per-rank summaries are then
 * combined with a binomial-tree reduction (at step s, ranks not divisible
 * by 2^s send to rank - 2^(s-1) and drop out) and the root writes the
 * merged top-k entries to "<infile>_k<k>.res".
 *
 * Per-rank timing lines (CROP / BCAST / REDUC / TOT) go to stdout,
 * tab-separated.
 *
 * @tparam T stream-summary implementation handed to Crop (buffer/bucket).
 * @tparam U input file format tag (DCS_FORMAT or COO_FORMAT).
 */
template<class T, class U>
void run(int argc, char *argv[]) {
	int rank, comm_size, step = 1, root = 0, k;
	double start, split, end, crop_t = 0.0000;
	string FILENAME;

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &comm_size);

	CmdParser cp;
	if (!cp.parseargs(argc, argv)) {
		MPI_Abort(MPI_COMM_WORLD, -1);
	}

	if (rank == 0) {
		cp.printargs();
		cout << cp.get_cmdargs() << endl;
	}

	FILENAME = DATADIR + cp.a.get < string > (INFILE);
	const char * FILEPATH = FILENAME.c_str();
	k = cp.a.get<int>(KSIZE);

	MPI_Status status;
	MPI_Datatype entrytype;
	ENTRY_TYPE(&entrytype);

	bool done = false;
	MPI_Datatype byte_type = MPI_DATATYPE_NULL;
	Crop<T> *crop = new Crop<T>(rank, comm_size, k);
	start = MPI_Wtime();
	U type;
	if (rank == 0) {
		MMapFILEParser<U> fp((char*) FILEPATH); // Change to command arg

		// Ship the row-count table first so the other ranks can size their
		// receive buffers. COUNTS holds NNZ_SIZE + 1 ints; the trailing
		// entry appears to be the maximum row count (it sizes `buf` on the
		// receiving ranks) -- TODO(review) confirm in MMapFILEParser.
		int counts = fp.NNZ_SIZE + 1;
		MPI_Bcast(&counts, 1, MPI_INT, 0, MPI_COMM_WORLD);
		MPI_Bcast(fp.COUNTS, (fp.NNZ_SIZE + 1), MPI_INT, 0, MPI_COMM_WORLD);
		while (fp.getinput()) {
			create_MPI_Datatype(&byte_type, fp.count, fp.STRIDE, type);

			MPI_Bcast((void*) fp.CURR_PTR, 1, byte_type, 0, MPI_COMM_WORLD);
			// FIX: free the per-row datatype here; the original freed it
			// only once after the loop, leaking one datatype per row (and
			// freeing an uninitialized handle when the loop ran 0 times).
			MPI_Type_free(&byte_type);
			split = MPI_Wtime();
			crop->crop((int*) fp.CURR_PTR, fp.NUM_PTR, fp.count);
			crop_t += MPI_Wtime() - split;
		}
	} else {
		// NNZ_SIZE here equals the root's fp.NNZ_SIZE + 1, so the loop
		// below over [0, NNZ_SIZE - 1) matches the root's row count and
		// COUNTS[NNZ_SIZE - 1] is the trailing (max-count) entry.
		int NNZ_SIZE;
		MPI_Bcast(&NNZ_SIZE, 1, MPI_INT, 0, MPI_COMM_WORLD);
		int * COUNTS = (int*) malloc(NNZ_SIZE * sizeof(int));
		MPI_Bcast(COUNTS, NNZ_SIZE, MPI_INT, 0, MPI_COMM_WORLD);

		int * iptr;
		float * fptr;
		// One row = COUNTS[i] ints (indices) followed by COUNTS[i] floats
		// (values); 2 * sizeof(int) per element covers both halves.
		void * buf = malloc(2 * sizeof(int) * COUNTS[NNZ_SIZE - 1]);
		for (int i = 0; i < NNZ_SIZE - 1; i++) {
			create_MPI_Datatype(sizeof(int) * COUNTS[i], &byte_type, type);

			MPI_Bcast(buf, 1, byte_type, 0, MPI_COMM_WORLD);
			MPI_Type_free(&byte_type); // FIX: was leaked once per row
			iptr = (int*) buf;
			fptr = (float*) (iptr + COUNTS[i]);
			split = MPI_Wtime();
			crop->crop(iptr, fptr, COUNTS[i]);
			crop_t += MPI_Wtime() - split;
		}
		free(buf);
		free(COUNTS);
	}
	end = MPI_Wtime();
	fprintf(stdout, "%d\t%d\tCROP\t%.8f\n", comm_size, rank, crop_t);
	fprintf(stdout, "%d\t%d\tBCAST\t%.8f\n", comm_size, rank, end - start);

	int summarysize = crop->summarysize();
	ENTRY_VEC * entries = new ENTRY_VEC();

	crop->get_summary(entries);

	// Pad the summary out to k entries so every rank exchanges a uniform
	// amount. FIX: the original assigned `*new Entry()`, leaking one Entry
	// per padded slot. Assumes get_summary sized `entries` to at least k --
	// TODO(review) confirm against Crop::get_summary.
	for (int i = summarysize; i < k; i++) {
		entries->at(i) = Entry();
	}

	MPI_Barrier(MPI_COMM_WORLD);

	int _dest;
	int _source;
	MPI_Request send_request;
	Entry * recvbuf = (Entry*) malloc(sizeof(Entry) * k);
	done = false;
	split = MPI_Wtime();
	fprintf(stdout, "rank=%d, starting reduction\n", rank);
	// Binomial-tree reduction. Senders tag the message with their step so
	// the receiver can resynchronize via status.MPI_TAG; receivers keep
	// merging until log2(comm_size) steps have completed.
	while (!done) {
		if (rank % (1 << step) != 0) { // FIX: exact 2^step, was (int)pow(2,step)
			_dest = rank - (1 << (step - 1));
			// NOTE(review): this passes the ENTRY_VEC object itself, not its
			// element storage -- if ENTRY_VEC is a std::vector this should
			// probably be entries->data(); kept as-is pending confirmation.
			MPI_Isend(entries, summarysize, entrytype, _dest, step,
					MPI_COMM_WORLD, &send_request);
			// FIX: complete the request; the original never waited on it,
			// which leaks the request object and makes reuse/free of the
			// send buffer undefined per the MPI standard.
			MPI_Wait(&send_request, &status);
			done = true;
		} else {
			_source = rank + (1 << (step - 1));
			MPI_Recv(recvbuf, k, entrytype, _source, step, MPI_COMM_WORLD,
					&status);

			// Adopt the sender's step counter (carried in the tag).
			step = status.MPI_TAG + 1;
			merge_parallel(entries, recvbuf, k);

			if (step <= (int) log2(comm_size)) {
				if (rank != root) {
					_dest = rank - (1 << (step - 1));
					MPI_Send(entries, entries->size(), entrytype, _dest, step,
							MPI_COMM_WORLD);
				}
			} else {
				// Root (or a rank with no further partner) is finished.
				done = true;
			}
		}
	}

	if (rank == root) {
		// FIX: original used the invalid printf conversion "%z" with an int.
		fprintf(stderr, "FINISHED REDUCTION at ROOT: #elems: %d\n",
				(int) entries->size());
		stringstream ss;
		ss << FILENAME << "_k" << k << ".res";
		FILENAME = ss.str();
		writetofile((char*) FILENAME.c_str(), *entries);
	}
	end = MPI_Wtime();
	fprintf(stdout, "%d\t%d\tREDUC\t%.8f\n", comm_size, rank, end - split);
	fprintf(stdout, "%d\t%d\tTOT\t%.8f\n", comm_size, rank, end - start);
	free(recvbuf);
	delete entries;
	delete crop;
	MPI_Type_free(&entrytype);
	MPI_Finalize();
}

int main(int argc, char *argv[]) {
	CmdParser cp;
	if (!cp.parseargs(argc, argv)) {
		return -1;
	}

	if (cp.a.get < string > (FORMAT) == "dcs") {
		if (cp.a.get < string > (STYPE) == "buf") {
			run<StreamSummaryBuffer,DCS_FORMAT>(argc, argv);
		} else {
			run<StreamSummaryBucket, DCS_FORMAT>(argc, argv);
		}
	} else if (cp.a.get < string > (FORMAT) == "coo"){
		if (cp.a.get < string > (STYPE) == "buf") {
			run<StreamSummaryBuffer,COO_FORMAT>(argc, argv);
		} else {
			run<StreamSummaryBucket,COO_FORMAT>(argc, argv);
		}
	}
}
