/**
 * @file crop_ss_par_par.cpp
 * @author Andreas Bok Andersen
 * @brief CRoP SpaceSaving with parallel read and parallel reduction
 */

#include "crop.cpp"
#include <stdio.h>
#include <string.h>
#include <mpi.h>
#define MPI_H
#include "fileparser.cpp"

using namespace std;

template<class T, class U>
void run(int argc, char *argv[]) {

	/*BOOST_FOREACH(string arg, CMDARGS) {
	 cerr << arg << endl;
	 }*/

	int rank, comm_size, step = 1, root = 0, ierr, k = 4;
	string FILENAME;
	double start, end, split;

	MPI_Status status;

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &comm_size);

	CmdParser cp;
	if (!cp.parseargs(argc, argv)) {
		MPI_Abort(MPI_COMM_WORLD, -1);
	}

	if (rank == 0) {
		cp.printargs();
		cout << cp.get_cmdargs() << endl;
	}

	FILENAME = DATADIR + cp.a.get < string > (INFILE);
	const char * FILEPATH = FILENAME.c_str();
	k = cp.a.get<int>(KSIZE);

	MPI_Datatype entrytype;
	ENTRY_TYPE(&entrytype);

	Crop<T> *crop = new Crop<T>(rank, comm_size, k);

	bool done = false;
	start = MPI_Wtime();



	//MMapFileParser fp((char*) FILEPATH);
	MMapFILEParser<U> fp((char*) FILEPATH);
	while (fp.getinput()) {
		crop->crop((int*) fp.CURR_PTR, fp.NUM_PTR, fp.count);
	}
	end = MPI_Wtime();
	fprintf(stdout, "%d\t%d\tCROP\t%.8f\n", comm_size, rank, end - start);

	int summarysize = crop->summarysize();

	ENTRY_VEC * entries = new ENTRY_VEC();

	crop->get_summary(entries);

	for (int i = summarysize; i < k; i++) {
		entries->at(i) = *new Entry();
		//cerr << "adding entry: " << entries[i].i << endl;
	}

	MPI_Barrier (MPI_COMM_WORLD);
	//printf("rank: %d %s\n", rank, "Starting Reduction");

	int _dest;
	int _source;
	MPI_Request resv_request;
	MPI_Request send_request;
	int count = 0;
	Entry * recvbuf = (Entry*) malloc(sizeof(Entry) * k);

	split = MPI_Wtime();
	while (!done) {
		if (rank % (int) pow(2, step) != 0) {
			_dest = rank - (int) pow(2, (step - 1));
			//printf("rank: %d %s %d\n", rank, "Sending to", _dest);
			MPI_Isend(entries, summarysize, entrytype, _dest, step,
					MPI_COMM_WORLD, &send_request);
			MPI_Test(&send_request, &ierr, &status);
			//printf("rank: %d %s %d\n", rank, "Sent to", _dest);
			done = true;
			//printf("FINISEHED at %d with error %d\n", rank, ierr);
		} else {
			//recvbuf = (Entry*) malloc(sizeof(Entry) * k);
			_source = rank + (int) pow(2, (step - 1));
			//printf("source: %d", _source);
			MPI_Recv(recvbuf, k, entrytype, _source, step, MPI_COMM_WORLD,
					&status);

			/*printf("rank: %d Received elements %d from %d\n", rank,
			 sizeof(recvbuf), _source);*/

			step = status.MPI_TAG + 1;
			//printf("%rank=%d step postupdate %d\n", rank, step);
			merge_parallel(entries, recvbuf, k);
			/*printf("rank: %d %s #sum %d\n", rank, "Merged in parallel",
			 entries->size());*/

			if (step <= (int) log2(comm_size)) {
				if (rank != root) {
					_dest = rank - (int) pow(2, (step - 1));
					//printf("rank: %d %s %d\n", rank, "Sending to", _dest);
					MPI_Send(entries, entries->size(), entrytype, _dest, step,
							MPI_COMM_WORLD);
					//printf("rank: %d %s %d\n", rank, "Sent to", _dest);
				}
			} else {
				//printf("EXIT LOOP at %d\n", rank);
				done = true;
			}
		}
	}

	end = MPI_Wtime();
	fprintf(stdout, "%d\t%d\tREDUC\t%.8f\n", comm_size, rank, end - split);
	fprintf(stdout, "%d\t%d\tTOT\t%.8f\n", comm_size, rank, end - start);
	if (rank == root) {
		fprintf(stderr, "FINISHED REDUCTION at ROOT: #elems: %d\n",
				entries->size());
		stringstream ss;
		ss << FILENAME << "_k" << k << ".res";
		FILENAME = ss.str();
		writetofile((char*) FILENAME.c_str(), *entries);

	}
	MPI_Type_free(&entrytype);
	free(recvbuf);
	delete entries;
	delete crop;
	MPI_Finalize();

}

/**
 * Entry point: dispatches on the FORMAT ("dcs"/"coo") and STYPE ("buf" or
 * bucket) command-line options to the matching run<Summary, Format>
 * instantiation.
 *
 * @return 0 on success, -1 on bad arguments or an unknown format.
 */
int main(int argc, char *argv[]) {
	CmdParser cp;
	if (!cp.parseargs(argc, argv)) {
		return -1;
	}

	if (cp.a.get < string > (FORMAT) == "dcs") {
		if (cp.a.get < string > (STYPE) == "buf") {
			run<StreamSummaryBuffer, DCS_FORMAT>(argc, argv);
		} else {
			run<StreamSummaryBucket, DCS_FORMAT>(argc, argv);
		}
	} else if (cp.a.get < string > (FORMAT) == "coo") {
		if (cp.a.get < string > (STYPE) == "buf") {
			run<StreamSummaryBuffer, COO_FORMAT>(argc, argv);
		} else {
			run<StreamSummaryBucket, COO_FORMAT>(argc, argv);
		}
	} else {
		// BUGFIX: an unrecognized format previously fell through every
		// branch and exited silently with status 0; report it instead.
		fprintf(stderr, "Unknown format: %s\n",
				cp.a.get < string > (FORMAT).c_str());
		return -1;
	}
	return 0;
}
