#include <vector>
#include <map>
#include <utility>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "mpi.h"
#include <papi.h>

/* MPI derived datatype describing one MatrixCell; built and committed in distributeVector() */
MPI_Datatype MatrixCellType;

extern "C" {
  /* One nonzero entry in coordinate (COO) form; also used for vector entries (col == 1). */
  typedef struct {
    int row;
    int col;
    double value;
  } MatrixCell;

  /* I/O helpers implemented in a separate C translation unit.
     Reads this rank's share of the matrix and this rank's vector segment. */
  void readMatrixVector(char* matrixFile, char* vectorFile, int my_rank, int numProcess);

  int getMatrixSize();
  int getMatrixRowSize();
  int getMatrixColumnSize();
  MatrixCell* getMatrix();

  int getVectorSize();
  int getVectorRowSize();
  int getVectorColumnSize();
  MatrixCell* getVector();

  /* Writes `size` cells of `mat` (a row x col result) to `filename`. */
  void writeOutputVector(char* filename, MatrixCell* mat, int row, int col, int size);
}

/* my matrix: this rank's nonzero entries, obtained from getMatrix() */
static MatrixCell *matrix;
static int matrixSize;
/* matrix container: column -> (row -> value), populated in main() */
std::map<int, std::map<int, float> > M;
/* my vector: this rank's segment of the input vector */
static MatrixCell *vector;
static int vectorSize;
/* my result: dense partial result, summed across ranks via MPI_Reduce */
MatrixCell* result;
int resultSize;
/* input file paths (taken from argv in main) */
char* matrixFile;
char* vectorFile;

/* debug: number of MPI_Test polls spent waiting for incoming segments */
int recv_c = 0;

/*
 * Read global problem dimensions from the two input files:
 *   *rSize <- first number on the matrix file's size line (row count)
 *   *vSize <- third number on the vector file's size line (entry count;
 *             the line is "rows cols nnz", MatrixMarket style)
 * The first (header) line of each file is skipped.
 *
 * Fixes vs. original: fgetc() result held in an int (a plain char cannot
 * reliably represent EOF, risking an infinite header-skip loop), fopen()
 * NULL checks, and checked fscanf() conversions.
 */
inline void getSizes(int* rSize, int* vSize) {
  int c;  /* int, not char: fgetc() returns EOF (-1) out of char range */

  FILE *fp = fopen(matrixFile, "r");
  if (fp == NULL) {
    fprintf(stderr, "getSizes: cannot open matrix file %s\n", matrixFile);
    exit(1);
  }
  /* skip the header (first) line */
  while ((c = fgetc(fp)) != '\n') {
    if (c == EOF) break;  /* malformed file: don't spin forever */
  }
  if (fscanf(fp, "%d", rSize) != 1) {
    fprintf(stderr, "getSizes: malformed size line in %s\n", matrixFile);
    fclose(fp);
    exit(1);
  }
  fclose(fp);

  fp = fopen(vectorFile, "r");
  if (fp == NULL) {
    fprintf(stderr, "getSizes: cannot open vector file %s\n", vectorFile);
    exit(1);
  }
  /* skip the header (first) line */
  while ((c = fgetc(fp)) != '\n') {
    if (c == EOF) break;
  }
  /* size line is "rows cols nnz" -- keep only the last value */
  int rows, cols, nnz;
  if (fscanf(fp, "%d %d %d", &rows, &cols, &nnz) != 3) {
    fprintf(stderr, "getSizes: malformed size line in %s\n", vectorFile);
    fclose(fp);
    exit(1);
  }
  *vSize = nnz;
  fclose(fp);
}

inline void distributeVector(int my_rank, int p) {
	int req;
	MPI_Aint offsets[2], extent;
	MPI_Datatype oldtypes[2];
	int blockcounts[2];

	/* Setup description of the 2 MPI_INT fields row and col */
	offsets[0] = 0;
	oldtypes[0] = MPI_INT;
	blockcounts[0] = 2;
	/* Setup description of the 1 MPI_DOUBLE field value */
	/* Need to first figure offset by getting size of MPI_INT */
	MPI_Type_extent(MPI_INT, &extent);
	offsets[1] = 2 * extent;
	oldtypes[1] = MPI_DOUBLE;
	blockcounts[1] = 1;
	/* Now define structured type and commit it */
	MPI_Type_struct(2, blockcounts, offsets, oldtypes, &MatrixCellType);
	MPI_Type_commit(&MatrixCellType);

  for(int i=(my_rank+1)%p; i!=my_rank; i=(i+1)%p) {
    MPI_Isend(vector, vectorSize, MatrixCellType, i, 1, MPI_COMM_WORLD, &req);
  }
}

/*
 * Accumulate one vector entry into the dense result vector.
 *
 * `row` is the vector entry's 1-based index, which selects the matching
 * matrix COLUMN (M is keyed column -> (row -> value), see its population
 * in main). For every nonzero in that column:
 *     result[matrixRow - 1].value += data * matrixValue
 *
 * Fix vs. original: a single find() is reused instead of re-looking up
 * M[row] on every loop iteration (the original performed the map lookup
 * three times); a stray `;` after the function body was removed.
 */
inline void calculateCell(int row, float data) {
  std::map<int, std::map<int, float> >::iterator colIt = M.find(row);
  if (colIt == M.end())
    return;  /* no nonzeros in this column: nothing to contribute */
  for (std::map<int, float>::iterator it = colIt->second.begin();
       it != colIt->second.end(); ++it)
    result[it->first - 1].value += data * it->second;
}

/*
 * User-defined MPI reduction operator: element-wise sum of the `value`
 * fields of two MatrixCell arrays. The row/col fields of `inout` are
 * left untouched (all ranks initialize them identically).
 * Signature matches MPI_User_function; `dptr` is required but unused.
 */
void cellSum(MatrixCell * in, MatrixCell * inout, int * len, MPI_Datatype * dptr) {
  (void)dptr; /* mandated by the MPI_User_function interface */
  const int n = *len;
  for (int idx = 0; idx != n; ++idx)
    inout[idx].value += in[idx].value;
}

int main(int argc, char* argv[]) {
	int my_rank; /* rank of process */
	int p; /* number of processes */
	int source; /* rank of sender */
	int dest; /* rank of receiver */

  int vectorTotalSize;
  int* procStart, *procEnd;

	int i;
  unsigned long chkflg = PAPI_VER_CURRENT;
  unsigned long start, end;

	/* start up MPI */
	MPI_Init(&argc, &argv);
	/* find out process rank */
	MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
	/* find out number of processes */
	MPI_Comm_size(MPI_COMM_WORLD, &p);

  /*read and distribute input data*/
	matrixFile = argv[1];
	vectorFile = argv[2];
	readMatrixVector(matrixFile, vectorFile, my_rank, p);

  /*init papi*/
  PAPI_library_init(chkflg);
  if (chkflg != PAPI_VER_CURRENT) {
    printf("Error PAPI Library out of date\n");
    exit(1);
  }

  /*start timing*/
  start = PAPI_get_real_usec();

  /*load matrix*/
  matrixSize = getMatrixSize();
  matrix = getMatrix();
  /*load vector*/
  vectorSize = getVectorSize();
  vector = getVector();

  /*send vector data to other processes*/
  distributeVector(my_rank, p);

	procStart = (int*) malloc(p * sizeof(int));
	procEnd = (int*) malloc(p * sizeof(int));

  /*result memory allocation*/
  getSizes(&resultSize, &vectorTotalSize);
  result = (MatrixCell*)malloc(resultSize*sizeof(MatrixCell));
  for(i=0; i<resultSize; i++) {
    result[i].row = i+1;
    result[i].col = 1;
    result[i].value = 0;
  }

  /*populate matrix data structure*/
  for(i=0; i<matrixSize; i++)
    M[matrix[i].col][matrix[i].row] = matrix[i].value;

  /*calculate vector segment size*/
	procStart[0] = 0;
	for (i = 1; i < p; ++i)
		procStart[i] = i * (vectorTotalSize / p);
	for (i = 0; i < (p - 1); ++i)
		procEnd[i] = (i + 1) * (vectorTotalSize / p);
	procEnd[p - 1] = vectorTotalSize;

  /*************** start the main loop **************/
  MatrixCell* oneVector;
  MatrixCell* anotherVector;
  MPI_Request oneRequest;
  MPI_Request anotherRequest;
  MPI_Request* oneRequest_p;
  MPI_Request* anotherRequest_p;
  int oneSize;
  int anotherSize;
  MPI_Status status;

  oneVector = vector;
  oneSize = vectorSize;
  oneRequest_p = &oneRequest;
  anotherRequest_p = &anotherRequest;
  i = my_rank;

  do {
    /* pre-recv data for next round */
    int next_src = (i+1)%p;
    if (next_src != my_rank) {
      anotherSize = procEnd[next_src] - procStart[next_src];
      anotherVector = (MatrixCell*)malloc(sizeof(MatrixCell)*anotherSize);
      MPI_Irecv(anotherVector, anotherSize, MatrixCellType, next_src, 1, MPI_COMM_WORLD, anotherRequest_p);
    }

    /* make sure data is received */
    if (i != my_rank) {
      int flag;
      do {
        MPI_Test(oneRequest_p, &flag, &status);
        recv_c++;
      } while(flag == 0);
    }

    /* calculation for this round */
    int j;
    for(j=0; j<oneSize; j++) {
      calculateCell(oneVector[j].row, oneVector[j].value);
    }

    if (i != my_rank) {
      free(oneVector);
    }
    /* switch one and another */
    MPI_Request* tr;
    tr = oneRequest_p;
    oneRequest_p = anotherRequest_p;
    anotherRequest_p = tr;
    oneVector = anotherVector;
    oneSize = anotherSize;
    i = (i+1)%p;
  } while(i != my_rank);

  // debugging
  //int k;
  //printf("rank: %d\n", my_rank);
  //for(k = 0; k < resultSize; ++k)
  //  printf("\t%f\n", result[k].value);

  MatrixCell* final_result;
  final_result = (MatrixCell*)malloc(sizeof(MatrixCell)*resultSize);

  /* for rank 0 only really*/
  MPI_Op op;
  MPI_Op_create((MPI_User_function*)cellSum, true, &op);

  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Reduce(result, final_result, resultSize, MatrixCellType, op, 0, MPI_COMM_WORLD);

  /*end timing*/
  end = PAPI_get_real_usec();

  if (my_rank == 0)
    writeOutputVector("result.mm", final_result, resultSize, 1, resultSize);
  printf("rank: %d, time: %ld, recv_c: %d\n", my_rank, end-start, recv_c);
	MPI_Finalize();

	return 0;
}
