#include "distributed_mult.h"

void distributed_mat_mult_rec(double* a, double* b, double* c, int a_rows, int a_cols, int b_cols, int my_c_start, int base_case_size) {

  // Distributed matrix multiply. Each rank holds a row-slice of A (in `a`)
  // and a column-slice of B (in `b`). The A slices are rotated around a ring
  // of processes so that every rank eventually multiplies every A slice
  // against its local B columns, accumulating into its portion of `c`.
  // Results are gathered onto rank 0 at the end.
  //
  // NOTE(review): `a` is realloc'd below, so the caller's pointer may be
  // invalidated by this call — confirm callers do not reuse `a` afterwards.

  int num_procs, my_rank;
  MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

  // Number of B columns this rank owns (uneven splits give the extra
  // columns to the lower ranks — see get_row_count).
  int my_b_cols = get_row_count(b_cols, my_rank, num_procs);
  Matrix b_mat(b, a_cols, my_b_cols);

  // Double buffer for the ring exchange of A slices. Rank 0's slice is the
  // largest possible (get_row_count gives the first ranks the extra rows),
  // so sizing both buffers for it accommodates every slice that rotates by.
  int max_slice_rows = get_row_count(a_rows, 0, num_procs);
  double* a_buffers[2];
  a_buffers[0] = (double*) realloc(a, sizeof(double) * a_cols * max_slice_rows);
  a_buffers[1] = (double*) malloc(sizeof(double) * a_cols * max_slice_rows);
  if (a_buffers[0] == NULL || a_buffers[1] == NULL) {
    // Cannot proceed without the exchange buffers. (MPI_Abort would be the
    // cleaner way to bring down the whole job in production MPI code.)
    abort();
  }

  MPI_Request reqs[2];
  MPI_Status status;

  // First rows of `c` that the slice we currently hold contributes to.
  int result_offset = my_c_start;

  int next_rank = (my_rank + 1) % num_procs;
  int prev_rank = (my_rank + num_procs - 1) % num_procs;

  // Main loop: start the ring exchange, multiply the slice we currently hold
  // while the transfer is in flight, then wait before reusing the buffers.
  for (int i = 0; i < num_procs; i++) {

    // Row counts of the slice we hold now (originally owned by rank
    // (my_rank + i) % num_procs) and of the slice arriving from prev_rank.
    int my_send_size = get_row_count(a_rows, (my_rank + i) % num_procs, num_procs);
    int my_recv_size = get_row_count(a_rows, (prev_rank + i) % num_procs, num_procs);

    // Pass our current slice to the next rank in the ring...
    MPI_Isend(a_buffers[i % 2], a_cols * my_send_size, MPI_DOUBLE, next_rank, 1,
              MPI_COMM_WORLD, &reqs[0]);
    // ...and receive the previous rank's slice into the other buffer.
    MPI_Irecv(a_buffers[(i + 1) % 2], a_cols * my_recv_size, MPI_DOUBLE, prev_rank, 1,
              MPI_COMM_WORLD, &reqs[1]);

    // Overlap: compute with the slice we hold while the exchange proceeds.
    Matrix local_a(a_buffers[i % 2], my_send_size, a_cols);
    size_t c_off = result_offset * my_b_cols + my_c_start * b_cols;
    Matrix this_c(c + c_off, my_send_size, my_b_cols);

    mat_mult_rec(local_a, b_mat, this_c, base_case_size);

    // Both transfers must complete before the buffers are touched again.
    MPI_Wait(&reqs[0], &status);
    MPI_Wait(&reqs[1], &status);

    // Advance to the output rows owned by the slice we just received,
    // wrapping around at the bottom of the matrix.
    result_offset += my_send_size;
    if (result_offset >= a_rows) {
      result_offset = 0;
    }

  } // main computation loop

  // Collect every rank's block of `c` on rank 0.
  // NOTE(review): this assumes num_procs divides a_rows*b_cols evenly; an
  // uneven split (which get_row_count otherwise supports) would need
  // MPI_Gatherv with per-rank counts and displacements — confirm callers
  // only use even splits.
  int sendcnt = b_cols * a_rows / num_procs;
  MPI_Gather(c + my_c_start * b_cols, sendcnt, MPI_DOUBLE, c, sendcnt, MPI_DOUBLE, 0, MPI_COMM_WORLD);

  // a_buffers[0] took ownership of the caller's `a`; a_buffers[1] is purely
  // local scratch and must be released here (it was previously leaked).
  free(a_buffers[1]);

} // distributed version

// Rows (or columns) assigned to `my_rank` when `total_rows` items are split
// across `num_procs` ranks: everyone gets the base share, and the first
// (total_rows % num_procs) ranks each take one of the leftover items.
int get_row_count(int total_rows, int my_rank, int num_procs) {
  int base = total_rows / num_procs;
  int leftover = total_rows % num_procs;
  return my_rank < leftover ? base + 1 : base;
}



