
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#include <iostream>
#include <fstream>
using namespace std;

// MPI includes
#include <mpi.h>

#include "ComputeKernel.h"
#include "DataContainer.h"
#include "DataPacket.h"
#include "DebugLog.h"

#define MAX_BLKS_PER_PROC 16

// Abort the entire MPI job associated with 'comm' with a non-zero error code.
// MPI_Abort terminates all tasks and does not return to the caller.
// FIX: removed the MPI_Finalize() that followed MPI_Abort -- it was
// unreachable dead code, and calling Finalize after Abort is not valid MPI
// usage in any case.
static void safe_exit(MPI_Comm comm) {
  MPI_Abort(comm, -1);
}

// Map a world-space x coordinate to a zero-based column index in the grid.
//   srcX             - x coordinate of the source point
//   xLowerLeftCorner - x coordinate of the grid's lower-left corner
//   cellSize         - width of one grid cell
// Returns the column containing srcX (cells are closed on their right edge).
static int FindSourceCol(float srcX, float xLowerLeftCorner, \
    float cellSize) {
  const float cellsFromLeft = (srcX - xLowerLeftCorner) / cellSize;
  return static_cast<int>(ceil(cellsFromLeft)) - 1;
}

// Map a world-space y coordinate to a zero-based row index in the grid.
// Row 0 is the TOP of the grid, while the y axis grows upward from the
// lower-left corner, hence the flip via nRows.
//   srcY             - y coordinate of the source point
//   yLowerLeftCorner - y coordinate of the grid's lower-left corner
//   cellSize         - height of one grid cell
//   nRows            - total number of rows in the grid
static int FindSourceRow(float srcY, float yLowerLeftCorner, \
    float cellSize, int nRows) {
  const float rowsFromBottom = (srcY - yLowerLeftCorner) / cellSize;
  return static_cast<int>(ceil((float)nRows - rowsFromBottom)) - 1;
}

// Estimate the terrain slope at a source cell from its four neighbors.
// Computes the absolute gradient along the east-west axis and along the
// north-south axis, and returns the smaller of the two.
//   east/west/north/south - elevation values of the neighboring cells
//   cellSize              - grid spacing used to scale the differences
static float FindSourceSlope(float east, float west, 
    float north, float south, \
    float cellSize) {
  const float ewGradient = fabs(east - west) / cellSize;
  const float nsGradient = fabs(north - south) / cellSize;

  return (ewGradient < nsGradient) ? ewGradient : nsGradient;
}

int main(int argc, char *argv[]) {
  int num_procs, rank, name_len, buf_len, root=0;
  char process_name[MPI_MAX_PROCESSOR_NAME];
  char log_name[32], log_buf[4096];
  MPI_File fh;
  MPI_Status status;
  MPI_Comm comm = MPI_COMM_WORLD;

  // Initializations  
  MPI_Init(&argc, &argv);
  if (!DataTypes::init()) {
    safe_exit(comm);
  }
  
  bool log_op = false;
  float mul_u = 1.f, mul_v = 1.f;
  unsigned int num_iter = 1;

  // Parse command line parameters
  for (int i=0; i<argc; i++) {
    if (strcmp(argv[i], "-log")==0) {
      log_op = true;
      break;
    }
    else if (strcmp(argv[i], "-east")==0) {
      mul_u = 1.f;
      mul_v = 0.f;  
      break;
    }
    else if (strcmp(argv[i], "-west")==0) {
      mul_u = -1.f;
      mul_v =  0.f;  
      break;
    }
    else if (strcmp(argv[i], "-north")==0) {
      mul_u = 0.f;
      mul_v = 1.f;  
      break;
    }
    else if (strcmp(argv[i], "-south")==0) {
      mul_u =  0.f;
      mul_v = -1.f;  
      break;
    }
    else if (strcmp(argv[i], "-num_iter")==0) {
      num_iter = atoi(argv[i]);
      break;
    }
  }
  
  comm = MPI_COMM_WORLD;
  
  MPI_Comm_size(comm, &num_procs);
  MPI_Comm_rank(comm, &rank);
  
  // MPI_Get_processor_name(process_name, &name_len);
  
  sprintf(log_name, "debug.log");
  DebugLog debug_log(comm, log_name);
  
  // Defined for the entire system
  vector < vector <blk_meta_data> > send_meta_data_vec;
  vector <blk_meta_data> recv_meta_data_vec(MAX_BLKS_PER_PROC);
  vector <int> send_meta_data_cnt;
  blk_meta_data meta_data_obj;
  int recv_meta_data_cnt = 0, recv_blk_data_size = 0;
  int blk_size_x = 0, blk_size_y = 0;
  int dim_x = 1024, dim_y = 1024;
  int lo[2], hi[2], buffer_size[2];
  int num_blks_x, num_blks_y; 
  
  // Create an empty data container
  DataContainer dataContainer;
  vector < vector <INT_PAIR>  > const &offset_map =
    dataContainer.getOffsetMap();
  
  // This should probably be parsed from the command line
#if 0
  // Override the actual values if need be
  blk_size_x = 256;
  blk_size_y = 256;

  dim_x = 1024;
  dim_y = 1024;
#endif

  // printf("Test-1\n");

  // Instantiate it for the root process
  dataContainer.instantiate(dim_x, dim_y, num_procs);
  
  vector< vector<int> > proc_map = dataContainer.getProcMap();

  num_blks_x = dataContainer.getNumBlocksX();
  num_blks_y = dataContainer.getNumBlocksY();
  
  // printf("Test-2\n");

  if (rank==root) {
    // Resize to the number of processes
    send_meta_data_vec.resize(num_procs); 
    send_meta_data_cnt.resize(num_procs, 0); 
    
    for (int i=0; i<num_blks_y; i++) {
      for (int j=0; j<num_blks_x; j++) {
	// The coresponding process assigned to this block
	int dest = proc_map[i][j];	
	send_meta_data_cnt[dest] += 1; // Increment the count by 1
      }
    }

    for (int i=0; i<num_blks_y; i++) {
      for (int j=0; j<num_blks_x; j++) {
	// The corresponding process and offset in the dataset
	int dest = proc_map[i][j];	
	INT_PAIR offset = offset_map[i][j];

	blk_size_x = dataContainer.getBlockSizeX(i, j);
	blk_size_y = dataContainer.getBlockSizeY(i, j);

	meta_data_obj.offset[0] = lo[0] = offset[0]; // j, x
	meta_data_obj.offset[1] = lo[1] = offset[1]; // i, y

	hi[0] = ((lo[0]+blk_size_x)>dim_x)?dim_x:lo[0]+blk_size_x; 
	hi[1] = ((lo[1]+blk_size_y)>dim_y)?dim_y:lo[1]+blk_size_y;

	meta_data_obj.extent[0] = buffer_size[0] = hi[0]-lo[0];
	meta_data_obj.extent[1] = buffer_size[1] = hi[1]-lo[1];

	meta_data_obj.id[0] = j;
        meta_data_obj.id[1] = i;	

        send_meta_data_vec[dest].push_back(meta_data_obj); 
      }
    }
 
    for (int i=0; i<num_procs; i++) {
      if (i!=root) {
	// printf("send_meta_data_cnt[%d]: %d\n", i, send_meta_data_cnt[i]);
	MPI_Send((void *)&(send_meta_data_vec[i][0]), send_meta_data_cnt[i],
	    DataTypes::MPI_BLK_META_DATA, i, 123,
	    comm);
      } 
      else {
	// Copy the data to the recv_buf
	// memcpy(recv_buf, &(send_buf[0][0]), send_cnt[i]*sizeof(int));
	recv_meta_data_vec = send_meta_data_vec[i];
	recv_meta_data_cnt = send_meta_data_cnt[i];
      }	
    }
  } // End of (rank==root)
  else {
    MPI_Recv((void *)&(recv_meta_data_vec[0]), MAX_BLKS_PER_PROC,
	DataTypes::MPI_BLK_META_DATA, root, 123, comm,
	&status);

    MPI_Get_count(&status, DataTypes::MPI_BLK_META_DATA, &recv_meta_data_cnt);
    // printf("recv_meta_data_cnt: %d\n", recv_meta_data_cnt);
  }

  MPI_Barrier(comm);
  
  // printf("Test-3\n");
  // printf("recv_meta_data_cnt: %d\n", recv_meta_data_cnt);

  /**
  Use the same log file, as a shared resource, to write to by all processes.
  */

  // Number of blocks received by this process
  int blk_buf_size = 0;
  CONTAINER_TYPE **blk_data_buf = new CONTAINER_TYPE* [recv_meta_data_cnt]; 
  
  char tmp_buf[1024];
  memset(tmp_buf, NULL, 1024);
  // char *buf_ptr = log_buf;
  
  /**
  TODO-1: Instead of creating a 2D array, create a large 1D array which can then
  be used by MPI_Recv. Also create a 2D array of pointers which point to
  locations in the large 1D array, and use that to access blocks assigned to a
  proc. This can be abstracted into a class too.

  TODO-2: Change variable names to better identify the block/ proc variables.
  */

  for (int i=0 ; i<recv_meta_data_cnt; i++) {
    meta_data_obj = recv_meta_data_vec[i];
    if (log_op) {
      sprintf(tmp_buf, "proc %.4d: (%d, %d)\n", rank,
	  meta_data_obj.extent[0], meta_data_obj.extent[1]);

      // buf_len = strlen(tmp_buf);
      // memcpy(buf_ptr, tmp_buf, buf_len);
      // buf_ptr += buf_len; 
      // memset(tmp_buf, NULL, 1024);

      debug_log << tmp_buf;
    }

    blk_buf_size += meta_data_obj.extent[0]*\
		    meta_data_obj.extent[1];
  }
  
  // printf("Test-4\n");
  // printf("blk_buf_size: %d\n", blk_buf_size); 

  CONTAINER_TYPE *blk_data_ptr = new CONTAINER_TYPE [blk_buf_size];

  blk_buf_size = 0;
  for (int i=0; i<recv_meta_data_cnt; i++) {
    meta_data_obj = recv_meta_data_vec[i];

    blk_data_buf[i] = blk_data_ptr+blk_buf_size;
    blk_buf_size += meta_data_obj.extent[0]*\
		    meta_data_obj.extent[1];
  }
  
  // printf("Test-5\n");
    
  /**
  TODO: Now distribute the dataset among all the nodes, using the mapping from
  the proc_map.
  */
 
#if 1 
  if (rank==root) {
    // Only the root process makes an allocation
    dataContainer.allocate();

    CONTAINER_TYPE const** data = dataContainer.getData();

    // Create a 1D data block from the 2D data representation
    CONTAINER_TYPE **proc_data = new CONTAINER_TYPE* [num_procs];
    vector <int> proc_buf_size(num_procs);
    vector <int> displs(num_procs);

    for (int i=0; i<num_procs; i++) {
      int tmp_buf_size = 0;
      for (int j=0; j<send_meta_data_vec[i].size(); j++) {
        meta_data_obj = send_meta_data_vec[i][j];
	buffer_size[0] = meta_data_obj.extent[0];
	buffer_size[1] = meta_data_obj.extent[1];

	tmp_buf_size += buffer_size[0]*buffer_size[1];
      }

      proc_data[i] = new CONTAINER_TYPE[tmp_buf_size];
      proc_buf_size[i] = tmp_buf_size;
      displs[i] = 0;
    }

    // Copy from 'data' -> 'proc_data'
    for (int i=0; i<num_blks_y; i++) {
      for (int j=0; j<num_blks_x; j++) {
	int dest = proc_map[i][j];	
	INT_PAIR offset = offset_map[i][j];

	lo[0] = offset[0]; // j, x
	lo[1] = offset[1]; // i, y
	
	blk_size_x = dataContainer.getBlockSizeX(i, j);
	blk_size_y = dataContainer.getBlockSizeY(i, j);

	hi[0] = ((lo[0]+blk_size_x)>dim_x)?dim_x:lo[0]+blk_size_x; 
	hi[1] = ((lo[1]+blk_size_y)>dim_y)?dim_y:lo[1]+blk_size_y;
	
	buffer_size[0] = hi[0]-lo[0];
	buffer_size[1] = hi[1]-lo[1];
	
	for (int k=lo[1]; k<hi[1]; k++) {
          CONTAINER_TYPE *dest_buf = proc_data[dest]+displs[dest];
	  memcpy(dest_buf, &(data[k][lo[0]]),
	      buffer_size[0]*sizeof(CONTAINER_TYPE));
	  displs[dest] += buffer_size[0];
	}
      }
    }

    // Send the data 
    for (int i=0; i<num_procs; i++) {
      if (i!=root) { 
        MPI_Send(proc_data[i], proc_buf_size[i], MPI_FLOAT, i, 123, comm);
      }
      else { // Copy the data into the recv buffer
	memcpy(blk_data_ptr, proc_data[root],
	    proc_buf_size[root]*sizeof(CONTAINER_TYPE));

	recv_blk_data_size = proc_buf_size[root];
      }

      delete [] proc_data[i];
    }

    delete [] proc_data;
  }
  else {
    MPI_Recv(blk_data_ptr, blk_buf_size, MPI_FLOAT, root, 123, comm, &status);
    
    MPI_Get_count(&status, MPI_FLOAT, &recv_blk_data_size);
  }
  
  MPI_Barrier(comm);
    
  if (recv_blk_data_size!=blk_buf_size) {
    sprintf(tmp_buf, "Rank %d: error in receiving data\n", rank);
    debug_log << tmp_buf;
  }
  else {
    sprintf(tmp_buf, "Rank %d: success in receiving data\n", rank);
    debug_log << tmp_buf; 
  }
#endif
  
  //  printf("Test-6\n");

  // Create DataPacket associated with this process
  meta_data_obj = recv_meta_data_vec[0];
 
  // Packet specific meta data
  int p_offset[2], p_dim[2], p_id[2];

  p_offset[0] = meta_data_obj.offset[0];
  p_offset[1] = meta_data_obj.offset[1];

  p_dim[0] = meta_data_obj.extent[0]; // width
  p_dim[1] = meta_data_obj.extent[1]; // height

  p_id[0] = meta_data_obj.id[0]; 
  p_id[1] = meta_data_obj.id[1];

  // printf("p_id[0]: %d\n", p_id[0]); 
  // printf("p_id[1]: %d\n", p_id[1]); 

  // Neighboring process map; the current process goes to the middle of the
  // 2-D array.
  vector< vector<int> >  n_procs(3, vector<int>(3, -1));
  for (int i=0; i<3; i++) {
    for (int j=0; j<3; j++) {
      int _i = p_id[1]+i-1;
      int _j = p_id[0]+j-1;

      if (_i>=0&&_i<proc_map.size()) { 
	if (_j>=0&&_j<proc_map[0].size()) {
          n_procs[i][j] = proc_map[_i][_j];
	}
      }
    }
  }

  // TODO: Create/ initialize the hydyograph values
  float hyg_values[2][100];

  INT_PAIR ghost_cells(1, 1);
  DataPacket dem_values(p_dim[0], p_dim[1], ghost_cells);
  if (!dem_values.init(blk_data_buf[0])) {
    printf("DataPacket initialization failed!!!\n");
    safe_exit(comm);
  }

  // TODO: All these variables should be placed into a global structure
  float x_ll = 0.f, y_ll = 0.f, cell_size = 1.f;
  float h_extra = 0.001f;
  float sim_time = 0.f, dt = 0.1f;
  float man_n = 0.013f;
  float g = 9.81f;
  float dx = cell_size, dy = cell_size;
  float courant_no = 0.1f;
  float src_discharge = 0.f;
  float src_slope = 0.f;
  float src_x_loc = 0.f, src_y_loc = 0.f;
  float h_src = 0, u_src = 0, v_src = 0;

  // The process which updates the source values
  int src_update_proc = -1;

  // These row and col values are wrt to the whole dataset
  int src_col = FindSourceCol(src_x_loc, x_ll, cell_size);
  int src_row = FindSourceRow(src_y_loc, y_ll, cell_size, dim_y);

  if ((src_row>=p_offset[1]&&src_row<p_offset[1]+p_dim[1])&&
      (src_col>=p_offset[0]&&src_col<p_offset[0]+p_dim[0])) {
    printf("Calculating source slope on process: %d\n", rank);
    src_update_proc = rank;

    // Adjust for row and col values relative to this block
    src_col -= p_offset[0];
    src_row -= p_offset[1];

    // Account for the ghost cells
    src_col += ghost_cells[0];
    src_row += ghost_cells[1];

    float src_e = dem_values.access_data(src_row, src_col+1);
    float src_w = dem_values.access_data(src_row, src_col-1);
    float src_n = dem_values.access_data(src_row-1, src_col);
    float src_s = dem_values.access_data(src_row+1, src_col);

    float src_slope = FindSourceSlope(src_e, src_w, src_n, src_s,
	cell_size);

    if (src_slope==0.f) {
      printf("Source slope is 0, set to %0.4f\n", h_extra);
      src_slope = h_extra;
    }

    float tmp_h_src_term =
      (hyg_values[0][0]*man_n)/(cell_size*sqrt(src_slope));

    float h_src_term = pow(tmp_h_src_term, 0.6f);

    if (h_src_term==0.f)
      v_src = src_discharge/(cell_size*(h_src_term+h_extra));
    else
      v_src = src_discharge/(cell_size*h_src_term);
  }
   
  MPI_Barrier(comm); // Not sure if needed

  DataPacket h_old(p_dim[0], p_dim[1], ghost_cells);
  DataPacket h_new(p_dim[0], p_dim[1], ghost_cells);
  
  DataPacket u_old(p_dim[0], p_dim[1], ghost_cells);
  DataPacket u_new(p_dim[0], p_dim[1], ghost_cells);

  DataPacket v_old(p_dim[0], p_dim[1], ghost_cells);
  DataPacket v_new(p_dim[0], p_dim[1], ghost_cells);

  h_old.init(h_extra);
  h_new.init(0.f);
  
  u_old.init(0.f);
  u_new.init(0.f);

  v_old.init(0.f);
  v_new.init(0.f);

  // The neighborhood info for this process
  dem_values.setProcMap(n_procs);

  h_old.setProcMap(n_procs);
  h_new.setProcMap(n_procs);
  
  u_old.setProcMap(n_procs);
  u_new.setProcMap(n_procs);
  
  v_old.setProcMap(n_procs);
  v_new.setProcMap(n_procs);

  // Array of pointers to ping-point between data packets
  DataPacket *h_buffer[2] = {&h_old, &h_new};
  DataPacket *u_buffer[2] = {&h_old, &h_new};
  DataPacket *v_buffer[2] = {&h_old, &h_new};

  /**
    Note: This step ensures ghost cell values are from neighboring cells and
    not copied from internal cells.
   */

  dem_values.interchange(comm, rank);
 
  // Update h, u, and v values on the source cell process
  if (rank==src_update_proc) {
    h_buffer[0]->access_data(src_row, src_col) = h_src;
    u_buffer[0]->access_data(src_row, src_col) = u_src;
    v_buffer[0]->access_data(src_row, src_col) = v_src;
  }
  
  MPI_Barrier(comm); // Not sure if needed
  
  // Data exchange round
  for (int i=0; i<num_iter; i++) {
    DataPacket &h_i = *h_buffer[(i+0)%2];
    DataPacket &h_o = *h_buffer[(i+1)%2];
    
    DataPacket &u_i = *u_buffer[(i+0)%2];
    DataPacket &u_o = *u_buffer[(i+1)%2];
    
    DataPacket &v_i = *v_buffer[(i+0)%2];
    DataPacket &v_o = *v_buffer[(i+1)%2];

    ComputeKernel::flood_equation_cpu(h_i, h_o, u_i, u_o, v_i, v_o, dem_values,
	dx, dy, dt); 
    
    if (!dem_values.interchange(comm, rank)) {
      if (log_op) {
	sprintf(tmp_buf, "Rank %d: Data interchange failed!!!\n", rank);
	debug_log << tmp_buf;
      }

      safe_exit(comm);
    }
    else {
      if (log_op) {
	sprintf(tmp_buf, "Rank %d: Data interchange succeded!!!\n", rank);
	debug_log << tmp_buf;
      }
    }

    MPI_Barrier(comm);
  }
 
  // Release allocated memory - keep this around for now, we may need the buffer
  // to send the data back to the root process. 
  delete [] blk_data_ptr; 
  delete [] blk_data_buf;

  debug_log.close();
  
  MPI_Finalize();

  return 0;
}
