/* cannon.c -- uses Cannon's algorithm to multiply two square matrices
 *
 * Input:
 *     n: global order of matrices
 *     A,B: the factor matrices
 * Output:
 *     C: the product matrix
 *
 * Notes:  
 *     1.  Assumes the number of processes is a perfect square
 *     2.  The array member of the matrices is statically allocated
 *     3.  Assumes the global order of the matrices is evenly
 *         divisible by sqrt(p).
 *
 * Based on code for Fox's algorithm, 
 * See Chap 7, pp. 113 & ff and pp. 125 & ff in Peter Pacheco's
 * "Parallel Programming with MPI," Morgan Kaufmann, 1997
 */
#include <assert.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <mpi.h>
#include "cannon.h"
#include "ptools_ppf_cpp.h"
#define Order(A) ((A)->n_bar)
#define Size(A) ((A)->n_bar*(A)->n_bar)

using namespace std;


COMM_INFO_T  grid;
double *tmpBufA,  *tmpBufB;

/* Function Declarations */
LOCAL_MATRIX_T*  LocalMatAlloc(int n_bar);
void             Free_local_matrix(LOCAL_MATRIX_T** local_A);
void             Local_MM(LOCAL_MATRIX_T* local_A,
                          LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);
void   		 GenMatrix(LOCAL_MATRIX_T* local_B);
double 		 RunCannon(int niter, int n, LOCAL_MATRIX_T*  local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C, int nocomm);
int 		 DoVerification(LOCAL_MATRIX_T*  local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C, int n, int niter, MPI_Comm comm);
void    	 ReportStats(int niter,int n, double time, int check, int OK, int nocomm);

extern void Setup_Comm();
extern void getHost(int myid, int nodes);
extern void Stop();
/*********************************************************/
/* Driver: initializes MPI, parses the command line, allocates the
 * per-process matrix blocks, runs Cannon's algorithm niter times,
 * optionally verifies the product, and reports timing statistics.
 *
 * Fixes vs. original:
 *  - explicit `int` return type (implicit int is ill-formed C++)
 *  - OK initialized so ReportStats never reads an indeterminate value
 *    when verification is disabled.
 */
int main(int argc, char* argv[]) {
    int              my_rank;
    int              n;       /* global matrix order */
    int              n_bar;   /* order of each process's local block */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    Setup_Comm();

    int niter, check, nocomm;

    cmdLine(argc, argv, n, niter, check, nocomm);
    // Verification needs the communicated data, so "check" and the
    // communication-off mode are mutually exclusive.
    if (nocomm && check){
        if (!my_rank){
           cerr << "You've asked to check the error but have shut off communication." << endl;
           cerr << "These options are mutually exclusive. Try again." << endl;
           cerr << endl;
        }
        Stop();
     }

    if (!my_rank)
        cout << "The order of the matrices is:  " << n << endl;
    MPI_Barrier( MPI_COMM_WORLD );
// Report the nodes used for each communicator
    getHost(my_rank, grid.p);

    MPI_Barrier( MPI_COMM_WORLD );
    n_bar = n/grid.q;

    LOCAL_MATRIX_T*  local_A = LocalMatAlloc(n_bar);
    GenMatrix(local_A);
// PrintMatrix gathers all the data to the root and is not scalable.
//    PrintMatrix("Global Matrix A",local_A);

    LOCAL_MATRIX_T*  local_B = LocalMatAlloc(n_bar);
    GenMatrix(local_B);
//    PrintMatrix("Global Matrix B",local_B);
    LOCAL_MATRIX_T*  local_C = LocalMatAlloc(n_bar);

    // Scratch buffers used for the circular shifts inside Cannon()
    tmpBufA = new double[Size(local_A)];
    assert(tmpBufA);
    tmpBufB = new double[Size(local_B)];
    assert(tmpBufB);

//  Call the utility to run Cannon's algorithm "niter" times
//  Returns the total running time
//  Your matrix multiplication routine, cannon()
//  will be invoked via a callback
    MPI_Barrier(MPI_COMM_WORLD);
    double time = RunCannon(niter,n,local_A, local_B, local_C, nocomm);

//    PrintMatrix("Result Matrix C",local_C);

// Verify the result.  OK defaults to "pass" so that ReportStats does
// not read an uninitialized value when checking is disabled.
    int OK = 1;
    if (check)
	OK = DoVerification(local_A, local_B,local_C,n,niter,grid.comm);

    ReportStats(niter,n,time, check, OK, nocomm);

    Free_local_matrix(&local_A);
    Free_local_matrix(&local_B);
    Free_local_matrix(&local_C);
    delete [] tmpBufA;
    delete [] tmpBufB;
    MPI_Finalize();
    return 0;
}  /* main */


/*********************************************************/
/* Cannon's algorithm for C = A*B on a sqrt(p) x sqrt(p) process grid.
 * This is a template: the pre-skew and shift communication steps are
 * left as exercises ("Fill in your code here"), so as written the
 * routine only accumulates grid.q local products of the resident
 * blocks without moving data between processes.
 *
 *   n       global matrix order (each local block is n/grid.q square)
 *   local_A this process's block of A (in; shifted in place once the
 *           communication code is added)
 *   local_B this process's block of B (in; likewise shifted in place)
 *   local_C this process's block of the product (out; zeroed here)
 *   nocomm  nonzero => skip all communication (timing baseline mode)
 */
void Cannon(
        int              n         /* in  */, 
        LOCAL_MATRIX_T*  local_A   /* in  */,
        LOCAL_MATRIX_T*  local_B   /* in  */,
        LOCAL_MATRIX_T*  local_C   /* out */,
	int nocomm)
{

    int              stage;
    int              bcast_root;      /* unused placeholder for the student code */
    int              n_bar;  /* n/sqrt(p)               */
    int              source;          /* shift partner ranks -- to be computed below */
    int              dest;
    MPI_Status       status;          /* for the shift receives (once implemented) */
    MPI_Request      reqA, reqB;      /* for nonblocking shifts (once implemented) */

    n_bar = n/grid.q;



    /* Message tags for the pre-skew phase */
    const int SKEW_ROW = 5,
    	      SKEW_COL = 6;
    /* Pre-skewing */
 

// Use the parallel print facilty to generate conveniently formatted output
//    PPF_Print( MPI_COMM_WORLD,"Entering Cannon\n");
//    PPF_Print( MPI_COMM_WORLD,"nocomm: %d\n",nocomm);
//    PPF_Print( MPI_COMM_WORLD,"my_row: %d, my_col: %d\n",grid.my_row,grid.my_col);

    // Don't pre-skew if you wanted to turn off communication
    if (!nocomm){
        // Pre-skew A
        // --> Fill in your code here

//        PrintMatrix("A after pre-skew ",local_A);

        // Pre-skew B
        // --> Fill in your code here
//        PrintMatrix("B after pre-skew",local_B);
    }
	
    /* Calculate rank of next process for circular shift of A */  
    // --> Fill in your code here

    /* Calculate rank of next process for circular shift of B */  
    // --> Fill in your code here

    if (!nocomm)
	MPI_Barrier(MPI_COMM_WORLD);

    /* The multiplication stages */
    /* Message tags for the per-stage shift phase */
    const int SHFT_ROW = 0,
    	      SHFT_COL = 1;

// zero out local_c
    for (int i = 0; i < Order(local_C); i++)
        for (int j = 0; j < Order(local_C); j++)
            Entry(local_C,i,j) = 0.0;

    /* q-1 stages of multiply-then-shift; the final multiply happens
     * after the loop so no shift follows it */
    for (stage = 0; stage < grid.q-1; stage++) {
        MPI_Status       s1, s2;
        MPI_Request      reqAs,reqBs;
     
        // Compute the local matrix product
        Local_MM(local_A, local_B, local_C);

// Shift A and B
// --> Fill in your code here
	if (!nocomm){
	 }
    }
    // One last MM
    Local_MM(local_A, local_B, local_C);
    
} /* Cannon */


/*********************************************************/
/* Allocate a local_order x local_order matrix block.
 * Aborts the MPI job on allocation failure, so callers never need to
 * check the result.
 *
 * Fix vs. original: the allocations were performed *inside* assert(),
 * so a build with -DNDEBUG would have removed the allocations entirely
 * (undefined behavior).  The checks are now explicit and unconditional.
 */
LOCAL_MATRIX_T* LocalMatAlloc(int local_order) {
    LOCAL_MATRIX_T* temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T));
    if (temp == NULL) {
        cerr << "LocalMatAlloc: out of memory" << endl;
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    temp->entries =
        (double *) malloc((size_t)local_order * (size_t)local_order * sizeof(double));
    if (temp->entries == NULL) {
        cerr << "LocalMatAlloc: out of memory" << endl;
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    temp->n_bar = local_order;
    return temp;
}  /* LocalMatAlloc */


/*********************************************************/
/* Release a matrix block allocated by LocalMatAlloc and null the
 * caller's pointer.
 *
 * Fix vs. original: the separately-malloc'ed entries array was leaked,
 * and the caller's pointer was left dangling.
 */
void Free_local_matrix(
         LOCAL_MATRIX_T** local_A_ptr  /* in/out */) {
    if (*local_A_ptr != NULL) {
        free((*local_A_ptr)->entries);  // was leaked in the original
        free(*local_A_ptr);
        *local_A_ptr = NULL;            // guard against double free / use-after-free
    }
}  /* Free_local_matrix */



/* Compare this process's block of C against the closed-form exact
 * product C[i][j] = sum_{k=1..N} 1/((i+k)*(k+j)) (global 1-based
 * indices), printing up to MaxErrs mismatches.
 * Returns 1 if every local entry is within EPS of the exact value,
 * 0 otherwise.  Collective: all processes in MPI_COMM_WORLD must call.
 *
 * Fixes vs. original:
 *  - the summary percentage used integer division (countErrs/nsq),
 *    which printed 0% whenever countErrs < nsq;
 *  - rank 0 reported only its *own* error count as the total; the
 *    counts are now summed across processes with MPI_Reduce.
 */
int Verify( LOCAL_MATRIX_T*  local_C  /* out */, int N)
{
    int i, j, k;
    int OK = 1;
    char Out[250];
    const double 	EPS = 1.0e-7;		// Comparison tolerance

    // Maximum number of error messages that we'll print out (per process)
    const int MaxErrs = 10;
    int countErrs = 0;

    // Global (row, col) offset of this process's block
    int I = grid.my_row*Order(local_C),
        J = grid.my_col*Order(local_C);
    for (i = 0; i < Order(local_C); i++){
	int ii = I + i;
        for (j = 0; j < Order(local_C); j++){
	    int jj = J + j;
            double C_exact = 0;
	    for  (k = 1; k <= N; k++){
		C_exact += 1.0/(double)((ii+k)*(k+jj));
	    }
	    if (fabs(C_exact-Entry(local_C,i,j)) > EPS ){
	       OK = 0;
               countErrs++;
               if (countErrs < MaxErrs){
                  sprintf(Out,"Entry [%d,%d] (Proc. %d) is: %f, should be: %f\n", ii,jj,grid.my_rank, Entry(local_C,i,j), C_exact);
	          cerr << Out << flush ;
	       }
               else if (countErrs == MaxErrs){
                   cerr << endl;
                   cerr << " *** Only the first " << MaxErrs << " entries in error have been reported.\n     Increase MaxErrs if you want to see more" << endl << endl;
	           cerr << Out << flush ;
               }

	    }
	}
    }

    MPI_Barrier(MPI_COMM_WORLD);

    // Sum the per-process error counts so rank 0 reports a true total
    int totalErrs = 0;
    MPI_Reduce(&countErrs, &totalErrs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    int nsq = Order(local_C)*Order(local_C);   // entries per process
    if ((!grid.my_rank) && (totalErrs > 0)){
         long totalEntries = (long)nsq * grid.p;
         cerr << "A total of " << totalErrs << " of " << totalEntries << " entries ("
              << 100.0*(double)totalErrs/(double)totalEntries << "%) were in error" << endl << endl;
         cerr << flush ;
    }

    MPI_Barrier(MPI_COMM_WORLD);
//    PPF_Print( MPI_COMM_WORLD,"local OK: %d\n",OK);
    return OK;

}  /* Verify */

/*********************************************************/
