/* 
 *  Utilities for Cannon's algorithm
 */
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <mpi.h>
#include "mkl_cblas.h"
#include "cannon.h"
#include "ptools_ppf_cpp.h"

using namespace std;


extern COMM_INFO_T  grid;
extern void Cannon(int n, LOCAL_MATRIX_T* local_A,
                   LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C, int nocomm);
int Verify( LOCAL_MATRIX_T*  local_A, int N);



/*
 * Generates a Hilbert matrix H(i,j) = 1/(i+j-1)
 * It's easy to check whether our results are correct:
 * (H * H)(i,j) = Sum over k of 1.0/((i+k-1)*(k+j-1))
 *
 */


void GenMatrix( LOCAL_MATRIX_T*  local_A  /* out */)
{
    // Fill this process's local block with its portion of the global
    // Hilbert matrix H(i,j) = 1/(i+j-1).  Global (1-based) indices are
    // derived from the block's position (grid.my_row, grid.my_col) in
    // the process grid.
    const int n_bar = Order(local_A);

    for (int i = 0; i < n_bar; i++) {
        const int globalI = grid.my_row * n_bar + i + 1;
        for (int j = 0; j < n_bar; j++) {
            const int globalJ = grid.my_col * n_bar + j + 1;
            Entry(local_A, i, j) = 1.0 / (double) (globalI + globalJ - 1);
        }
    }
}  /* GenMatrix */



/*********************************************************/

//
// Print out the global matrix: gather the distributed blocks onto the
// "root of roots" (global rank 0), which prints the full N x N matrix.
//
//   title : label printed before the matrix (rank 0 only)
//   A     : this process's local block of the distributed matrix
//
// Collective: every process in MPI_COMM_WORLD must call this.

void  PrintMatrix(char* title, LOCAL_MATRIX_T* A)
{
    int i, j;

    int nsq = Order(A)*Order(A);    /* doubles in one local block */
    int nwds_r = grid.q*nsq;        /* doubles in one block-row   */
    double* gA_r = new double[nwds_r];
    assert(gA_r);

    // Gather the blocks of this process row into the row root (rank 0)
    int ierr1 = MPI_Gather ( A->entries, nsq, grid.local_matrix_mpi_t, 
			   gA_r, nsq,  grid.local_matrix_mpi_t, 
                           0, grid.row_comm );
    assert(ierr1 == MPI_SUCCESS);
    int nwds = nwds_r * grid.q;     /* doubles in the whole matrix */
    double* gA = new double[nwds];
    assert(gA);
    // Gather all the row data from each row root into the column root
    int ierr2 = MPI_Gather ( gA_r, nwds_r, grid.local_matrix_mpi_t,
			     gA, nwds_r,  grid.local_matrix_mpi_t, 
                 	     0, grid.col_comm );
    assert(ierr2 == MPI_SUCCESS);


//  Only the "root of roots" outputs the array
    MPI_Barrier(MPI_COMM_WORLD);
    if (!grid.my_rank){
        cout << title  << endl;
        int N = Order(A) * grid.q;
        double *pA = gA;
        if (grid.p == 1){
            // Single process: the buffer is already in row-major order
            for (i = 0; i < N; i++){
                for (j = 0; j < N; j++){
                      printf("%11.4e ",*pA++);
	        }
                printf("\n");
	    }
	    cout << endl;
        }
//
//  We have to skip across the blocks in order to capture the row structure
//  correctly: the gathered buffer is ordered block-by-block, not row-by-row.
//
        else {
            int n_bar = Order(A);
            for (int r=0; r < grid.q;  r++){              /* block rows     */
                double *pq = gA + r*grid.q*n_bar*n_bar;
                for (int rr=0; rr < n_bar;  rr++){        /* rows in block  */
                    double *pr = pq + rr*n_bar;
                    for (int c=0; c < grid.q;  c++){      /* block columns  */
                        double *ps = pr + c*n_bar*n_bar;
                        for (j = 0; j < n_bar; j++){
                            printf("%11.4e ",ps[j]);
                        }
                    }
                    printf("\n");
                }
            }
        }
    }
    cout << flush;
    // Release the gather buffers (the original version leaked both)
    delete[] gA;
    delete[] gA_r;
    MPI_Barrier(MPI_COMM_WORLD);
}

/*********************************************************
 *  Multiplies two local matrices:
 *      local_C += local_A * local_B
 *  via cblas_dgemm with alpha = beta = 1 and no transposes.
 *  Beta = 1 makes dgemm accumulate into local_C, which is what the
 *  shift-and-multiply steps of Cannon's algorithm require.
 */
void Local_MM(
         LOCAL_MATRIX_T*  local_A  /* in  */,
         LOCAL_MATRIX_T*  local_B  /* in  */, 
         LOCAL_MATRIX_T*  local_C  /* out */) {

     /*
      * Users of the CBLAS interface: be aware that the CBLAS are just a C
      * interface to the BLAS, which is based on the FORTRAN standard and
      * subject to the FORTRAN standard restrictions. In particular, the output
      * parameters should not be referenced through more than one argument
      * http://software.intel.com/sites/products/documentation/hpc/compilerpro/en-us/cpp/lin/mkl/refman/appendices/mkl_appD_Intro.html
      */

     /* Set up the call for dgemm, to perform local matrix  multiplication
      *
      * You should not change the definitions for
      *         Alpha, and Beta 
      *         LDA, LDB, LDC
      *         transA, transB
      *
      * You need not change M, N, and K, unless your local sub-matrices are
      * not square
      */

     /* Alpha and Beta were previously declared `const int`, but
      * cblas_dgemm takes double scalars: an int declaration silently
      * truncates any non-integral scaling factor.  The values (1.0)
      * are unchanged. */
     const double Alpha = 1.0;
     const double Beta  = 1.0;
     const int M = Order(local_A), N=Order(local_A), K=Order(local_B);
     const int LDA = N, LDB = K, LDC = K;
     CBLAS_TRANSPOSE transA = CblasNoTrans;
     CBLAS_TRANSPOSE transB = CblasNoTrans;
     /* Don't change this call */
     double *A = local_A->entries;
     double *B = local_B->entries;
     double *C = local_C->entries;
     cblas_dgemm( CblasRowMajor, transA, transB, M, N, K,
                  Alpha, A, LDA,
                  B, LDB, 
                  Beta, C, LDC);

}  /* Local_MM */


/*********************************************************/
// Initialize the global `grid` structure: discover p, form the q x q
// process grid (q = sqrt(p)), and build the row/column sub-communicators
// used by Cannon's algorithm.
// NOTE: the mapping is column-major -- my_row = rank % q, my_col = rank / q.
void Setup_Comm( )
{
    int world_rank;

    grid.comm = MPI_COMM_WORLD;
    grid.local_matrix_mpi_t = MPI_DOUBLE; /* The type of each element */

    /* Set up Global Grid Information */
    MPI_Comm_size(grid.comm, &(grid.p));
    MPI_Comm_rank(grid.comm, &world_rank);

    /* We assume p is a perfect square -- check the assumption instead of
     * silently computing with a truncated q */
    grid.q = (int) sqrt((double) grid.p);
    assert(grid.q * grid.q == grid.p);

    int rank_key = world_rank;
    grid.my_rank = world_rank;  /* grid.comm IS MPI_COMM_WORLD, so the
                                 * world rank is the grid rank */

    grid.my_row = grid.my_rank % grid.q;
    grid.my_col = grid.my_rank / grid.q;

    /* Processes sharing my_row form a row communicator (same for columns);
     * rank_key preserves world-rank ordering inside each sub-communicator */
    MPI_Comm_split(grid.comm, grid.my_row, rank_key, &(grid.row_comm));
    MPI_Comm_split(grid.comm, grid.my_col, rank_key, &(grid.col_comm));
    MPI_Comm_rank(grid.row_comm, &(grid.my_rankR));
    MPI_Comm_rank(grid.col_comm, &(grid.my_rankC));


} /* Setup_Comm */

// Time Cannon's algorithm on the distributed n x n matrices.
//   niter == -1 : run exactly once and return that single run's time
//   niter >=  1 : run niter+1 times, the first as an untimed warm-up,
//                 and return the average time of the last niter runs
// nocomm is forwarded to Cannon() to optionally disable communication.
// Collective: bracketed by barriers so stragglers are not mis-timed.
double RunCannon(int niter, int n, LOCAL_MATRIX_T*  local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C, int nocomm){

    // Initialize: with niter == 0 the timer was never started in the
    // original code and `time += MPI_Wtime()` read an indeterminate value
    double time = 0.0;
    int nit;
    // We put a barrier here to make sure that everyone is ready
    // and we don't time the stragglers
    MPI_Barrier(MPI_COMM_WORLD);
    if (niter == -1){
        time = -MPI_Wtime();    // start timer
        Cannon(n, local_A, local_B, local_C,nocomm);
    }
    else
        for (nit=0;nit<=niter;nit++) {
            if (nit==1) {
                time = -MPI_Wtime();  // start timing after the warm-up pass
            }

            Cannon(n, local_A, local_B, local_C,nocomm);
        }
    // We put a barrier here to measure the last process to finish
    MPI_Barrier(MPI_COMM_WORLD); // stop timer
    time += MPI_Wtime();
    // Average over the timed iterations only.  The original divided by
    // niter unconditionally, which negated the result when niter == -1.
    if (niter > 0)
        time = time/(double)niter;
    return time;
}

// Wall-clock seconds spent in Verify() by DoVerification();
// read by ReportStats() when the check flag is set.
double verifyTime = 0.0;

// If we are checking the answer, we need to recompute
// the product if we have iterated several times.
// Regenerates A and B, zeroes C, reruns Cannon once (with communication
// enabled), then checks the result with Verify().  Returns the
// all-reduced OK flag: nonzero iff every process verified its block.
// The `comm` parameter is currently unused (MPI_COMM_WORLD is used
// throughout); it is kept for interface compatibility.
int  DoVerification(LOCAL_MATRIX_T*  local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C, int n, int niter, MPI_Comm comm){
    if (grid.my_rank == 0){
        cout << endl << "Matrix Multiply  Completed ... " << endl;
        cout << endl << "Performing verification ... " << endl << endl;
    }
    // Verify solution: rebuild the inputs, since iterating clobbered them
    GenMatrix(local_A);
    GenMatrix(local_B);
    // Zero out local_C (Local_MM accumulates, Beta = 1)
    for (int i = 0; i < Order(local_C); i++)
        for (int j = 0; j < Order(local_C); j++)
            Entry(local_C,i,j) = 0.0;

    // Run Cannon's algorithm for one iteration (niter == -1).
    // Don't shut off communication.
    // We avoid the pre-warmup run to avoid a bug in the
    // multi-iteration scheme.
    RunCannon(-1,n,local_A, local_B, local_C,0);

//    PrintMatrix("Global Matrix C",local_C);
    MPI_Barrier(MPI_COMM_WORLD);
    verifyTime = -MPI_Wtime();
    int local_OK = Verify( local_C, n);
    MPI_Barrier(MPI_COMM_WORLD);
    verifyTime += MPI_Wtime();

//    PPF_Print( MPI_COMM_WORLD,"local OK: %d\n",local_OK);

    // Use a distinguished value to catch errors
    int OK = -99;

    // Everyone gets a copy of the reduced result; MPI_PROD makes the
    // global flag zero if any single process failed
    MPI_Allreduce ( &local_OK, &OK, 1, MPI_INT, MPI_PROD,  MPI_COMM_WORLD);
    if (!grid.my_rank){
        if (OK)
            cout << "Result verified to be correct" << endl;
        else
            cout << "*** Result incorrect ***\n";
        cout << endl;
    }
    return(OK);
}

// Rank 0 prints timing and GFLOPS statistics for the run.
// When `check` is nonzero, the verification-style report is emitted
// (includes verify time and the correctness flag OK); otherwise the
// communication flag (derived from nocomm) is reported instead.
void ReportStats(int niter,int n, double time, int check, int OK, int nocomm){
    if (grid.my_rank != 0)
        return;

    // 2*n^3 flops for an n x n matrix multiply, accumulated in doubles
    // so large n does not overflow an integer intermediate
    double gflops = 2.0*n;
    gflops *= n;
    gflops *= n;
    gflops /= 1.0e9;
    gflops /= time;

    cout << "T_{" << grid.p << "} = " << time << " sec [" << gflops << " Gflops]" << endl << endl;

    if (check){
        printf( "       p     n   iter     time  vTime  GFLOPS  GFlops/core   Correct?\n");
        printf( "># %6d  %4d  %3d  %9.4f  %6.1f  %7.1f %7.3f  ", grid.p, n, niter, time, verifyTime, gflops, gflops/grid.p);
        printf(OK ? "\tY \n" : "\tN \n");
    }
    else{
        printf( "       p     n   iter     time    GFLOPS  GFlops/core  Commun.\n");
        printf( "># %6d  %4d  %3d  %9.4f  %6.1f  %7.3f", grid.p, n, niter, time, gflops, gflops/grid.p);
        printf(nocomm ? "\tN \n" : "\tY \n");
    }
    printf("\n");
}

// Abort the run cleanly: synchronize all processes, shut down MPI, and
// exit with a nonzero (error) status.  Order matters -- the barrier must
// precede MPI_Finalize, and no MPI calls are legal after it.
void Stop(){
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
    exit(-1);
}
