//-------------------------------------------------------------------------//
//                                                                         //
//  This benchmark is an OpenMP C version of the NPB LU code. This OpenMP  //
//  C version is developed by the Center for Manycore Programming at Seoul //
//  National University and derived from the OpenMP Fortran versions in    //
//  "NPB3.3-OMP" developed by NAS.                                         //
//                                                                         //
//  Permission to use, copy, distribute and modify this software for any   //
//  purpose with or without fee is hereby granted. This software is        //
//  provided "as is" without express or implied warranty.                  //
//                                                                         //
//  Information on NPB 3.3, including the technical report, the original   //
//  specifications, source code, results and information on how to submit  //
//  new results, is available at:                                          //
//                                                                         //
//           http://www.nas.nasa.gov/Software/NPB/                         //
//                                                                         //
//  Send comments or suggestions for this OpenMP C version to              //
//  cmp@aces.snu.ac.kr                                                     //
//                                                                         //
//          Center for Manycore Programming                                //
//          School of Computer Science and Engineering                     //
//          Seoul National University                                      //
//          Seoul 151-744, Korea                                           //
//                                                                         //
//          E-mail:  cmp@aces.snu.ac.kr                                    //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
//          and Jaejin Lee                                                 //
//-------------------------------------------------------------------------//

#include <math.h>
#include "applu.incl"


#include <mpi.h>

//---------------------------------------------------------------------
// to compute the l2-norm of vector v.
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// To improve cache performance, second two dimensions padded by 1 
// for even number sizes only.  Only needed in v.
//---------------------------------------------------------------------

void l2norm (int ldx, int ldy, int ldz, int nx0, int ny0, int nz0,
     int ist, int iend, int jst, int jend,
     double v[][ldy/2*2+1][ldx/2*2+1][5], double sum[5])
{
  //---------------------------------------------------------------------
  // Compute the L2-norm of the 5-component vector field v over the
  // interior grid points, with the k-dimension block-partitioned across
  // MPI ranks.  Each rank accumulates a partial sum of squares over its
  // own slab of k-planes; MPI_Allreduce then combines the partials so
  // every rank returns with the identical global norms in sum[0..4].
  //
  // v   : field array (second and third dims padded to odd extents for
  //       cache performance, per the header comment above)
  // sum : output, sum[m] = sqrt( sum_grid v[k][j][i][m]^2 / npoints )
  //---------------------------------------------------------------------
  double sum_local[5];
  int i, j, k, m;
  int my_id, pros_num;

  MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
  MPI_Comm_size(MPI_COMM_WORLD, &pros_num);

  // Block-partition the interior k-range [1, nz0-2] across the ranks:
  // rank r owns planes [span*r+1, span*(r+1)].  The last rank absorbs
  // the remainder so the whole range is always covered, including the
  // degenerate case pros_num > nz0-2 (span == 0: only the last rank
  // does any work).
  int average_span = (nz0 - 2) / pros_num;
  int lower_bound  = average_span * my_id + 1;
  int upper_bound  = average_span * (my_id + 1);
  if (my_id + 1 == pros_num && upper_bound < nz0 - 2) {
    upper_bound = nz0 - 2;
  }

  for (m = 0; m < 5; m++) {
    sum_local[m] = 0.0;
  }

  // Local sum of squares over this rank's slab of k-planes.
  for (k = lower_bound; k <= upper_bound; k++) {
    for (j = jst; j < jend; j++) {
      for (i = ist; i < iend; i++) {
        for (m = 0; m < 5; m++) {
          sum_local[m] = sum_local[m] + v[k][j][i][m] * v[k][j][i][m];
        }
      }
    }
  }

  // Combine the per-rank partials; every rank receives the global
  // totals.  MPI_Allreduce fully overwrites the receive buffer, so sum
  // needs no prior zeroing.
  MPI_Allreduce(sum_local, sum, 5, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

  // Normalize by the number of interior points.  Compute the point
  // count in double to avoid 32-bit int overflow on very large grids.
  double npoints = (double)(nx0 - 2) * (double)(ny0 - 2) * (double)(nz0 - 2);
  for (m = 0; m < 5; m++) {
    sum[m] = sqrt(sum[m] / npoints);
  }
}

