/*test if simdization will give performance, by chenli */
#include <stdio.h>
#include <math.h>
#include <string.h>
#include "slave.h"
#include "assert.h"

#ifdef SIMD
#include "simd.h"
#endif

__thread_local volatile unsigned long get_reply,put_reply;
__thread_local double A_lhs[BLKY][BLKZ];//you cannot use __attribute__((align(32))) which is  for main memory alignment
__thread_local double A_rhs[3][BLKY+2][BLKZ+2];

extern double A[2][SIZEX][SIZEY][SIZEZ];
extern int thread_num, iter, N, ntiley,ntilez;
extern int startk[64], startj[64];
extern int reuse_num;

#define ELE sizeof(double) //bytes
#define SIMD_WIDTH 256 //simd len, bits
//For SIMD: even if BLKY and BLKZ are both even, so that BLKY*BLKZ and (BLKY+2)*(BLKZ+2) are multiples of 4, the flat offset i*plane + j*(BLKZ+2) + k is not congruent mod 4 across rows, which makes SIMDization much more complicated.
//So we require (BLKZ+2) to be a multiple of 4, which keeps every row 256-bit aligned and simplifies manual SIMDization of the loops.
#define alpha_d 0.0876
#define beta_d  0.0765

void func()
{
   int i,j,k,n,jj,kk,my_id;//N ~ average num tiles that each thread assigned
   int dest,src,my_tiles;
   int cur_blky, cur_blkz;
#ifdef SIMD
   int align=SIMD_WIDTH/8/sizeof(double);
   assert((BLKZ+2)%4==0); //simplify manual SIMDization 
#endif
   my_id = athread_get_id(-1);
   //printf("my_id = %d \n",my_id);
      dest = iter % 2 ;
      src = 1-dest;
      n = 0;
      if(my_id<thread_num-1)
         my_tiles=N;
      else
         my_tiles=ntiley*ntilez-N*(thread_num-1);
      //printf("my_tiles:%d\n",my_tiles);
      //my_nums: 
      for(j = startj[my_id]; n < my_tiles && j < SIZEY-2; j += BLKY)
      for(k = startk[my_id]; n < my_tiles && k < SIZEZ-2 ; k += BLKZ)
      {
         int stride,len,bsize;
//         printf("j:%d,k:%d\n",j,k);
         ++n;
         //average tile size: <SIZEX, BLXY,BLKZ>
         cur_blkz=BLKZ;
         if(k+BLKZ>SIZEZ-2) //last tile at dim Z
            cur_blkz=(SIZEZ-2)%BLKZ;
         cur_blky=BLKY;
         if(j+BLKY>SIZEY-2) //last tile at dim Y
            cur_blky=(SIZEY-2)%BLKY;
  //       printf("cur_blkz,%d,cur_blky%d,\n",cur_blkz,cur_blky);
             //step 1: prelude (start-up)
              get_reply = 0;
              //update low plane 
              stride = (SIZEZ-(cur_blkz+2))*ELE;
              len = (cur_blky+2)*(cur_blkz+2)*ELE;
              bsize = (cur_blkz+2)*ELE;
              athread_get(PE_MODE,&A[src][0][j][k]
                 , &A_rhs[0][0][0]
                 , len, (void*)&get_reply
                 , 0, stride, bsize);
             //update middle plane 
             stride = (SIZEZ-(cur_blkz+2))*ELE;
             len = (cur_blky+2)*(cur_blkz+2)*ELE;
             bsize = (cur_blkz+2)*ELE;
             athread_get(PE_MODE,&A[src][1][j][k]
                 , &A_rhs[1][0][0]
                 , len, (void*)&get_reply
                 , 0, stride, bsize);
            while(get_reply!=2);
       //    printf("slave : %20.20f,%20.20f,%20.20f,%20.20f,%20.20f,%20.20f,%20.20f\n",A_rhs[1][1][1],A_rhs[0][1][1],A_rhs[2][0][0],A_rhs[1][0][1],A_rhs[1][2][1],A_rhs[1][1][0],A_rhs[1][1][2]);
        //step 2: stream in and computation 
         for(i = 1; i < SIZEX-1; i += 1)
         {
      //      reuse_num++;
           get_reply = 0;
           //update down plane 
           stride = (SIZEZ-(cur_blkz+2))*ELE;
           len = (cur_blky+2)*(cur_blkz+2)*ELE;
           bsize = (cur_blkz+2)*ELE;
           athread_get(PE_MODE,&A[src][i+1][j][k]
                 , &A_rhs[(i+1)%3][0][0]
                 , len, (void*)&get_reply
                 , 0, stride, bsize); 
           while(get_reply!=1);
       //step 2.2: computation
       for(jj = 1; jj < cur_blky+1; ++jj) 
#ifdef SIMD
       {
          doublev4 v1,v2,v3,v4; //simd
          int split=(cur_blkz+1)/align*align;
           printf("align=%d,split=%d\n",align, split);
          //return;
          //1/3.preloop
          //we should fully  unroll it 
#ifdef FULLY_UNROLL
         //only apply for double
         assert(align==4);  //only correct for double
             A_lhs[jj-1][0] = alpha_d * (A_rhs[i%3][jj][1]) + \
                        beta_d * (A_rhs[(i-1)%3][jj][1] + A_rhs[(i+1)%3][jj][1] +\
                        A_rhs[i%3][jj-1][1] + A_rhs[i%3][jj+1][1] +\
                        A_rhs[i%3][jj][0] + A_rhs[i%3][jj][2]);
             A_lhs[jj-1][1] = alpha_d * (A_rhs[i%3][jj][2]) + \
                        beta_d * (A_rhs[(i-1)%3][jj][2] + A_rhs[(i+1)%3][jj][2] +\
                        A_rhs[i%3][jj-1][2] + A_rhs[i%3][jj+1][2] +\
                        A_rhs[i%3][jj][1] + A_rhs[i%3][jj][3]);
             A_lhs[jj-1][2] = alpha_d * (A_rhs[i%3][jj][3]) + \
                        beta_d * (A_rhs[(i-1)%3][jj][3] + A_rhs[(i+1)%3][jj][3] +\
                        A_rhs[i%3][jj-1][3] + A_rhs[i%3][jj+1][3] +\
                        A_rhs[i%3][jj][2] + A_rhs[i%3][jj][4]);

#else
          for(kk = 1; kk <= align-1; ++kk)  
             A_lhs[jj-1][kk-1] = alpha_d * (A_rhs[i%3][jj][kk]) + \
                        beta_d * (A_rhs[(i-1)%3][jj][kk] + A_rhs[(i+1)%3][jj][kk] +\
                        A_rhs[i%3][jj-1][kk] + A_rhs[i%3][jj+1][kk] +\
                        A_rhs[i%3][jj][kk-1] + A_rhs[i%3][jj][kk+1]);
#endif  //fully unroll
          //2/3.main part of the loop kk
          for(kk = align; kk < split; kk+=align) { 
             //aligned load A_rhs[(i-1)%3][jj][kk], for double, offset are (i-1)*4x+jj*4y+kk, multiple of 256b
             simd_load(v1,&A_rhs[(i-1)%3][jj][kk]);
             //aligned load A_rhs[(i+1)%3][jj][kk], 256b align for  double
             simd_load(v2,&A_rhs[(i+1)%3][jj][kk]);
             //aligned load A_rhs[i%3][jj-1][kk], 256b align for double
             simd_load(v3,&A_rhs[i%3][jj-1][kk]);
             v4=v1+v2+v3;
             //aligned load A_rhs[i%3][jj+1][kk], 256b align for double 
             simd_load(v2,&A_rhs[i%3][jj+1][kk]);
             v1=v2+v4;
             //two un-aligned vector, if simd_set may use more time, further shuffle/vextf/vinsf not apply either
             //using loadu,  A_rhs[i%3][jj][kk-1/+1]
             simd_loadu(v2,&A_rhs[i%3][jj][kk-1]);
             simd_loadu(v3,&A_rhs[i%3][jj][kk+1]);
             //ERROR, the next line will cause error!  WHY???
             v4=v1+v2+v3;

             v1=(doublev4) beta_d;
             //aligned load A_rhs[i%3][jj][kk], 256b align for  double
             simd_load(v2,&A_rhs[i%3][jj][kk]);
             v3=v2+v1*v4;
             v2=(doublev4) alpha_d;
             v1=v2*v3;
             simd_storeu(v1,&A_lhs[jj-1][kk-1]);//maybe unligned store 
          }//main part of kk loop
          //3/3: remainder loop
          for(kk = split; kk < cur_blkz+1; ++kk)  
             A_lhs[jj-1][kk-1] = alpha_d * (A_rhs[i%3][jj][kk]) + \
                        beta_d * (A_rhs[(i-1)%3][jj][kk] + A_rhs[(i+1)%3][jj][kk] +\
                        A_rhs[i%3][jj-1][kk] + A_rhs[i%3][jj+1][kk] +\
                        A_rhs[i%3][jj][kk-1] + A_rhs[i%3][jj][kk+1]);

      }//simd version
#else
          for(kk = 1; kk < cur_blkz+1; ++kk)  
             A_lhs[jj-1][kk-1] = alpha_d * (A_rhs[i%3][jj][kk]) + \
                        beta_d * (A_rhs[(i-1)%3][jj][kk] + A_rhs[(i+1)%3][jj][kk] +\
                        A_rhs[i%3][jj-1][kk] + A_rhs[i%3][jj+1][kk] +\
                        A_rhs[i%3][jj][kk-1] + A_rhs[i%3][jj][kk+1]);
#endif
    //   printf("1,1 %20.20f\n",A_lhs[0][0]);
       //step 3: write back
       put_reply=0;
       stride = (SIZEZ-cur_blkz)*ELE;
       bsize = cur_blkz*ELE;
       len = bsize*cur_blky;
       athread_put(PE_MODE,&A_lhs[0][0]
                     , &A[dest][i][j+1][k+1]
                     ,len,(void*)&put_reply,stride,bsize);
       while(put_reply!=1);
       }//of i loop, pipeline loop
    }//of k loop, partitioned loop
}
