#include <omp.h>
#define index_yl(i,k,j,m) (i-1+(k-1)*ni+(j-1)*ni*nk +m*(ni*nk*nj))
#define index_b(i,k,j,m) (i-1+(k-1)*ni+(j-1)*ni*nk+(m-1)*ni*nk*nj)

/*
 * svrasr: in-place forward-elimination / back-substitution sweep over one
 * component of the 3-D field yl, driven by five coefficient fields packed
 * in b.  Looks like a line-relaxation / tridiagonal-style solve performed
 * plane-by-plane in j — NOTE(review): confirm intended algorithm.
 *
 *  yl        : field array; component m (0-based), ni*nk*nj points, is
 *              updated in place.
 *  m         : 0-based component index into yl.
 *  b         : five coefficient fields of ni*nk*nj points each, stored
 *              contiguously (1-based component index, cf. index_b macro):
 *                1 -> multiplicative scale used in the backward sweep
 *                2 -> coupling to the (i-1) neighbour (forward sweep)
 *                3 -> coupling to the (i+1) neighbour (backward sweep)
 *                4 -> coupling to the (k-1) neighbour (forward sweep)
 *                5 -> coupling to the (k+1) neighbour (backward sweep)
 *  ni,nk,nj  : extents of the i (fastest), k, j (slowest) dimensions.
 *
 * j-planes are mutually independent, which is what permits the OpenMP
 * parallelisation of both sweeps over j.
 * NOTE(review): only the global corner (ni,nk,nj) is scaled by b(...,1)
 * before the backward sweep — identical to the original code and to the
 * svrasr_opt_c variant; confirm planes j<nj are not meant to scale their
 * own (ni,nk) point.
 * NOTE(review): indices are int, so ni*nk*nj must fit in a signed int.
 */
void svrasr( float*  __restrict__ yl,int m, float* __restrict__ b, int ni, int nk, int nj){
      const int ng = ni*nk*nj;        /* points per field component */
      const int sj = ni*nk;           /* stride between j-planes    */
      /* Per-component base pointers: yl's component index is 0-based,
         b's is 1-based (matches the index_yl/index_b macros above).  */
      float* const        y  = yl + m*ng;
      const float* const  b1 = b;          /* backward-sweep scale */
      const float* const  b2 = b + ng;     /* (i-1) coupling       */
      const float* const  b3 = b + 2*ng;   /* (i+1) coupling       */
      const float* const  b4 = b + 3*ng;   /* (k-1) coupling       */
      const float* const  b5 = b + 4*ng;   /* (k+1) coupling       */
      int i,k,j;
      /* Forward sweep: eliminate the (i-1) and (k-1) couplings. */
  #pragma omp parallel for private(j,k,i) schedule(runtime)
        for(j=1;j<=nj;j++){
          const int pj = (j-1)*sj;             /* this j-plane        */
          for(i=2;i<=ni;i++){
            y[pj+i-1] -= b2[pj+i-1]*y[pj+i-2];
          }
          for(k=2;k<=nk;k++){
            const int pk = pj + (k-1)*ni;      /* this k-row          */
            for(i=1;i<=ni;i++){
              y[pk+i-1] -= b4[pk+i-1]*y[pk-ni+i-1];
            }
            for(i=2;i<=ni;i++){
              y[pk+i-1] -= b2[pk+i-1]*y[pk+i-2];
            }
          }
        }

        /* Seed the back-substitution at the global corner (ni,nk,nj). */
        {
          const int cn = (ni-1) + (nk-1)*ni + (nj-1)*sj;
          y[cn] *= b1[cn];
        }
      /* Backward sweep: eliminate the (i+1)/(k+1) couplings and scale. */
  #pragma omp parallel for private(j,k,i) schedule(runtime)
         for(j=nj;j>=1;j-=1){
           const int pj = (j-1)*sj;
           const int pt = pj + (nk-1)*ni;      /* top k-row of plane j */
           for(i=ni-1;i>=1;i-=1){
             y[pt+i-1] = (y[pt+i-1] - b3[pt+i-1]*y[pt+i])*b1[pt+i-1];
           }
           for(k=nk-1;k>=1;k-=1){
             const int pk = pj + (k-1)*ni;
             for(i=1;i<=ni;i++){
               y[pk+i-1] -= b5[pk+i-1]*y[pk+ni+i-1];
             }
             y[pk+ni-1] *= b1[pk+ni-1];
             for(i=ni-1;i>=1;i-=1){
               y[pk+i-1] = (y[pk+i-1] - b3[pk+i-1]*y[pk+i])*b1[pk+i-1];
             }
           }
         }
        return;
      }

/*
 * svrasr_opt_c: serial, register-carried variant of svrasr.  Performs the
 * identical forward/backward sweeps, but keeps the previous i result in a
 * scalar ("carry") so each recurrence step reads a register instead of
 * reloading the element it just stored.
 *
 * Arguments and data layout are the same as svrasr: component m (0-based)
 * of yl is updated in place using the five coefficient fields in b
 * (1-based component index; see the index_b macro above).
 * NOTE(review): as in svrasr, only the global corner (ni,nk,nj) is scaled
 * by b(...,1) before the backward sweep — preserved unchanged.
 * NOTE(review): indices are int, so ni*nk*nj must fit in a signed int.
 */
void svrasr_opt_c( float*  __restrict__ yl,int m, float* __restrict__  b, int ni, int nk, int nj){
      const int ng = ni*nk*nj;        /* points per field component */
      const int sj = ni*nk;           /* stride between j-planes    */
      float* const        y  = yl + m*ng;
      const float* const  b1 = b;          /* backward-sweep scale */
      const float* const  b2 = b + ng;     /* (i-1) coupling       */
      const float* const  b3 = b + 2*ng;   /* (i+1) coupling       */
      const float* const  b4 = b + 3*ng;   /* (k-1) coupling       */
      const float* const  b5 = b + 4*ng;   /* (k+1) coupling       */
      int i,k,j;
      float carry;                    /* previous i result, kept in a register */
      /* Forward sweep: eliminate the (i-1) and (k-1) couplings. */
        for(j=1;j<=nj;j++){
          const int pj = (j-1)*sj;
          carry = y[pj];
          for(i=2;i<=ni;i++){
            carry = y[pj+i-1] - b2[pj+i-1]*carry;
            y[pj+i-1] = carry;
          }
          for(k=2;k<=nk;k++){
            const int pk = pj + (k-1)*ni;
            for(i=1;i<=ni;i++){
              y[pk+i-1] -= b4[pk+i-1]*y[pk-ni+i-1];
            }
            carry = y[pk];
            for(i=2;i<=ni;i++){
              carry = y[pk+i-1] - b2[pk+i-1]*carry;
              y[pk+i-1] = carry;
            }
          }
        }

        /* Seed the back-substitution at the global corner (ni,nk,nj). */
        {
          const int cn = (ni-1) + (nk-1)*ni + (nj-1)*sj;
          y[cn] *= b1[cn];
        }
      /* Backward sweep: eliminate the (i+1)/(k+1) couplings and scale. */
         for(j=nj;j>=1;j-=1){
           const int pj = (j-1)*sj;
           const int pt = pj + (nk-1)*ni;   /* top k-row of plane j */
           carry = y[pt+ni-1];
           for(i=ni-1;i>=1;i-=1){
             carry = (y[pt+i-1] - b3[pt+i-1]*carry)*b1[pt+i-1];
             y[pt+i-1] = carry;
           }
           for(k=nk-1;k>=1;k-=1){
             const int pk = pj + (k-1)*ni;
             for(i=1;i<=ni;i++){
               y[pk+i-1] -= b5[pk+i-1]*y[pk+ni+i-1];
             }
             y[pk+ni-1] *= b1[pk+ni-1];
             carry = y[pk+ni-1];          /* pick up the scaled edge value */
             for(i=ni-1;i>=1;i-=1){
               carry = (y[pk+i-1] - b3[pk+i-1]*carry)*b1[pk+i-1];
               y[pk+i-1] = carry;
             }
           }
         }
        return;
      }

/*
void svrasr_fix( float* __restrict__ yl,int m, float* __restrict__ b, int ni, int nk, int nj){
      int ng=ni*nk*nj;
     int i,j,k,ii,in,kn,kk; 
    
    for(int rr=0;rr<=1;rr++){
        for(j=1;j<=nj;j++){
          for(kk=1;kk<=nk;kk++){
            k=kk;kn=kk+1;
            if(rr==1) k=nk+1-k;
            if(rr==1) kn=nk+1-kn;
          
            for(ii=2;ii<=ni;ii++){
                i=ii;in=ii-1;
                if(rr==1)i=ni+1-ii;
                if(rr==1)in=ni+1-in;
                yl[index_yl(i,k,j,m)]=yl[index_yl(i,k,j,m)]-b[index_b(i,k,j,2+rr)]*yl[index_yl(in,k,j,m)];
                if(rr==1) yl[index_yl(i,k,j,m)] *=b[index_b(i,k,j,1)];
            }
            if(kk<nk){
                for(i=1;i<=ni;i++){
                    yl[index_yl(i,kn,j,m)]=yl[index_yl(i,kn,j,m)]-b[index_b(i,kn,j,4+rr)]*yl[index_yl(i,k,j,m)];
                }
                if(rr==1) yl[index_yl(ni,kn,j,m)]*=b[index_b(ni,kn,j,1)];
            }
          }
        }
        if(rr==0){
            yl[index_yl(ni,nk,nj,m)]=yl[index_yl(ni,nk,nj,m)]*b[index_b(ni,nk,nj,1)];
        }
    }
      }
#ifdef CUDA

//#define left_mask 0xFFFFFFFE
#define full_mask 0xFFFFFFFF


__global__ void cu_svrasr( float* __restrict__ yl,int m, float* __restrict__ b, int ni, int nk, int nj){
//#include "defindex.hpp"
    
    int ii= blockDim.x * blockIdx.x + threadIdx.x+1;
    int j= blockDim.y * blockIdx.y + threadIdx.y +1;
    //int warpid=(ii-1) / 32;
    int ng=ni*nk*nj;
    int i=ii,k,offset=1,l,ll,ii1=ii,kn; 
    float yl_bench,yl_bench_new,yl_left,yl_right;
        yl_bench=0;
        int warplen=32;
        int warplen1=31;
        int cnt=(ni-2) / warplen1 ;
        int ni_fix=(cnt+1)* (warplen1) +1;
    if(j>=1&&j<=nj){
    for(int rr=0;rr<=1;rr++){
        for(int kk=1;kk<=nk;kk++){ 
            k=kk;kn=kk+1;
            if(rr==1){
                k=nk+1-k;kn=nk+1-kn;
            }
            for(int ll=0;ll<=cnt;ll++){
                i=ii1+warplen1*ll;
                yl_bench=0;
                if(rr==1) i=ni+1-i;
                __syncthreads();
                if( i>=1&&i<=ni) {
                    yl_bench=yl[index_yl(i,k,j,m)];
                }
                float bl1,bl2,bl1n;
                yl_bench_new=0;yl_left=0;
                int l_fix=0;
                if(ll==cnt) l_fix=(ni_fix-ni);
                if(i>=1&&i<=ni){
                    bl2=b[index_b(i,k,j,2+rr)];
                    bl1=b[index_b(i,k,j,1)];
                }
                for(l=1;l<=(warplen-1)-l_fix;l++){
                    //yl_left=__shfl_up(yl_bench, offset,32); // legacy mask-less shuffle: removed on Volta+ (CC 7.0)
                    yl_left=__shfl_up_sync(full_mask, yl_bench, offset,32);
                    if(i>=1&&i<=ni){
                    if(threadIdx.x==l){
                        yl_bench_new=-yl_left*bl2;
                        yl_bench+=yl_bench_new;
                        if(rr==1) yl_bench *=bl1;
                    }}
                } 
                if(i>=1&&i<=ni) yl[index_yl(i,k,j,m)]=yl_bench;
                __syncthreads();
            }
            if(kk<nk){
                yl[index_yl(i,kn,j,m)]=yl[index_yl(i,kn,j,m)]-yl[index_yl(i,k,j,m)]*b[index_b(i,kn,j,4+rr)];
                if(rr==1&&i==ni) yl[index_yl(i,kn,j,m)] *=b[index_b(i,kn,j,1)];
                
            }
        }//k
        __syncthreads();
        if(rr==0){
            if(j==nj&&i==1){
            yl[index_yl(ni,nk,nj,m)]=yl[index_yl(ni,nk,nj,m)]*b[index_b(ni,nk,nj,1)];
            }
        }
        __syncthreads();
    }//r
    }//j
  //  }
    }
*/
