#ifndef _CUDA_CONV2_VALID_R_CU_
#define _CUDA_CONV2_VALID_R_CU_

#include "cuda_matrix.cuh"
#include "cuda_store_method.cuh"
#include "../base/cuda_base_utils.cuh"
#include "../base/cuda_map.cuh"

/* convolution valid data small version , A is the best kernel */
namespace cuda_matrix{
    namespace __conv2_r_valid{
    /* load matrix into shared memory */
        template<int x_size, int y_size, int dim_x, int dim_y, int s_stride,int check_bound>
        __device__ void __load_mat_shared( float *m_shared, const Mat m_global, 
                                           int x_start ,int y_start ){
            for( int y = 0 ;y < y_size / dim_y; y ++ ){
                for( int x = 0 ; x < x_size / dim_x ; x ++ ){
                    int yy =  y * dim_y + threadIdx.y; // consant in warp
                    int xx =  x * dim_x + threadIdx.x; // stride = 1
                    
                    if( !check_bound || 
                        (  ( x_start + xx ) < m_global.x_max 
                           && ( y_start + yy ) < m_global.y_max )){
                        // share  write stride = 1
                        // global read  stride = 1 if start is aligned by 16
                        m_shared[ yy*s_stride +  xx ] = m_global[ y_start+yy ][ x_start+xx ];
                    }
                }
            }  
        }
    };
    /* 
       note: block_x and block_y are virtual block id,
             they may not equal to blockIdx.x and blockIdx.y
     */
    template<int f_x_size, int f_y_size, int dim_x, int dim_y,int check_bound>
    __device__ void __conv2_r_valid_procedure( float &sum,
                                               int block_x  , int block_y,                                    
                                               float s_ft[f_y_size][f_x_size] ,
                                               float s_mm[f_y_size+dim_x][f_x_size+dim_y] ,
                                               int ans_x_max, int ans_y_max,
                                               const Mat mat,
                                               const Mat filter ){
        // load filter into shared memory
        __conv2_r_valid::__load_mat_shared< f_x_size, f_y_size, dim_x , dim_y, f_y_size , 1 >
            ( s_ft[0] , filter, 0, 0 ); 
        // load matrix into shared memory
        __conv2_r_valid::__load_mat_shared< f_x_size+dim_x, f_y_size+dim_y, dim_x, dim_y, f_y_size+dim_y,check_bound>
            ( s_mm[0] , mat, block_x*dim_x, block_y*dim_y );
       
        __syncthreads();

        const int y_idx = dim_y*block_y + threadIdx.y;
        const int x_idx = dim_x*block_x + threadIdx.x;

        if( !check_bound || (x_idx < ans_x_max && y_idx < ans_y_max ) ){
            for( int dy = 0 ; dy < filter.y_max ; dy ++ ){
                float ss = 0.0f;
                for( int dx = 0 ; dx < filter.x_max ; dx ++ ){
                    // s_ft[dy,dx] get by broadcast
                    // s_mm get by stride = 1
                    ss += s_mm[ threadIdx.y + dy ][ threadIdx.x + dx ] * s_ft[ dy ][ dx ];
                }
                // for better accuracy
                sum += ss;
            }
        }
    }

    /* do valid convoluion  ans = mat*(~filter) */
    template<int map_m, int sm, int f_x_size, int f_y_size, int dim_x, int dim_y,int check_bound>
    __global__ void __conv2_r_valid_kernel_A( Mat ans,
                                              const Mat mat,
                                              const Mat filter  ){
        __shared__ float s_ft[f_y_size][f_x_size];
        __shared__ float s_mm[f_y_size+dim_y][f_x_size+dim_x];

        float sum = 0.0f;
        
        // convolution procedure
        __conv2_r_valid_procedure<f_x_size,f_y_size,dim_x,dim_y,check_bound>
            ( sum, 
              blockIdx.x, blockIdx.y,
              s_ft, s_mm, 
              ans.x_max, ans.y_max , 
              mat, filter );
        
        // write back is aligned 
        const int y_idx = dim_y*blockIdx.y + threadIdx.y;
        const int x_idx = dim_x*blockIdx.x + threadIdx.x;
        const bool is_valid = x_idx < ans.x_max && y_idx < ans.y_max;   
 
        // do mapping operation
        cuda_map::__map<map_m>( sum, is_valid );     
        
        // store back
        if( is_valid ){ 
            store_method::__store<sm>( ans[ y_idx ][ x_idx ] , sum );    
        }
    }
    
    
    template<int map_m,int sm>
    inline void conv2_r_valid_A( Mat ans,
                                 const Mat mat,
                                 const Mat filter ){
        // only 16,16 block is allowed to support maxpooling
        if( filter.x_max <= 16 && filter.y_max <= 16 ){
            dim3 dimBlock( 16,16, 1 );
            dim3 dimGrid ( (ans.x_max+15) >> 4 , (ans.y_max+15) >> 4 );
            __conv2_r_valid_kernel_A <map_m,sm,16,16,16,16,1> <<<dimGrid,dimBlock>>> ( ans , mat, filter );
        }
        else
        if( filter.x_max <= 32 && filter.y_max <= 32 ){
            dim3 dimBlock( 16 ,16, 1 );
            dim3 dimGrid ( (ans.x_max+15) >> 4 , (ans.y_max+15) >> 4 );
            __conv2_r_valid_kernel_A <map_m,sm,32,32,16,16,1> <<<dimGrid,dimBlock>>> ( ans , mat, filter );
        } else{
            cuda_utils::error("too large filter size");
        }
    }

    /*----------- Mat3D conv based on previous code ------------*/
    template<int f_x_size, int f_y_size, int dim_x, int dim_y, int check_bound>
    __device__ void __conv2_r_valid_procedure( float &sum,
                                               int   block_x, int block_y, int h,
                                               float s_ft[f_y_size][f_x_size] ,
                                               float s_mm[f_y_size+dim_x][f_x_size+dim_y] ,
                                               int ans_x_max, int ans_y_max,
                                               const Mat3D mat,
                                               const Mat4D filter ){        
        // unzip virtual block index 
        for( int v = 0 ;  v < filter.v_max ; v ++ ){                
            float ss = 0.0f; 
            __conv2_r_valid_procedure<f_x_size,f_y_size,dim_x,dim_y,check_bound>
                ( ss , 
                  block_x   ,  block_y, 
                  s_ft , s_mm, 
                  ans_x_max ,  ans_y_max, 
                  mat[v] , filter[v][ h ] );
            //wait for other threads
            __syncthreads();  
            // use this way to increase floating point accuracy
            sum += ss;
        }
    }
        
    template<int map_m,int sm,int f_x_size, int f_y_size, int dim_x, int dim_y,int check_bound>
    __device__ void __conv2_r_valid_procedure_A( float s_ft[f_y_size][f_x_size] ,
                                                 float s_mm[f_y_size+dim_x][f_x_size+dim_y] ,
                                                 Mat3D ans,
                                                 const Mat3D mat,
                                                 const Mat4D filter ){
        for( int h = 0 ; h < filter.h_max ; h ++ ){
            float sum = 0.0f;
            __conv2_r_valid_procedure<f_x_size,f_y_size,dim_x,dim_y,check_bound>
                    ( sum,
                      blockIdx.x, blockIdx.y, h ,
                      s_ft, s_mm,
                      ans.x_max, ans.y_max,
                      mat, filter );
            
            const int y_idx = dim_y*blockIdx.y + threadIdx.y;
            const int x_idx = dim_x*blockIdx.x + threadIdx.x;
            const bool is_valid =  x_idx < ans.x_max && y_idx < ans.y_max;
     
            cuda_map::__map<map_m>( sum, is_valid );                 

            if( is_valid ){
                Mat aa = ans[ h ]; 
                store_method::__store<sm>( aa[ y_idx ][ x_idx ] , sum );    
            }
        } 
    }

    /* do valid convoluion  ans = mat*(~filter)*/
    template<int map_m,int sm, int f_x_size, int f_y_size, int dim_x, int dim_y,int check_bound>
    __global__ void __conv2_r_valid_kernel_A( Mat3D ans,
                                              const Mat3D mat,
                                              const Mat4D filter  ){
        __shared__ float s_ft[f_y_size][f_x_size];
        __shared__ float s_mm[f_y_size+dim_y][f_x_size+dim_x];
        __conv2_r_valid_procedure_A<map_m,sm,f_x_size,f_y_size,dim_x,dim_y,check_bound>
            ( s_ft, s_mm, ans, mat, filter );
    }
    
    template<int map_m,int sm>
    inline void conv2_r_valid_A( Mat3D ans,
                                 const Mat3D mat,
                                 const Mat4D filter ){
        // only 16,16 block is allowed to support maxpooling
        if( filter.x_max <= 16 && filter.y_max <= 16 ){
            dim3 dimBlock( 16,16, 1 );
            dim3 dimGrid ( (ans.x_max+15) >> 4 , (ans.y_max+15) >> 4 );
            __conv2_r_valid_kernel_A <map_m,sm,16,16,16,16,1> <<<dimGrid,dimBlock>>> ( ans , mat, filter );
        }
        else
        if( filter.x_max <= 32 && filter.y_max <= 32 ){
            dim3 dimBlock( 16 ,16, 1 );
            dim3 dimGrid ( (ans.x_max+15) >> 4 , (ans.y_max+15) >> 4 );
            __conv2_r_valid_kernel_A <map_m,sm,32,32,16,16,1> <<<dimGrid,dimBlock>>> ( ans , mat, filter );
        } else{
            cuda_utils::error("too large filter size");
        }
    }

    /*---------we use virtual 3D grid to compute the result--------*/
    template<int map_m,int sm, int f_x_size, int f_y_size, int dim_x, int dim_y,int check_bound>
    __global__ void __conv2_r_valid_kernel_3DGrid( int grid_width, 
                                                   Mat3D ans,                                                   
                                                   const Mat3D mat,
                                                   const Mat4D filter ){
        // unzip the block index
        const int block_z = blockIdx.y;
        const int block_x = blockIdx.x % grid_width;
        const int block_y = blockIdx.x / grid_width;

        __shared__ float s_ft[f_y_size][f_x_size];
        __shared__ float s_mm[f_y_size+dim_y][f_x_size+dim_x];

        float sum = 0.0f;
        __conv2_r_valid_procedure<f_x_size,f_y_size,dim_x,dim_y,check_bound>
            ( sum,
              block_x, block_y, block_z,
              s_ft, s_mm, ans.x_max, ans.y_max, mat, filter );
        
        // do map first
        cuda_map::__map<map_m>( sum );  
        
        const int y_idx = dim_y*block_y + threadIdx.y;
        const int x_idx = dim_x*block_x + threadIdx.x;
        if( x_idx < ans.x_max && y_idx < ans.y_max ){
            store_method::__store<sm>( ans[ block_z ][ y_idx ][ x_idx ] , sum );    
        }   
    }

    /* 
       use 3D grid to compute group convolution,
       this is faster than method A
    */
    template<int map_m,int sm>
    inline void conv2_r_valid_3DGrid( Mat3D ans,
                                      const Mat3D mat,
                                      const Mat4D filter ){
        // only 16,16 block is allowed to support maxpooling
        if( filter.x_max <= 16 && filter.y_max <= 16 ){
            dim3 dimBlock( 16,16, 1 );
            int grid_width  = (ans.x_max+15) >> 4;           
            int grid_height = (ans.y_max+15) >> 4;
            dim3 dimGrid ( grid_width * grid_height ,  filter.h_max , 1 );
            __conv2_r_valid_kernel_3DGrid <map_m,sm,16,16,16,16,1> <<<dimGrid,dimBlock>>> ( grid_width, ans , mat, filter );
        }
        else
        if( filter.x_max <= 32 && filter.y_max <= 32 ){
            dim3 dimBlock( 16 ,16, 1 );
            int grid_width  = (ans.x_max+15) >> 4;
            int grid_height = (ans.y_max+15) >> 4;            
            dim3 dimGrid ( grid_width * grid_height ,  filter.h_max , 1 );
            
            __conv2_r_valid_kernel_3DGrid <map_m,sm,32,32,16,16,1> <<<dimGrid,dimBlock>>> ( grid_width, ans , mat, filter );
            
        } else{
            cuda_utils::error("too large filter size");
        }
    }

    /* compute with biased data */
    template<int map_m,int sm, int f_x_size, int f_y_size, int dim_x, int dim_y,int check_bound>
    __global__ void __conv2_r_valid_kernel_3DGrid( int grid_width, 
                                                   Mat3D ans,                                                   
                                                   const Mat3D mat,
                                                   const Mat4DBias filter ){
        // unzip the block index
        const int block_z = blockIdx.y;
        const int block_x = blockIdx.x % grid_width;
        const int block_y = blockIdx.x / grid_width;
        
        __shared__ float bias;
        __shared__ float s_ft[f_y_size][f_x_size];
        __shared__ float s_mm[f_y_size+dim_y][f_x_size+dim_x];
        
        // load the bias from data 
        if( threadIdx.x == (dim_x -1) && threadIdx.y == (dim_y-1) ){
            // we use last thread to do the job, since
            // last thread may more likely to be idle
            bias = filter.h_bias[ block_z ];
            // we don't sync threads here, note we may sync it in the latter operaton
        }
        
        float sum = 0.0f;
        __conv2_r_valid_procedure<f_x_size,f_y_size,dim_x,dim_y,check_bound>
            ( sum,
              block_x, block_y, block_z,
              s_ft, s_mm, ans.x_max, ans.y_max, mat, filter.mat );

        sum += bias;
        
        const int  y_idx    = dim_y*block_y + threadIdx.y;
        const int  x_idx    = dim_x*block_x + threadIdx.x;
        const bool is_valid =  x_idx < ans.x_max && y_idx < ans.y_max;

        // map the result
        cuda_map::__map<map_m>( sum , is_valid );  
       
        if( is_valid ){
            store_method::__store<sm>( ans[ block_z ][ y_idx ][ x_idx ] , sum );    
        }   
    }

    template<int map_m,int sm>
    inline void conv2_r_valid_3DGrid( Mat3D ans,
                                      const Mat3D mat,
                                      const Mat4DBias filter ){
        // only 16,16 block is allowed to support maxpooling
        if( filter.mat.x_max <= 16 && filter.mat.y_max <= 16 ){
            dim3 dimBlock( 16,16, 1 );
            int grid_width  = (ans.x_max+15) >> 4;           
            int grid_height = (ans.y_max+15) >> 4;
            dim3 dimGrid ( grid_width * grid_height ,  filter.mat.h_max , 1 );
            __conv2_r_valid_kernel_3DGrid <map_m,sm,16,16,16,16,1> <<<dimGrid,dimBlock>>> ( grid_width, ans , mat, filter );
        }
        else
        if( filter.mat.x_max <= 32 && filter.mat.y_max <= 32 ){
            dim3 dimBlock( 16 ,16, 1 );
            int grid_width  = (ans.x_max+15) >> 4;
            int grid_height = (ans.y_max+15) >> 4;            
            dim3 dimGrid ( grid_width * grid_height ,  filter.mat.h_max , 1 );
            
            __conv2_r_valid_kernel_3DGrid <map_m,sm,32,32,16,16,1> <<<dimGrid,dimBlock>>> ( grid_width, ans , mat, filter );
            
        } else{
            cuda_utils::error("too large filter size");
        }
    }
    
};

#endif
