#ifndef _CUDA_CONV2_FULL_CU_
#define _CUDA_CONV2_FULL_CU_

#include <ctime>
#include "cuda_matrix.cuh"
#include "cuda_store_method.cuh"
#include "../base/cuda_base_utils.cuh"
#include "../base/cuda_map.cuh"

/* convolution valid data small version , A is the best kernel */
namespace cuda_matrix{
    namespace __conv2_full{
        // Cooperatively stage an (x_size x y_size) tile of m_global into shared
        // memory, tiled by the (dim_xy x dim_xy) thread block (dim_x = dim_y).
        // When check_bound is set, elements whose global coordinates fall outside
        // m_global are written as 0.0f (zero padding for the convolution border).
        template<int x_size, int y_size, int dim_xy, int s_stride,int check_bound>
        __device__ void __load_mat_shared_pad( float *m_shared, const Mat m_global, 
                                                int x_start ,int y_start ){
            for( int ty = 0; ty < y_size / dim_xy; ++ ty ){
                const int row   = ty * dim_xy + threadIdx.y; // constant within a warp
                const int src_y = y_start + row;
                for( int tx = 0; tx < x_size / dim_xy; ++ tx ){
                    const int col   = tx * dim_xy + threadIdx.x; // stride = 1 across lanes
                    const int src_x = x_start + col;
                    // shared write has stride 1;
                    // global read has stride 1 when the start is aligned by 16
                    const bool inside = !check_bound ||
                        (    src_x >= 0
                          && src_y >= 0
                          && src_x < m_global.x_max
                          && src_y < m_global.y_max );
                    m_shared[ row*s_stride + col ] = inside ? m_global[ src_y ][ src_x ] : 0.0f;
                }
            }  
        }

        // Load a filter into shared memory rotated by 180 degrees (both axes
        // mirrored), turning the later correlation loop into a convolution.
        // Requires dim_x = dim_y. When check_bound is set, out-of-range filter
        // cells are simply skipped (the compute loop never reads them).
        template<int x_size, int y_size, int dim_xy, int s_stride,int check_bound>
        __device__ void __load_mat_shared_reverse( float *m_shared, const Mat g_filter ){
            for( int ty = 0; ty < y_size / dim_xy; ++ ty ){
                for( int tx = 0; tx < x_size / dim_xy; ++ tx ){
                    const int row = ty * dim_xy + threadIdx.y; // constant within a warp
                    const int col = tx * dim_xy + threadIdx.x; // stride = 1 across lanes

                    const bool inside = !check_bound ||
                        (    col < g_filter.x_max
                          && row < g_filter.y_max );
                    if( inside ){
                        // destination is mirrored in both dimensions; stride = 1, aligned
                        const int dst_y = g_filter.y_max - row - 1;
                        const int dst_x = g_filter.x_max - col - 1;
                        m_shared[ dst_y*s_stride + dst_x ] = g_filter[ row ][ col ];
                    }
                }
            }
        }  
    };
    
    /* constraint: dim_x = dim_y
       Accumulate into `sum` one output element of the full 2D convolution of
       `mat` with the (reversed) `filter`, for the output tile (block_x,block_y).
       s_ft / s_mm are block-shared scratch buffers; this procedure ends with the
       compute loop, so callers that reuse the buffers must sync before reloading.

       FIX(consistency): s_ft is declared [f_y_size][f_x_size] to match every
       declaration and caller (rows = f_y_size, columns = f_x_size); the shared
       row strides passed to the loaders are the column counts f_x_size and
       f_x_size+dim_xy. The previous swapped values only worked because all
       instantiations are square.  */
    template<int f_x_size, int f_y_size, int dim_xy>
    __device__ void __conv2_full_procedure( float &sum,
                                            int block_x, int block_y,
                                            float s_ft[f_y_size][f_x_size] ,
                                            float s_mm[f_y_size+dim_xy][f_x_size+dim_xy] ,
                                            int ans_x_max, int ans_y_max,
                                            const Mat mat,
                                            const Mat filter ){
        // load the 180-degree-rotated filter into shared memory
        // (row stride of s_ft is its column count f_x_size)
        __conv2_full::__load_mat_shared_reverse< f_x_size, f_y_size, dim_xy, f_x_size , 1  >
            ( s_ft[0] , filter );
 
        // load the input tile into shared memory; the tile begins one full
        // filter extent before the output block, which realizes "full" padding
        const int y_start = block_y*dim_xy - filter.y_max + 1;
        const int x_start = block_x*dim_xy - filter.x_max + 1;
        // (row stride of s_mm is its column count f_x_size+dim_xy)
        __conv2_full::__load_mat_shared_pad< f_x_size+dim_xy, f_y_size+dim_xy, dim_xy, f_x_size+dim_xy , 1>
                ( s_mm[0] , mat, x_start , y_start );
        
        // all shared-memory writes must complete before any thread reads them
        __syncthreads();

        const int y_idx = dim_xy*block_y + threadIdx.y;
        const int x_idx = dim_xy*block_x + threadIdx.x;

        // guard against tail tiles that extend past the answer matrix
        if( x_idx < ans_x_max && y_idx < ans_y_max ){
            for( int dy = 0 ; dy < filter.y_max ; dy ++ ){
                for( int dx = 0 ; dx < filter.x_max ; dx ++ ){
                    // s_ft[dy][dx] is read by broadcast (same address per warp)
                    // s_mm is read with stride = 1 across lanes
                    sum += s_mm[ threadIdx.y+dy ][ threadIdx.x + dx ] * s_ft[ dy ][ dx ];
                }
            }
        }
    }
                                               
    /* full convolution kernel: ans = mat * (~filter).
       Launched with a (dim_xy x dim_xy) block; each block computes one output
       tile, applies the map function, and stores through store_method. */
    template<int map_m,int sm, int f_x_size, int f_y_size, int dim_xy>
    __global__ void __conv2_full_kernel_A( Mat ans,
                                           const Mat mat,
                                           const Mat filter  ){
        // shared scratch: reversed filter and zero-padded input tile
        __shared__ float s_ft[f_y_size][f_x_size];
        __shared__ float s_mm[f_y_size+dim_xy][f_x_size+dim_xy];
        
        // accumulate this thread's output element
        float sum = 0.0f;
        __conv2_full_procedure<f_x_size,f_y_size,dim_xy>
            ( sum, 
              blockIdx.x, blockIdx.y,
              s_ft, s_mm, ans.x_max, ans.y_max , mat, filter ); 
        
        // apply the element-wise map function
        cuda_map::__map<map_m>( sum );  
        
        const int out_y = dim_xy*blockIdx.y + threadIdx.y;
        const int out_x = dim_xy*blockIdx.x + threadIdx.x;
        // write back (aligned, stride 1), guarding the tail tiles
        if( out_x < ans.x_max && out_y < ans.y_max ) { 
            store_method::__store<sm>( ans[ out_y ][ out_x ] , sum );    
        }
    }

    // Host-side dispatcher: selects a template instantiation large enough to
    // hold the filter (up to 32x32) and launches __conv2_full_kernel_A with
    // 16x16 thread blocks covering ans.
    template<int map_m,int sm>
    inline void conv2_full_A( Mat ans,
                              const Mat mat,
                              const Mat filter ){
        const dim3 dimBlock( 16, 16, 1 );
        const dim3 dimGrid ( (ans.x_max+15) >> 4 , (ans.y_max+15) >> 4 );

        if( filter.x_max <= 16 && filter.y_max <= 16 ){
            __conv2_full_kernel_A <map_m,sm,16,16,16> <<<dimGrid,dimBlock>>> ( ans , mat, filter );
        } else if( filter.x_max <= 32 && filter.y_max <= 32 ){
            __conv2_full_kernel_A <map_m,sm,32,32,16> <<<dimGrid,dimBlock>>> ( ans , mat, filter );
        } else {
            cuda_utils::error("too large filter size");
        }
    }
    
    /*----------------- group convolution based on the single implementation ----------------------*/
    /* constraint: dim_x = dim_y
       Accumulate into `sum` the group convolution for output channel v: the sum
       over h of the 2D full convolution of input plane mat[h] with filter[v][h].
       s_ft / s_mm are block-shared scratch buffers reused for every h, hence the
       barrier after each inner call. */    
    template<int f_x_size, int f_y_size, int dim_xy>
    __device__ void __conv2_full_procedure( float &sum,
                                            int block_x, int block_y, int v,
                                            float s_ft[f_y_size][f_x_size] ,
                                            float s_mm[f_y_size+dim_xy][f_x_size+dim_xy] ,
                                            Mat3D ans,
                                            const Mat3D mat,
                                            const Mat4D filter ){
        
        // Accumulating directly into `sum` (rather than summing a per-h partial
        // afterwards, see the commented-out lines) was found more accurate by
        // experiment; the author notes the reason is not understood.
        for( int h = 0 ; h < filter.h_max ; h ++ ){
            //  float ss = 0.0f;
            __conv2_full_procedure<f_x_size,f_y_size,dim_xy>
                ( sum , 
                  block_x, block_y,
                  s_ft , s_mm , ans.x_max, ans.y_max, mat[h] , filter[v][h] ); 
            // ensure every thread has finished reading the shared buffers
            // before the next iteration overwrites them
            __syncthreads();
            //sum += ss;
        }
    }

    // For each output channel v: accumulate the group convolution for this
    // thread's output element, apply the map function, and store the result.
    // The shared buffers are reused across v; the inner procedure ends every
    // channel with a barrier, so reloading is safe.
    template<int map_m,int sm,int f_x_size, int f_y_size, int dim_xy>
    __device__ void __conv2_full_procedure_A( float s_ft[f_y_size][f_x_size] ,
                                              float s_mm[f_y_size+dim_xy][f_x_size+dim_xy] ,
                                              Mat3D ans,
                                              const Mat3D mat,
                                              const Mat4D filter ){
        // output coordinates are loop-invariant: hoist them out of the v loop
        const int  out_y    = dim_xy*blockIdx.y + threadIdx.y;
        const int  out_x    = dim_xy*blockIdx.x + threadIdx.x;
        const bool in_range = out_x < ans.x_max && out_y < ans.y_max;

        for( int v = 0 ; v < filter.v_max ; v ++ ){
            float acc = 0.0f;
            __conv2_full_procedure<f_x_size,f_y_size,dim_xy>
                ( acc, 
                  blockIdx.x, blockIdx.y, v,    
                  s_ft , s_mm , ans, mat, filter );        
            
            // apply the element-wise map function
            cuda_map::__map<map_m>( acc );  

            // write back, guarding the tail tiles
            if( in_range ) { 
                Mat plane = ans[ v ];
                store_method::__store<sm>( plane[ out_y ][ out_x ] , acc );    
            }
        }
    }

    // Kernel wrapper for the group convolution (method A): one 2D grid covers
    // the spatial output tiles; each block iterates over all output channels v
    // inside __conv2_full_procedure_A.
    template<int map_m,int sm, int f_x_size, int f_y_size, int dim_xy>
    __global__ void __conv2_full_kernel_A( Mat3D ans,
                                           const Mat3D mat,
                                           const Mat4D filter  ){
        // shared scratch: reversed filter and zero-padded input tile
        __shared__ float s_ft[f_y_size][f_x_size];
        __shared__ float s_mm[f_y_size+dim_xy][f_x_size+dim_xy];
        
        __conv2_full_procedure_A<map_m,sm,f_x_size,f_y_size,dim_xy>
            ( s_ft , s_mm , ans, mat, filter );        
    }
    
    // Host-side dispatcher for the group convolution (method A): selects a
    // template instantiation large enough for the filter (up to 32x32) and
    // launches with 16x16 thread blocks covering ans.
    template<int map_m,int sm>
    inline void conv2_full_A( Mat3D ans,
                              const Mat3D mat,
                              const Mat4D filter ){
        const dim3 dimBlock( 16, 16, 1 );
        const dim3 dimGrid ( (ans.x_max+15) >> 4 , (ans.y_max+15) >> 4 );

        if( filter.x_max <= 16 && filter.y_max <= 16 ){
            __conv2_full_kernel_A <map_m,sm,16,16,16> <<<dimGrid,dimBlock>>> ( ans , mat, filter );
        } else if( filter.x_max <= 32 && filter.y_max <= 32 ){
            __conv2_full_kernel_A <map_m,sm,32,32,16> <<<dimGrid,dimBlock>>> ( ans , mat, filter );
        } else {
            cuda_utils::error("too large filter size");
        }
    }

    /* Group convolution using a flattened 3D grid:
       blockIdx.x packs the (x,y) output tiles row-major with width grid_width,
       blockIdx.y enumerates the output channel v. */
    template<int map_m,int sm, int f_x_size, int f_y_size, int dim_xy>
    __global__ void __conv2_full_kernel_3DGrid( int grid_width,
                                                    Mat3D ans,
                                                    const Mat3D mat,
                                                    const Mat4D filter  ){
        // unpack the flattened grid coordinates
        const int block_z = blockIdx.y;
        const int block_y = blockIdx.x / grid_width;
        const int block_x = blockIdx.x % grid_width;
        
        // shared scratch: reversed filter and zero-padded input tile
        __shared__ float s_ft[f_y_size][f_x_size];
        __shared__ float s_mm[f_y_size+dim_xy][f_x_size+dim_xy];
        
        // accumulate this thread's output element for channel block_z
        float sum = 0.0f;
        __conv2_full_procedure<f_x_size,f_y_size,dim_xy>
            ( sum,
              block_x, block_y, block_z,
              s_ft , s_mm , ans, mat, filter );        
        
        // apply the element-wise map function
        cuda_map::__map<map_m>( sum );  

        // write back, guarding the tail tiles
        const int out_y = dim_xy*block_y + threadIdx.y;
        const int out_x = dim_xy*block_x + threadIdx.x;
        if( out_x < ans.x_max && out_y < ans.y_max ){ 
            store_method::__store<sm>( ans[ block_z ][ out_y ][ out_x ] , sum );    
        }
    }
    /* Launch the 3D-grid group convolution; this is faster than method A. */
    template<int map_m,int sm>
    inline void conv2_full_3DGrid( Mat3D ans,
                                   const Mat3D mat,
                                   const Mat4D filter ){
        // pack the (x,y,v) grid into 2D: x carries the flattened tile index,
        // y carries the output channel
        const int  grid_width  = (ans.x_max+15) >> 4;
        const int  grid_height = (ans.y_max+15) >> 4;
        const dim3 dimBlock( 16, 16, 1 );
        const dim3 dimGrid ( grid_width*grid_height , filter.v_max );

        if( filter.x_max <= 16 && filter.y_max <= 16 ){
            __conv2_full_kernel_3DGrid <map_m,sm,16,16,16> <<<dimGrid,dimBlock>>> ( grid_width, ans , mat, filter );
        } else if( filter.x_max <= 32 && filter.y_max <= 32 ){
            __conv2_full_kernel_3DGrid <map_m,sm,32,32,16> <<<dimGrid,dimBlock>>> ( grid_width, ans , mat, filter );
        } else {
            cuda_utils::error("too large filter size");
        }
    }

    /* Convolution with per-channel bias, same flattened 3D grid layout as
       above: blockIdx.x packs the (x,y) tiles row-major with width grid_width,
       blockIdx.y enumerates the output channel. */
    template<int map_m,int sm, int f_x_size, int f_y_size, int dim_xy>
    __global__ void __conv2_full_kernel_3DGrid( int grid_width,
                                                Mat3D ans,
                                                const Mat3D mat,
                                                const Mat4DBias filter  ){
        // unpack the flattened grid coordinates
        int block_z = blockIdx.y;
        int block_y = blockIdx.x / grid_width;
        int block_x = blockIdx.x % grid_width;
        
        __shared__ float bias;
        __shared__ float s_ft[f_y_size][f_x_size];
        __shared__ float s_mm[f_y_size+dim_xy][f_x_size+dim_xy];
        //load the bias for this output channel
        if( threadIdx.x == (dim_xy-1) && threadIdx.y == (dim_xy-1) ){
            // we use last thread because last thread seems more likely to be idle
            // no need to sync because sync will occur in latter procedure
            // (the called procedure issues __syncthreads() before any thread
            //  reads `bias` below; NOTE(review): this assumes the procedure
            //  executes at least one barrier, i.e. filter.mat.h_max >= 1 — confirm)
            bias = filter.v_bias[ block_z ];
        }
        
        // accumulate this thread's output element for channel block_z
        float sum = 0.0f;
        __conv2_full_procedure<f_x_size,f_y_size,dim_xy>
            ( sum,
              block_x, block_y, block_z,
              s_ft , s_mm , ans, mat, filter.mat );        

        // add the per-channel bias (visible to all threads after the barriers above)
        sum += bias;

        const int  y_idx = dim_xy*block_y + threadIdx.y;
        const int  x_idx = dim_xy*block_x + threadIdx.x;
        const bool is_valid =  x_idx < ans.x_max && y_idx < ans.y_max;
        
        // map the result; this overload also receives the validity flag
        cuda_map::__map<map_m>( sum, is_valid );  
        
        // write back only for threads inside the answer matrix
        if( is_valid ){ 
            store_method::__store<sm>( ans[ block_z ][ y_idx ][ x_idx ] , sum );    
        }
    }

    // Launch the biased 3D-grid group convolution: selects a template
    // instantiation large enough for the filter (up to 32x32).
    template<int map_m,int sm>
    inline void conv2_full_3DGrid( Mat3D ans,
                                   const Mat3D mat,
                                   const Mat4DBias filter ){
        // pack the (x,y,v) grid into 2D: x carries the flattened tile index,
        // y carries the output channel
        const int  grid_width  = (ans.x_max+15) >> 4;
        const int  grid_height = (ans.y_max+15) >> 4;
        const dim3 dimBlock( 16, 16, 1 );
        const dim3 dimGrid ( grid_width*grid_height , filter.mat.v_max );

        if( filter.mat.x_max <= 16 && filter.mat.y_max <= 16 ){
            __conv2_full_kernel_3DGrid <map_m,sm,16,16,16> <<<dimGrid,dimBlock>>> ( grid_width, ans , mat, filter );
        } else if( filter.mat.x_max <= 32 && filter.mat.y_max <= 32 ){
            __conv2_full_kernel_3DGrid <map_m,sm,32,32,16> <<<dimGrid,dimBlock>>> ( grid_width, ans , mat, filter );
        } else {
            cuda_utils::error("too large filter size");
        }
    }
};

#endif
