#ifndef _CUDA_CONV2_R_BIG_FILTER_CU_
#define _CUDA_CONV2_R_BIG_FILTER_CU_

#include "cuda_matrix.cuh"
#include "cuda_store_method.cuh"
#include "../base/cuda_base_utils.cuh"
#include "../base/cuda_map.cuh"
#include "../base/cuda_reduce.cuh"

/* 
   valid convolution with big filter 
   
   target = M * F

   where F is bigger than half of M
*/

/* current method C Grid is the best for grid convolution*/

namespace cuda_matrix{      
    namespace __conv2_r_big_filter{
        /*
           Cooperatively copy an x_size * y_size tile of m_global, anchored at
           (x_start,y_start), into the shared buffer s_mm; elements that fall
           outside the source matrix are padded with 0.0f.

           Template parameters:
             x_size, y_size         : extent of the shared tile
             dim_x , dim_y          : thread-block extent used as the stride
             chk_bound_x/chk_bound_y: non-zero enables the bound test per axis
        */
        template<int x_size, int y_size, int dim_x , int dim_y, int chk_bound_x, int chk_bound_y>
        __device__ void __load_mat_shared_pad( float s_mm[y_size][x_size],
                                               Mat   m_global, 
                                               int   x_start , int y_start ){
            for( int ty = 0 ; ty < y_size ; ty += dim_y ){
                for( int tx = 0 ; tx < x_size ; tx += dim_x ){
                    // x stride of 1 across the warp: no bank conflict
                    const int ly = ty + threadIdx.y;
                    const int lx = tx + threadIdx.x;

                    const bool inside_x = !chk_bound_x || ( lx + x_start < m_global.x_max );
                    const bool inside_y = !chk_bound_y || ( ly + y_start < m_global.y_max );

                    if( inside_x && inside_y ){
                        s_mm[ ly ][ lx ] = m_global[ y_start + ly ][ x_start + lx ];
                    }else{
                        // zero padding outside the source matrix
                        s_mm[ ly ][ lx ] = 0.0f;
                    }
                }
            }
        }
    };
    /*
       Accumulate the contribution of one dim_y x dim_x filter tile, anchored
       at (x_start,y_start), into the shared answer buffer s_ans.

       s_ft  : shared staging buffer for the filter tile
       s_mat : shared staging buffer for the matching (padded) input window
       s_ans : shared accumulator; values combined via store method <sm>

       Callers must place a __syncthreads() between successive invocations,
       because each call rewrites s_ft and s_mat (see the driving loop in
       this file).
    */
    template<int map_m,int sm,int x_size, int y_size, int dim_x , int dim_y>
    __device__ void __conv2_r_big_filter_block_procedure_C( int x_start , int y_start,
                                                            float s_ft [dim_y][dim_x],
                                                            float s_mat[dim_y+y_size][dim_x+x_size],
                                                            float s_ans[y_size][x_size],
                                                            const Mat ans,
                                                            const Mat mat, 
                                                            const Mat filter ){
        // stage the filter tile and the corresponding input window
        __conv2_r_big_filter::__load_mat_shared_pad<dim_x,dim_y,dim_x,dim_y,1,1>
            ( s_ft, filter, x_start, y_start );
        __conv2_r_big_filter::__load_mat_shared_pad<dim_x+x_size,dim_y+y_size,dim_x,dim_y,1,1>
            ( s_mat, mat  , x_start, y_start );

        // make the staged data visible to every thread before use
        __syncthreads();

        for( int oy = 0 ; oy < y_size ; oy += dim_y ){
            for( int ox = 0 ; ox < x_size ; ox += dim_x ){
                const int ay = oy + threadIdx.y;
                const int ax = ox + threadIdx.x;

                // s_ft reads are broadcast; s_mat access has no bank conflict
                float acc = 0.0f;
                for( int fy = 0 ; fy < dim_y ; fy ++ )
                    for( int fx = 0 ; fx < dim_x ; fx ++ )
                        acc += s_ft[ fy ][ fx ] * s_mat[ ay + fy ][ ax + fx ];

                store_method::__store<sm>( s_ans[ ay ][ ax ] , acc );
            }
        }
    }
    
    /*
       Full valid convolution of mat with filter into ans, tiled over the
       filter: each dim_y x dim_x tile of the filter is accumulated into the
       shared answer buffer, then the result is mapped with <map_m> and
       written back to global memory with store method <sm>.

       s_ft / s_mat / s_ans : shared scratch buffers owned by the caller
       (see the kernel wrappers in this file for the expected sizes).
    */
    template<int map_m,int sm,int x_size, int y_size, int dim_x , int dim_y>
    __device__ void __conv2_r_big_filter_procedure_C( float s_ft [dim_y][dim_x],
                                                      float s_mat[dim_y+y_size][dim_x+x_size],
                                                      float s_ans[y_size][x_size],
                                                      Mat ans,
                                                      const Mat mat, 
                                                      const Mat filter ){
        // first tile stores with SAVE, so s_ans needs no prior initialization
        __conv2_r_big_filter_block_procedure_C<map_m,store_method::SAVE,x_size,y_size,dim_x,dim_y>
            ( 0, 0 , s_ft, s_mat, s_ans , ans, mat, filter );   

        // remaining tiles of the first tile-row accumulate with ADD; the
        // barrier keeps the previous tile's reads from racing the next reload
        for( int xx = dim_x ; xx < filter.x_max ; xx += dim_x ){
            __syncthreads();
            __conv2_r_big_filter_block_procedure_C<map_m,store_method::ADD,x_size,y_size,dim_x,dim_y>
                ( xx, 0 , s_ft, s_mat, s_ans , ans, mat, filter );   

        }
        // all remaining tile-rows of the filter
        for( int yy = dim_y ; yy < filter.y_max ; yy += dim_y )
            for( int xx = 0 ; xx < filter.x_max ; xx += dim_x ){
                __syncthreads();
                __conv2_r_big_filter_block_procedure_C<map_m,store_method::ADD,x_size,y_size,dim_x,dim_y>
                    ( xx, yy , s_ft, s_mat, s_ans , ans, mat, filter );   
            }
        
        __syncthreads();
        // write the data back
        for( int yy = 0 ; yy < y_size ; yy += dim_y )
            for( int xx = 0 ; xx < x_size ; xx += dim_x ){
                const int y_idx = yy + threadIdx.y;
                const int x_idx = xx + threadIdx.x;
                
                float sum = s_ans[ y_idx ][ x_idx ];

                // the shared tile may be larger than ans; clamp the write
                const bool  is_valid = x_idx < ans.x_max && y_idx < ans.y_max; 

                cuda_map::__map<map_m>( sum, is_valid ); // do map to sum 

                if(is_valid){
                    // aligned write    
                    store_method::__store<sm>( ans[ y_idx ][ x_idx ], sum );
                }
            }        
    }

    /*
       Kernel wrapper for a single-matrix valid convolution: one block of
       dim_x x dim_y threads computes the whole x_size x y_size answer.
    */
    template<int map_m,int sm,int x_size, int y_size, int dim_x , int dim_y>
    __global__ void __conv2_r_big_filter_kernel_C( Mat ans,
                                                   const Mat mat,
                                                   const Mat filter ){
        __shared__ float s_ft  [ dim_y ][ dim_x ];
        __shared__ float s_mat [ dim_y+y_size ][ dim_x+x_size ];
        __shared__ float s_ans [ y_size ][ x_size ];
        // BUGFIX: the procedure declares 6 template parameters
        // (map_m,sm,x_size,y_size,dim_x,dim_y); <map_m> was passed twice,
        // which does not match the parameter list (compare the correct call
        // in the C_Grid kernel)
        __conv2_r_big_filter_procedure_C<map_m,sm,x_size,y_size,dim_x,dim_y>
            ( s_ft, s_mat, s_ans, ans, mat, filter );
    }    
    
    /*
       Host launcher for the single-matrix C method: one 16x16 thread block
       computes the whole answer; the shared-tile size (16 or 32) is chosen
       from the answer extent. Errors out for answers larger than 32x32.
    */
    template<int map_m,int sm>
    inline void conv2_r_big_filter_C( Mat ans,
                                      const Mat mat,
                                      const Mat filter ){
        // BUGFIX: the kernel template declares 6 parameters
        // (map_m,sm,x_size,y_size,dim_x,dim_y); <map_m> was duplicated in
        // both instantiations (compare conv2_r_big_filter_C_Grid)
        if( ans.x_max <= 16 && ans.y_max <= 16 ){
            dim3 dimBlock( 16,16, 1 );
            dim3 dimGrid ( 1, 1, 1  );
            __conv2_r_big_filter_kernel_C <map_m,sm,16,16,16,16> <<<dimGrid,dimBlock>>> ( ans , mat, filter );
        }
        else
        if( ans.x_max <= 32 && ans.y_max <= 32 ){
            dim3 dimBlock( 16 ,16, 1 );
            dim3 dimGrid ( 1, 1, 1);
            __conv2_r_big_filter_kernel_C  <map_m,sm,32,32,16,16> <<<dimGrid,dimBlock>>> ( ans , mat, filter );
        } else{
            cuda_utils::error("too large answer size");
        }
    }

    /* Grid computing for group convolution based on C:
       block (x,y) convolves mat[y] with filter[x] into ans[y][x] */
    template<int map_m,int sm,int x_size, int y_size, int dim_x , int dim_y>
    __global__ void __conv2_r_big_filter_kernel_C_Grid( Mat4D ans,
                                                        const Mat3D mat, 
                                                        const Mat3D filter ){
        __shared__ float s_ft  [ dim_y ][ dim_x ];
        __shared__ float s_mat [ dim_y+y_size ][ dim_x+x_size ];
        __shared__ float s_ans [ y_size ][ x_size ];

        const int gx = blockIdx.x;
        const int gy = blockIdx.y;

        // each block handles one (matrix, filter) pair of the group
        __conv2_r_big_filter_procedure_C<map_m,sm,x_size,y_size,dim_x,dim_y>
            ( s_ft, s_mat, s_ans, ans[ gy ][ gx ], mat[ gy ], filter[ gx ] );
    }

    /*
       Host launcher for the grid C method: one 16x16 block per (v,h) output
       plane; the shared-tile size (16 or 32) is chosen from the answer
       extent. Errors out for answers larger than 32x32.
    */
    template<int map_m,int sm>
    inline void conv2_r_big_filter_C_Grid( Mat4D ans,
                                           const Mat3D mat,
                                           const Mat3D filter ){
        const dim3 dimBlock( 16, 16, 1 );
        const dim3 dimGrid ( ans.h_max, ans.v_max, 1 );

        if( ans.x_max <= 16 && ans.y_max <= 16 ){
            __conv2_r_big_filter_kernel_C_Grid <map_m,sm,16,16,16,16> <<<dimGrid,dimBlock>>> ( ans , mat, filter );
        }
        else if( ans.x_max <= 32 && ans.y_max <= 32 ){
            __conv2_r_big_filter_kernel_C_Grid <map_m,sm,32,32,16,16> <<<dimGrid,dimBlock>>> ( ans , mat, filter );
        }
        else{
            cuda_utils::error("too large answer size");
        }
    }

    /*
       Sum all elements of mat: every thread accumulates a strided partial
       sum in the shared buffer s_ft, the buffer is reduced to s_ft[0][0],
       and thread (0,0) maps the total with <map_m> and stores it into ans
       with store method <sm>.

       s_ft : shared scratch of (1<<y_bits) x (1<<x_bits) floats.
    */
    template<int map_m,int sm, int x_bits , int y_bits>
    __device__ void __matrix_reduce_sum_procedure( float &ans,
                                                   float s_ft [1<<y_bits][1<<x_bits],
                                                   const Mat mat  ){
        const int dim_x = 1 << x_bits;
        const int dim_y = 1 << y_bits;

        // each thread owns one slot of the partial-sum buffer
        s_ft[ threadIdx.y ][ threadIdx.x ] = 0;

        for( int yy = 0 ; yy < mat.y_max ; yy += dim_y ){
            for( int xx = 0 ; xx < mat.x_max ; xx += dim_x ){
                __syncthreads();
                const int y_idx = threadIdx.y + yy;
                const int x_idx = threadIdx.x + xx;

                if( y_idx < mat.y_max && x_idx < mat.x_max ){
                    s_ft[ threadIdx.y ][ threadIdx.x ] += mat[ y_idx ][ x_idx ];
                }
            }
        }
        __syncthreads();

        // collapse the whole shared tile into s_ft[0][0]
        cuda_reduce::reduce_2D<cuda_reduce::SUM,x_bits,y_bits>( s_ft );

        __syncthreads();

        if( threadIdx.x == 0 && threadIdx.y == 0 ){
            float total = s_ft[0][0];
            // map and store the single result
            cuda_map::__map<map_m>( total, true );
            store_method::__store<sm>( ans, total );
        }
    } 

    /* 
       calculate convolution as well as sum as bias,
       this method is specially designed for CRBM

       block assignment along blockIdx.x (h = ans.mat.h_max, v = ans.mat.v_max):
         [0, h)       -> sum of filter[i] into ans.h_bias[i]
         [h, h+v)     -> sum of mat[i]    into ans.v_bias[i]
         [h+v, h+v+h*v) -> one convolution output plane each
    */
    template<int map_m,int sm,int x_size, int y_size, int x_bits , int y_bits>
    __global__ void __conv2_r_big_filter_kernel_C_Grid( Mat4DBias ans,
                                                        const Mat3D mat, 
                                                        const Mat3D filter ){
        const int dim_x = 1 << x_bits;
        const int dim_y = 1 << y_bits; 
        __shared__ float s_ft  [ dim_y ][ dim_x ];
        __shared__ float s_mat [ dim_y+y_size ][ dim_x+x_size ];
        __shared__ float s_ans [ y_size ][ x_size ];
        
        if( blockIdx.x >= ans.mat.v_max + ans.mat.h_max ){
            // linear index of the (v,h) output plane handled by this block
            const int idx = blockIdx.x - ans.mat.v_max- ans.mat.h_max;
            const int block_x = idx % ans.mat.h_max;
            const int block_y = idx / ans.mat.h_max;
            // calculate matrix conv2
            __conv2_r_big_filter_procedure_C<map_m,sm,x_size,y_size,dim_x,dim_y>
                ( s_ft, s_mat, s_ans, ans.mat[ block_y ][ block_x ], mat[ block_y ], filter[ block_x ] );
        }else{
            if( blockIdx.x < ans.mat.h_max ){
                // calculate sum of matrix h_bias (reuses s_ft as scratch)
                __matrix_reduce_sum_procedure<map_m,sm,x_bits,y_bits>( ans.h_bias[blockIdx.x], s_ft, filter[blockIdx.x] );
            }else{
                // calculate v_bias
                const int idx = blockIdx.x - ans.mat.h_max;
                __matrix_reduce_sum_procedure<map_m,sm,x_bits,y_bits>( ans.v_bias[ idx ] , s_ft, mat[ idx ] );
            }            
        }
    }

    /* 
       calculate convolution as well as sum as bias,
       this method is specially designed for CRBM
    */
    template<int map_m,int sm>
    inline void conv2_r_big_filter_C_Grid( Mat4DBias ans,
                                           const Mat3D mat,
                                           const Mat3D filter ){
        const int n_conv = ans.mat.h_max * ans.mat.v_max;
        const int n_bias = ans.mat.h_max + ans.mat.v_max;

        // one block per convolution output plane plus one per bias sum
        dim3 dimGrid ( n_conv + n_bias , 1  );
        dim3 dimBlock( 16, 16, 1 );

        if( ans.mat.x_max <= 16 && ans.mat.y_max <= 16 ){
            __conv2_r_big_filter_kernel_C_Grid <map_m,sm,16,16,4,4> <<<dimGrid,dimBlock>>> ( ans , mat, filter );
        }
        else if( ans.mat.x_max <= 32 && ans.mat.y_max <= 32 ){
            __conv2_r_big_filter_kernel_C_Grid <map_m,sm,32,32,4,4> <<<dimGrid,dimBlock>>> ( ans , mat, filter );
        }
        else{
            cuda_utils::error("too large answer size");
        }
    }

    /* 1 D kernel,
       each block takes charge of one element of ans: block (block_x,block_y)
       loads the big matrix line by line into shared memory, multiplies one
       filter row per iteration into per-thread partials, then reduces the
       partials to a single sum.

       buf_line / buf_sum : shared buffers of 1<<buf_bits floats; the host
       launcher in this file picks buf_bits so that blockDim.x = mat.x_max
       satisfies (1<<buf_bits)/2 < mat.x_max <= (1<<buf_bits) */
    template<int map_m,int sm, int buf_bits>
    __device__ void __conv2_r_big_filter_procedure_B( int block_x,
                                                      int block_y,
                                                      float buf_line[1<<buf_bits],
                                                      float buf_sum [1<<buf_bits],                                                    
                                                      Mat ans,
                                                      const Mat mat,
                                                      const Mat filter ){
        
        const int buf_size = 1 << buf_bits;

        buf_sum [ threadIdx.x ] = 0.0f;
        // required to fill all the buffers to 0
        // (the condition is equivalent to threadIdx.x < buf_size/2; together
        //  with the store above this zeroes every slot because
        //  blockDim.x > buf_size/2 — see the host dispatch)
        if( threadIdx.x + (buf_size/2) < buf_size ){
            buf_sum [ threadIdx.x + (buf_size/2) ] = 0.0f;
        }

        for( int y = 0 ; y < filter.y_max ; y ++ ){
            // load line into shared memory,
            // aligned load
            buf_line[ threadIdx.x ] = mat[ y + block_y ][ threadIdx.x ];
           
            __syncthreads();
 
            if( threadIdx.x < filter.x_max ){
                // no bank conflict, aligned access
                buf_sum[ threadIdx.x ] += buf_line[ block_x + threadIdx.x ] * filter[y][threadIdx.x];
            }
            // barrier before the next iteration overwrites buf_line
            __syncthreads();
        }
        
        // collapse buf_sum into buf_sum[0]
        cuda_reduce::reduce_1D< cuda_reduce::SUM , buf_bits >( buf_sum );

        __syncthreads();
       
        // write back a single result
        if( threadIdx.x == 0 ){
            float sum = buf_sum[0];
            // perform map
            // NOTE(review): other call sites pass a validity flag,
            // e.g. __map<map_m>( sum, true ) — confirm this one-argument
            // call resolves to the intended overload of cuda_map::__map
            cuda_map::__map<map_m>( sum );  
            
            store_method::__store<sm>( ans[ block_y ][ block_x ], sum );
        }
    }    

    /* kernel entry for method B: each block produces ans[blockIdx.y][blockIdx.x] */
    template<int map_m,int sm, int buf_bits>
    __global__ void __conv2_r_big_filter_kernel_B( Mat ans,
                                                   const Mat mat,
                                                   const Mat filter ){
        __shared__ float buf_line[1<<buf_bits];
        __shared__ float buf_sum [1<<buf_bits];

        __conv2_r_big_filter_procedure_B<map_m,sm,buf_bits>
            ( blockIdx.x, blockIdx.y, buf_line, buf_sum, ans, mat, filter );
    }
    
    /*
       Host launcher for method B: grid = one block per output element,
       block = mat.x_max threads (one per matrix column). buf_bits is chosen
       as the smallest power-of-two capacity that holds one matrix row.
       Supported range: 2 < mat.x_max <= 512.
    */
    template<int map_m,int sm>
    inline void conv2_r_big_filter_B( Mat ans, 
                                      const Mat mat,
                                      const Mat filter ){
        dim3 dimGrid ( ans.x_max, ans.y_max, 1 );
        dim3 dimBlock( mat.x_max, 1 , 1 );

        // BUGFIX: the largest shared buffer is 1<<9 floats while
        // blockDim.x = mat.x_max; without this guard a matrix wider than 512
        // would write past the end of buf_line in the kernel
        if( mat.x_max > 512 ){
            cuda_utils::error("too large matrix size");
        }
        else
        if( mat.x_max > 256 ){
            __conv2_r_big_filter_kernel_B<map_m,sm,9> <<<dimGrid,dimBlock>>>( ans, mat, filter );
        }
        else
        if( mat.x_max > 128 ){
            __conv2_r_big_filter_kernel_B<map_m,sm,8> <<<dimGrid,dimBlock>>>( ans, mat, filter );
        }
        else 
        if( mat.x_max > 64 ){
            __conv2_r_big_filter_kernel_B<map_m,sm,7> <<<dimGrid,dimBlock>>>( ans, mat, filter );
        }
        else
        if( mat.x_max > 32 ){
            __conv2_r_big_filter_kernel_B<map_m,sm,6> <<<dimGrid,dimBlock>>>( ans, mat, filter );
        }
        else
        if( mat.x_max > 16 ){
            __conv2_r_big_filter_kernel_B<map_m,sm,5> <<<dimGrid,dimBlock>>>( ans, mat, filter );
        } 
        else
        if( mat.x_max > 8 ){
            __conv2_r_big_filter_kernel_B<map_m,sm,4> <<<dimGrid,dimBlock>>>( ans, mat, filter );
        } 
        else
        if( mat.x_max > 4 ){
            __conv2_r_big_filter_kernel_B<map_m,sm,3> <<<dimGrid,dimBlock>>>( ans, mat, filter );
        } 
        else
        if( mat.x_max > 2 ){
            __conv2_r_big_filter_kernel_B<map_m,sm,2> <<<dimGrid,dimBlock>>>( ans, mat, filter );
        } else{
            cuda_utils::error("too small matrix size");
        }
    } 
};

#endif
