#ifndef _CUDA_MAP_RBM_CU_
#define _CUDA_MAP_RBM_CU_

#include "cuda_base_utils.cuh"
#include "cuda_map_rbm.cuh"
#include "cuda_reduce.cuh"


/* scale factor used by RBM_SCALE */
__constant__ float __rbm_constant_scale;
/* sigma square in RBM_SCALE_SIGMA_SQR */
__constant__ float __rbm_constant_sigma_sqr;
/* size of maxpooling layer */
__constant__ int   __rbm_constant_pool_bits;

namespace cuda_map{

    /* 
       set the constant scale used by the RBM_SCALE map
       note: must be called before launching any kernel that uses RBM_SCALE
    */
    inline void set_scale( float sc ){
        // pass the symbol itself, not its name as a string: string-based
        // symbol lookup was deprecated in CUDA 4.1 and removed in CUDA 5.0
        cudaMemcpyToSymbol( __rbm_constant_scale, &sc, sizeof(float) );
    }

    /* 
       set the sigma^2 of the gaussian unit, used by the RBM_SCALE_SIGMA_SQR map
       note: must be called before launching any kernel that uses RBM_SCALE_SIGMA_SQR
    */
    inline void set_sigma_sqr( float sigma_sqr ){
        // pass the symbol itself, not its name as a string: string-based
        // symbol lookup was deprecated in CUDA 4.1 and removed in CUDA 5.0
        cudaMemcpyToSymbol( __rbm_constant_sigma_sqr, &sigma_sqr, sizeof(float) );
    }

    /* 
       set pooling window size to (2^pool_bits); used by the RBM_MAXPOOL_1616 map
       valid range is 0..4 — other values make the kernel raise a device error
    */
    inline void set_maxpooling_size( int pool_bits ){
        // pass the symbol itself, not its name as a string: string-based
        // symbol lookup was deprecated in CUDA 4.1 and removed in CUDA 5.0
        cudaMemcpyToSymbol( __rbm_constant_pool_bits, &pool_bits, sizeof(int));
    }

    /* RBM_SCALE: multiply the value by the constant scale set via set_scale */
    template<>
    inline __device__  void  __map<RBM_SCALE>( float &a , bool is_valid ){
        a = a * __rbm_constant_scale;
    }    

    /* RBM_SCALE_SIGMA_SQR: multiply the value by sigma^2 set via set_sigma_sqr */
    template<>
    inline __device__  void  __map<RBM_SCALE_SIGMA_SQR>( float &a, bool is_valid ){
        a = a * __rbm_constant_sigma_sqr;
    }    

    /* RBM_SIGMOID: logistic sigmoid 1/(1+e^-a), using the fast exp intrinsic */
    template<>
    inline __device__  void  __map<RBM_SIGMOID>( float &a, bool is_valid ){
        const float e = __expf( -a );
        a = 1.0f / ( 1.0f + e );
    }    
    namespace __maxpooling{
        /* 
           maxpooling normalization , we must ensure the thread block is < 16, 16 > 
           very important 

           Computes, per pooling group of (2^pool_bits x 2^pool_bits) threads:
               a = exp(a - a_max) / ( exp(-a_max) + sum_j exp(a_j - a_max) )
           i.e. a softmax over the group with an extra implicit "off" unit
           (the exp(-a_max) term is exp(0 - a_max), the unshifted value 1).
           Subtracting the group maximum before exponentiating keeps __expf
           from overflowing.

           buf is a 16x16 shared tile; the inner dimension is padded to 17
           to avoid shared-memory bank conflicts on column accesses.
           Contains block-wide barriers: every thread of the 16x16 block
           must call this function (no divergent callers).
        */
        template<int pool_bits>
        inline __device__  void  __maxpooling_1616( float &a,  float buf[16][17] ){
            
            // write to buffer 
            buf[threadIdx.y][threadIdx.x] = a;
            __syncthreads();
            
            // to ensure accuracy, get the maximum value of each block and do normalization
            // NOTE(review): reduce_block_with_bp_1616 appears to leave each
            // group's result in the group's first row — confirm in cuda_reduce.cuh
            cuda_reduce::reduce_block_with_bp_1616<cuda_reduce::MAX,pool_bits,pool_bits>( buf );
            __syncthreads();

            // load the maximum value
            // (threadIdx.y >> pool_bits) << pool_bits rounds y down to the
            // first row of this thread's pooling group
            float a_max = buf[ (threadIdx.y>>pool_bits) <<pool_bits ][ threadIdx.x ];
            __syncthreads();

            a = __expf( a - a_max );
            // write the  expf
            buf[threadIdx.y][threadIdx.x] = a;
            __syncthreads();
        
            // sum of exp(a_j - a_max) over the pooling group
            cuda_reduce::reduce_block_with_bp_1616<cuda_reduce::SUM,pool_bits,pool_bits>( buf );
            __syncthreads();
            
            float a_sum = buf[ (threadIdx.y>>pool_bits) <<pool_bits ][ threadIdx.x ];
            // normalize: expf(-a_max) accounts for the implicit "off" state exp(0 - a_max)
            a = a /( expf( -a_max ) + a_sum );  
        }
    };

    /* 
       RBM_MAXPOOL_1616: maxpooling softmax normalization over groups of
       (2^pool_bits x 2^pool_bits) units; requires a 16x16 thread block.
       Contains block-wide barriers (inside __maxpooling_1616), so every
       thread of the block must call it, valid or not.
    */
    template<>
    inline __device__  void  __map<RBM_MAXPOOL_1616>( float &a, bool is_valid ){
        if( !is_valid ){
            // note in maxpooling, setting a to big negative will cause it deactivate forever:
            // exp( a - a_max ) underflows to zero, so the unit never wins the pool
            a = -1e30f; // float literal: avoid a double constant in device float code
        }

        // pool_bits comes from constant memory and is uniform across the
        // block, so the switch does not diverge and the __syncthreads()
        // inside __maxpooling_1616 is reached by all threads
        const int pool_bits = __rbm_constant_pool_bits;
        __shared__   float buf[16][17];
        
        switch( pool_bits ){
        case 0: __map<RBM_SIGMOID>( a , true ); break; // 1x1 pool degenerates to sigmoid
        case 1: __maxpooling::__maxpooling_1616<1>( a,buf ); break;
        case 2: __maxpooling::__maxpooling_1616<2>( a,buf ); break;
        case 3: __maxpooling::__maxpooling_1616<3>( a,buf ); break;
        case 4: __maxpooling::__maxpooling_1616<4>( a,buf ); break;
        default: cuda_utils::error_device();  break; //error    
        }
    } 
};

#endif

