#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

using std::min;
using std::max;

// Stable insertion sort of eles[0..size) in ascending order.
// On return, map[k] holds the ORIGINAL index of the element that ended up at
// sorted position k, so callers can recover each rank's window location.
// Note: eles is sorted in place; size must be >= 1.
template <typename Dtype>
void inline RegionSort(Dtype* eles, int size, int* map) {
    map[0] = 0;
    for (int i = 1; i < size; i++) {
        const Dtype key = eles[i];
        const int src = i;
        int j = i - 1;
        // Shift every strictly-greater element one slot right, carrying its
        // origin index along with it.
        while (j >= 0 && eles[j] > key) {
            eles[j + 1] = eles[j];
            map[j + 1] = map[j];
            j--;
        }
        // Insert after the last element <= key (keeps equal elements stable).
        eles[j + 1] = key;
        map[j + 1] = src;
    }
}
    

// Reads kernel/stride/pad geometry, the alpha/beta trim counts, and the
// optional TEMPLATE weight map from the layer's pmpool_param.
template <typename Dtype>
void PMPoolLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    PMPoolParameter pool_param = this->layer_param_.pmpool_param();
    // alpha / beta: number of lowest / highest ranked window elements that
    // the AVG (trimmed mean) and MED methods discard.
    alpha = pool_param.alpha();
    beta = pool_param.beta();
    if (pool_param.has_kernel_h() && pool_param.has_kernel_w()) {
      // Note: the square `kernel` member is intentionally left untouched on
      // this path; only kernel_h_/kernel_w_ are authoritative afterwards.
      kernel_h_ = pool_param.kernel_h();
      kernel_w_ = pool_param.kernel_w();
    } else {
      CHECK(pool_param.has_kernel()) << "Kernel need to be set";
      kernel_h_ = kernel_w_ = kernel = pool_param.kernel();
    }
    if (pool_param.has_stride_h() && pool_param.has_stride_w()) {
      stride_h_ = pool_param.stride_h();
      stride_w_ = pool_param.stride_w();
    } else {
      CHECK(pool_param.has_stride()) << "Stride must be set";
      stride_h_ = stride_w_ = stride = pool_param.stride();
    }
    pad   = pool_param.pad();
    pad_h_ = pad_w_ = pad;
    // BUGFIX: validate against kernel_h_ * kernel_w_. The original compared
    // against kernel * kernel, but `kernel` is never assigned when kernel_h /
    // kernel_w are given explicitly, so the check read an indeterminate value.
    CHECK_LT(alpha + beta, kernel_h_ * kernel_w_) << "alpha beta is too large";
    // Copy the optional per-slot weight template (TEMPLATE pooling method).
    const int len = pool_param.weights().size();
    for (int i = 0; i < len; i++) {
      weights.push_back(pool_param.weights(i));
    }
    if (weights.size() > 0) {
      // A non-empty template must cover the whole kernel window.
      CHECK_EQ(weights.size(), kernel_w_ * kernel_h_) << "weight is a mapping template";
    }
}

// Computes the pooled output shape from the bottom blob and the geometry set
// up in LayerSetUp, and sizes the per-window gradient-weight buffer.
template <typename Dtype>
void PMPoolLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, "
        << "namely (num,channels,height,width)";
    channels_ = bottom[0]->channels();
    height_   = bottom[0]->height();
    width_    = bottom[0]->width();
    // BUGFIX: compute the pooled shape from kernel_h_/stride_h_ (height) and
    // kernel_w_/stride_w_ (width). The original used the square `kernel` and
    // `stride` members, which are uninitialized whenever kernel_h/kernel_w or
    // stride_h/stride_w were set explicitly, and which cannot describe a
    // non-square kernel at all. pad_h_/pad_w_ equal `pad` (set in LayerSetUp),
    // so square configurations are unchanged.
    pooled_height_ = static_cast<int>(ceil(static_cast<float>(
            height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1;
    pooled_width_  = static_cast<int>(ceil(static_cast<float>(
            width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1;
    // some pad operation ignored (need code) -- kept from the original.
    top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_);
    // One kernel_h_ x kernel_w_ weight slot per pooled output location,
    // written by Forward_cpu and consumed by Backward_cpu.
    pooled_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_ * kernel_h_,
                        pooled_width_ * kernel_w_);
}

// TODO(Yangqing): Is there a faster way to do pooling in the channel-first
// case?
// CPU forward pass. AVG computes a trimmed mean of each window (drop the
// alpha smallest and beta largest values, average the rest), MED a trimmed
// median, TEMPLATE a fixed weighted sum. AVG/MED also record per-element
// gradient weights into pooled_idx_ for Backward_cpu.
template <typename Dtype>
void PMPoolLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    const Dtype* bottom_data = bottom[0]->cpu_data();
    Dtype* top_data = top[0]->mutable_cpu_data();
    const int top_count = top[0]->count();
    // The mask is exported to top[1] when the layer has a second top blob.
    const bool use_top_mask = top.size() > 1;
    Dtype* mask = NULL;
    Dtype* top_mask = NULL;
    switch (this->layer_param_.pmpool_param().pool()) {
        case PMPoolParameter_PMPoolMethod_AVG: {
            if (use_top_mask) {
                top_mask = top[1]->mutable_cpu_data();
                caffe_set(top_count, Dtype(-1), top_mask);
            } else {
                mask = pooled_idx_.mutable_cpu_data();
                caffe_set(top_count, Dtype(-1), mask);
            }
            caffe_set(top_count, Dtype(-FLT_MAX), top_data);
            // NOTE(review): when use_top_mask is true, `mask` stays NULL but
            // is still written in the loop below -- the top-mask output path
            // is unfinished. Preserved as in the original.
            // Scratch buffers holding one pooling window.
            // BUGFIX: the original used `new Dtype(n)` / `new int(n)`, which
            // allocates a SINGLE element initialized to n, not an array of n
            // elements -- every window access past index 0 overflowed the
            // heap. Use array new, paired with delete[] below.
            const int window_size = kernel_h_ * kernel_w_;
            Dtype* eles = new Dtype[window_size];
            int*   map  = new int[window_size];

            for (int n = 0; n < bottom[0]->num(); ++n) {
                for (int c = 0; c < channels_; ++c) {
                    for (int ph = 0; ph < pooled_height_; ++ph) {
                        for (int pw = 0; pw < pooled_width_; ++pw) {
                            int hstart = ph * stride_h_ - pad_h_;
                            int wstart = pw * stride_w_ - pad_w_;
                            const int hend = min(hstart + kernel_h_, height_);
                            const int wend = min(wstart + kernel_w_, width_);
                            hstart = max(hstart, 0);
                            wstart = max(wstart, 0);
                            const int pool_index = ph * pooled_width_ + pw;

                            // Gather the (possibly edge-clipped) window.
                            int i = 0;
                            for (int h = hstart; h < hend; ++h) {
                                for (int w = wstart; w < wend; ++w) {
                                    eles[i++] = bottom_data[h * width_ + w];
                                }
                            }
                            // Sort ascending; map[k] = original in-window
                            // position of the k-th smallest value.
                            RegionSort<Dtype>(eles, i, map);
                            CHECK_GT(i, alpha + beta)
                                << "kernel region is smaller than alpha + beta";
                            Dtype forward_val = static_cast<Dtype>(0.0f);
                            const int base = pool_index * window_size;
                            Dtype sum = static_cast<Dtype>(0.0f);

                            if (kernel_h_ > hend - hstart || kernel_w_ > wend - wstart) {
                                // Boundary window clipped by the image edge:
                                // no trimming, plain mean over the i values.
                                for (int j = 0; j < i; ++j) {
                                    sum += eles[j];
                                }
                                // NOTE(review): the row/column tests below
                                // look transposed (`% kernel_w_` yields a
                                // column yet is compared with the row extent
                                // hend - hstart, and vice versa) and use `>`
                                // where `>=` seems intended; preserved
                                // pending verification against the GPU code.
                                for (int k_h = 0, k_i = 0; k_h < window_size; ++k_h) {
                                    if (kernel_h_ > hend - hstart && k_h % kernel_w_ > hend - hstart) {
                                        mask[base + k_h] = static_cast<Dtype>(0.0f);
                                    } else if (kernel_w_ > wend - wstart && k_h / kernel_w_ > wend - wstart) {
                                        mask[base + k_h] = static_cast<Dtype>(0.0f);
                                    } else {
                                        mask[base + k_h] = static_cast<Dtype>(eles[k_i] / sum);
                                        ++k_i;
                                    }
                                }
                                forward_val = sum / i;
                            } else {
                                // Interior window: average the sorted ranks
                                // in [alpha, i - beta).
                                // NOTE(review): eles[] is already sorted at
                                // this point, so eles[map[j]] re-permutes the
                                // sorted data; eles[j] looks intended.
                                // Preserved as-is pending verification.
                                for (int j = alpha; j < i - beta; ++j) {
                                    sum += eles[map[j]];
                                }
                                forward_val = sum / (i - alpha - beta);
                                for (int j = 0; j < i; ++j) {
                                    if (j < alpha || j >= i - beta) {
                                        // Trimmed rank: zero gradient weight.
                                        mask[base + map[j]] = static_cast<Dtype>(0.0f);
                                    } else {
                                        mask[base + map[j]] = static_cast<Dtype>(eles[map[j]] / sum);
                                    }
                                }
                            }
                            top_data[pool_index] = forward_val;
                        }
                    }
                    // Advance all pointers to the next channel plane.
                    bottom_data += bottom[0]->offset(0, 1);
                    top_data += top[0]->offset(0, 1);
                    if (use_top_mask) {
                        top_mask += top[0]->offset(0, 1);
                    } else {
                        mask += pooled_idx_.offset(0, 1);
                    }
                }
            }
            delete[] eles;  // BUGFIX: delete[] pairs with array new.
            delete[] map;
            break;
        }
        case PMPoolParameter_PMPoolMethod_MED: {
            if (use_top_mask) {
                top_mask = top[1]->mutable_cpu_data();
                caffe_set(top_count, Dtype(-1), top_mask);
            } else {
                mask = pooled_idx_.mutable_cpu_data();
                caffe_set(top_count, Dtype(-1), mask);
            }
            caffe_set(top_count, Dtype(-FLT_MAX), top_data);
            // BUGFIX: array new / delete[] -- see the AVG case.
            const int window_size = kernel_h_ * kernel_w_;
            Dtype* eles = new Dtype[window_size];
            int*   map  = new int[window_size];

            for (int n = 0; n < bottom[0]->num(); ++n) {
                for (int c = 0; c < channels_; ++c) {
                    for (int ph = 0; ph < pooled_height_; ++ph) {
                        for (int pw = 0; pw < pooled_width_; ++pw) {
                            int hstart = ph * stride_h_ - pad_h_;
                            int wstart = pw * stride_w_ - pad_w_;
                            const int hend = min(hstart + kernel_h_, height_);
                            const int wend = min(wstart + kernel_w_, width_);
                            hstart = max(hstart, 0);
                            wstart = max(wstart, 0);
                            const int pool_index = ph * pooled_width_ + pw;

                            int i = 0;
                            for (int h = hstart; h < hend; ++h) {
                                for (int w = wstart; w < wend; ++w) {
                                    eles[i++] = bottom_data[h * width_ + w];
                                }
                            }
                            RegionSort<Dtype>(eles, i, map);
                            CHECK_GT(i, alpha + beta)
                                << "kernel region is smaller than alpha + beta";
                            Dtype forward_val = static_cast<Dtype>(0.0f);
                            const int base = pool_index * window_size;
                            // Middle rank of the trimmed range, and whether
                            // the trimmed count is odd (single median) or
                            // even (two middle ranks).
                            const int mid = (i - beta + alpha) / 2;
                            const bool odd = ((i - beta - alpha) % 2 == 1);

                            if (kernel_h_ > hend - hstart || kernel_w_ > wend - wstart) {
                                // NOTE(review): same transposed row/column
                                // boundary tests as in AVG; additionally k_h
                                // is a kernel slot, not a sorted rank, so
                                // comparing it to the median rank looks
                                // wrong. Preserved as in the original.
                                for (int k_h = 0; k_h < window_size; ++k_h) {
                                    if (kernel_h_ > hend - hstart && k_h % kernel_w_ > hend - hstart) {
                                        mask[base + k_h] = static_cast<Dtype>(0.0f);
                                    } else if (kernel_w_ > wend - wstart && k_h / kernel_w_ > wend - wstart) {
                                        mask[base + k_h] = static_cast<Dtype>(0.0f);
                                    } else if (!odd && k_h != mid && k_h != mid + 1) {
                                        mask[base + k_h] = static_cast<Dtype>(0.0f);
                                    } else if (odd && k_h != mid) {
                                        mask[base + k_h] = static_cast<Dtype>(0.0f);
                                    } else if (odd && k_h == mid) {
                                        forward_val += eles[map[k_h]];
                                        mask[base + k_h] = static_cast<Dtype>(1.0f);
                                    } else {
                                        // Even count: two middles, half each.
                                        forward_val += eles[map[k_h]] / 2;
                                        mask[base + k_h] = static_cast<Dtype>(0.5f);
                                    }
                                }
                            } else {
                                // NOTE(review): in this branch the even case
                                // adds BOTH middle values without halving
                                // (unlike the boundary branch above), and the
                                // mask is written at slot j rather than
                                // map[j] as AVG does. Preserved as-is.
                                for (int j = 0; j < i; ++j) {
                                    if (!odd && j != mid && j != mid + 1) {
                                        mask[base + j] = static_cast<Dtype>(0.0f);
                                    } else if (!odd) {
                                        mask[base + j] = static_cast<Dtype>(0.5f);
                                        forward_val += eles[map[j]];
                                    } else if (odd && j != mid) {
                                        mask[base + j] = static_cast<Dtype>(0.0f);
                                    } else {
                                        mask[base + j] = static_cast<Dtype>(1.0f);
                                        forward_val += eles[map[j]];
                                    }
                                }
                            }
                            top_data[pool_index] = forward_val;
                        }
                    }
                    bottom_data += bottom[0]->offset(0, 1);
                    top_data += top[0]->offset(0, 1);
                    if (use_top_mask) {
                        top_mask += top[0]->offset(0, 1);
                    } else {
                        mask += pooled_idx_.offset(0, 1);
                    }
                }
            }
            delete[] eles;  // BUGFIX: delete[] pairs with array new.
            delete[] map;
            break;
        }
        case PMPoolParameter_PMPoolMethod_TEMPLATE: {
            // Weighted sum of each window against the fixed kernel template.
            CHECK_EQ(weights.size(), kernel_w_ * kernel_h_)
                << "weights not equal to kernel size";
            if (use_top_mask) {
                top_mask = top[1]->mutable_cpu_data();
                caffe_set(top_count, Dtype(-1), top_mask);
            } else {
                mask = pooled_idx_.mutable_cpu_data();
                caffe_set(top_count, Dtype(-1), mask);
            }
            caffe_set(top_count, Dtype(-FLT_MAX), top_data);
            // (Removed the unused eles/map scratch allocations the original
            // made here; this case never sorts.)
            for (int n = 0; n < bottom[0]->num(); ++n) {
                for (int c = 0; c < channels_; ++c) {
                    for (int ph = 0; ph < pooled_height_; ++ph) {
                        for (int pw = 0; pw < pooled_width_; ++pw) {
                            // NOTE(review): unlike AVG/MED this case ignores
                            // pad_h_/pad_w_, matching the original code.
                            int hstart = ph * stride_h_;
                            int wstart = pw * stride_w_;
                            const int hend = min(hstart + kernel_h_, height_);
                            const int wend = min(wstart + kernel_w_, width_);
                            hstart = max(hstart, 0);
                            wstart = max(wstart, 0);
                            const int pool_index = ph * pooled_width_ + pw;

                            Dtype forward_val = static_cast<Dtype>(0.0f);
                            for (int h = hstart; h < hend; ++h) {
                                for (int w = wstart; w < wend; ++w) {
                                    // BUGFIX: index the template by the
                                    // position WITHIN the window. The
                                    // original used h * kernel_w_ + w
                                    // (absolute image coordinates), which
                                    // runs past the end of `weights` for any
                                    // window with hstart > 0 or wstart > 0.
                                    const int j = (h - hstart) * kernel_w_ + (w - wstart);
                                    forward_val += bottom_data[h * width_ + w] * weights[j];
                                }
                            }
                            top_data[pool_index] = forward_val;
                        }
                    }
                    bottom_data += bottom[0]->offset(0, 1);
                    top_data += top[0]->offset(0, 1);
                    if (use_top_mask) {
                        top_mask += top[0]->offset(0, 1);
                    } else {
                        mask += pooled_idx_.offset(0, 1);
                    }
                }
            }
            break;
        }
        case PMPoolParameter_PMPoolMethod_MIX:
            break;
        default:
            break;
    }
}

// CPU backward pass. AVG/MED redistribute top_diff through the per-window
// gradient weights recorded in pooled_idx_ during Forward_cpu; TEMPLATE
// scales top_diff by the fixed template weights.
template <typename Dtype>
void PMPoolLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
    if (!propagate_down[0]) {
        return;
    }
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    // Accumulation target: windows can overlap, so start from zero.
    caffe_set(bottom[0]->count(), Dtype(0), bottom_diff);
    const bool use_top_mask = top.size() > 1;
    const Dtype* mask = NULL;  // suppress warnings about uninitialized variables
    const Dtype* top_mask = NULL;
    // BUGFIX: dispatch on pmpool_param() to match Forward_cpu. The original
    // read pooling_param() -- a different, typically default-initialized
    // message -- so the backward pass could select the wrong method.
    switch (this->layer_param_.pmpool_param().pool()) {
        case PMPoolParameter_PMPoolMethod_AVG:
        case PMPoolParameter_PMPoolMethod_MED:
            if (use_top_mask) {
                top_mask = top[1]->cpu_data();
            } else {
                mask = pooled_idx_.cpu_data();
            }
            // NOTE(review): when use_top_mask is true, `mask` stays NULL yet
            // is dereferenced below -- the top-mask path is unfinished.
            // Preserved as in the original.
            for (int n = 0; n < top[0]->num(); ++n) {
                for (int c = 0; c < channels_; ++c) {
                    for (int ph = 0; ph < pooled_height_; ++ph) {
                        for (int pw = 0; pw < pooled_width_; ++pw) {
                            const int pool_index = ph * pooled_width_ + pw;
                            // Start of this window's kernel-sized mask slot.
                            const int base = pool_index * kernel_h_ * kernel_w_;
                            int hstart = ph * stride_h_ - pad_h_;
                            int wstart = pw * stride_w_ - pad_w_;
                            int hend = min(hstart + kernel_h_, height_ + pad_h_);
                            int wend = min(wstart + kernel_w_, width_ + pad_w_);
                            hstart = max(hstart, 0);
                            wstart = max(wstart, 0);
                            hend = min(hend, height_);
                            wend = min(wend, width_);
                            for (int h = hstart; h < hend; ++h) {
                                for (int w = wstart; w < wend; ++w) {
                                    // BUGFIX: address the mask by the position
                                    // WITHIN the kernel window. The original
                                    // used the absolute image coordinates
                                    // (h * kernel_w_ + w), which reads outside
                                    // this window's kernel_h_*kernel_w_ slot
                                    // for any window with hstart > 0 or
                                    // wstart > 0.
                                    bottom_diff[h * width_ + w] +=
                                        top_diff[pool_index] *
                                        mask[base + (h - hstart) * kernel_w_ + (w - wstart)];
                                }
                            }
                        }
                    }
                    // Advance to the next channel plane.
                    bottom_diff += bottom[0]->offset(0, 1);
                    top_diff += top[0]->offset(0, 1);
                    if (use_top_mask) {
                        top_mask += top[0]->offset(0, 1);
                    } else {
                        mask += pooled_idx_.offset(0, 1);
                    }
                }
            }
            break;
        case PMPoolParameter_PMPoolMethod_TEMPLATE: {
            // Gradient of the weighted sum: each bottom pixel receives
            // top_diff times its template weight. (Removed an unused running
            // sum of the weights that the original computed here.)
            for (int n = 0; n < top[0]->num(); ++n) {
                for (int c = 0; c < channels_; ++c) {
                    for (int ph = 0; ph < pooled_height_; ++ph) {
                        for (int pw = 0; pw < pooled_width_; ++pw) {
                            // NOTE(review): this path subtracts pad_h_/pad_w_
                            // while the forward TEMPLATE path ignores padding;
                            // preserved as written, but the two should agree.
                            int hstart = ph * stride_h_ - pad_h_;
                            int wstart = pw * stride_w_ - pad_w_;
                            int hend = min(hstart + kernel_h_, height_ + pad_h_);
                            int wend = min(wstart + kernel_w_, width_ + pad_w_);
                            hstart = max(hstart, 0);
                            wstart = max(wstart, 0);
                            hend = min(hend, height_);
                            wend = min(wend, width_);
                            for (int h = hstart; h < hend; ++h) {
                                for (int w = wstart; w < wend; ++w) {
                                    // BUGFIX: window-relative template index;
                                    // `weights` has kernel_h_*kernel_w_
                                    // entries, so the original's absolute
                                    // image coordinates overflowed it.
                                    bottom_diff[h * width_ + w] +=
                                        top_diff[ph * pooled_width_ + pw] *
                                        weights[(h - hstart) * kernel_w_ + (w - wstart)];
                                }
                            }
                        }
                    }
                    bottom_diff += bottom[0]->offset(0, 1);
                    top_diff += top[0]->offset(0, 1);
                }
            }
            break;
        }
        case PMPoolParameter_PMPoolMethod_MIX:
            NOT_IMPLEMENTED;
            break;
        default:
            LOG(FATAL) << "Unknown pooling method.";
    }
}


#ifdef CPU_ONLY
// CPU-only builds: generate Forward_gpu/Backward_gpu stubs for this layer so
// the class still links without the CUDA implementation.
STUB_GPU(PMPoolLayer);
#endif


// Instantiate the class template and register it with the layer factory under
// the "PMPool" type name so it can be created from a prototxt definition.
INSTANTIATE_CLASS(PMPoolLayer);
REGISTER_LAYER_CLASS(PMPool);
}  // namespace caffe
