#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layers/reorg_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void ReorgLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // Reorganizes the bottom blob into the top blob on the GPU.  The
  // direction of the rearrangement is chosen by reverse_, which is
  // forwarded to reorg_gpu as an int flag (1 when reverse_, else 0).
  const Dtype* const src = bottom[0]->gpu_data();
  Dtype* const dst = top[0]->mutable_gpu_data();
  const int flag = reverse_ ? 1 : 0;
  reorg_gpu(src, width_, height_, channels_,
            bottom[0]->num(), stride_, flag, dst);
}

template <typename Dtype>
void ReorgLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  // bottom_diff is a device pointer (mutable_gpu_diff), so it must be
  // cleared with the GPU fill routine; the previous caffe_set() call
  // performed a host-side fill on device memory, which is invalid.
  caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff);
  // The gradient of a reorg is the reorg in the opposite direction:
  // scatter top_diff back into bottom_diff with the flag inverted
  // relative to Forward_gpu.
  if (reverse_) {
    reorg_gpu(top_diff, width_, height_, channels_,
            bottom[0]->num(), stride_, 0, bottom_diff);
  } else {
    reorg_gpu(top_diff, width_, height_, channels_,
            bottom[0]->num(), stride_, 1, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(ReorgLayer);

}
