#include <algorithm>
#include <functional>
#include <utility>
#include <vector>

#include "caffe/layers/reorg_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void ReorgLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Cache the reorg hyper-parameters from the layer prototxt:
  //   stride_  -- block size of the spatial <-> channel rearrangement
  //   reverse_ -- false: space-to-depth direction; true: the inverse
  stride_ = this->layer_param_.reorg_param().stride();
  reverse_ = this->layer_param_.reorg_param().reverse();
}

template <typename Dtype>
void ReorgLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Validate the input shape and compute the reorganized output shape.
  // Forward (reverse_ == false): stride_ x stride_ spatial blocks are moved
  // into the channel dimension (channels grow, spatial dims shrink).
  // Reverse (reverse_ == true): the inverse mapping (channels shrink,
  // spatial dims grow).
  CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, "
      << "corresponding to (num, channels, height, width)";
  channels_ = bottom[0]->channels();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  if (!reverse_) {
    // Space-to-depth: spatial dims must be divisible by the stride.
    CHECK_EQ(0, height_ % stride_) << "Input height must be a multiple of "
        << stride_ << ", but now is " << height_;
    CHECK_EQ(0, width_ % stride_) << "Input width must be a multiple of "
        << stride_ << ", but now is " << width_;
    reorg_channels_ = channels_ * stride_ * stride_;
    reorg_height_ = height_ / stride_;
    reorg_width_ = width_ / stride_;
  } else {
    // Depth-to-space: channels must be divisible by stride_^2.  The previous
    // code instead required the spatial dims to be divisible by the stride
    // (which is irrelevant here) and let this division truncate silently.
    CHECK_EQ(0, channels_ % (stride_ * stride_))
        << "Input channels must be a multiple of " << stride_ * stride_
        << ", but now is " << channels_;
    reorg_channels_ = channels_ / (stride_ * stride_);
    reorg_height_ = height_ * stride_;
    reorg_width_ = width_ * stride_;
  }
  top[0]->Reshape(bottom[0]->num(), reorg_channels_, reorg_height_,
          reorg_width_);
}

template <typename Dtype>
void ReorgLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Rearrange bottom into top with reorg_cpu; the integer flag selects the
  // direction (1 = reverse / depth-to-space, 0 = forward / space-to-depth).
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int direction = reverse_ ? 1 : 0;
  reorg_cpu(bottom_data, width_, height_, channels_,
          bottom[0]->num(), stride_, direction, top_data);
}

template <typename Dtype>
void ReorgLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }
  const Dtype* top_diff = top[0]->cpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  // Zero the gradient buffer, then scatter the top diff back through the
  // opposite rearrangement: the backward pass of a pure data shuffle is the
  // same shuffle run in the inverse direction (flag flipped vs. Forward_cpu).
  caffe_set(bottom[0]->count(), Dtype(0), bottom_diff);
  const int inverse_direction = reverse_ ? 0 : 1;
  reorg_cpu(top_diff, width_, height_, channels_,
          bottom[0]->num(), stride_, inverse_direction, bottom_diff);
}

// When compiled without CUDA, stub out the *_gpu entry points so that any
// accidental GPU call fails loudly instead of linking against missing code.
#ifdef CPU_ONLY
STUB_GPU(ReorgLayer);
#endif

// Instantiate the float/double template specializations and register the
// layer under the type name "Reorg" so it can be created from a prototxt.
INSTANTIATE_CLASS(ReorgLayer);
REGISTER_LAYER_CLASS(Reorg);

}  // namespace caffe
