#include <vector>

#include "caffe/layers/conv_layer.hpp"

// HGP4NN settings.
// Added by Hao FU.
#include "caffe/util/kernel_analyzer.hpp"
#include "caffe/util/gpu_manager.hpp"
#include "caffe/util/info_log.hpp"

#ifndef USE_PROF
// Fallback when profiling support (USE_PROF / KernelAnalyzer) is compiled
// out: the per-layer degree of batch-level parallelism — i.e. how many CUDA
// streams images are dispatched on — is taken statically from this flag.
DEFINE_int32(parallelDeg, 1,
             "Optional. Static loop unrolling flag (>=1).");
#endif
// End. Added by Hao FU.

namespace caffe {

  // Used to synchronize all kernels in the same thread: an empty kernel
  // launched on the default stream. With legacy default-stream semantics the
  // default stream synchronizes with other blocking streams on the device,
  // so the launch acts as an ordering point before/after multi-stream work.
  // NOTE(review): call sites never check cudaGetLastError() after launching
  // this kernel — launch-configuration errors would go unnoticed.
  // Added by Hao FU.
  __global__ void conv_sync() { }

  // Block the host until the first `num_streams` streams of the shared
  // GpuStreamPool have drained all queued work.
  void StreamSync(int num_streams) {
    GpuStreamPool& pool = GpuStreamPool::Get();
    for (int s = 0; s < num_streams; ++s) {
      CUDA_CHECK(cudaStreamSynchronize(pool.CudaStreamGet(s)));
    }
  }
  // End. Added by Hao FU.

  // Forward pass over the whole mini-batch. Images are dispatched in groups
  // of `parallel_degree_` onto distinct CUDA streams of the GpuStreamPool so
  // that their convolution GEMMs can execute concurrently on the GPU.
  template <typename Ftype, typename Btype>
  void ConvolutionLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
                                                   const vector<Blob*>& top) {
    const Ftype* weight = this->blobs_[0]->template gpu_data<Ftype>();
    for (int i = 0; i < bottom.size(); ++i) {
      const Ftype* bottom_data = bottom[i]->gpu_data<Ftype>();
      Ftype* top_data = top[i]->mutable_gpu_data<Ftype>();

      // Bias pointers are fetched once per bottom blob and reused for every
      // image in the streamed loop below.
      const Ftype* bias = nullptr;
      const Ftype* bias_multiplier __attribute__((unused)) = nullptr;
      if (this->bias_term_) {
        bias = this->blobs_[1]->template gpu_data<Ftype>();
        bias_multiplier = this->bias_multiplier_.template gpu_data<Ftype>();
        // Empty kernel on the default stream: serves as an ordering point
        // before the multi-stream loop (see conv_sync above).
        // NOTE(review): no cudaGetLastError() check follows this launch.
        conv_sync<<<1, 1>>>();
      }
      // Parallelize the forward computation.
      // Added by Hao FU.
#ifdef USE_PROF
      // Profiling build: KernelAnalyzer picks/records the parallel degree
      // during the first training iterations of this layer. fw_analyzer_ is
      // a small state machine (0 -> 1 -> 2); at 2 the configuration is
      // frozen and this section is skipped.
      if (this->phase_ == Phase::TRAIN and this->fw_analyzer_ < 2) {
        KernelAnalyzer::Get().Start(this->name(), "FW",
                                    this->parallel_degree_);
        if (this->fw_analyzer_ == 0) {
          if (this->parallel_degree_) {
            // Analyzer produced a degree: size the stream pool accordingly,
            // and give each stream its own im2col buffer (non-1x1 kernels).
            GpuStreamPool::Get().SetPoolSize(this->parallel_degree_);
            if (!this->is_1x1_) {
              this->template SetColBufferNum<Ftype>(this->parallel_degree_);
            }
            this->fw_analyzer_ ++;
          } else {
            // NOTE(review): parallel_degree_ is zero on this path, so the
            // ternary below always adds 0 — looks like dead code; confirm
            // the intended state transition.
            this->fw_analyzer_ += (this->parallel_degree_ ? 1 : 0);
            this->parallel_degree_ = 1;
          }
        } else {
          // NOTE(review): "pdegree_degree_" in this message looks like a
          // typo for "parallel_degree_" (runtime string, left unchanged).
          LOG(INFO) << "Recording concurrent kernel execution information. pdegree_degree_ = "
                    << this->parallel_degree_ << ".";
        }
      }
#else
      // Non-profiling build: take the static degree from --parallelDeg and
      // grow the stream pool / column buffers if needed. The degree is only
      // ever raised, never shrunk.
      if (this->parallel_degree_ < FLAGS_parallelDeg) {
        this->parallel_degree_ = FLAGS_parallelDeg;
        if (this->parallel_degree_ > GpuStreamPool::Get().GetPoolSize()) {
          GpuStreamPool::Get().SetPoolSize(this->parallel_degree_);
          if (!this->is_1x1_) {
            this->template SetColBufferNum<Ftype>(this->parallel_degree_);
          }
        }
      }
#endif // USE_PROF
      // End. Added by Hao FU.

      // Process the batch in groups of parallel_degree_ images; image
      // (n + stream) is issued on pool stream `stream`. The final group may
      // be smaller than parallel_degree_.
      for (int n = 0; n < this->num_; n += this->parallel_degree_) {
        for (int stream = 0 ; (stream < this->parallel_degree_) and
               ((n + stream) < this->num_); ++ stream) {
          auto curr_idx = n + stream;
          this->forward_gpu_gemm(bottom_data + curr_idx * this->bottom_dim_,
                                 weight, top_data + curr_idx * this->top_dim_,
                                 stream);
          if (this->bias_term_) {
            this->forward_gpu_bias(top_data + curr_idx * this->top_dim_,
                                   bias, bias_multiplier, stream);
          }
        }
      }

      // Parallelize the forward computation.
      // Added by Hao FU.
#ifdef USE_PROF
      // Close the analyzer session opened above and advance the state
      // machine to its terminal state (2) after one recorded iteration.
      if (this->phase_ == Phase::TRAIN and this->fw_analyzer_ < 2) {
        KernelAnalyzer::Get().Stop();
        if (this->fw_analyzer_ == 1) {
          this->fw_analyzer_ ++;
        }
      }
#endif  // USE_PROF
      if (this->parallel_degree_ > 1) {
        StreamSync(this->parallel_degree_); // Synchronize all streams in the same thread.
      }
      // End. Added by Hao FU.
    }
  }

  // Backward pass over the whole mini-batch. Bias and weight gradients are
  // accumulated sequentially (diffs accumulate across images); the gradient
  // w.r.t. the bottom data is then issued in groups of `bp_parallel_degree_`
  // images on distinct CUDA streams.
  template <typename Ftype, typename Btype>
  void ConvolutionLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
        const vector<bool>& propagate_down, const vector<Blob*>& bottom) {
    const Btype* weight = this->blobs_[0]->template gpu_data<Btype>();
    Btype* weight_diff = this->blobs_[0]->template mutable_gpu_diff<Btype>();

    for (int i = 0; i < top.size(); ++i) {
      const Btype* top_diff = top[i]->gpu_diff<Btype>();
      // Bias gradient, if necessary.
      if (this->bias_term_ && this->param_propagate_down_[1]) {
        Btype* bias_diff = this->blobs_[1]->template mutable_gpu_diff<Btype>();
        for (int n = 0; n < this->num_; ++n) {
          this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_);
        }
      }
      if (this->param_propagate_down_[0]) {
        const Btype* bottom_data = bottom[i]->gpu_data<Btype>();

        // gradient w.r.t. weight. Note that we will accumulate diffs.
        for (int n = 0; n < this->num_; ++ n) {
          this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_,
                                top_diff + n * this->top_dim_, weight_diff);
        }
      }
      // Empty kernel on the default stream: ordering point between the
      // sequential gradient accumulation above and the multi-stream
      // data-gradient loop below (see conv_sync).
      // NOTE(review): launched even when propagate_down[i] is false, and no
      // cudaGetLastError() check follows.
      conv_sync<<<1, 1>>>();

      if (propagate_down[i]) {
        Btype* bottom_diff = bottom[i]->mutable_gpu_diff<Btype>();
        // Choose/record the backward parallel degree.
        // Added by Hao FU.
#ifdef USE_PROF
        // Profiling build: KernelAnalyzer picks/records the degree during
        // the first training iterations. bp_analyzer_ is a state machine
        // (0 -> 1 -> 2); at 2 the configuration is frozen.
        if (this->phase_ == TRAIN and this->bp_analyzer_ < 2) {
          KernelAnalyzer::Get().Start(this->name(), "BP_G2B",
                                      this->bp_parallel_degree_);
          if (this->bp_analyzer_ == 0) {
            if (this->bp_parallel_degree_) {
              GpuStreamPool::Get().SetPoolSize(this->bp_parallel_degree_);
              if (!this->is_1x1_) {
                // Column-buffer element type follows the forward type Ftype,
                // matching the Forward_gpu path.
                this->template SetColBufferNum<Ftype>(this->bp_parallel_degree_);
              }
              this->bp_analyzer_ ++;
            } else {
              // NOTE(review): bp_parallel_degree_ is zero on this path, so
              // the ternary below always adds 0 — looks like dead code.
              this->bp_analyzer_ += (this->bp_parallel_degree_ ? 1 : 0);
              this->bp_parallel_degree_ = 1;
            }
          }
        }
#else
        // Non-profiling build: take the static degree from --parallelDeg and
        // grow the stream pool / column buffers if needed.
        if (this->bp_parallel_degree_ < FLAGS_parallelDeg) {
          this->bp_parallel_degree_ = FLAGS_parallelDeg;
          if (this->bp_parallel_degree_ > GpuStreamPool::Get().GetPoolSize()) {
            GpuStreamPool::Get().SetPoolSize(this->bp_parallel_degree_);
            if (!this->is_1x1_) {
              this->template SetColBufferNum<Ftype>(this->bp_parallel_degree_);
            }
          }
        }
#endif // USE_PROF

        // Process the batch in groups of bp_parallel_degree_ images; image
        // (n + stream) is issued on pool stream `stream`.
        for (int n = 0; n < this->num_; n += this->bp_parallel_degree_) {
          for (int stream = 0; (stream < this->bp_parallel_degree_) and
                 ((n + stream) < this->num_); ++ stream) {
            // gradient w.r.t. bottom data, if necessary.
            int curr_idx = n + stream;
            this->backward_gpu_gemm(top_diff + curr_idx * this->top_dim_, weight,
                                    bottom_diff + curr_idx * this->bottom_dim_, stream);
          }
        }

        // Parallelize the gradient w.r.t. bottom data backpropagation computation.
        // Added by Hao FU.
#ifdef USE_PROF
        // Close the analyzer session and advance the state machine to its
        // terminal state (2).
        if (this->phase_ == TRAIN and this->bp_analyzer_ < 2) {
          KernelAnalyzer::Get().Stop();
          if (this->bp_analyzer_ == 1) {
            this->bp_analyzer_ ++;
          }
        }
#endif // USE_PROF
        if (this->bp_parallel_degree_ > 1) {
          StreamSync(this->bp_parallel_degree_); // Synchronize all streams in the same thread.
        }
        // End. Added by Hao FU.
      }
    }
  }

  // Auxiliary functions for the HGP4NN framework.
  // Added by Hao FU.
  // Forward pass restricted to a macro-batch of the mini-batch, used by the
  // HGP4NN framework. boundary.start_idx_ / end_idx_ are treated as
  // inclusive image indices (macro_size = end - start + 1); each image of
  // the macro-batch is issued on its own CUDA stream, and all streams are
  // drained before moving to the next bottom blob.
  // Added by Hao FU.
  template <typename Ftype, typename Btype>
  void ConvolutionLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
                                                   const vector<Blob*>& top,
                                                   BatchBoundary_t& boundary) {
    // NOTE(review): end_idx_ is used as an inclusive index below, so the
    // tightest valid bound would be end_idx_ < num_; confirm the intended
    // semantics of end_idx_ before tightening this check.
    CHECK_LE(boundary.end_idx_, this->num_);
    // Number of images in the current macro-batch.
    int macro_size = (boundary.end_idx_ - boundary.start_idx_ + 1);
    const Ftype* weight = this->blobs_[0]->template gpu_data<Ftype>();
    // Hoist the loop-invariant bias pointers out of the per-image loop,
    // matching the batch-wide Forward_gpu overload.
    const Ftype* bias = nullptr;
    const Ftype* bias_multiplier = nullptr;
    if (this->bias_term_) {
      bias = this->blobs_[1]->template gpu_data<Ftype>();
      bias_multiplier = this->bias_multiplier_.template gpu_data<Ftype>();
    }
    for (int i = 0; i < bottom.size(); ++ i) {
      // Offset the data pointers to the first image of the macro-batch.
      const Ftype* bottom_data = bottom[i]->gpu_data<Ftype>() + \
        boundary.start_idx_ * this->bottom_dim_;
      Ftype* top_data = top[i]->mutable_gpu_data<Ftype>() + \
        boundary.start_idx_ * this->top_dim_;

      // Grow the stream pool and per-stream im2col buffers if needed.
      // NOTE(review): the loop below indexes streams up to macro_size - 1;
      // confirm the caller guarantees parallel_degree_ >= macro_size.
      if (this->parallel_degree_ > GpuStreamPool::Get().GetPoolSize()) {
        GpuStreamPool::Get().SetPoolSize(this->parallel_degree_);
        this->template SetColBufferNum<Ftype>(this->parallel_degree_);
      }

      for (int n = 0; n < macro_size; ++ n) {
        // Image n of the macro-batch runs on pool stream n.
        this->forward_gpu_gemm(bottom_data + n * this->bottom_dim_, weight,
                               top_data + n * this->top_dim_, n, false);

        if (this->bias_term_) {
          this->forward_gpu_bias(top_data + n * this->top_dim_, bias, bias_multiplier, n);
        }
      }
      StreamSync(macro_size);   // Drain all per-image streams.
    }
  }

  // Backward pass restricted to a macro-batch [start_idx_, end_idx_]
  // (inclusive bounds; macro_size = end - start + 1) of the mini-batch, used
  // by the HGP4NN framework. Bias and weight gradients are accumulated
  // sequentially; the bottom-data gradient is issued on one CUDA stream per
  // image of the macro-batch.
  template <typename Ftype, typename Btype>
  void ConvolutionLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
                                                    const vector<bool>& propagate_down,
                                                    const vector<Blob*>& bottom,
                                                    BatchBoundary_t& boundary) {
    // NOTE(review): end_idx_ is used as an inclusive index below, so the
    // tightest valid bound would be end_idx_ < num_; confirm its semantics.
    CHECK_LE(boundary.end_idx_, this->num_);
    int macro_size = (boundary.end_idx_ - boundary.start_idx_ + 1);
    // Obtain the weight and weight_diff data.
    const Btype* weight = this->blobs_[0]->template gpu_data<Btype>();
    Btype* weight_diff = this->blobs_[0]->template mutable_gpu_diff<Btype>();

    for (int i = 0; i < top.size(); ++ i) {
      // Gradient pointer offset to the first image of the macro-batch.
      const Btype* top_diff = top[i]->gpu_diff<Btype>() + \
        boundary.start_idx_ * this->top_dim_;
      // Bias gradient, if necessary (accumulated sequentially).
      if (this->bias_term_ and this->param_propagate_down_[1]) {
        Btype* bias_diff = this->blobs_[1]->template mutable_gpu_diff<Btype>();
        for (int n = 0; n < macro_size; ++ n) {
          this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_);
        }
      }

      if (this->param_propagate_down_[0]) {
        const Btype* bottom_data = bottom[i]->gpu_data<Btype>() + \
          boundary.start_idx_ * this->bottom_dim_;
        // gradient w.r.t. weight. Note that we will accumulate diffs.
        for (int n = 0; n < macro_size; ++ n) {
          this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_,
                                top_diff + n * this->top_dim_, weight_diff);
        }
      }

      // Grow the stream pool and per-stream im2col buffers if needed.
      // NOTE(review): buffer element type is Ftype here (as in the other
      // backward path), and the loop below indexes streams up to
      // macro_size - 1 — confirm bp_parallel_degree_ >= macro_size holds.
      if (this->bp_parallel_degree_ > GpuStreamPool::Get().GetPoolSize()) {
        GpuStreamPool::Get().SetPoolSize(this->bp_parallel_degree_);
        this->template SetColBufferNum<Ftype>(this->bp_parallel_degree_);
      }

      if (propagate_down[i]) {
        Btype* bottom_diff = bottom[i]->mutable_gpu_diff<Btype>() + \
          boundary.start_idx_ * this->bottom_dim_;
        // Parallelize the gradient w.r.t. bottom data backpropagation computation.
        for (int n = 0; n < macro_size; ++ n) {
          // gradient w.r.t. bottom data, if necessary; image n of the
          // macro-batch runs on pool stream n.
          this->backward_gpu_gemm(top_diff + n * this->top_dim_, weight,
                                  bottom_diff + n * this->bottom_dim_, n);
        }
        StreamSync(macro_size); // Drain all per-image streams.
      }
    }
  }
  // End. Added by Hao FU.

// Explicit instantiation of the standard GPU forward/backward entry points
// for the supported (Ftype, Btype) combinations.
INSTANTIATE_LAYER_GPU_FUNCS_FB(ConvolutionLayer);

// Auxiliary instantiation of boundary-based functions for the HGP4NN framework.
// Added by Hao FU.
INSTANTIATE_LAYER_GPU_BOUNDARY_FUNCS_FB(ConvolutionLayer);
// End. Added by Hao FU.

}  // namespace caffe
