#ifdef PIPELINE

#include <algorithm>
#include <limits>

#include <cstring>

#include <glog/logging.h>

#include "caffe/type.hpp"
#include "caffe/pipeline.hpp"
#include "caffe/layers/conv_layer.hpp"

// Command-line flag (gflags): when true, enables pipelined execution of a
// net's layers on a single GPU device.
DEFINE_bool(pipeline, false,
            "Optional. Pipeline execution on a single GPU device.");

namespace caffe {
  // PipeStage member implementation.
  // Constructs a forward-direction stage over layers
  // [start_layer, end_layer] of |net|. Delegates to the general constructor
  // so member setup lives in exactly one place (the two constructors
  // previously duplicated it).
  PipeStage::PipeStage(boost::shared_ptr<Net> net, const int base_idx,
                       const int start_layer, const int end_layer,
                       const int batch_size) :
    PipeStage(net, base_idx, start_layer, end_layer, batch_size,
              Direction::FORWARD) {
  }

  // Constructs a stage over layers [start_layer, end_layer] of |net| that
  // runs in the given |direction|. The stage starts with no successor and
  // with a default-constructed (not-yet-started) worker thread.
  PipeStage::PipeStage(boost::shared_ptr<Net> net, const int base_idx,
                       const int start_layer, const int end_layer,
                       const int batch_size, Direction direction):
    net_(net), base_idx_(base_idx), start_layer_idx_(start_layer),
    end_layer_idx_(end_layer), batch_size_(batch_size), direction_(direction) {
    stage_thread_ = boost::make_shared<boost::thread>();
    next_stage_ = nullptr;
  }

  // Destructor. Joins the worker thread FIRST: the previous code reset
  // net_/stage_inputs_/stage_outputs_/next_stage_ before joining, so a
  // still-running stage thread could dereference members that had already
  // been released.
  PipeStage::~PipeStage() {
    if (is_started()) {
      this->stage_thread_->join();  // Wait for the stage worker to finish.
    }
    this->stage_thread_.reset();
    this->net_.reset();
    this->stage_inputs_.reset();
    this->stage_outputs_.reset();
    this->next_stage_.reset();
  }

  // Renders this stage as "(layer_a, layer_b, ..., layer_z)" using the
  // names of the layers in [start_layer_idx_, end_layer_idx_].
  std::string PipeStage::StageString() {
    std::string repr("(");

    for (int idx = start_layer_idx_; idx <= end_layer_idx_; ++ idx) {
      repr += this->net_->layers()[idx]->name();
      repr += (idx == end_layer_idx_) ? ")" : ", ";
    }

    return repr;
  }

  // Starts this stage's worker loop. When |main_thread| is true the loop
  // runs inline on the caller's thread; otherwise a dedicated boost::thread
  // is spawned. Per-layer losses are accumulated into |loss|.
  void PipeStage::StageStart(vector<float>* loss, bool main_thread) {
    CHECK(!is_started()) << "Thread should persist and not be restarted.";
    try {
      if (!main_thread) {
        // Spawn a dedicated worker thread for this stage.
        stage_thread_.reset(
            new boost::thread(&PipeStage::StageThreadEntry, this, loss));
        return;
      }
      // Run the stage loop directly on the caller's thread.
      StageThreadEntry(loss);
    } catch (std::exception& e) {
      LOG(FATAL) << "Thread exception: " << e.what();
    }
  }

  // Worker loop for one pipeline stage: repeatedly takes a macro-batch
  // boundary from the input queue, runs this stage's layer span over it
  // (forward or backward), and hands the boundary to the next stage via the
  // output queue. The loop ends after processing the boundary that closes
  // the batch, i.e. the one whose end index is batch_size_ - 1.
  void PipeStage::StageThreadEntry(vector<float>* loss) {
    // Set the number of GPU streams.
    for (;;) {
      auto boundary = stage_inputs_->pop();  // Blocks until work arrives.

      if (this->direction_ == Direction::FORWARD) {
        this->net_->PipeForwardFromTo(this->start_layer_idx_,
                                      this->end_layer_idx_,
                                      boundary, base_idx_, loss);
      } else {
        this->net_->PipeBackwardFromToAu(this->start_layer_idx_,
                                         this->end_layer_idx_,
                                         boundary);
      }
      stage_outputs_->push(boundary);

      // The macro batch ending at (batch_size_ - 1) is the last one.
      if (boundary.end_idx_ == (batch_size_ - 1)) {
        break;
      }
    }
  }
  // End of the PipeStage class member implementation.

  // Destructor: releases every stage, then the remaining shared state.
  Pipeline::~Pipeline() {
    for (auto& stage : stages_) {
      stage.reset();
    }
    stages_.clear();

    net_.reset();
    layer_loss_.clear();
    pipe_inputs_.reset();
    pipe_outputs_.reset();
  }

  // Renders the whole pipeline as "stage0 -> stage1 -> ... -> stageN".
  std::string Pipeline::PipelineString() {
    std::string out;

    for (size_t s = 0; s < stages_.size(); ++ s) {
      out += stages_[s]->StageString();
      if (s + 1 < stages_.size()) {
        out += " -> ";
      }
    }

    return out;
  }

  void Pipeline::Init() {
    bool conv_exists = false;  // Used to identify whether a convolution layer is found.

    // Obtain the macro_batch_size_.
    if (this->direction_ == Direction::FORWARD) {
      CHECK_GT(end_layer_idx_, start_layer_idx_);
      for (int i = start_layer_idx_; i <= end_layer_idx_; ++ i) {
        // Find the largest parallel degree for all convolution layers in this pipeline.
        if (strcmp(this->net_->layers()[i]->type(), "Convolution") == 0) {
          Type ftype = this->net_->layers()[i]->forward_type();
          Type btype = this->net_->layers()[i]->backward_type();

          if (ftype == FLOAT) {
            if (btype == FLOAT) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<float, float>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<float, float>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetFWParallelDegree());
            } else if (btype == FLOAT16) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<float, float16>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<float, float16>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetFWParallelDegree());
            } else if (btype == DOUBLE) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<float, double>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<float, double>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetFWParallelDegree());
            } else {
              LOG(FATAL) << "Invalid type.";
            }
          } else if (ftype == FLOAT16) {
            if (btype == FLOAT) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<float16, float>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<float16, float>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetFWParallelDegree());
            } else if (btype == FLOAT16) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<float16, float16>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<float16, float16>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetFWParallelDegree());
            } else if (btype == DOUBLE) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<float16, double>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<float16, double>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetFWParallelDegree());
            } else {
              LOG(FATAL) << "Invalid btype.";
            }
          } else if (ftype == DOUBLE) {
            if (btype == FLOAT) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<double, float>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<double, float>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetFWParallelDegree());
            } else if (btype == FLOAT16) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<double, float16>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<double, float16>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetFWParallelDegree());
            } else if (btype == DOUBLE) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<double, double>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<double, double>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetFWParallelDegree());
            } else {
              LOG(FATAL) << "Invalid btype";
            }
          } else {
            LOG(FATAL) << "Invalid ftype.";
          }
        }
      }

      if (!conv_exists) {
        macro_batch_size_ = this->batch_size_ >> 1;
      }
    } else {
      CHECK_GT(start_layer_idx_, end_layer_idx_);
      for (int i = start_layer_idx_; i >= end_layer_idx_; -- i) {
        if (strcmp(this->net_->layers()[i]->type(), "Convolution") == 0) {
          Type ftype = this->net_->layers()[i]->layer_param().forward_type();
          Type btype = this->net_->layers()[i]->layer_param().backward_type();

          if (ftype == FLOAT) {
            if (btype == FLOAT) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<float, float>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<float, float>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetBPParallelDegree());
             } else if (btype == FLOAT16) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<float, float16>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<float, float16>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetBPParallelDegree());
            } else if (btype == DOUBLE) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<float, double>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<float, double>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetBPParallelDegree());
            } else {
              LOG(FATAL) << "Invalid type.";
            }
          } else if (ftype == FLOAT16) {
            if (btype == FLOAT) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<float16, float>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<float16, float>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetBPParallelDegree());
            } else if (btype == FLOAT16) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<float16, float16>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<float16, float16>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetBPParallelDegree());
            } else if (btype == DOUBLE) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<float16, double>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<float16, double>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetBPParallelDegree());
            } else {
              LOG(FATAL) << "Invalid btype.";
            }
          } else if (ftype == DOUBLE) {
            if (btype == FLOAT) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<double, float>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<double, float>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetBPParallelDegree());
            } else if (btype == FLOAT16) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<double, float16>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<double, float16>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetBPParallelDegree());
            } else if (btype == DOUBLE) {
              conv_exists = true;
              shared_ptr<ConvolutionLayer<double, double>> conv_layer =
                boost::static_pointer_cast<ConvolutionLayer<double, double>>(this->net_->layers()[i]);
              macro_batch_size_ = std::max(macro_batch_size_, conv_layer->GetBPParallelDegree());
            } else {
              LOG(FATAL) << "Invalid btype";
            }
          } else {
            LOG(FATAL) << "Invalid ftype.";
          }
        }
      }

      if (!conv_exists) {
        macro_batch_size_ = this->batch_size_ >> 1;
      }
    }

    InitStages();               // Used to intialize the pipeline stages.

    LOG(INFO) << "Pipeline initialzied: " << this->PipelineString();
  }

  // Splits [start_layer_idx_, end_layer_idx_] into pipeline stages: every
  // convolution layer becomes a stage of its own, the layers between two
  // convolutions are grouped into one stage, and finally the stages' input
  // and output queues are chained together.
  void Pipeline::InitStages() {
    std::vector<int> conv_idx;

    // Find all convolution layers.
    if (this->direction_ == Direction::FORWARD) {
      for (int i = start_layer_idx_; i <= end_layer_idx_; ++ i) {
        if (strcmp(this->net_->layers()[i]->type(), "Convolution") == 0) {
          conv_idx.push_back(i);
        }
      }
    } else {
      for (int i = start_layer_idx_; i >= end_layer_idx_; -- i) {
        if (strcmp(this->net_->layers()[i]->type(), "Convolution") == 0) {
          conv_idx.push_back(i);
        }
      }
    }

    // Initialize all pipeline stages.
    // NOTE(review): the boundary arithmetic below (conv_idx[0] - 1,
    // conv_idx.back() + 1, the < / > comparisons) assumes ascending layer
    // indices, i.e. Direction::FORWARD — confirm the BACKWARD case, where
    // conv_idx is collected in descending order.
    if (conv_idx.empty()) {
      // No convolution layer: split the span into two halves.
      auto mid_layer_idx = ((start_layer_idx_ + end_layer_idx_) >> 1);
      stages_.push_back(boost::make_shared<PipeStage>(net_, 0, start_layer_idx_, mid_layer_idx,
                                                      batch_size_, direction_));
      stages_.push_back(boost::make_shared<PipeStage>(net_, (mid_layer_idx - start_layer_idx_ + 1),
                                                      mid_layer_idx + 1, end_layer_idx_,
                                                      batch_size_, direction_));
    } else {
      // Leading non-convolution layers form the first stage.
      if (start_layer_idx_ < conv_idx[0]) {
        stages_.push_back(boost::make_shared<PipeStage>(net_, 0, start_layer_idx_, conv_idx[0] - 1,
                                                        batch_size_, direction_));
      }
      for (int i = 0; i < conv_idx.size(); ++ i) {
        // One stage per convolution layer.
        stages_.push_back(boost::make_shared<PipeStage>(net_, (conv_idx[i] - start_layer_idx_),
                                                        conv_idx[i], conv_idx[i],
                                                        batch_size_, direction_));
        if (i != (conv_idx.size() - 1)) {
          // Only build an intermediate stage when at least one layer lies
          // strictly between two consecutive convolution layers. (The old
          // check, conv_idx[i] != conv_idx[i + 1], was always true because
          // conv indices are distinct, so adjacent convolutions produced an
          // empty stage with start > end.)
          if (conv_idx[i] + 1 != conv_idx[i + 1]) {
            stages_.push_back(boost::make_shared<PipeStage>(net_, (conv_idx[i] + 1 - start_layer_idx_),
                                                            conv_idx[i] + 1, conv_idx[i + 1] - 1,
                                                            batch_size_, direction_));
          }
        }
      }
      // Trailing non-convolution layers form the last stage.
      if (end_layer_idx_ > conv_idx.back()) {
        stages_.push_back(boost::make_shared<PipeStage>(net_, (conv_idx.back() + 1 - start_layer_idx_),
                                                        conv_idx.back() + 1, end_layer_idx_,
                                                        batch_size_, direction_));
      }
    }

    // Chain the stages: stage s-1's output queue is stage s's input queue;
    // the pipeline's own queues cap the two ends.
    stages_[0]->stage_inputs_ = this->pipe_inputs_;
    for (int s = 1; s < stages_.size(); ++ s) {
      stages_[s]->stage_inputs_ = boost::make_shared<BlockingQueue<BatchBoundary_t>>();

      stages_[s - 1]->next_stage_ = stages_[s];
      stages_[s - 1]->stage_outputs_ = stages_[s]->stage_inputs_;
    }
    stages_.back()->next_stage_ = nullptr;
    stages_.back()->stage_outputs_ = this->pipe_outputs_;
  }

  void Pipeline::InitPipeInputs() {
    for (int i = 0; i < this->batch_size_; i += macro_batch_size_) {
      BatchBoundary_t temp;
      temp.start_idx_ = i;
      temp.end_idx_ = ((i + macro_batch_size_ - 1) >= batch_size_) ? (batch_size_ - 1) :
        (i + macro_batch_size_ - 1);
      pipe_inputs_->push(temp);
    }
  }
} // namespace caffe

#endif  // PIPELINE
