#ifndef SRC_MPI_CIPHER_3D
#define SRC_MPI_CIPHER_3D

#include "mpi/cipher_mpi_base.hpp"

/**
 * @brief   Common state and timing statistics for 3D (M x N x K process
 *          grid) homomorphic PGEMM implementations.
 * @details Derived classes (single- and multi-ciphertext variants below)
 *          implement the broadcast / local-cgemm / reduction pipeline; this
 *          base keeps the process-grid geometry, block sizes and metrics.
 * @tparam  Scheme      HE scheme tag (e.g. helib::BGV)
 * @tparam  ExecOutput  serialized ciphertext type produced by exec()
 *                      (std::string or std::vector<std::string>)
 */
template <typename Scheme, typename ExecOutput>
class cipher_3d : public cipher_mpi_base<Scheme, ExecOutput> {
protected:
  /** @brief    Illustrates `AXIS`
   * @details   Consider pgrid_index[0] = M-idx,
   *                     pgrid_index[1] = N-idx,
   *                     pgrid_index[2] = K-idx,
   * Fixed: N = 0
   * +----> K                              +----------+----------+
   * |                                     |          |          |
   * |     +----------+----------+         | P(0,1,0) | P(0,1,1) |
   * v     |          |          |         |          |          |
   * M     | P(0,0,0) | P(0,0,1) |         +----------+----------+
   *       |          |          |         |          |          |
   *       +----------+----------+    N    | P(0,0,0) | P(0,0,1) |
   *       |          |          |    ^    |          |          |
   *       | P(1,0,0) | P(1,0,1) |    |    +----------+----------+
   *       |          |          |    |
   *       +---------------------+    +----> K
   *                                  Fixed: M = 0
   */
  // Axes of the 3D process grid; AXIS_NUM is the dimension count (3).
  enum AXIS { M, N, K, AXIS_NUM };

  /// Timing metrics for the 3D algorithm, extending the base-class metrics.
  struct metric : public cipher_mpi_base<Scheme, ExecOutput>::metric {
    // * exec_time
    //   * broadcast
    //   * local cgemm
    //   * reduction
    double cbcast_time = 0.0;      // ciphertext broadcast
    double serialize_time = 0.0;   // ciphertext (de)serialization
    double cgemm_time = 0.0;       // local homomorphic GEMM, total
    double cgemm_ra_time = 0.0;
    double creduce_ra_time = 0.0;
    double cgemm_shift_time = 0.0;
    double cgather_sc_time = 0.0;
    double cgemm_compute_time = 0.0;
    double creduce_time = 0.0;     // ciphertext reduction

    /// Reset all 3D timers, then the inherited base timers.
    void clear() override {
      cbcast_time = 0.0;
      serialize_time = 0.0;
      cgemm_time = 0.0;
      cgemm_ra_time = 0.0;
      creduce_ra_time = 0.0;
      cgemm_shift_time = 0.0;
      cgather_sc_time = 0.0;
      cgemm_compute_time = 0.0;
      creduce_time = 0.0;
      cipher_mpi_base<Scheme, ExecOutput>::metric::clear();
    }
  };

private:
  cipher_3d() = delete; // must be constructed with grid/matrix parameters

protected:
  /// Maps a process-grid index (M, N, K) to an MPI rank.  A null argument
  /// presumably means "use this process's own pgrid_index_" -- TODO confirm
  /// against the out-of-line definition.
  int index_to_rank_exec(const int pgrid_index[AXIS_NUM] = nullptr);

public:
  /**
   * @param root   rank that owns the input/output matrices
   * @param comm   communicator spanning the whole process grid
   * @param m,n,k  global matrix dimensions (C[m x n] = A[m x k] * B[k x n])
   * @param fhe4d  enable the 4D execution path
   * @param mp,np,kp  requested grid extents per axis (0 presumably means
   *                  "choose automatically" -- verify against the definition)
   * @param opt    implementation-specific option flags
   */
  cipher_3d(int root, MPI_Comm comm, long m, long n, long k, bool fhe4d = false,
            int mp = 0, int np = 0, int kp = 0, int opt = 0);

  // Process-grid extent along each axis.
  long mp() const { return pgrid_size_[M]; }
  long np() const { return pgrid_size_[N]; }
  long kp() const { return pgrid_size_[K]; }

  long bvm() const { return bv_[M]; }
  long bvn() const { return bv_[N]; }
  long bvk() const { return bv_[K]; }

  // Local block extent along each axis.
  long block_m() const { return blk_size_[M]; }
  long block_n() const { return blk_size_[N]; }
  long block_k() const { return blk_size_[K]; }

  void report(std::ostream &out) const override;

  // NOTE(review): resets only the metrics, not the grid/geometry state.
  void clear() override { stat_.clear(); }

protected:
  int np_ = 0; // presumably the process count of `comm` -- confirm in .cpp
  int rp_ = 0; // presumably this process's rank -- confirm in .cpp
  long dims_[AXIS_NUM] = {0, 0, 0};     // global matrix dimensions
  long bv_[AXIS_NUM] = {0, 0, 0};
  long blk_size_[AXIS_NUM] = {0, 0, 0}; // local block sizes

  metric stat_;

  // Zero-initialized for consistency with the long-arrays above; the
  // constructor is expected to assign the real grid geometry.
  int pgrid_size_[AXIS_NUM] = {0, 0, 0};
  int pgrid_index_[AXIS_NUM] = {0, 0, 0};
  // Sub-communicators for the pipeline; MPI_COMM_NULL until the constructor
  // sets them up.
  MPI_Comm comm_bcast_A_ = MPI_COMM_NULL;
  MPI_Comm comm_bcast_B_ = MPI_COMM_NULL;
  MPI_Comm comm_reduce_C_ = MPI_COMM_NULL;
};

//* explicit template instantiation (not specialization)
// for single-ciphertext version
template class cipher_3d<helib::BGV, std::string>;
// for multi-ciphertext version
template class cipher_3d<helib::BGV, std::vector<std::string>>;

// * shhmm_3d
//   * s      : single-ciphertext
//   * h      : hypercube packing
//   * hmm    : homomorphic matrix multiplication
//   * 3d     : 3D PGEMM algorithm
// * only BGV supports hypercube packing
class shhmm_3d : public cipher_3d<helib::BGV, std::string> {
private:
  // Point-to-point distribution of one operand matrix `mat` (grid axes X/Y)
  // from `root` into the serialized ciphertext buffer `buf` of `rank` --
  // TODO confirm the axis roles against the out-of-line definition.
  template <typename T>
  void init_data_p2p(int root, int rank, MPI_Comm comm, std::string &buf,
                     const T *mat, AXIS X, AXIS Y);

public:
  // See cipher_3d's constructor for the grid/matrix parameters; `params` and
  // `status` configure the HE scheme and the HMM engine.
  shhmm_3d(const params<helib::BGV> &params,
           const hmm_status<shmm_engine> &status, int root, MPI_Comm comm,
           long m, long n, long k, bool fhe4d = false, int mp = 0, int np = 0,
           int kp = 0);

  // Encrypt and distribute the plaintext operands A and B across the grid.
  void init_data(const int *A, const int *B) override;

  // Gather the partial C ciphertexts and decrypt into `result`.
  void collect_data(const std::string &partial_C,
                    std::vector<NTL::mat_ZZ> &result,
                    bool local = false) override;

  // Run the 3D PGEMM; `fhe4d` selects exec_4d over exec_base -- TODO confirm
  // the dispatch against the out-of-line definition.
  void exec(std::string &partial_C, bool fhe4d = false) override;
  void exec_base(std::string &partial_C);
  void exec_4d(std::string &partial_C);

  //* hypercube HMM engine: hmm_status<hypercube_hmme>
  // NOTE(review): unchecked downcast -- hmm_engine_ must actually point to a
  // hypercube_hmme, presumably guaranteed by the constructor; confirm.
  hmm_status<hypercube_hmme> *hmme_status() override {
    return static_cast<hypercube_hmme *>(hmm_engine_)->status();
  }
};

// * mhhmm_3d
//   * m      : multi-ciphertext
//   * h      : hypercube packing
//   * hmm    : homomorphic matrix multiplication
//   * 3d     : 3D PGEMM algorithm
// * only BGV supports hypercube packing
class mhhmm_3d : public cipher_3d<helib::BGV, std::vector<std::string>> {
private:
  /// Blocked ciphertext matrix: mat_ctxt[i][j] is block (i, j).
  using mat_ctxt = std::vector<std::vector<helib::Ctxt>>;

  // Point-to-point distribution of one operand matrix `mat` (grid axes X/Y)
  // from `root` into the per-ciphertext buffers `buf` of `rank` -- TODO
  // confirm the axis roles against the out-of-line definition.
  template <typename T>
  void init_data_p2p(int root, int rank, MPI_Comm comm,
                     std::vector<std::string> &buf, const T *mat, AXIS X,
                     AXIS Y);

  // Standard blocked C += A * B over the whole local block grid.
  void stdblk(std::vector<helib::Ctxt> &C,
              const std::vector<std::vector<helib::Ctxt>> &A,
              const std::vector<std::vector<helib::Ctxt>> &B);
  // Standard blocked multiply restricted to the block ranges given by
  // (m1,m2) x (k1,k2) x (n1,n2) -- TODO confirm open/closed convention.
  void stdblk(std::vector<helib::Ctxt> &C,
              const std::vector<std::vector<helib::Ctxt>> &A,
              const std::vector<std::vector<helib::Ctxt>> &B, int m1, int m2,
              int k1, int k2, int n1, int n2);
  // Strassen-style multiplication over the same block ranges; *_seq is the
  // sequential variant and *_r the recursive helper.
  void strassen(std::vector<helib::Ctxt> &C, const mat_ctxt &A,
                const mat_ctxt &B, int m1, int m2, int k1, int k2, int n1,
                int n2);
  void strassen_seq(std::vector<helib::Ctxt> &C, const mat_ctxt &A,
                    const mat_ctxt &B, int m1, int m2, int k1, int k2, int n1,
                    int n2);
  void strassen_r(std::vector<helib::Ctxt> &C, const mat_ctxt &A,
                  const mat_ctxt &B, int m1, int m2, int k1, int k2, int n1,
                  int n2);

public:
  // See cipher_3d's constructor for the grid/matrix parameters; `opt`
  // presumably selects the blocked/Strassen variant -- confirm in .cpp.
  mhhmm_3d(const params<helib::BGV> &params,
           const hmm_status<shmm_engine> &status, int root, MPI_Comm comm,
           long m, long n, long k, long opt, bool fhe4d = false, int mp = 0,
           int np = 0, int kp = 0);

  // Encrypt and distribute the plaintext operands A and B across the grid.
  void init_data(const int *A, const int *B) override;

  // Gather the partial C ciphertexts and decrypt into `result`.
  void collect_data(const std::vector<std::string> &partial_C,
                    std::vector<NTL::mat_ZZ> &result,
                    bool local = false) override;

  // Run the 3D PGEMM; `fhe4d` selects the 4D execution path -- TODO confirm
  // the dispatch against the out-of-line definition.
  void exec(std::vector<std::string> &partial_C, bool fhe4d = false) override;
  void exec_base(std::vector<std::string> &partial_C);
  void exec_4d(std::vector<std::string> &partial_C);
  // Declaration parameter renamed from the "lagre_mem" typo; declaration-only
  // names are not part of the interface, so callers and the out-of-line
  // definitions are unaffected.
  void exec_4d_shared(std::vector<std::string> &partial_C,
                      bool large_mem = false);
  void _exec_4d_shared(std::vector<std::string> &partial_C, bool large_mem);

  //* hypercube HMM engine: hmm_status<hypercube_hmme>
  // NOTE(review): unchecked downcast -- hmm_engine_ must actually point to a
  // hypercube_hmme, presumably guaranteed by the constructor; confirm.
  hmm_status<hypercube_hmme> *hmme_status() override {
    return static_cast<hypercube_hmme *>(hmm_engine_)->status();
  }

  // Whether nested (recursive) block decomposition is enabled.
  bool nest() const { return nested_; }
  void set_nest(bool val) { nested_ = val; }

  // Per-HMM-block extent along each axis.
  long mbb() const { return hmm_szie_[M]; }
  long nbb() const { return hmm_szie_[N]; }
  long kbb() const { return hmm_szie_[K]; }

  // Number of HMM blocks along each axis.
  long m_nblk() const { return hmm_grid_size_[M]; }
  long n_nblk() const { return hmm_grid_size_[N]; }
  long k_nblk() const { return hmm_grid_size_[K]; }

private:
  bool nested_ = true;
  // Zero-initialized for consistency with the base-class arrays; set by the
  // constructor / exec paths.
  long hmm_grid_size_[AXIS_NUM] = {0, 0, 0};
  // NOTE(review): "hmm_szie_" is a typo for "hmm_size_", but the member is
  // referenced by out-of-line definitions, so it is kept; rename together
  // with the .cpp when convenient.
  long hmm_szie_[AXIS_NUM] = {0, 0, 0};
};

#endif /* SRC_MPI_CIPHER_3D */
