#ifndef SRC_HYPERCUBE_HYPERCUBE_HMMSQ
#define SRC_HYPERCUBE_HYPERCUBE_HMMSQ
#include "h_mat_mul.hpp"
#include "helib_atm_wrapper.hpp"
#include "log_helper.hpp"
#include <array>
#include <memory>

//* Caches the pre-encoded mask plaintexts used by the RotateAlign stage of
//* the square-matrix HMM algorithm on a 2-D hypercube (see `hhmm_sq`).
class hypercube2d_rotate_align_ch : public constant_handle {
public:
  // Requires a 2-D hypercube slot layout (dim 0 = rows, dim 1 = cols).
  hypercube2d_rotate_align_ch(const helib::EncryptedArray &ea)
      : constant_handle(ea) {
    ASSERT_PRINTF(ea_.dimension() == 2,
                  "Only support two-dimensional hypercube\n");
  }

  // Encodes all masks below; encoding time is reported via `collector`.
  void init(hmm_metric_collector *collector) override;

  // Row-`i` mask (mirrors the on-demand encoding in
  // `hhmmsq_constant_multipler::rotate_align_mask_left`).
  const helib::zzX &get_left(std::size_t i) const { return lefts_[i]; }
  // Column-`i` mask (mirrors `rotate_align_mask_right`).
  const helib::zzX &get_right(std::size_t i) const { return rights_[i]; }
  const std::vector<helib::zzX> &get_lefts() const { return lefts_; }
  const std::vector<helib::zzX> &get_rights() const { return rights_; }
  // The two complementary correction masks applied after RotateAlign
  // (mirrors `rotate_align_correct_mask`).
  const helib::zzX &get_correct_part_one() const { return correct_parts_[0]; }
  const helib::zzX &get_correct_part_two() const { return correct_parts_[1]; }

private:
  // Encoded row/column masks; filled by `init` — TODO confirm indexing
  // convention against the `init` definition (not visible in this header).
  std::vector<helib::zzX> lefts_;
  std::vector<helib::zzX> rights_;

  // Correction mask pair for RotateAlign; filled by `init`.
  std::array<helib::zzX, 2> correct_parts_;
};

//* Caches the "correction" mask plaintexts used by the ShiftCompute stage of
//* the square-matrix HMM algorithm on a 2-D hypercube (see `hhmm_sq`).
class hypercube2d_shift_compute_ch : public constant_handle {
public:
  // Requires a 2-D hypercube slot layout (dim 0 = rows, dim 1 = cols).
  hypercube2d_shift_compute_ch(const helib::EncryptedArray &ea)
      : constant_handle(ea) {
    ASSERT_PRINTF(ea_.dimension() == 2,
                  "Only support two-dimensional hypercube\n");
  }

  //* This version reuses encoded plaintexts in RotateAlign
  // `cols` are the per-column masks already encoded by the RotateAlign
  // handle (passed in by `hhmmsq_constant_multipler`'s constructor), so no
  // second full encoding pass is needed.
  void init(hmm_metric_collector *collector,
            const std::vector<helib::zzX> &cols);

  //! This version has been deprecated.
  //! The reason is in the function's internal comments
  // For every left-shift offset i in [1, h_row) builds a pair of masks:
  // part [0] keeps the columns that did not wrap around, part [1] keeps the
  // columns that did (see the slot diagram below). Entry 0 of
  // `correct_parts_` is allocated but never written (offset 0 needs no fix).
  void init(hmm_metric_collector *collector) override {
    // NOTE(review): static_cast of a non-null pointer never yields nullptr,
    // so this assert only guards against `collector == nullptr`; it cannot
    // detect a wrong dynamic type.
    hmmsq_collector *hmmsq = static_cast<hmmsq_collector *>(collector);
    ASSERT_PRINTF(hmmsq != nullptr,
                  "Failed to static_cast to `hmmsq_collector *`\n");

    const long h_row = ea_.sizeOfDimension(0);
    const long h_col = ea_.sizeOfDimension(1);
    // Never spawn more threads than there are offsets to process.
    const auto threads = std::min(threads_, static_cast<int>(h_row));

    //! Encoding in ShiftCompute (Correct) - Start
    correct_parts_.resize(h_row);
    // i: left shift offset
    // original           :  1   2  ... d-i d-i+1 d-i+2 ...
    // leftShift(i):=L    : i+1 i+2 ...  d    #     #   ... # # ...
    // rightShift(d-i):=R :  #   #  ...  #    1     2   ... i # ...
    // What we need       : i+1 i+2 ...  d    1     2   ... i # ...
    // slots1: select the first (dim - i) cols in `L` // [0, d-i-1]
    // slots2: Select col(d-i+1) to col(d) in `R`     // [d-i, d-1]

    //! The `#if 0` branch is the deprecated direct-encoding path, kept only
    //! to document why the cheaper path in the `#else` branch was adopted.
#if 0
#pragma omp parallel num_threads(threads)
    {
      auto tid = omp_get_thread_num();
#pragma omp for
      for (long i = 1; i < h_row; i++) {
        // i: left shift offset
        hmmsq->start_encoding_sc_raw2vec(tid); //! Raw 2 Vec - Start
        std::vector<long> slots1(ea_.size(), 0);
        std::vector<long> slots2(ea_.size(), 0);
        for (long ii = 0; ii < h_row; ii++) {
          for (long jj = 0; jj < h_row; jj++) {
            const long idx = ii * h_col + jj;
            jj < h_row - i ? slots1[idx] = 1l : slots2[idx] = 1l;
          }
        }
        hmmsq->stop_encoding_sc_raw2vec(tid); //! Raw 2 Vec - Completed

        hmmsq->start_encoding_sc_vec2ptxt(tid); //! Vec 2 Ptxt - Start
        //? why is encoding so slow here when m is large?
        //! Problems found on 2023.12.06:
        //! CRT_reconstruct is expensive when there are too many non-zero slots
        //! specifically, h_row^3 * \phi(m) times NTL::AddMod

        //! `helib::PtxtArray` workaround
        // correct_parts_[i][0] = std::make_shared<helib::PtxtArray>(ea_);
        // correct_parts_[i][1] = std::make_shared<helib::PtxtArray>(ea_);
        // correct_parts_[i][0]->load(slots1);
        // correct_parts_[i][1]->load(slots2);

        ea_.encode(correct_parts_[i][0], slots1);
        ea_.encode(correct_parts_[i][1], slots2);
        hmmsq->stop_encoding_sc_vec2ptxt(tid); //! Vec 2 Ptxt - Completed
      }
    }
#else
    //* we compute on "encoded" plaintexts to avoid encoding full data
    std::vector<NTL::zz_pX> col_i(h_row);
#pragma omp parallel num_threads(threads)
    {
      auto tid = omp_get_thread_num();

      //!! `zz_pPush` must have a copy in each thread
      //!! (NTL's zz_p modulus is thread-local state)
      NTL::zz_pPush push(ea_.getPAlgebra().getP());
#pragma omp for
      //* Step 1
      // Generate a constant polynomial corresponding to only column i as 1
      //* h_row^2 * \phi(m) times NTL::AddMod
      for (long i = 0; i < h_row; i++) {
        hmmsq->start_encoding_sc_raw2vec(tid); //! Raw 2 Vec - Start
        std::vector<long> slots(ea_.size(), 0);
        for (long j = 0; j < h_row; j++) {
          slots[j * h_col + i] = 1l;
        }
        hmmsq->stop_encoding_sc_raw2vec(tid); //! Raw 2 Vec - Completed

        hmmsq->start_encoding_sc_vec2ptxt(tid); //! Vec 2 Ptxt - Start
        helib::zzX col;
        ea_.encode(col, slots);
        helib::convert(col_i[i], col);
        hmmsq->stop_encoding_sc_vec2ptxt(tid); //! Vec 2 Ptxt - Completed
      }

      // The implicit barrier at the end of the first `omp for` guarantees
      // `col_i` is fully populated before Step 2 reads it.
#pragma omp for
      //* Step 2
      // Using "after encoded" plaintexts to construct targets
      //* h_row^2 * \phi(m) times NTL::AddMod
      for (long i = 1; i < h_row; i++) {
        hmmsq->start_encoding_sc_re_ptxt(tid); //! Re/Ptxt - Start
        // See https://libntl.org/doc/lzz_pX.cpp.html
        // zz_pX(); // initial value 0
        NTL::zz_pX L, R;
        for (long j = 0; j < h_row; j++) {
          j < h_row - i ? L += col_i[j] : R += col_i[j];
        }
        correct_parts_[i][0] = helib::balanced_zzX(L);
        correct_parts_[i][1] = helib::balanced_zzX(R);
        hmmsq->stop_encoding_sc_re_ptxt(tid); //! Re/Ptxt - Stop
      }
    }
#endif
    //! Encoding in ShiftCompute (Correct) - Completed
  }

  // Masks for left-shift offset `i`: [0] = non-wrapped part, [1] = wrapped.
  const helib::zzX &get_correct_part_one(std::size_t i) const {
    return correct_parts_[i][0];
  }
  const helib::zzX &get_correct_part_two(std::size_t i) const {
    return correct_parts_[i][1];
  }

private:
  //! See `encode` templates in HElib:EncryptedArray.cpp
  //! We find `encode` for `helib::zzX` calls only `tab.embedInSlots()`
  //! without additional conversion.
  //! But such conversion has almost no impact on performance....
  // std::vector<std::array<NTL::ZZX, 2>> correct_parts_;

  //! `helib::PtxtArray` workaround
  //! Using `helib::PtxtArray` will delay encoding into `EncodedPtxt`
  //! in `multiplyByConstant`, but such encoding is really required only once
  // clang-format off
  // // std::vector<std::array<std::shared_ptr<helib::PtxtArray>, 2>> correct_parts_;
  // clang-format on

  // Indexed by left-shift offset; entry 0 is allocated but unused.
  std::vector<std::array<helib::zzX, 2>> correct_parts_;
};

class hhmmsq_constant_multipler : public hmm_constant_multipler {
public:
  hhmmsq_constant_multipler(const helib::EncryptedArray &ea,
                            hmm_metric_collector *collector)
      : ea_(ea), racm_(new hypercube2d_rotate_align_ch(ea)),
        sccm_(new hypercube2d_shift_compute_ch(ea)) {
    collector_ = static_cast<hmmsq_collector *>(collector);
    ASSERT_PRINTF(collector_ != nullptr,
                  "Failed to static_cast to `hmmsq_collector *`\n");

    collector_->start_encoding_ra(); //! Encoding in RotateAlign - Start
    racm_->init(collector);
    collector_->stop_encoding_ra(); //! Encoding in RotateAlign - Completed

    collector_->start_encoding_sc(); //! Encoding in ShiftCompute - Start
    sccm_->init(collector, racm_->get_rights());
    collector_->stop_encoding_sc(); //! Encoding in ShiftCompute - Completed
  }

  ~hhmmsq_constant_multipler() override {
    delete racm_;
    delete sccm_;
  }

  void set_no_encoding_reuse(bool val) override { no_encoding_reuse_ = val; }

  void rotate_align_mask_left(helib::Ctxt &ctxt, std::size_t i) {
    if (no_encoding_reuse_) {
      collector_->start_no_encoding_reuse(omp_get_thread_num());
      const long h_nrow = ea_.sizeOfDimension(0);
      const long h_ncol = ea_.sizeOfDimension(1);
      helib::zzX poly;
      std::vector<long> slots(ea_.size(), 0);
      for (long j = 0; j < h_ncol; j++) {
        slots[i * h_ncol + j] = 1l;
      }
      ea_.encode(poly, slots);
      collector_->stop_no_encoding_reuse(omp_get_thread_num());
      ctxt.multByConstant(poly);
      return;
    }
    ctxt.multByConstant(racm_->get_left(i));
  }

  void rotate_align_mask_right(helib::Ctxt &ctxt, std::size_t i) {
    if (no_encoding_reuse_) {
      collector_->start_no_encoding_reuse(omp_get_thread_num());
      const long h_nrow = ea_.sizeOfDimension(0);
      const long h_ncol = ea_.sizeOfDimension(1);
      helib::zzX poly;
      std::vector<long> slots(ea_.size(), 0);
      for (long j = 0; j < h_nrow; j++) {
        slots[j * h_ncol + i] = 1l;
      }
      ea_.encode(poly, slots);
      collector_->stop_no_encoding_reuse(omp_get_thread_num());
      ctxt.multByConstant(poly);
      return;
    }
    ctxt.multByConstant(racm_->get_right(i));
  }

  void rotate_align_correct_mask(helib::Ctxt &c1, helib::Ctxt &c2) {
    if (no_encoding_reuse_) {
      collector_->start_no_encoding_reuse(omp_get_thread_num());
      const long h_nrow = ea_.sizeOfDimension(0);
      const long h_ncol = ea_.sizeOfDimension(1);
      std::vector<long> slots1(ea_.size(), 0);
      std::vector<long> slots2(ea_.size(), 0);
      for (long i = 0; i < h_nrow; i++) {
        for (long j = 0; j < h_ncol; j++) {
          if (i + j >= h_ncol) {
            slots1[i * h_ncol + j] = 1l;
          } else if (i + j < h_nrow) {
            slots2[i * h_ncol + j] = 1l;
          }
        }
      }
      helib::zzX poly1, poly2;
      ea_.encode(poly1, slots1);
      ea_.encode(poly2, slots2);
      collector_->stop_no_encoding_reuse(omp_get_thread_num());
      c1.multByConstant(poly1);
      c2.multByConstant(poly2);
      return;
    }
    c1.multByConstant(racm_->get_correct_part_one());
    c2.multByConstant(racm_->get_correct_part_two());
  }

  void shift_compute_correct_mask(helib::Ctxt &c1, helib::Ctxt &c2,
                                  std::size_t i) {
    if (no_encoding_reuse_) {
      collector_->start_no_encoding_reuse(omp_get_thread_num());
      const long h_nrow = ea_.sizeOfDimension(0);
      const long h_ncol = ea_.sizeOfDimension(1);
      std::vector<long> slots1(ea_.size(), 0);
      std::vector<long> slots2(ea_.size(), 0);
      for (long j = 0; j < h_nrow; j++) {
        for (long k = 0; k < h_nrow; k++) {
          j < h_nrow - i ? slots1[k * h_ncol + j] = 1l
                         : slots2[k * h_ncol + j] = 1l;
        }
      }
      helib::zzX poly1, poly2;
      ea_.encode(poly1, slots1);
      ea_.encode(poly2, slots2);
      collector_->stop_no_encoding_reuse(omp_get_thread_num());
      c1.multByConstant(poly1);
      c2.multByConstant(poly2);
      return;
    }
    c1.multByConstant(sccm_->get_correct_part_one(i));
    c2.multByConstant(sccm_->get_correct_part_two(i));
  }

private:
  const helib::EncryptedArray &ea_;
  hmmsq_collector *collector_ = nullptr;
  hypercube2d_rotate_align_ch *racm_ = nullptr;
  hypercube2d_shift_compute_ch *sccm_ = nullptr;
};

// * hhmm_sq
//   * h    : hypercube
//   * hmm  : homomorphic matrix multiplication
//   * sq   : square matrix
//   + bsgs : baby step/giant step algorithm
//   + mt   : multi-thread
void hhmm_sq(hmm_metric_collector *collector, hmm_constant_multipler *cm,
             const helib::EncryptedArray &ea, helib::Ctxt &AB,
             const helib::Ctxt &A, const helib::Ctxt &B);
void hhmm_sq_bsgs(hmm_metric_collector *collector, hmm_constant_multipler *cm,
                  const helib::EncryptedArray &ea, helib::Ctxt &AB,
                  const helib::Ctxt &A, const helib::Ctxt &B);

void hhmm_sq_mt(hmm_metric_collector *collector, hmm_constant_multipler *cm,
                int threads, const helib::EncryptedArray &ea, helib::Ctxt &AB,
                const helib::Ctxt &A, const helib::Ctxt &B);
void hhmm_sq_bsgs_mt(hmm_metric_collector *collector,
                     hmm_constant_multipler *cm, int threads,
                     const helib::EncryptedArray &ea, helib::Ctxt &AB,
                     const helib::Ctxt &A, const helib::Ctxt &B);
void hhmm_sq_bsgs_mt2(hmm_metric_collector *collector,
                      hmm_constant_multipler *cm, int threads,
                      const helib::EncryptedArray &ea, helib::Ctxt &AB,
                      const helib::Ctxt &A, const helib::Ctxt &B);

void hhmm_sq_rotate_align_A_batch(hmm_constant_multipler *cm,
                                  const helib::EncryptedArray &ea,
                                  int rot_pos_A, int rot_size_A,
                                  helib::Ctxt &Ap, const helib::Ctxt &A);
void hhmm_sq_rotate_align_B_batch(hmm_constant_multipler *cm,
                                  const helib::EncryptedArray &ea,
                                  int rot_pos_B, int rot_size_B,
                                  helib::Ctxt &Bp, const helib::Ctxt &B);
void hhmm_sq_shift_compute_A_batch(hmm_constant_multipler *cm,
                                   const helib::EncryptedArray &ea,
                                   int rot_pos_A, std::vector<helib::Ctxt> &Ais,
                                   const helib::Ctxt &A);
void hhmm_sq_shift_compute_B_batch(hmm_constant_multipler *cm,
                                   const helib::EncryptedArray &ea,
                                   int rot_pos_B, std::vector<helib::Ctxt> &Bis,
                                   const helib::Ctxt &B);

void hhmm_sq_rotate_align_A(hmm_constant_multipler *hmmsqcm,
                            const helib::EncryptedArray &ea, helib::Ctxt &Ap,
                            const helib::Ctxt &A);
void hhmm_sq_rotate_align_B(hmm_constant_multipler *hmmsqcm,
                            const helib::EncryptedArray &ea, helib::Ctxt &Bp,
                            const helib::Ctxt &B);
helib::Ctxt hhmm_sq_shift_compute_A(
    hmm_constant_multipler *cm, const helib::EncryptedArray &ea,
    const std::shared_ptr<helib::GeneralAutomorphPrecon_FULL> &hoisted_A,
    int r);

#endif /* SRC_HYPERCUBE_HYPERCUBE_HMMSQ */
