#include <ATen/ATen.h>
#include <algorithm>
#include <cmath>
#include <immintrin.h>
#include <iostream>
#include <limits>
#include <omp.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <torch/extension.h>
#include <vector>

// ISPC-generated header (already enabled; comment these two lines out if not
// building with ISPC)
#include "module_ispc.h"
using namespace ispc;

// ------------------------------------ //
// 	WARM-UP: ACCESSING TENSORS      //
// ------------------------------------ //

// Step #1: Understand Read/Write Accessors for a 2D Tensor
// Reads element (x, y) of a row-major 2D tensor stored as a flat vector.
// sizeX is the length of one row (the column count), not the number of rows.
inline float twoDimRead(std::vector<float> &tensor, int &x, int &y,
                        const int &sizeX) {
  const int flat = x * sizeX + y; // row-major linearization
  return tensor[flat];
}

// Writes val into element (x, y) of a row-major 2D tensor stored as a flat
// vector. sizeX is the length of one row (the column count).
inline void twoDimWrite(std::vector<float> &tensor, int &x, int &y,
                        const int &sizeX, float &val) {
  const int flat = x * sizeX + y; // row-major linearization
  tensor[flat] = val;
}

// Step #2: Implement Read/Write Accessors for a 4D Tensor
// Reads element (x, y, z, b) of a row-major 4D tensor stored as a flat
// vector. sizeX, sizeY, sizeZ are the extents of the 2nd, 3rd and 4th
// dimensions respectively (the 1st dimension's extent is not needed).
inline float fourDimRead(std::vector<float> &tensor, int &x, int &y, int &z,
                         int &b, const int &sizeX, const int &sizeY,
                         const int &sizeZ) {
  // Horner-style linearization: (((x)*sizeX + y)*sizeY + z)*sizeZ + b
  int idx = x;
  idx = idx * sizeX + y;
  idx = idx * sizeY + z;
  idx = idx * sizeZ + b;
  return tensor[idx];
}

// Writes val into element (x, y, z, b) of a row-major 4D tensor stored as a
// flat vector. sizeX, sizeY, sizeZ are the extents of the 2nd, 3rd and 4th
// dimensions respectively.
inline void fourDimWrite(std::vector<float> &tensor, int &x, int &y, int &z,
                         int &b, const int &sizeX, const int &sizeY,
                         const int &sizeZ, float &val) {
  // Horner-style linearization: (((x)*sizeX + y)*sizeY + z)*sizeZ + b
  int idx = x;
  idx = idx * sizeX + y;
  idx = idx * sizeY + z;
  idx = idx * sizeZ + b;
  tensor[idx] = val;
}

// DO NOT EDIT THIS FUNCTION //
std::vector<float> formatTensor(torch::Tensor tensor) {
  // Copies a tensor's elements into a flat std::vector<float> so the
  // accessors above can index it with plain integer arithmetic.
  tensor = tensor.flatten();
  // Ensure a dense layout before reading raw memory via data_ptr.
  tensor = tensor.contiguous();
  std::vector<float> vec(tensor.data_ptr<float>(),
                         tensor.data_ptr<float>() + tensor.numel());
  return vec;
}

/* Programming Your Attention Modules.
 *
 * You are given Q, K, and V Tensors as inputs that are formatted as vectors. We
 * have also created O and QK^t Tensors that are formatted as vectors. After you
 * have implemented your accessors in the Warm-Up you should be able to
 * read/write to these tensors via the read/write functions above.
 *
 * You are also given 4 integers as parameters: B, H, N, d:
 *
 * B (Batch Size) - The number of samples for your attention layer. Think of it
 * this way - if I asked my dnn a question and it output 5 different answers it
 * had a batch size of 5. These samples are independent of each other and thus
 * can be parallelized.
 *
 * H (Number of Heads) - Each head runs on its own set of Q, K, V matrices.
 * This effectively allows each head to run the same attention algorithm, but
 * with different hyperparameters, so each head has its own definition of what
 * relevance is when looking at a token. These heads can operate independently
 * of one another and thus can be parallelized.
 *
 * N (Sequence Length) - The number of tokens. You may think of this as the
 * number of words in a sample.
 *
 * d (Embedding Dimensionality) - The number of features each token encodes per
 * attention head. Let's say I encoded a word using the following: (length,
 * number of vowels, has a capital letter). The embedding dimensionality would
 * be 3.
 * */

// ---------------------------------------------------------- //
//                  PART 1: NAIVE ATTENTION                   //
// ---------------------------------------------------------- //

// When true, the softmax steps subtract each row's maximum before
// exponentiating (numerically stable "safe softmax"); without it, exp() can
// overflow for large scores.
constexpr bool SAFE_SOFTMAX = true;
torch::Tensor myNaiveAttention(torch::Tensor QTensor, torch::Tensor KTensor,
                               torch::Tensor VTensor, torch::Tensor QK_tTensor,
                               int B, int H, int N, int d) {
  // Naive attention: for every (batch, head) pair, compute the full N x N
  // score matrix Q * K^t, apply a row-wise softmax in place, then multiply
  // by V to produce O. Runs in O(B * H * (N^2 * d)) time.

  // Q, K, V are passed in with Shape: (B, H, N, d)
  // QK^t Intermediate Tensor has Shape (N, N)

  // Make O Tensor with Shape (B, H, N, d)
  at::Tensor OTensor = at::zeros({B, H, N, d}, at::kFloat);

  // Format O, Q, K, and V tensors into 4D vectors
  std::vector<float> O = formatTensor(OTensor);
  std::vector<float> Q = formatTensor(QTensor);
  std::vector<float> K = formatTensor(KTensor);
  std::vector<float> V = formatTensor(VTensor);

  // Format QK_t Tensor into a 2D vector.
  std::vector<float> QK_t = formatTensor(QK_tTensor);

  // batch
  for (int b = 0; b < B; b++) {
    // head
    for (int h = 0; h < H; h++) {

      // step 1: QK_t[i][j] = dot(Q row i, K row j). Reading K row j directly
      // (instead of column j) is what computes Q * K^t.
      for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
          float sum = 0.f;
          for (int k = 0; k < d; k++) {
            sum += fourDimRead(Q, b, h, i, k, H, N, d) *
                   fourDimRead(K, b, h, j, k, H, N, d);
          }
          twoDimWrite(QK_t, i, j, N, sum);
        }
      }

      // step 2: row-wise softmax of QK_t, in place.
      std::vector<float> row(N, 0.f);
      for (int i = 0; i < N; i++) {
        float expo_sum = 0.f;
        float max_val = 0.f;
        if (SAFE_SOFTMAX) {
          // BUGFIX: the previous code wrote
          //   if (float sample = twoDimRead(...) > max_val)
          // which, by precedence, stored the *comparison result* (0 or 1) in
          // sample, and seeded the max with numeric_limits::min() — the
          // smallest *positive* float — instead of lowest(). Both corrected.
          max_val = std::numeric_limits<float>::lowest();
          for (int j = 0; j < N; j++) {
            max_val = std::max(max_val, twoDimRead(QK_t, i, j, N));
          }
        }
        for (int j = 0; j < N; j++) {
          // Subtracting the row max prevents exp() overflow and does not
          // change the softmax result (shift invariance).
          float expo = std::exp(twoDimRead(QK_t, i, j, N) - max_val);
          expo_sum += expo;
          row[j] = expo;
        }
        for (int k = 0; k < N; k++) {
          float val = row[k] / expo_sum;
          twoDimWrite(QK_t, i, k, N, val);
        }
      }

      // step 3: O[i][j] = sum_k softmax(QK_t)[i][k] * V[k][j]
      for (int i = 0; i < N; i++) {
        for (int j = 0; j < d; j++) {
          float sum = 0.f;
          for (int k = 0; k < N; k++) {
            sum +=
                twoDimRead(QK_t, i, k, N) * fourDimRead(V, b, h, k, j, H, N, d);
          }
          fourDimWrite(O, b, h, i, j, H, N, d, sum);
        }
      }
    }
  }
  // DO NOT EDIT THIS RETURN STATEMENT //
  // It formats your C++ Vector O back into a Tensor of Shape (B, H, N, d) and
  // returns it //
  return torch::from_blob(O.data(), {B, H, N, d},
                          torch::TensorOptions().dtype(torch::kFloat32))
      .clone();
}

// ---------------------------------------------------------- //
//     PART 2: BLOCKED MATRIX MULTIPLY AND UNFUSED SOFTMAX    //
// ---------------------------------------------------------- //

torch::Tensor myUnfusedAttentionBlocked(torch::Tensor QTensor,
                                        torch::Tensor KTensor,
                                        torch::Tensor VTensor,
                                        torch::Tensor QK_tTensor, int B, int H,
                                        int N, int d) {
  // Same algorithm as myNaiveAttention, but both matrix multiplies are
  // cache-blocked: each (block x block) output tile is accumulated across
  // tiles of the reduction dimension to improve cache reuse.

  // Q, K, V are passed in with Shape: (B, H, N, d)
  // QK^t Intermediate Tensor has Shape (N, N)

  // Make O Tensor with Shape (B, H, N, d)
  at::Tensor OTensor = at::zeros({B, H, N, d}, at::kFloat);

  // Format O, Q, K, and V tensors into 4D vectors
  std::vector<float> O = formatTensor(OTensor);
  std::vector<float> Q = formatTensor(QTensor);
  std::vector<float> K = formatTensor(KTensor);
  std::vector<float> V = formatTensor(VTensor);

  // Format QK_t Tensor into a 2D vector.
  std::vector<float> QK_t = formatTensor(QK_tTensor);

  const int block = 8; // tile edge length

  // batch
  for (int b = 0; b < B; b++) {
    // head
    for (int h = 0; h < H; h++) {

      // The blocked multiply accumulates into QK_t, so the scratch buffer
      // must be zeroed before every (b, h) pair.
      std::fill(QK_t.begin(), QK_t.end(), 0.f);

      // step 1: blocked Q * K^t (bounds clamped for the ragged last tile)
      for (int ii = 0; ii < N; ii += block) {
        int i_bound = std::min(ii + block, N);
        for (int jj = 0; jj < N; jj += block) {
          int j_bound = std::min(jj + block, N);
          for (int kk = 0; kk < d; kk += block) {
            int k_bound = std::min(kk + block, d);
            for (int i = ii; i < i_bound; i++) {
              for (int j = jj; j < j_bound; j++) {
                float sum = twoDimRead(QK_t, i, j, N);
                for (int k = kk; k < k_bound; k++) {
                  sum += fourDimRead(Q, b, h, i, k, H, N, d) *
                         fourDimRead(K, b, h, j, k, H, N, d);
                }
                twoDimWrite(QK_t, i, j, N, sum);
              }
            }
          }
        }
      }

      // step 2: row-wise softmax of QK_t, in place.
      std::vector<float> row(N, 0.f);
      for (int i = 0; i < N; i++) {
        float expo_sum = 0.f;
        float max_val = 0.f;
        if (SAFE_SOFTMAX) {
          // BUGFIX: `if (float sample = read(...) > max_val)` stored the
          // comparison result (0/1) in sample, and the max was seeded with
          // numeric_limits::min() (smallest positive float) instead of
          // lowest(). Both are corrected here.
          max_val = std::numeric_limits<float>::lowest();
          for (int j = 0; j < N; j++) {
            max_val = std::max(max_val, twoDimRead(QK_t, i, j, N));
          }
        }

        for (int j = 0; j < N; j++) {
          float expo = std::exp(twoDimRead(QK_t, i, j, N) - max_val);
          expo_sum += expo;
          row[j] = expo;
        }
        for (int k = 0; k < N; k++) {
          float val = row[k] / expo_sum;
          twoDimWrite(QK_t, i, k, N, val);
        }
      }

      // step 3: blocked softmax(QK_t) * V. O was zero-initialized and each
      // (b, h) slice is written exactly once, so accumulating into it is safe.
      for (int ii = 0; ii < N; ii += block) {
        int i_bound = std::min(ii + block, N);
        for (int jj = 0; jj < d; jj += block) {
          int j_bound = std::min(jj + block, d);
          for (int kk = 0; kk < N; kk += block) {
            int k_bound = std::min(kk + block, N);
            for (int i = ii; i < i_bound; i++) {
              for (int j = jj; j < j_bound; j++) {
                float sum = fourDimRead(O, b, h, i, j, H, N, d);
                for (int k = kk; k < k_bound; k++) {
                  sum += twoDimRead(QK_t, i, k, N) *
                         fourDimRead(V, b, h, k, j, H, N, d);
                }
                fourDimWrite(O, b, h, i, j, H, N, d, sum);
              }
            }
          }
        }
      }
    }
  }
  // DO NOT EDIT THIS RETURN STATEMENT //
  // It formats your C++ Vector O back into a Tensor of Shape (B, H, N, d) and
  // returns it //
  return torch::from_blob(O.data(), {B, H, N, d},
                          torch::TensorOptions().dtype(torch::kFloat32))
      .clone();
}

// ---------------------------------------------------------- //
//                 PART 3: FUSED ATTENTION     	              //
// ---------------------------------------------------------- //

torch::Tensor myFusedAttention(torch::Tensor QTensor, torch::Tensor KTensor,
                               torch::Tensor VTensor, torch::Tensor temp, int B,
                               int H, int N, int d) {
  // Fused attention: processes one query row at a time — score row, softmax,
  // and the V multiply are fused — so only an N-length row buffer (never the
  // full N x N matrix) is materialized. (b, h, i) iterations are independent
  // and parallelized with OpenMP.

  // Q, K, V are passed in with Shape: (B, H, N, d)

  // Make O Tensor with Shape (B, H, N, d)
  // and O Row Tensor with Shape (N)
  at::Tensor OTensor = at::zeros({B, H, N, d}, at::kFloat);
  at::Tensor ORowTensor = at::zeros({N}, at::kFloat);

  // Format O, Q, K, and V tensors into 4D vectors
  std::vector<float> O = formatTensor(OTensor);
  std::vector<float> Q = formatTensor(QTensor);
  std::vector<float> K = formatTensor(KTensor);
  std::vector<float> V = formatTensor(VTensor);

  // Format ORow Tensor into a 1D vector
  //  You can simply access this as ORow[i]
  std::vector<float> ORow = formatTensor(ORowTensor);
  const int block = 8; // tile edge length for the blocked inner loops

#pragma omp parallel for collapse(3)
  // loop over batch
  for (int b = 0; b < B; b++) {

    // loop over heads
    for (int h = 0; h < H; h++) {

      for (int i = 0; i < N; i++) {

        // ORow is re-derived inside so each OpenMP thread gets its own
        // private slice of `temp` (indexed by thread id) as a row buffer.
        at::Tensor ORowTensor = temp.index({torch::indexing::Slice(
            omp_get_thread_num(), torch::indexing::None)});
        std::vector<float> ORow = formatTensor(ORowTensor);
        std::fill(ORow.begin(), ORow.end(), 0.f);

        // step 1: ORow[j] = dot(Q row i, K row j), blocked over j and k.
        // BUGFIX: the k-blocking loop previously ran `kk < N`; the reduction
        // is over the embedding dimension, so the bound is d. (The old code
        // produced correct results because k_bound was clamped to d, but it
        // wasted N-d worth of empty tile iterations.)
        for (int jj = 0; jj < N; jj += block) {
          int j_bound = std::min(jj + block, N);
          for (int kk = 0; kk < d; kk += block) {
            int k_bound = std::min(kk + block, d);
            for (int j = jj; j < j_bound; j++) {
              float sum = ORow[j];
              for (int k = kk; k < k_bound; k++) {
                sum += fourDimRead(Q, b, h, i, k, H, N, d) *
                       fourDimRead(K, b, h, j, k, H, N, d);
              }
              ORow[j] = sum;
            }
          }
        }

        // step 2: softmax over ORow, in place.
        float expo_sum = 0.f;
        float max_val = 0.f;
        if (SAFE_SOFTMAX) {
          // BUGFIX: `if (float sample = ORow[j] > max_val)` stored the
          // comparison result (0/1) in sample, and the max was seeded with
          // numeric_limits::min() (smallest positive float) instead of
          // lowest(). Both are corrected here.
          max_val = std::numeric_limits<float>::lowest();
          for (int j = 0; j < N; j++) {
            max_val = std::max(max_val, ORow[j]);
          }
        }
        for (int j = 0; j < N; j++) {
          expo_sum += std::exp(ORow[j] - max_val);
        }
        for (int j = 0; j < N; j++) {
          ORow[j] = std::exp(ORow[j] - max_val) / expo_sum;
        }

        // step 3: O row i += ORow * V, blocked. O starts zeroed and is
        // accumulated across kk tiles.
        for (int jj = 0; jj < d; jj += block) {
          int j_bound = std::min(jj + block, d);
          for (int kk = 0; kk < N; kk += block) {
            int k_bound = std::min(kk + block, N);
            for (int j = jj; j < j_bound; j++) {
              float sum = fourDimRead(O, b, h, i, j, H, N, d);
              for (int k = kk; k < k_bound; k++) {
                sum += fourDimRead(V, b, h, k, j, H, N, d) * ORow[k];
              }
              fourDimWrite(O, b, h, i, j, H, N, d, sum);
            }
          }
        }
      }
    }
  }

  // DO NOT EDIT THIS RETURN STATEMENT //
  // It formats your C++ Vector O back into a Tensor of Shape (B, H, N, d) and
  // returns it //
  return torch::from_blob(O.data(), {B, H, N, d},
                          torch::TensorOptions().dtype(torch::kFloat32))
      .clone();
}

// ---------------------------------------------------------- //
//                PART 4: FLASH ATTENTION 		      //
// ---------------------------------------------------------- //

torch::Tensor myFlashAttention(torch::Tensor QTensor, torch::Tensor KTensor,
                               torch::Tensor VTensor, torch::Tensor QiTensor,
                               torch::Tensor KjTensor, torch::Tensor VjTensor,
                               torch::Tensor SijTensor, torch::Tensor PijTensor,
                               torch::Tensor PVTensor, torch::Tensor OiTensor,
                               torch::Tensor LTensor, torch::Tensor LiTensor,
                               torch::Tensor LijTensor,
                               torch::Tensor LnewTensor, int Bc, int Br, int B,
                               int H, int N, int d) {
  // Flash-attention style tiling: K/V are streamed in blocks of Bc rows and
  // Q/O in blocks of Br rows, so the full N x N score matrix is never
  // materialized. A running per-row sum of exponentials (l) lets previously
  // written O rows be rescaled as each new K/V block arrives.
  //
  // NOTE(review): this variant exponentiates Sij directly — there is no
  // running row-max subtraction — so exp() can overflow for large scores;
  // confirm inputs are scaled appropriately.

  // Q, K, V are passed in with Shape: (B, H, N, d)
  // Sij, Pij are passed in with Shape: (Br, Bc)
  // Kj, Vj are passed in with Shape: (Bc, d)
  // Qi, Oi, and PV  are passed in with Shape: (Br, d)
  // L in passed in with Shape: (N)
  // Li, Lij, and Lnew are passed in with shape (Br)

  // Make O Tensor with Shape (B, H, N, d)
  at::Tensor OTensor = at::zeros({B, H, N, d}, at::kFloat);

  // Format All Tensors into Vectors
  std::vector<float> O = formatTensor(OTensor);
  std::vector<float> Q = formatTensor(QTensor);
  std::vector<float> K = formatTensor(KTensor);
  std::vector<float> V = formatTensor(VTensor);
  std::vector<float> Sij = formatTensor(SijTensor);
  std::vector<float> Pij = formatTensor(PijTensor);
  std::vector<float> Kj = formatTensor(KjTensor);
  std::vector<float> Vj = formatTensor(VjTensor);
  std::vector<float> Qi = formatTensor(QiTensor);
  std::vector<float> Oi = formatTensor(OiTensor);
  std::vector<float> l = formatTensor(LTensor);
  std::vector<float> PV = formatTensor(PVTensor);
  std::vector<float> li = formatTensor(LiTensor);
  std::vector<float> lij = formatTensor(LijTensor);
  std::vector<float> lnew = formatTensor(LnewTensor);

  // -------- YOUR CODE HERE  -------- //
  for (int b = 0; b < B; b++) {
    for (int h = 0; h < H; h++) {
      // Flat offset of the (b, h) slice inside the row-major (B, H, N, d)
      // layout. NOTE(review): int arithmetic — could overflow for very large
      // B*H*N*d; confirm expected tensor sizes.
      int base = b * H * N * d + h * N * d;
      // Reset the running row-sum accumulator for this (batch, head) pair.
      std::fill(l.begin(), l.end(), 0.f);

      // Outer loop: stream K/V in blocks of Bc rows.
      for (int jj = 0; jj < N; jj += Bc) {
        // Actual rows in this K/V block (smaller at the ragged edge).
        uint j_block = std::min(Bc, N - jj);

        // Copy this block's K and V rows into the Kj/Vj scratch buffers.
        for (int j = 0; j < j_block; j++) {
          for (int k = 0; k < d; k++) {
            Kj[j * d + k] = K[base + (jj + j) * d + k];
            Vj[j * d + k] = V[base + (jj + j) * d + k];
          }
        }

        // Inner loop: visit every Q/O block of Br rows against this K/V block.
        for (int ii = 0; ii < N; ii += Br) {
          uint i_block = std::min(Br, N - ii);

          // read block: load Qi, the partially-accumulated Oi, and the
          // running row sums li for these query rows.
          for (int i = 0; i < i_block; i++) {
            li[i] = l[ii + i];
            for (int k = 0; k < d; k++) {
              Qi[i * d + k] = Q[base + (ii + i) * d + k];
              Oi[i * d + k] = O[base + (ii + i) * d + k];
            }
          }

          // compute Sij = Qi * Kj^t for this tile. Sij/Pij are indexed with
          // stride j_block (not Bc); used consistently below, this stays
          // within the (Br x Bc) scratch buffer.
          for (int i = 0; i < i_block; i++) {
            for (int j = 0; j < j_block; j++) {
              float sum = 0.f;
              for (int k = 0; k < d; k++) {
                sum += Qi[i * d + k] * Kj[j * d + k];
              }
              Sij[i * j_block + j] = sum;
            }
          }

          // compute Pij = exp(Sij) elementwise (unnormalized softmax weights).
          for (int i = 0; i < i_block; i++) {
            for (int j = 0; j < j_block; j++) {
              Pij[i * j_block + j] = std::exp(Sij[i * j_block + j]);
            }
          }

          // Rowsum: lij[i] = sum over this block's keys of Pij[i][*].
          for (int i = 0; i < i_block; i++) {
            float sum = 0.f;
            for (int j = 0; j < j_block; j++) {
              sum += Pij[i * j_block + j];
            }
            lij[i] = sum;
          }

          // update lblock: new running sum = old sum + this block's sum.
          for (int i = 0; i < i_block; i++) {
            lnew[i] = li[i] + lij[i];
          }

          // update softmax block: rescale the old partial output (weighted by
          // its old denominator li) and fold in this block's Pij * Vj
          // contribution, renormalizing by lnew.
          for (int i = 0; i < i_block; i++) {
            for (int k = 0; k < d; k++) {
              float sum = 0.f;
              for (int j = 0; j < j_block; j++) {
                sum += Pij[i * j_block + j] * Vj[j * d + k];
              }
              Oi[i * d + k] = (li[i] * Oi[i * d + k] + sum) / lnew[i];
            }
          }

          // writeback: persist the updated output rows and running sums so
          // the next K/V block continues from them.
          for (int i = 0; i < i_block; i++) {
            for (int k = 0; k < d; k++) {
              O[base + (ii + i) * d + k] = Oi[i * d + k];
            }
            l[ii + i] = lnew[i];
          }
        }
      }
    }
  }

  // DO NOT EDIT THIS RETURN STATEMENT //
  // It formats your C++ Vector O back into a Tensor of Shape (B, H, N, d) and
  // returns it //
  return torch::from_blob(O.data(), {B, H, N, d},
                          torch::TensorOptions().dtype(torch::kFloat32))
      .clone();
}

/* DO NOT EDIT THESE BINDINGS */
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  // Python entry points for each attention implementation, plus the warm-up
  // read accessors.
  m.def("myNaiveAttention", &myNaiveAttention, "Naive Attention");
  m.def("myUnfusedAttentionBlocked", &myUnfusedAttentionBlocked,
        " Blocked Unfused Attention");
  m.def("myFusedAttention", &myFusedAttention, "Fused Attention");
  m.def("myFlashAttention", &myFlashAttention, "Flash Attention");
  m.def("twoDimRead", &twoDimRead, "twoDimRead");
  m.def("fourDimRead", &fourDimRead, "fourDimRead");
}
