#include <torch/extension.h>
#include <ATen/ATen.h>

#include <sys/time.h>
#include <time.h>

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <vector>

#include <immintrin.h>
#include <omp.h>

// Uncomment for ISPC
// #include "module_ispc.h"
// using namespace ispc;

// ------------------------------------ //
// 	WARM-UP: ACCESSING TENSORS      //
// ------------------------------------ //

// Step #1: Understand Read/Write Accessors for a 2D Tensor
// Reads element (x, y) from a row-major 2D tensor stored as a flat vector.
// sizeX is the length of one row (the column count), not the number of rows.
inline float twoDimRead(std::vector<float> &tensor, int &x, int &y, const int &sizeX)
{
    const int offset = x * sizeX + y;
    return tensor[offset];
}

// Writes val to element (x, y) of a row-major 2D tensor stored as a flat
// vector. sizeX is the length of one row (the column count).
inline void twoDimWrite(std::vector<float> &tensor, int &x, int &y, const int &sizeX, float &val)
{
    const int offset = x * sizeX + y;
    tensor[offset] = val;
}

// Step #2: Implement Read/Write Accessors for a 4D Tensor
// Reads element (x, y, z, b) from a row-major 4D tensor stored as a flat
// vector. sizeX, sizeY, sizeZ are the extents of the 2nd, 3rd, and 4th
// dimensions (the 1st dimension's extent is not needed for the offset).
inline float fourDimRead(std::vector<float> &tensor, int &x, int &y, int &z, int &b,
                         const int &sizeX, const int &sizeY, const int &sizeZ)
{
    // Horner-style flattening of the 4D index:
    //   x*(sizeX*sizeY*sizeZ) + y*(sizeY*sizeZ) + z*sizeZ + b
    const int offset = ((x * sizeX + y) * sizeY + z) * sizeZ + b;
    return tensor[offset];
}

// Writes val to element (x, y, z, b) of a row-major 4D tensor stored as a
// flat vector. Same index layout as fourDimRead.
inline void fourDimWrite(std::vector<float> &tensor, int &x, int &y, int &z, int &b,
                         const int &sizeX, const int &sizeY, const int &sizeZ, float &val)
{
    const int offset = ((x * sizeX + y) * sizeY + z) * sizeZ + b;
    tensor[offset] = val;
}

// Debug helper: prints a vector's name and element count to stdout.
// (The per-element dump is intentionally omitted to keep output short.)
inline void print_vector(const std::string &name, const std::vector<float> &tensor)
{
    std::cout << "name : " << name << " size : " << tensor.size() << std::endl;
}

// DO NOT EDIT THIS FUNCTION //
// Copies a torch Tensor's data into a flat std::vector<float> in row-major
// order, so the 2D/4D accessors above can address it by computed offset.
std::vector<float> formatTensor(torch::Tensor tensor)
{
    tensor = tensor.flatten();    // collapse all dimensions to 1-D
    tensor = tensor.contiguous(); // guarantee dense storage before taking data_ptr
    std::vector<float> vec(tensor.data_ptr<float>(), tensor.data_ptr<float>() + tensor.numel());
    return vec;
}

// Exponential used by all softmax implementations below.
//
// Uses the float overload of std::exp explicitly so the argument is not
// silently promoted to double (plain ::exp takes a double). Kept as a
// separate function so a polynomial fast-path approximation can be swapped
// in later if profiling warrants it.
inline float myexp(float x)
{
    return std::exp(x);
}

// Queries the L1 data-cache line size in bytes by shelling out to getconf.
// Returns 0 on any failure (command unavailable or unparsable output);
// callers must handle a 0 result.
inline size_t get_cache_line_size() {
    FILE* fp = popen("getconf LEVEL1_DCACHE_LINESIZE", "r");
    if (fp == nullptr) {
        std::cerr << "Failed to execute getconf command." << std::endl;
        return 0;
    }

    // Initialized so a failed parse reports 0 instead of reading garbage.
    size_t cache_line_size = 0;
    if (fscanf(fp, "%zu", &cache_line_size) != 1) {
        cache_line_size = 0;
    }
    // A stream opened with popen() must be closed with pclose(), not
    // fclose() -- fclose() leaks the child process and is undefined here.
    pclose(fp);

    return cache_line_size;
}

// Queries the total L1 data-cache size in bytes by shelling out to getconf.
// Returns 0 on any failure; callers must handle a 0 result.
inline size_t get_l1_cache_size() {
    FILE* fp = popen("getconf LEVEL1_DCACHE_SIZE", "r");
    if (fp == nullptr) {
        std::cerr << "Failed to execute getconf command." << std::endl;
        return 0;
    }

    // Initialized so a failed parse reports 0 instead of reading garbage.
    size_t cache_size = 0;
    if (fscanf(fp, "%zu", &cache_size) != 1) {
        cache_size = 0;
    }
    // A stream opened with popen() must be closed with pclose(), not fclose().
    pclose(fp);

    return cache_size;
}

/* Programming Your Attention Modules.
 *
 * You are given Q, K, and V Tensors as inputs that are formatted as vectors. We have also created O and QK^t Tensors
 * that are formatted as vectors. After you have implemented your accessors in the Warm-Up you should be able to
 * read/write to these tensors via the read/write functions above.
 *
 * You are also given 4 integers as parameters: B, H, N, d:
 *
 * B (Batch Size) - The number of samples for your attention layer. Think of it this way - if I asked my dnn
 * a question and it output 5 different answers it had a batch size of 5. These samples are independent of each
 * other and thus can be parallelized.
 *
 * H (Number of Heads) - Each head runs on its own set of Q, K, V matrices. This effectively allows each
 * head to run the same attention algorithm, but with each head using its own hyperparameters. These
 * allow each head to have its own definition of what relevance means when looking at a token. These heads
 * can operate independently of one another and thus can be parallelized.
 *
 * N (Sequence Length) - The number of tokens. You may think of this as the number of words in a sample.
 *
 * d (Embedding Dimensionality) - The number of features each token encodes per attention head. Let's
 * say I encoded a word using the following: (length, number of vowels, has a capital letter). The
 * embedded dimensionality would be 3.
 * */

// ---------------------------------------------------------- //
//                  PART 1: NAIVE ATTENTION                   //
// ---------------------------------------------------------- //

// Part 1: naive attention. Computes O = softmax(Q K^T) V independently for
// every (batch, head) pair with straightforward triple loops.
//
// Q, K, V have shape (B, H, N, d); QK^t is an (N, N) scratch buffer supplied
// by the caller; the returned O has shape (B, H, N, d).
torch::Tensor myNaiveAttention(torch::Tensor QTensor, torch::Tensor KTensor, torch::Tensor VTensor, torch::Tensor QK_tTensor,
                               int B, int H, int N, int d)
{
    // Make O Tensor with Shape (B, H, N, d)
    at::Tensor OTensor = at::zeros({B, H, N, d}, at::kFloat);

    // Format O, Q, K, and V tensors into flat 4D vectors
    std::vector<float> O = formatTensor(OTensor);
    std::vector<float> Q = formatTensor(QTensor);
    std::vector<float> K = formatTensor(KTensor);
    std::vector<float> V = formatTensor(VTensor);

    // Format QK_t Tensor into a flat 2D vector.
    std::vector<float> QK_t = formatTensor(QK_tTensor);

    for (int b = 0; b < B; b++)
    {
        for (int h = 0; h < H; h++)
        {
            // QK_t is shared scratch reused across (b, h) iterations: clear
            // the previous head's softmax output before accumulating this
            // head's scores, otherwise heads after the first compute
            // softmax(P_prev + S) instead of softmax(S).
            std::fill(QK_t.begin(), QK_t.end(), 0.0f);

            // S = Q * K^T, accumulated with j (the d-dimension) as the middle
            // loop so each Q[i][j] is loaded once per output row.
            for (int i = 0; i < N; i++)
            {
                for (int j = 0; j < d; j++)
                {
                    float Q_val = fourDimRead(Q, b, h, i, j, H, N, d);
                    for (int k = 0; k < N; k++)
                    {
                        // QK_t[i][k] += Q[i][j] * K^T[j][k], and K^T[j][k] = K[k][j]
                        float KT_val = fourDimRead(K, b, h, k, j, H, N, d);
                        float QKT_val = twoDimRead(QK_t, i, k, N);
                        float write_val = QKT_val + Q_val * KT_val;
                        twoDimWrite(QK_t, i, k, N, write_val);
                    }
                }
            }

            // Row-wise softmax of S, in place: exponentiate while recording
            // the row sum, then normalize by that sum.
            for (int i = 0; i < N; i++)
            {
                float l = 0.0f;
                for (int j = 0; j < N; j++)
                {
                    float s_ij = twoDimRead(QK_t, i, j, N);
                    float exp_s_ij = myexp(s_ij);
                    l += exp_s_ij;
                    twoDimWrite(QK_t, i, j, N, exp_s_ij);
                }

                // Multiply by the reciprocal instead of dividing N times.
                float trans = 1 / l;
                for (int j = 0; j < N; j++)
                {
                    float exp_s_ij = twoDimRead(QK_t, i, j, N);
                    float write_val = exp_s_ij * trans;
                    twoDimWrite(QK_t, i, j, N, write_val);
                }
            }

            // O = P * V, where P is the softmaxed score matrix in QK_t.
            for (int i = 0; i < N; i++)
            {
                for (int j = 0; j < N; j++)
                {
                    float p_ij = twoDimRead(QK_t, i, j, N);
                    for (int k = 0; k < d; k++)
                    {
                        float v_jk = fourDimRead(V, b, h, j, k, H, N, d);
                        float o_ik = fourDimRead(O, b, h, i, k, H, N, d);
                        o_ik += p_ij * v_jk;
                        fourDimWrite(O, b, h, i, k, H, N, d, o_ik);
                    }
                }
            }
        }
    }

    // DO NOT EDIT THIS RETURN STATEMENT //
    // It formats your C++ Vector O back into a Tensor of Shape (B, H, N, d) and returns it //
    return torch::from_blob(O.data(), {B, H, N, d}, torch::TensorOptions().dtype(torch::kFloat32)).clone();
}

// ---------------------------------------------------------- //
//     PART 2: BLOCKED MATRIX MULTIPLY AND UNFUSED SOFTMAX    //
// ---------------------------------------------------------- //

// Part 2: same computation as myNaiveAttention -- O = softmax(Q K^T) V per
// (batch, head) -- but both matrix multiplies are cache-blocked so each tile
// of QK_t / O stays resident in cache while it is being accumulated.
//
// Q, K, V have shape (B, H, N, d); QK^t is an (N, N) scratch buffer.
torch::Tensor myUnfusedAttentionBlocked(torch::Tensor QTensor, torch::Tensor KTensor, torch::Tensor VTensor, torch::Tensor QK_tTensor,
                                        int B, int H, int N, int d)
{
    // Make O Tensor with Shape (B, H, N, d)
    at::Tensor OTensor = at::zeros({B, H, N, d}, at::kFloat);

    // Format O, Q, K, and V tensors into flat 4D vectors
    std::vector<float> O = formatTensor(OTensor);
    std::vector<float> Q = formatTensor(QTensor);
    std::vector<float> K = formatTensor(KTensor);
    std::vector<float> V = formatTensor(VTensor);

    // Format QK_t Tensor into a flat 2D vector.
    std::vector<float> QK_t = formatTensor(QK_tTensor);

    // Tile sizes: bsize_H rows span one cache line of floats; bsize_W is a
    // fixed column tile. get_cache_line_size() returns 0 on failure, and a
    // zero block size would make the `is += bsize_H` loops below spin
    // forever, so fall back to 16 (a 64-byte line of floats) in that case.
    int bsize_H, bsize_W;
    size_t line_bytes = get_cache_line_size();
    bsize_H = (line_bytes >= sizeof(float)) ? static_cast<int>(line_bytes / sizeof(float)) : 16;
    bsize_W = 64;

    for (int b = 0; b < B; b++)
    {
        for (int h = 0; h < H; h++)
        {
            // QK_t is shared scratch reused across (b, h) iterations: clear
            // the previous head's softmax output before accumulating this
            // head's scores.
            std::fill(QK_t.begin(), QK_t.end(), 0.0f);

            // S(i, j) += Q(i, k) * K(j, k), blocked over (i, j, k).
            for (int is = 0; is < N; is += bsize_H)
            {
                for (int js = 0; js < N; js += bsize_W)
                {
                    for (int ks = 0; ks < d; ks += bsize_W)
                    {
                        for (int i = is; i < std::min(is + bsize_H, N); i++)
                        {
                            for (int j = js; j < std::min(js + bsize_W, N); j++)
                            {
                                float val = twoDimRead(QK_t, i, j, N);
                                for (int k = ks; k < std::min(ks + bsize_W, d); k++)
                                {
                                    val += fourDimRead(Q, b, h, i, k, H, N, d) * fourDimRead(K, b, h, j, k, H, N, d);
                                }
                                twoDimWrite(QK_t, i, j, N, val);
                            }
                        }
                    }
                }
            }

            // Row-wise softmax of S, in place (not blocked: each row is
            // already streamed sequentially).
            for (int i = 0; i < N; i++)
            {
                float l = 0.0f;
                for (int j = 0; j < N; j++)
                {
                    float s_ij = twoDimRead(QK_t, i, j, N);
                    float exp_s_ij = myexp(s_ij);
                    l += exp_s_ij;
                    twoDimWrite(QK_t, i, j, N, exp_s_ij);
                }

                // Multiply by the reciprocal instead of dividing N times.
                float trans = 1 / l;
                for (int j = 0; j < N; j++)
                {
                    float exp_s_ij = twoDimRead(QK_t, i, j, N);
                    float write_val = exp_s_ij * trans;
                    twoDimWrite(QK_t, i, j, N, write_val);
                }
            }

            // O(i, j) += P(i, k) * V(k, j), blocked the same way.
            for (int is = 0; is < N; is += bsize_H)
            {
                for (int js = 0; js < d; js += bsize_W)
                {
                    for (int ks = 0; ks < N; ks += bsize_H)
                    {
                        for (int i = is; i < std::min(is + bsize_H, N); i++)
                        {
                            for (int j = js; j < std::min(js + bsize_W, d); j++)
                            {
                                float val = fourDimRead(O, b, h, i, j, H, N, d);
                                for (int k = ks; k < std::min(ks + bsize_H, N); k++)
                                {
                                    val += twoDimRead(QK_t, i, k, N) * fourDimRead(V, b, h, k, j, H, N, d);
                                }
                                fourDimWrite(O, b, h, i, j, H, N, d, val);
                            }
                        }
                    }
                }
            }
        }
    }

    // DO NOT EDIT THIS RETURN STATEMENT //
    // It formats your C++ Vector O back into a Tensor of Shape (B, H, N, d) and returns it //
    return torch::from_blob(O.data(), {B, H, N, d}, torch::TensorOptions().dtype(torch::kFloat32)).clone();
}

// ---------------------------------------------------------- //
//                 PART 3: FUSED ATTENTION     	              //
// ---------------------------------------------------------- //

// Part 3: fused attention. Only one N-length scratch row is needed per
// thread: as soon as one row of Q K^T is computed it is softmaxed and
// immediately multiplied with V to produce one row of O, so the full N x N
// score matrix is never materialized (it is not needed later in the network).
//
// Q, K, V have shape (B, H, N, d). `temp` is per-thread scratch supplied by
// the caller; presumably it holds at least one N-length row per OpenMP
// thread -- TODO confirm against the Python harness.
torch::Tensor myFusedAttention(torch::Tensor QTensor, torch::Tensor KTensor, torch::Tensor VTensor, torch::Tensor temp,
                               int B, int H, int N, int d)
{
    // Make O Tensor with Shape (B, H, N, d)
    // and O Row Tensor with Shape (N)
    at::Tensor OTensor = at::zeros({B, H, N, d}, at::kFloat);
    at::Tensor ORowTensor = at::zeros({N}, at::kFloat);

    // Format O, Q, K, and V tensors into flat 4D vectors
    std::vector<float> O = formatTensor(OTensor);
    std::vector<float> Q = formatTensor(QTensor);
    std::vector<float> K = formatTensor(KTensor);
    std::vector<float> V = formatTensor(VTensor);

    // Format ORow Tensor into a 1D vector (scratch kept for the harness).
    std::vector<float> ORow = formatTensor(ORowTensor);

    // The (b, h, i) iterations are all independent, so the three outer loops
    // are collapsed and split across OpenMP threads.
    #pragma omp parallel for collapse(3)
    for (int b = 0; b < B; b++)
    {
        for (int h = 0; h < H; h++)
        {
            for (int i = 0; i < N; i++)
            {
                // Each thread copies its own slice of `temp` so no scratch
                // row is shared between threads.
                at::Tensor ORowTensor = temp.index({torch::indexing::Slice(omp_get_thread_num(), torch::indexing::None)});
                std::vector<float> tempRow = formatTensor(ORowTensor);
                float RowSum = 0.0f;

                // tempRow[j] = exp(Q[i] . K[j]). The dot product is
                // accumulated in a local so stale contents of the scratch
                // row cannot leak into the result, and the exponential is
                // computed once here and reused during normalization
                // (instead of calling myexp twice per element).
                for (int j = 0; j < N; j++)
                {
                    float score = 0.0f;
                    for (int k = 0; k < d; k++)
                    {
                        score += fourDimRead(Q, b, h, i, k, H, N, d) * fourDimRead(K, b, h, j, k, H, N, d);
                    }
                    tempRow[j] = myexp(score);
                    RowSum += tempRow[j];
                }

                // Normalize: tempRow now holds the softmaxed row.
                for (int j = 0; j < N; j++)
                {
                    tempRow[j] /= RowSum;
                }

                // O[i] = (softmax row) * V.
                for (int k = 0; k < d; k++)
                {
                    float Oval = 0.0f;
                    for (int j = 0; j < N; j++)
                    {
                        Oval += tempRow[j] * fourDimRead(V, b, h, j, k, H, N, d);
                    }
                    fourDimWrite(O, b, h, i, k, H, N, d, Oval);
                }
            }
        }
    }

    // DO NOT EDIT THIS RETURN STATEMENT //
    // It formats your C++ Vector O back into a Tensor of Shape (B, H, N, d) and returns it //
    return torch::from_blob(O.data(), {B, H, N, d}, torch::TensorOptions().dtype(torch::kFloat32)).clone();
}

// ---------------------------------------------------------- //
//                PART 4: FLASH ATTENTION 		      //
// ---------------------------------------------------------- //

// Part 4: flash attention. The score matrix is processed in Br x Bc tiles;
// a running row-sum vector l lets each output tile be rescaled as new
// column tiles arrive, so only O(Br * Bc) scratch is live at a time.
//
// Fixes relative to the straightforward version:
//  - tile counts round UP ((N + B - 1) / B), so a trailing partial tile is
//    processed when N is not divisible by Br/Bc (std::ceil(N / Br) performed
//    integer division first and dropped the remainder);
//  - partial tiles are clamped to N so no reads run past the sequence;
//  - the lij scratch is zeroed per tile and l is zeroed per (b, h) pair,
//    since both buffers are reused across iterations.
torch::Tensor myFlashAttention(torch::Tensor QTensor, torch::Tensor KTensor, torch::Tensor VTensor,
                               torch::Tensor QiTensor, torch::Tensor KjTensor, torch::Tensor VjTensor,
                               torch::Tensor SijTensor, torch::Tensor PijTensor, torch::Tensor PVTensor,
                               torch::Tensor OiTensor, torch::Tensor LTensor, torch::Tensor LiTensor,
                               torch::Tensor LijTensor, torch::Tensor LnewTensor, int Bc, int Br,
                               int B, int H, int N, int d)
{
    // Q, K, V are passed in with Shape: (B, H, N, d)
    // Sij, Pij are passed in with Shape: (Br, Bc)
    // Kj, Vj are passed in with Shape: (Bc, d)
    // Qi, Oi, and PV  are passed in with Shape: (Br, d)
    // L in passed in with Shape: (N)
    // Li, Lij, and Lnew are passed in with shape (Br)

    // Make O Tensor with Shape (B, H, N, d)
    at::Tensor OTensor = at::zeros({B, H, N, d}, at::kFloat);

    // Format All Tensors into Vectors
    std::vector<float> O = formatTensor(OTensor);
    std::vector<float> Q = formatTensor(QTensor);
    std::vector<float> K = formatTensor(KTensor);
    std::vector<float> V = formatTensor(VTensor);
    std::vector<float> Sij = formatTensor(SijTensor);
    std::vector<float> Pij = formatTensor(PijTensor);
    std::vector<float> Kj = formatTensor(KjTensor);
    std::vector<float> Vj = formatTensor(VjTensor);
    std::vector<float> Qi = formatTensor(QiTensor);
    std::vector<float> Oi = formatTensor(OiTensor);
    std::vector<float> l = formatTensor(LTensor);
    std::vector<float> PV = formatTensor(PVTensor);
    std::vector<float> li = formatTensor(LiTensor);
    std::vector<float> lij = formatTensor(LijTensor);
    std::vector<float> lnew = formatTensor(LnewTensor);

    for (int b = 0; b < B; b++)
    {
        for (int h = 0; h < H; h++)
        {
            // Tile counts, rounded up to cover a trailing partial tile.
            int Tr = (N + Br - 1) / Br;
            int Tc = (N + Bc - 1) / Bc;

            // The running row sums belong to ONE (b, h) pair: clear the
            // previous head's values before accumulating.
            std::fill(l.begin(), l.end(), 0.0f);

            for (int j = 1; j <= Tc; j++)
            {
                // Rows of K/V covered by this column tile (< Bc at the edge).
                int tileC = std::min(Bc, N - (j - 1) * Bc);

                // Load Kj, Vj (tileC x d) into the local blocks.
                for (int ii = 0; ii < tileC; ii++)
                {
                    int index = (j - 1) * Bc + ii;
                    for (int dd = 0; dd < d; dd++)
                    {
                        Kj[ii * d + dd] = fourDimRead(K, b, h, index, dd, H, N, d);
                        Vj[ii * d + dd] = fourDimRead(V, b, h, index, dd, H, N, d);
                    }
                }

                for (int i = 1; i <= Tr; i++)
                {
                    // Rows of Q/O covered by this row tile (< Br at the edge).
                    int tileR = std::min(Br, N - (i - 1) * Br);

                    // Load Qi, Oi (tileR x d) and li (tileR) into local blocks.
                    for (int ii = 0; ii < tileR; ii++)
                    {
                        int index = (i - 1) * Br + ii;
                        for (int dd = 0; dd < d; dd++)
                        {
                            Qi[ii * d + dd] = fourDimRead(Q, b, h, index, dd, H, N, d);
                            Oi[ii * d + dd] = fourDimRead(O, b, h, index, dd, H, N, d);
                        }
                        li[ii] = l[index];
                    }

                    // Sij = Qi * Kj^T   (tileR x tileC)
                    for (int ii = 0; ii < tileR; ii++)
                    {
                        for (int jj = 0; jj < tileC; jj++)
                        {
                            float sij = 0.0f;
                            for (int dd = 0; dd < d; dd++)
                            {
                                sij += Qi[ii * d + dd] * Kj[jj * d + dd];
                            }
                            Sij[ii * Bc + jj] = sij;
                        }
                    }

                    // Pij = exp(Sij) and lij = rowsum(Pij). lij is reused
                    // scratch, so it must be cleared before accumulating
                    // this tile's sums.
                    for (int ii = 0; ii < tileR; ii++)
                    {
                        lij[ii] = 0.0f;
                        for (int jj = 0; jj < tileC; jj++)
                        {
                            Pij[ii * Bc + jj] = myexp(Sij[ii * Bc + jj]);
                            lij[ii] += Pij[ii * Bc + jj];
                        }
                    }

                    // lnew = li + lij; write the updated row sums back to l.
                    for (int ii = 0; ii < tileR; ii++)
                    {
                        lnew[ii] = li[ii] + lij[ii];
                        l[(i - 1) * Br + ii] = lnew[ii];
                    }

                    // Oi = (li * Oi + Pij * Vj) / lnew, rescaling the old
                    // partial output by its old row sum before folding in
                    // the new tile; written straight back to O.
                    for (int ii = 0; ii < tileR; ii++)
                    {
                        for (int dd = 0; dd < d; dd++)
                        {
                            float pij_vj_sum = 0.0f;
                            for (int jj = 0; jj < tileC; jj++)
                            {
                                pij_vj_sum += Pij[ii * Bc + jj] * Vj[jj * d + dd];
                            }
                            Oi[ii * d + dd] = (li[ii] * Oi[ii * d + dd] + pij_vj_sum) / lnew[ii];
                            int index = (i - 1) * Br + ii;
                            fourDimWrite(O, b, h, index, dd, H, N, d, Oi[ii * d + dd]);
                        }
                    }
                }
            }
        }
    }

    // DO NOT EDIT THIS RETURN STATEMENT //
    // It formats your C++ Vector O back into a Tensor of Shape (B, H, N, d) and returns it //
    return torch::from_blob(O.data(), {B, H, N, d}, torch::TensorOptions().dtype(torch::kFloat32)).clone();
}

/* DO NOT EDIT THESE BINDINGS */
// Expose the four attention implementations and the warm-up accessors to
// Python as a torch extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("myNaiveAttention", &myNaiveAttention, "Naive Attention");
    m.def("myUnfusedAttentionBlocked", &myUnfusedAttentionBlocked, " Blocked Unfused Attention");
    m.def("myFusedAttention", &myFusedAttention, "Fused Attention");
    m.def("myFlashAttention", &myFlashAttention, "Flash Attention");
    m.def("twoDimRead", &twoDimRead, "twoDimRead");
    m.def("fourDimRead", &fourDimRead, "fourDimRead");
}
