#include <cmath>
#include <cstdlib>
#include <iostream>

#include <cuda_runtime.h>

#define N 4
#define D 2

// CUDA kernel to compute the Flash Attention v1
__global__ void flash_attention_v1(float* Q, float* K, float* V, float* O) {
    extern __shared__ float shared_mem[];
    float* S_i = shared_mem;
    float* exp_S = shared_mem + N;
    float* O_i = shared_mem + 2 * N;

    int seq_len = N;
    int head_dim = D;
    float scale = 1.0 / sqrtf(head_dim);

    for (int i = 0; i < seq_len; ++i) {
        // Compute Q[i] * K^T for row i
        for (int j = 0; j < seq_len; ++j) {
            float sum = 0.0;
            for (int k = 0; k < head_dim; ++k) {
                sum += Q[i * head_dim + k] * K[j * head_dim + k];
            }
            S_i[j] = sum * scale;
        }

        float m_i = -INFINITY;
        for (int j = 0; j < seq_len; ++j) {
            if (S_i[j] > m_i) m_i = S_i[j];
        }

        // float l_old = 0.0;
        float l_new = 0.0;
        for (int j = 0; j < seq_len; ++j) {
            exp_S[j] = expf(S_i[j] - m_i);
            l_new += exp_S[j];
        }

        // Update output: O[i] = exp(S_i - m_i) * V / l_new
        for (int j = 0; j < head_dim; ++j) {
            float sum = 0.0;
            for (int k = 0; k < seq_len; ++k) {
                sum += exp_S[k] * V[k * head_dim + j];
            }
            O_i[j] = sum / l_new;
        }

        // Copy O_i to O
        for (int j = 0; j < head_dim; ++j) {
            O[i * head_dim + j] = O_i[j];
        }
    }
}

// Host-side CUDA error check: every runtime API call returns cudaError_t;
// aborts with file/line and the error string on failure. Kernel launches do
// not return errors directly — they are caught via cudaGetLastError() and the
// following cudaDeviceSynchronize().
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            std::cerr << "CUDA error " << __FILE__ << ":" << __LINE__       \
                      << ": " << cudaGetErrorString(err_) << std::endl;     \
            std::exit(EXIT_FAILURE);                                        \
        }                                                                   \
    } while (0)

// Driver: uploads small fixed Q/K/V matrices, runs the serial attention
// kernel on a single thread, and prints the N x D output.
int main() {
    // Host inputs, row-major N x D. Float literals to match the float arrays.
    const float Q[N * D] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f};
    const float K[N * D] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f};
    const float V[N * D] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f};
    float O[N * D];

    const size_t bytes = N * D * sizeof(float);

    // Device buffers.
    float* d_Q = nullptr;
    float* d_K = nullptr;
    float* d_V = nullptr;
    float* d_O = nullptr;
    CUDA_CHECK(cudaMalloc(&d_Q, bytes));
    CUDA_CHECK(cudaMalloc(&d_K, bytes));
    CUDA_CHECK(cudaMalloc(&d_V, bytes));
    CUDA_CHECK(cudaMalloc(&d_O, bytes));

    // Upload inputs.
    CUDA_CHECK(cudaMemcpy(d_Q, Q, bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_K, K, bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_V, V, bytes, cudaMemcpyHostToDevice));

    // The kernel is serial by design: one block, one thread. Dynamic shared
    // memory holds three N-float slices (scores, exponentials, output row).
    const int shared_mem_size = 3 * N * sizeof(float);
    flash_attention_v1<<<1, 1, shared_mem_size>>>(d_Q, d_K, d_V, d_O);
    CUDA_CHECK(cudaGetLastError());       // launch-configuration errors
    CUDA_CHECK(cudaDeviceSynchronize());  // execution errors inside the kernel

    // Download the result (cudaMemcpy is blocking, but the explicit sync
    // above already surfaced any kernel failure with a precise location).
    CUDA_CHECK(cudaMemcpy(O, d_O, bytes, cudaMemcpyDeviceToHost));

    std::cout << "Output of Flash Attention V1:" << std::endl;
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < D; ++j) {
            std::cout << O[i * D + j] << " ";
        }
        std::cout << std::endl;
    }

    CUDA_CHECK(cudaFree(d_Q));
    CUDA_CHECK(cudaFree(d_K));
    CUDA_CHECK(cudaFree(d_V));
    CUDA_CHECK(cudaFree(d_O));

    return 0;
}
