#include "build_casual_mask.h"

// CPU reference implementation of the batched causal attention mask.
// For each batch entry b with actual lengths q_lens[b] / k_lens[b], sets
//   mask[b][q][k] = 1.0f  when key position k is visible to query q
//                         (k <= q + (k_len - q_len), within the valid region),
//   mask[b][q][k] = 0.0f  otherwise (including all padding rows/columns).
// The mask buffer is laid out as [batch_size, max_q_len, max_k_len].
void CPUBuildCasualMask(float *mask,
                        const int *q_lens,
                        const int *k_lens,
                        int max_q_len,
                        int max_k_len,
                        int batch_size)
{
    for (int b = 0; b < batch_size; ++b) {
        const int q_len = q_lens[b];
        const int k_len = k_lens[b];
        float *batch_mask = mask + b * max_q_len * max_k_len;
        for (int q = 0; q < max_q_len; ++q) {
            float *row = batch_mask + q * max_k_len;
            for (int k = 0; k < max_k_len; ++k) {
                // Visible iff inside the valid (q_len x k_len) region and
                // not ahead of the causal diagonal (offset by k_len - q_len).
                const bool visible =
                    q < q_len && k < k_len && k <= q + (k_len - q_len);
                row[k] = visible ? 1.0f : 0.0f;
            }
        }
    }
}

// Element-wise comparison of the CPU reference against the GPU result.
// Returns true when every element agrees within an absolute tolerance of
// 1e-5; on the first mismatch, prints the offending index/values and
// returns false.
bool CheckResult(float *CPURes, float *GPURes, const int size) {
    for (int idx = 0; idx < size; ++idx) {
        // fabs promotes to double, matching the double-precision tolerance.
        const double diff = fabs(CPURes[idx] - GPURes[idx]);
        if (diff > 1e-5) {
            printf("the %dth res is wrong, CPU mask = %f, GPU mask = %f\n", idx, CPURes[idx], GPURes[idx]);
            return false;
        }
    }
    return true;
}

// Test harness: builds a causal mask on the GPU via launchBuildCausalMask,
// builds the same mask on the CPU, and compares element-wise.
// Exit code: 0 on pass, 1 on mismatch.
int main() {
    const int batch_size = 1;
    const int max_q_len = 5;
    const int max_k_len = 5;

    // Total number of mask elements across the whole batch.
    const int mask_size = batch_size * max_q_len * max_k_len;

    int *h_q_lens = (int*)malloc(sizeof(int)*batch_size);
    int *h_k_lens = (int*)malloc(sizeof(int)*batch_size);
    int *d_q_lens;
    int *d_k_lens;
    CHECK(cudaMalloc((void**)&d_q_lens, sizeof(int)*batch_size));
    CHECK(cudaMalloc((void**)&d_k_lens, sizeof(int)*batch_size));

    float *h_mask = (float*)malloc(sizeof(float)*mask_size);
    float *d_mask;
    CHECK(cudaMalloc((void**)&d_mask, sizeof(float)*mask_size));

    // Actual sequence lengths (<= max) for every batch entry.
    for (int i = 0; i < batch_size; ++i) {
        h_q_lens[i] = 3;
        h_k_lens[i] = 3;
    }

    CHECK(cudaMemcpy(d_q_lens, h_q_lens, sizeof(int)*batch_size, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_k_lens, h_k_lens, sizeof(int)*batch_size, cudaMemcpyHostToDevice));

    DataType type_float = getTensorType<float>();
    DataType type_int = getTensorType<int>();
    TensorWrapper<float> *mask_tensor = new TensorWrapper<float>(Device::GPU, type_float, {batch_size, max_q_len, max_k_len}, d_mask);
    TensorWrapper<int> *q_lens_tensor = new TensorWrapper<int>(Device::GPU, type_int, {batch_size}, d_q_lens);
    TensorWrapper<int> *k_lens_tensor = new TensorWrapper<int>(Device::GPU, type_int, {batch_size}, d_k_lens);

    std::cout << "start build mask kernel" << std::endl;
    launchBuildCausalMask(mask_tensor, q_lens_tensor, k_lens_tensor);
    // Kernel launches fail silently; surface launch-config errors here.
    CHECK(cudaGetLastError());
    std::cout << "end build mask kernel" << std::endl;

    // Copy the WHOLE batched mask back. The previous code omitted the
    // batch_size factor, which silently truncated results for batch_size > 1.
    CHECK(cudaMemcpy(h_mask, d_mask, sizeof(float)*mask_size, cudaMemcpyDeviceToHost));

    float *cpumask = (float*)malloc(sizeof(float)*mask_size);
    CPUBuildCasualMask(cpumask, h_q_lens, h_k_lens, max_q_len, max_k_len, batch_size);

    bool is_passed = CheckResult(cpumask, h_mask, mask_size);

    if (is_passed) {
        std::cout << "test passed" << std::endl;
    } else {
        std::cout << "test failed" << std::endl;
    }

    // Release tensor wrappers (previously leaked) and all buffers.
    delete mask_tensor;
    delete q_lens_tensor;
    delete k_lens_tensor;
    free(h_q_lens);
    free(h_k_lens);
    free(h_mask);
    free(cpumask);
    CHECK(cudaFree(d_q_lens));
    CHECK(cudaFree(d_k_lens));
    CHECK(cudaFree(d_mask));

    return is_passed ? 0 : 1;
}