#include "build_casual_mask.h"

/*
    Each block builds the mask for one batch entry.

    Takes past (context) KV into account: k_len may exceed q_len.
*/
/*
    Builds a causal attention mask for one batch entry per block.

    Grid:  (batch_size)       — blockIdx.x selects the batch entry.
    Block: 1D (any size)      — threads stride over the flattened
                                [max_q_len, max_k_len] mask plane.

    mask[b, q, k] = 1 iff (q, k) lies inside the real (q_len, k_len)
    extent AND key position k is not "in the future" of query q, where
    the batch may carry (k_len - q_len) past/context KV tokens.

    Note: "Casual" in the name is a historical typo for "Causal";
    kept so existing call sites keep linking.
*/
template<typename T>
__global__ void BuildCasualMask(
    T *mask,                         // [batch_size, max_q_len, max_k_len], written
    const int *__restrict__ q_lens,  // [batch_size] actual query lengths (read-only)
    const int *__restrict__ k_lens,  // [batch_size] actual key lengths, incl. past KV (read-only)
    int max_q_len,                   // largest q_len in the batch (padded row count)
    int max_k_len)                   // largest k_len in the batch (padded column count)
{
    const int tid = threadIdx.x;
    const int q_len = q_lens[blockIdx.x];
    const int k_len = k_lens[blockIdx.x];
    // Start of this batch entry's plane in the flat mask buffer.
    const int offset = blockIdx.x * max_q_len * max_k_len;
    // Block-stride loop: correctness does not depend on blockDim.x
    // dividing the plane size evenly.
    for (int i = tid; i < max_q_len * max_k_len; i += blockDim.x) {
        const int q_idx = i / max_k_len;
        const int k_idx = i % max_k_len;
        // Query row q_idx may attend keys up to q_idx + (k_len - q_len):
        // the first (k_len - q_len) key positions are past context, fully visible.
        const bool mask_val = (q_idx < q_len) && (k_idx < k_len) &&
                              (k_idx <= q_idx + (k_len - q_len));
        mask[i + offset] = static_cast<T>(mask_val);
    }
}

/*
    Host-side launcher: fills `mask` with the per-batch causal attention
    mask on the current device stream.

    mask:   [batch_size, max_q_len, max_k_len] output buffer (device)
    q_lens: [batch_size] actual query lengths (device)
    k_lens: [batch_size] actual key lengths, including past KV (device)

    Launch shape: one block per batch entry; each block's threads
    stride over that entry's mask plane.
*/
template<typename T>
void launchBuildCausalMask(
    TensorWrapper<T> *mask,
    TensorWrapper<int> *q_lens,
    TensorWrapper<int> *k_lens)
{
    const int batch_size = q_lens->shape[0];
    const int max_q_len  = mask->shape[1];
    const int max_k_len  = mask->shape[2];

    constexpr int kThreadsPerBlock = 256;
    const dim3 grid(batch_size);
    const dim3 block(kThreadsPerBlock);

    BuildCasualMask<T><<<grid, block>>>(
        mask->data, q_lens->data, k_lens->data, max_q_len, max_k_len);
}

// Explicit instantiations: the template definition lives in this .cu file,
// so each supported mask dtype (float, half) must be instantiated here for
// callers in other translation units to link against.
template void launchBuildCausalMask<float>(
    TensorWrapper<float> *mask,
    TensorWrapper<int> *q_lens,
    TensorWrapper<int> *k_lens
);

template void launchBuildCausalMask<half>(
    TensorWrapper<half> *mask,
    TensorWrapper<int> *q_lens,
    TensorWrapper<int> *k_lens
);

