#include "calc_paddingoffset.h"

// For every valid (non-padded) token in a [batch_size, max_seq_len] padded batch,
// computes how many padding slots precede it, and builds the exclusive prefix sum
// of sequence lengths.
//
// Outputs:
//   padding_offset[i]     — for the i-th valid token (tokens flattened batch by
//                           batch), the total padding accumulated in all earlier
//                           sequences; adding it to the compact index recovers the
//                           position in the padded layout.
//   cur_sum_seqlens[b]    — number of valid tokens before batch b (exclusive scan);
//                           cur_sum_seqlens[batch_size] holds the grand total.
//
// Launch config: intended to run as <<<1, 1>>> — the whole computation is a serial
// scan performed by a single thread (batch_size is small in practice).
__global__ void CalcPaddingoffset(int *padding_offset,
                                  int *cur_sum_seqlens,
                                  const int *input_lengths,
                                  const int batch_size,
                                  const int max_seq_len) {
    int token_idx = 0;      // write cursor into the flattened valid-token array
    int pad_so_far = 0;     // padding slots accumulated over previous sequences
    int tokens_so_far = 0;  // valid tokens accumulated over previous sequences
    for (int b = 0; b < batch_size; ++b) {
        const int len = input_lengths[b];
        // Exclusive prefix sum of sequence lengths.
        cur_sum_seqlens[b] = tokens_so_far;
        // Every token of this sequence is shifted by the same cumulative padding.
        for (int t = 0; t < len; ++t, ++token_idx) {
            padding_offset[token_idx] = pad_so_far;
        }
        pad_so_far += max_seq_len - len;
        tokens_so_far += len;
    }
    // Trailing entry: total number of valid tokens across the whole batch.
    cur_sum_seqlens[batch_size] = tokens_so_far;
}

// Host-side launcher for CalcPaddingoffset.
//
// Shape contract (validated below):
//   padding_offset : [batch_size, max_seq_len]
//   input_lengths  : [batch_size]
//   cur_sum_seqlens: [batch_size + 1]
void launchCalcPaddingoffset(TensorWrapper<int>* padding_offset,
                             TensorWrapper<int>* cur_sum_seqlens,
                             TensorWrapper<int>* input_lengths
) {
    const int batch_size = padding_offset->shape[0];
    const int max_seq_len = padding_offset->shape[1];
    // Fixed typo in the first message ("lenghts" -> "lengths").
    LLM_CHECK_WITH_INFO(batch_size == input_lengths->shape[0], "input lengths numbers should equal to padding offset bs dim!");
    LLM_CHECK_WITH_INFO(batch_size == (cur_sum_seqlens->shape[0] - 1),  "cum seqlen numbers should equal to padding offset bs dim + 1!");
    // Single-thread launch is intentional: the kernel performs a serial prefix
    // scan over batches, and batch_size is small.
    CalcPaddingoffset<<<1, 1>>>(padding_offset->data, cur_sum_seqlens->data, input_lengths->data, batch_size, max_seq_len);
    // Kernel launches fail silently; surface configuration/launch errors here
    // instead of at an unrelated later synchronization point.
    LLM_CHECK_WITH_INFO(cudaGetLastError() == cudaSuccess, "CalcPaddingoffset kernel launch failed!");
}