#include "input_embedding.h"
#include "iostream"

/**
 * Embedding lookup kernel: output[t, h] = embed_table[input_ids[t], h].
 *
 * Layout (row-major):
 *   input_ids  : [max_context_token_num]            token ids
 *   output     : [max_context_token_num, hidden_size]
 *   embed_table: [vocab_size, hidden_size]          (vocab_size not passed; ids assumed in range)
 *
 * Uses a grid-stride loop, so any <<<grid, block>>> configuration is correct;
 * adjacent threads write adjacent elements of `output`, giving coalesced access.
 * Indexing is done in 64 bits so max_context_token_num * hidden_size cannot
 * overflow a 32-bit int for large tensors.
 */
template<typename T>
__global__ void embeddingFunctor(const int* __restrict__ input_ids,
                T* __restrict__ output,
                const T* __restrict__ embed_table,
                const int max_context_token_num,
                const int hidden_size) 
{
    // Total number of output elements, computed in 64 bits to avoid int overflow.
    const int64_t total  = static_cast<int64_t>(max_context_token_num) * hidden_size;
    const int64_t stride = static_cast<int64_t>(gridDim.x) * blockDim.x;
    for (int64_t index = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
         index < total;
         index += stride) {
        const int token_id = input_ids[index / hidden_size];   // which token's row
        const int offset   = static_cast<int>(index % hidden_size);  // column within the row
        output[index] = embed_table[static_cast<int64_t>(token_id) * hidden_size + offset];
    }
}

/**
 * Host launcher for the embedding lookup.
 *
 * @param input_ids   device tensor of token ids, shape [max_context_token_num]
 * @param output      device tensor to fill, shape [max_context_token_num, hidden_size]
 * @param embed_table embedding weight, rows indexed by token id
 *
 * Validates that input_ids and output agree on the token dimension, then
 * launches embeddingFunctor on the default stream. The kernel uses a
 * grid-stride loop, so the grid is sized to cover the work and capped at
 * 2048 blocks rather than always launching 2048 blocks.
 */
template<typename T>
void launchInputEmbedding(TensorWrapper<int>* input_ids,
                          TensorWrapper<T>* output,
                          EmbeddingWeight<T>* embed_table) 
{
    const int blockSize = 256;
    // max_context_token_num is presumably the sequence length (translated from original note).
    const int max_context_token_num = output->shape[0];
    const int hidden_size = output->shape[1];
    LLM_CHECK_WITH_INFO(max_context_token_num == input_ids->shape[0], "input ids 1st shape should equal to 1st shape of output");
    // Enough blocks to cover every element once, clamped to [1, 2048];
    // the grid-stride loop in the kernel handles any remainder.
    const int total = max_context_token_num * hidden_size;
    int gridSize = (total + blockSize - 1) / blockSize;
    if (gridSize < 1) gridSize = 1;        // avoid an invalid 0-block launch when total == 0
    if (gridSize > 2048) gridSize = 2048;  // original cap; keeps launch overhead bounded
    embeddingFunctor<T><<<gridSize, blockSize>>>(input_ids->data,
                                               output->data,
                                               embed_table->data,
                                               max_context_token_num,
                                               hidden_size);
    // NOTE(review): consider checking cudaGetLastError() here to surface
    // launch-configuration errors — confirm project's preferred error macro.
}




// Explicit instantiation: FP32 embedding lookup.
template void launchInputEmbedding(TensorWrapper<int>* input_ids,
                          TensorWrapper<float>* output,
                          EmbeddingWeight<float>* embed_table);


// Explicit instantiation: FP16 (half) embedding lookup.
template void launchInputEmbedding(TensorWrapper<int>* input_ids,
                          TensorWrapper<half>* output,
                          EmbeddingWeight<half>* embed_table);