/*
  Copyright (C) 2024-2025  Sutou Kouhei <kou@clear-code.com>

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
*/

#pragma once

#include <groonga/progress.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * \brief Language model.
 *
 *        You need to use \ref grn_language_model_loader_load to load
 *        \ref grn_language_model.
 */
typedef struct grn_language_model_ grn_language_model;
/**
 * \brief Language model inferencer.
 *
 *        You need to use \ref grn_language_model_open_inferencer to
 *        open \ref grn_language_model_inferencer.
 */
typedef struct grn_language_model_inferencer_ grn_language_model_inferencer;
/**
 * \brief Language model loader.
 *
 *        You need to use \ref grn_language_model_loader_open to open
 *        \ref grn_language_model_loader.
 */
typedef struct grn_language_model_loader_ grn_language_model_loader;

/**
 * \brief Open a new language model loader.
 *
 *        Close the returned loader with \ref
 *        grn_language_model_loader_close when it's no longer needed.
 *
 * \param ctx The context object.
 *
 * \return A newly created language model loader on success, `NULL` on
 *         error.
 */
GRN_API grn_language_model_loader *
grn_language_model_loader_open(grn_ctx *ctx);
/**
 * \brief Close a language model loader.
 *
 * \param ctx The context object.
 * \param loader The loader to close. It must have been opened by \ref
 *               grn_language_model_loader_open.
 *
 * \return \ref GRN_SUCCESS on success, the appropriate \ref grn_rc on
 *         error.
 */
GRN_API grn_rc
grn_language_model_loader_close(grn_ctx *ctx,
                                grn_language_model_loader *loader);
/**
 * \brief Set the name of the language model to load.
 *
 * \param ctx The context object.
 * \param loader The loader.
 * \param model The model name to load.
 * \param model_length The byte size of `model`. You can use `-1` if
 *                     `model` is a `\0`-terminated string.
 *
 * \return \ref GRN_SUCCESS on success, the appropriate \ref grn_rc on
 *         error.
 *
 * \since 15.1.8
 */
GRN_API grn_rc
grn_language_model_loader_set_model(grn_ctx *ctx,
                                    grn_language_model_loader *loader,
                                    const char *model,
                                    int64_t model_length);

/**
 * \brief The default number of GPU layers to use in a language
 *        model. In general, we use GPU as much as possible by
 *        default.
 *
 * This value is the same as `n_gpu_layers` of
 * `llama_model_default_params()`.
 *
 * \since 15.2.1
 */
#define GRN_LANGUAGE_MODEL_LOADER_N_GPU_LAYERS_DEFAULT 999

/**
 * \brief Set the number of GPU layers to use.
 *
 * You can disable GPU by specifying `0` as `n_gpu_layers`. If you
 * don't call this, \ref GRN_LANGUAGE_MODEL_LOADER_N_GPU_LAYERS_DEFAULT
 * is used.
 *
 * \param ctx The context object.
 * \param loader The loader.
 * \param n_gpu_layers The number of GPU layers to use.
 * \return \ref GRN_SUCCESS on success, the appropriate \ref grn_rc on
 *         error.
 *
 * \since 15.2.1
 */
GRN_API grn_rc
grn_language_model_loader_set_n_gpu_layers(grn_ctx *ctx,
                                           grn_language_model_loader *loader,
                                           int32_t n_gpu_layers);
/**
 * \brief Load a language model.
 *
 *        If the target model is already loaded, this reuses the
 *        loaded model instead of loading a new model.
 *
 * \param ctx The context object.
 * \param loader The loader.
 *
 * \return A loaded \ref grn_language_model on success, `NULL` on error.
 *
 *         See `ctx->rc` for error details.
 */
GRN_API grn_language_model *
grn_language_model_loader_load(grn_ctx *ctx, grn_language_model_loader *loader);

/**
 * \brief Get the number of dimensions of an embedding generated by
 *        this model.
 *
 * \param ctx The context object.
 * \param model The model.
 *
 * \return The number of dimensions on success, `0` on error.
 *
 *         See `ctx->rc` for error details.
 */
GRN_API uint32_t
grn_language_model_get_n_embedding_dimensions(grn_ctx *ctx,
                                              grn_language_model *model);

/**
 * \brief Close a language model.
 *
 * \param ctx The context object.
 * \param model The model to close. It must have been loaded by \ref
 *              grn_language_model_loader_load.
 *
 * \return \ref GRN_SUCCESS on success, the appropriate \ref grn_rc on
 *         error.
 */
GRN_API grn_rc
grn_language_model_close(grn_ctx *ctx, grn_language_model *model);

/**
 * \brief Open a new language model inferencer.
 *
 *        Close the returned inferencer with \ref
 *        grn_language_model_inferencer_close when it's no longer
 *        needed.
 *
 * \param ctx The context object.
 * \param model The model to inference.
 *
 * \return A newly created \ref grn_language_model_inferencer on
 *         success, `NULL` on error.
 *
 *         See `ctx->rc` for error details.
 */
GRN_API grn_language_model_inferencer *
grn_language_model_open_inferencer(grn_ctx *ctx, grn_language_model *model);

/**
 * \brief Close a language model inferencer.
 *
 * \param ctx The context object.
 * \param inferencer The inferencer to close. It must have been opened
 *                   by \ref grn_language_model_open_inferencer.
 *
 * \return \ref GRN_SUCCESS on success, the appropriate \ref grn_rc on
 *         error.
 */
GRN_API grn_rc
grn_language_model_inferencer_close(grn_ctx *ctx,
                                    grn_language_model_inferencer *inferencer);

/**
 * \brief Prepend `prefix` to all values of `input_column` in \ref
 *        grn_language_model_inferencer_vectorize_in_batch and \ref
 *        grn_language_model_inferencer_vectorize_applier.
 *
 * \param ctx The context object.
 * \param inferencer The inferencer.
 * \param prefix The prefix to prepend.
 * \param prefix_length The byte size of `prefix`. You can use `-1` if
 *                      `prefix` is a `\0`-terminated string.
 *
 * \return \ref GRN_SUCCESS on success, the appropriate \ref grn_rc on
 *         error.
 *
 * \since 15.1.9
 */
GRN_API grn_rc
grn_language_model_inferencer_set_input_column_value_prefix(
  grn_ctx *ctx,
  grn_language_model_inferencer *inferencer,
  const char *prefix,
  int64_t prefix_length);

/**
 * \brief Set a progress callback that is called each time a batch
 *        vectorization is completed.
 *
 * \param ctx The context object.
 * \param inferencer The inferencer.
 * \param callback The callback.
 * \param user_data The data that is passed to the `callback` when
 *                  `callback` is called.
 *
 * \return \ref GRN_SUCCESS on success, the appropriate \ref grn_rc on
 *         error.
 *
 * \since 15.2.1
 */
GRN_API grn_rc
grn_language_model_inferencer_set_progress_callback(
  grn_ctx *ctx,
  grn_language_model_inferencer *inferencer,
  grn_progress_callback_func callback,
  void *user_data);

/**
 * \brief Vectorize a text.
 *
 * In other words, compute the embedding of a text.
 *
 * \param ctx The context object.
 * \param inferencer The inferencer.
 * \param text The text to vectorize.
 * \param text_length The byte size of `text`. You can use `-1` if
 *                    `text` is a `\0`-terminated string.
 * \param output_vector \ref GRN_DB_FLOAT32 vector as an output. A
 *                      caller must initialize this by \ref
 *                      GRN_FLOAT32_INIT and \ref GRN_OBJ_VECTOR.
 *
 * \return \ref GRN_SUCCESS on success, the appropriate \ref grn_rc on
 *         error.
 */
GRN_API grn_rc
grn_language_model_inferencer_vectorize(
  grn_ctx *ctx,
  grn_language_model_inferencer *inferencer,
  const char *text,
  int64_t text_length,
  grn_obj *output_vector);

/**
 * \brief Vectorize texts in batch.
 *
 * In other words, compute the embeddings set of texts. This is more
 * efficient than calling grn_language_model_inferencer_vectorize()
 * multiple times.
 *
 * This should be used from an applier.
 *
 * \param ctx The context object.
 * \param inferencer The inferencer.
 * \param input_column The text family column or accessor. The caller
 *                     must ensure this; this function doesn't
 *                     validate it.
 * \param data The applier data passed to an applier function.
 *
 * \return \ref GRN_SUCCESS on success, the appropriate \ref grn_rc on
 *         error.
 */
GRN_API grn_rc
grn_language_model_inferencer_vectorize_applier(
  grn_ctx *ctx,
  grn_language_model_inferencer *inferencer,
  grn_obj *input_column,
  grn_applier_data *data);

/**
 * \brief Vectorize texts and output embeddings set to `Float32` \ref
 *        GRN_UVECTOR or vector column in batch.
 *
 * In other words, compute the embeddings set of texts. This is more
 * efficient than calling grn_language_model_inferencer_vectorize()
 * multiple times.
 *
 * You can use \ref grn_memory_map to write and read the result
 * embeddings set without allocating memory for all embeddings set:
 *
 * ```c
 * uint32_t n_dimensions = 256;
 * uint32_t n_records = 1000;
 * // Byte size of the whole embeddings set: each embedding element is
 * // a 4 byte Float32.
 * size_t embeddings_set_size = n_dimensions * n_records * sizeof(float);
 * grn_memory_map *embeddings_set_map =
 *   grn_memory_map_open(ctx,
 *                       "/tmp/embeddings",
 *                       GRN_MEMORY_MAP_READ | GRN_MEMORY_MAP_WRITE,
 *                       0,
 *                       embeddings_set_size);
 * float *embeddings_set_raw =
 *   grn_memory_map_get_address(ctx, embeddings_set_map);
 * grn_obj embeddings_set;
 * GRN_FLOAT32_INIT(&embeddings_set, GRN_OBJ_VECTOR | GRN_OBJ_DO_SHALLOW_COPY);
 * GRN_BINARY_SET_REF(&embeddings_set, embeddings_set_raw, embeddings_set_size);
 * GRN_BULK_REWIND(&embeddings_set);
 * grn_table_cursor *cursor =
 *   grn_table_cursor_open(ctx,
 *                         source_table,
 *                         NULL, 0,
 *                         NULL, 0,
 *                         0, -1, GRN_CURSOR_BY_ID);
 * if (cursor) {
 *   grn_language_model_inferencer_vectorize_in_batch(ctx,
 *                                                    inferencer,
 *                                                    cursor,
 *                                                    source_column,
 *                                                    &embeddings_set);
 *   grn_table_cursor_close(ctx, cursor);
 * }
 * // Use &embeddings_set
 * GRN_OBJ_FIN(ctx, &embeddings_set);
 * grn_memory_map_close(ctx, embeddings_set_map);
 * ```
 *
 * \param ctx The context object.
 * \param inferencer The inferencer.
 * \param cursor The cursor that returns target record IDs.
 * \param input_column The text family column or accessor. The caller
 *                     must ensure this; this function doesn't
 *                     validate it.
 * \param output The generated embeddings set. This must be a
 *               `Float32` vector or `Float32` vector column. Output
 *               order is same as IDs returned by the `cursor`.
 *
 * \return \ref GRN_SUCCESS on success, the appropriate \ref grn_rc on
 *         error.
 */
GRN_API grn_rc
grn_language_model_inferencer_vectorize_in_batch(
  grn_ctx *ctx,
  grn_language_model_inferencer *inferencer,
  grn_table_cursor *cursor,
  grn_obj *input_column,
  grn_obj *output);

#ifdef __cplusplus
}
#endif
