|
#pragma once |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// GGML_API: linkage/visibility annotation for every public ggml symbol.
//   shared build, MSVC-style Windows : dllexport when building the library
//                                      (GGML_BUILD), dllimport when consuming it
//   shared build, other platforms    : default ELF/Mach-O visibility
//                                      (MinGW takes this branch too)
//   static build                     : plain extern
#ifdef GGML_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef GGML_BUILD
#            define GGML_API __declspec(dllexport) extern
#        else
#            define GGML_API __declspec(dllimport) extern
#        endif
#    else
#        define GGML_API __attribute__ ((visibility ("default"))) extern
#    endif
#else
#    define GGML_API extern
#endif
|
|
|
|
|
// GGML_DEPRECATED(func, hint): mark a declaration as deprecated with a
// compiler-specific attribute carrying `hint` as the diagnostic message.
// Note the placement difference: GCC/Clang attach the attribute after the
// declarator, MSVC's __declspec goes before it. Unknown compilers get the
// bare declaration (no warning).
#ifdef __GNUC__
#    define GGML_DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
#elif defined(_MSC_VER)
#    define GGML_DEPRECATED(func, hint) __declspec(deprecated(hint)) func
#else
#    define GGML_DEPRECATED(func, hint) func
#endif
|
|
|
// GGML_ATTRIBUTE_FORMAT(fmt_idx, first_arg_idx): enable printf-style
// format-string checking on variadic functions (GCC/Clang only).
// MinGW uses gnu_printf so that %zu/%lld etc. are checked against the GNU
// semantics provided by mingw's stdio, not Microsoft's CRT rules.
#ifndef __GNUC__
#    define GGML_ATTRIBUTE_FORMAT(...)
#elif defined(__MINGW32__)
#    define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#    define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif
|
|
|
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>  // abort(), used by GGML_UNREACHABLE in debug builds
|
|
|
// Magic bytes ("ggml") and version of the legacy ggml binary file format.
#define GGML_FILE_MAGIC 0x67676d6c
#define GGML_FILE_VERSION 2

// Quantization format version; FACTOR is used to pack this version into the
// ftype field of serialized models.
#define GGML_QNT_VERSION 2
#define GGML_QNT_VERSION_FACTOR 1000

// Hard limits baked into the tensor/graph data structures below.
#define GGML_MAX_DIMS 4          // tensors are at most 4-dimensional
#define GGML_MAX_PARAMS 2048     // max trainable parameter tensors per graph
#define GGML_MAX_SRC 10          // max source operands per tensor op
#define GGML_MAX_N_THREADS 512
#define GGML_MAX_OP_PARAMS 64    // bytes of inline op parameters per tensor

// Tensor name buffer size; overridable at build time.
#ifndef GGML_MAX_NAME
#    define GGML_MAX_NAME 64
#endif

#define GGML_DEFAULT_N_THREADS 4
#define GGML_DEFAULT_GRAPH_SIZE 2048

// Memory alignment for context allocations: 4 bytes on 32-bit targets,
// 16 bytes on 64-bit targets (detected via UINTPTR_MAX).
#if UINTPTR_MAX == 0xFFFFFFFF
#define GGML_MEM_ALIGN 4
#else
#define GGML_MEM_ALIGN 16
#endif

// Status codes for graph computation.
#define GGML_EXIT_SUCCESS 0
#define GGML_EXIT_ABORTED 1

// RoPE mode bit selecting the NeoX rotation variant.
#define GGML_ROPE_TYPE_NEOX 2

// GGUF model container format: magic, version and default tensor alignment.
#define GGUF_MAGIC "GGUF"

#define GGUF_VERSION 3

#define GGUF_DEFAULT_ALIGNMENT 32

// Silence "unused variable/parameter" warnings.
#define GGML_UNUSED(x) (void)(x)

// Round x up to the next multiple of n. NOTE: n must be a power of two —
// the bitmask trick does not work for other values.
#define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))
|
|
|
// GGML_UNREACHABLE(): marks a code path that must never execute.
//   debug builds (NDEBUG undefined): print a message and abort() so the bug
//       is caught loudly (abort() requires <stdlib.h> to be in scope where
//       this macro expands);
//   release builds: compiler intrinsic letting the optimizer treat the path
//       as dead; no-op fallback on unknown compilers.
#ifndef NDEBUG
#    define GGML_UNREACHABLE() do { fprintf(stderr, "statement should be unreachable\n"); abort(); } while(0)
#elif defined(__GNUC__)
#    define GGML_UNREACHABLE() __builtin_unreachable()
#elif defined(_MSC_VER)
#    define GGML_UNREACHABLE() __assume(0)
#else
#    define GGML_UNREACHABLE() ((void) 0)
#endif
|
|
|
// GGML_NORETURN: portable "this function does not return" annotation.
// C++ gets the standard [[noreturn]] attribute; MSVC-as-C gets __declspec;
// everyone else gets C11 _Noreturn.
#ifdef __cplusplus
#    define GGML_NORETURN [[noreturn]]
#elif defined(_MSC_VER)
#    define GGML_NORETURN __declspec(noreturn)
#else
#    define GGML_NORETURN _Noreturn
#endif
|
|
|
// GGML_ABORT(fmt, ...): report a formatted fatal error with the source
// location and terminate via ggml_abort() (declared below).
#define GGML_ABORT(...) ggml_abort(__FILE__, __LINE__, __VA_ARGS__)

// GGML_ASSERT(x): abort with a diagnostic naming the failed expression.
// Wrapped in do { ... } while (0) so the macro is a single statement:
// the previous bare-`if` form captured a following `else` (dangling-else)
// and misbehaved in unbraced `if (c) GGML_ASSERT(x); else ...` contexts.
#define GGML_ASSERT(x) do { if (!(x)) GGML_ABORT("GGML_ASSERT(%s) failed", #x); } while (0)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// GGML_TENSOR_LOCALS[_1/_2/_3]: declare const locals prefix0..prefix3
// mirroring (pointer)->array[0..3] — e.g. GGML_TENSOR_LOCALS(int64_t, ne0,
// src0, ne) yields ne00..ne03 from src0->ne. Every local is passed through
// GGML_UNUSED so kernels may reference only a subset without warnings.
#define GGML_TENSOR_LOCALS_1(type, prefix, pointer, array) \
    const type prefix##0 = (pointer)->array[0]; \
    GGML_UNUSED(prefix##0);
#define GGML_TENSOR_LOCALS_2(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_1 (type, prefix, pointer, array) \
    const type prefix##1 = (pointer)->array[1]; \
    GGML_UNUSED(prefix##1);
#define GGML_TENSOR_LOCALS_3(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_2 (type, prefix, pointer, array) \
    const type prefix##2 = (pointer)->array[2]; \
    GGML_UNUSED(prefix##2);
#define GGML_TENSOR_LOCALS(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_3 (type, prefix, pointer, array) \
    const type prefix##3 = (pointer)->array[3]; \
    GGML_UNUSED(prefix##3);
|
|
|
// Convenience bundles of GGML_TENSOR_LOCALS for op kernels: declare the
// dims (ne*) and byte strides (nb*) of the conventional operand variables.
//   UNARY    : src0 (ne0*/nb0*) and dst (ne*/nb*)
//   BINARY   : src0, src1 (ne1*/nb1*) and dst
//   BINARY01 : src0 and src1 only (no dst locals)
#define GGML_TENSOR_UNARY_OP_LOCALS \
    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
    GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \
    GGML_TENSOR_LOCALS(size_t, nb, dst, nb)

#define GGML_TENSOR_BINARY_OP_LOCALS \
    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
    GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \
    GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \
    GGML_TENSOR_LOCALS(size_t, nb, dst, nb)

#define GGML_TENSOR_BINARY_OP_LOCALS01 \
    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
    GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \
    GGML_TENSOR_LOCALS(size_t, nb1, src1, nb)
|
|
|
#ifdef __cplusplus |
|
extern "C" { |
|
#endif |
|
|
|
// Fatal-error handler behind GGML_ABORT/GGML_ASSERT: reports the source
// location plus a printf-style message and does not return.
GGML_NORETURN GGML_ATTRIBUTE_FORMAT(3, 4)
GGML_API void ggml_abort(const char * file, int line, const char * fmt, ...);

// Result codes returned by graph computation; values < 0 are errors,
// convertible to a string with ggml_status_to_string().
enum ggml_status {
    GGML_STATUS_ALLOC_FAILED = -2,
    GGML_STATUS_FAILED = -1,
    GGML_STATUS_SUCCESS = 0,
    GGML_STATUS_ABORTED = 1,  // stopped via abort callback
};
|
|
|
|
|
GGML_API const char * ggml_status_to_string(enum ggml_status status); |
|
|
|
|
|
|
|
// IEEE 754 half-precision float stored as its raw 16-bit pattern, plus
// scalar and row-wise conversions to/from float32.
typedef uint16_t ggml_fp16_t;
GGML_API float       ggml_fp16_to_fp32(ggml_fp16_t);
GGML_API ggml_fp16_t ggml_fp32_to_fp16(float);
GGML_API void        ggml_fp16_to_fp32_row(const ggml_fp16_t *, float *, int64_t);
GGML_API void        ggml_fp32_to_fp16_row(const float *, ggml_fp16_t *, int64_t);

// bfloat16 (truncated float32) wrapped in a struct so it cannot be mixed up
// with ggml_fp16_t in the type system. The _ref row conversion is the
// reference (scalar) implementation; the other may be vectorized.
typedef struct { uint16_t bits; } ggml_bf16_t;
GGML_API ggml_bf16_t ggml_fp32_to_bf16(float);
GGML_API float       ggml_bf16_to_fp32(ggml_bf16_t);
GGML_API void        ggml_bf16_to_fp32_row(const ggml_bf16_t *, float *, int64_t);
GGML_API void        ggml_fp32_to_bf16_row_ref(const float *, ggml_bf16_t *, int64_t);
GGML_API void        ggml_fp32_to_bf16_row(const float *, ggml_bf16_t *, int64_t);
|
|
|
// Opaque types — definitions live in the implementation; the public API
// only passes pointers to them.
struct ggml_object;
struct ggml_context;
struct ggml_cgraph;
|
|
|
|
|
// Tensor element/storage types: floats, k-quant and i-quant block formats,
// and plain integers. Values are explicit because they appear in serialized
// model files — never renumber; append new types before GGML_TYPE_COUNT.
// The gaps at 4 and 5 are values of removed types (kept unused for
// compatibility — presumably Q4_2/Q4_3; confirm against project history).
enum ggml_type {
    GGML_TYPE_F32 = 0,
    GGML_TYPE_F16 = 1,
    GGML_TYPE_Q4_0 = 2,
    GGML_TYPE_Q4_1 = 3,
    // 4, 5: removed
    GGML_TYPE_Q5_0 = 6,
    GGML_TYPE_Q5_1 = 7,
    GGML_TYPE_Q8_0 = 8,
    GGML_TYPE_Q8_1 = 9,
    GGML_TYPE_Q2_K = 10,
    GGML_TYPE_Q3_K = 11,
    GGML_TYPE_Q4_K = 12,
    GGML_TYPE_Q5_K = 13,
    GGML_TYPE_Q6_K = 14,
    GGML_TYPE_Q8_K = 15,
    GGML_TYPE_IQ2_XXS = 16,
    GGML_TYPE_IQ2_XS = 17,
    GGML_TYPE_IQ3_XXS = 18,
    GGML_TYPE_IQ1_S = 19,
    GGML_TYPE_IQ4_NL = 20,
    GGML_TYPE_IQ3_S = 21,
    GGML_TYPE_IQ2_S = 22,
    GGML_TYPE_IQ4_XS = 23,
    GGML_TYPE_I8 = 24,
    GGML_TYPE_I16 = 25,
    GGML_TYPE_I32 = 26,
    GGML_TYPE_I64 = 27,
    GGML_TYPE_F64 = 28,
    GGML_TYPE_IQ1_M = 29,
    GGML_TYPE_BF16 = 30,
    GGML_TYPE_Q4_0_4_4 = 31,
    GGML_TYPE_Q4_0_4_8 = 32,
    GGML_TYPE_Q4_0_8_8 = 33,
    GGML_TYPE_TQ1_0 = 34,
    GGML_TYPE_TQ2_0 = 35,
    GGML_TYPE_IQ4_NL_4_4 = 36,
    GGML_TYPE_COUNT,
};
|
|
|
|
|
// Computation precision hint for selected ops (see ggml_mul_mat_set_prec):
// DEFAULT lets the backend choose; F32 forces full float32 accumulation.
enum ggml_prec {
    GGML_PREC_DEFAULT,
    GGML_PREC_F32,
};
|
|
|
// Legacy storage-location tag (see the deprecated `backend` field of
// struct ggml_tensor). Superseded by backend buffers.
enum ggml_backend_type {
    GGML_BACKEND_TYPE_CPU = 0,
    GGML_BACKEND_TYPE_GPU = 10,
    GGML_BACKEND_TYPE_GPU_SPLIT = 20,  // tensor split across multiple GPUs
};
|
|
|
|
|
// Overall model file type: which ggml_type "most" tensors were quantized
// to. Values are explicit and serialized in model files — do not renumber
// (the gaps at 5–6 are values of removed formats).
enum ggml_ftype {
    GGML_FTYPE_UNKNOWN = -1,
    GGML_FTYPE_ALL_F32 = 0,
    GGML_FTYPE_MOSTLY_F16 = 1,            // except 1d tensors
    GGML_FTYPE_MOSTLY_Q4_0 = 2,
    GGML_FTYPE_MOSTLY_Q4_1 = 3,
    GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4,  // tok_embeddings/output kept f16 — TODO confirm
    GGML_FTYPE_MOSTLY_Q8_0 = 7,
    GGML_FTYPE_MOSTLY_Q5_0 = 8,
    GGML_FTYPE_MOSTLY_Q5_1 = 9,
    GGML_FTYPE_MOSTLY_Q2_K = 10,
    GGML_FTYPE_MOSTLY_Q3_K = 11,
    GGML_FTYPE_MOSTLY_Q4_K = 12,
    GGML_FTYPE_MOSTLY_Q5_K = 13,
    GGML_FTYPE_MOSTLY_Q6_K = 14,
    GGML_FTYPE_MOSTLY_IQ2_XXS = 15,
    GGML_FTYPE_MOSTLY_IQ2_XS = 16,
    GGML_FTYPE_MOSTLY_IQ3_XXS = 17,
    GGML_FTYPE_MOSTLY_IQ1_S = 18,
    GGML_FTYPE_MOSTLY_IQ4_NL = 19,
    GGML_FTYPE_MOSTLY_IQ3_S = 20,
    GGML_FTYPE_MOSTLY_IQ2_S = 21,
    GGML_FTYPE_MOSTLY_IQ4_XS = 22,
    GGML_FTYPE_MOSTLY_IQ1_M = 23,
    GGML_FTYPE_MOSTLY_BF16 = 24,
    GGML_FTYPE_MOSTLY_Q4_0_4_4 = 25,
    GGML_FTYPE_MOSTLY_Q4_0_4_8 = 26,
    GGML_FTYPE_MOSTLY_Q4_0_8_8 = 27,
};
|
|
|
|
|
// All tensor operations the compute graph supports. The implicit values
// index internal dispatch/name tables, so append new ops only before
// GGML_OP_COUNT.
enum ggml_op {
    GGML_OP_NONE = 0,

    // element-wise and reduction ops
    GGML_OP_DUP,
    GGML_OP_ADD,
    GGML_OP_ADD1,
    GGML_OP_ACC,
    GGML_OP_SUB,
    GGML_OP_MUL,
    GGML_OP_DIV,
    GGML_OP_SQR,
    GGML_OP_SQRT,
    GGML_OP_LOG,
    GGML_OP_SIN,
    GGML_OP_COS,
    GGML_OP_SUM,
    GGML_OP_SUM_ROWS,
    GGML_OP_MEAN,
    GGML_OP_ARGMAX,
    GGML_OP_COUNT_EQUAL,
    GGML_OP_REPEAT,
    GGML_OP_REPEAT_BACK,
    GGML_OP_CONCAT,
    GGML_OP_SILU_BACK,
    GGML_OP_NORM,  // normalize
    GGML_OP_RMS_NORM,
    GGML_OP_RMS_NORM_BACK,
    GGML_OP_GROUP_NORM,

    // matrix multiplication
    GGML_OP_MUL_MAT,
    GGML_OP_MUL_MAT_ID,  // indirect matmul over a set of expert matrices
    GGML_OP_OUT_PROD,

    // data movement / view ops
    GGML_OP_SCALE,
    GGML_OP_SET,
    GGML_OP_CPY,
    GGML_OP_CONT,
    GGML_OP_RESHAPE,
    GGML_OP_VIEW,
    GGML_OP_PERMUTE,
    GGML_OP_TRANSPOSE,
    GGML_OP_GET_ROWS,
    GGML_OP_GET_ROWS_BACK,
    GGML_OP_DIAG,
    GGML_OP_DIAG_MASK_INF,
    GGML_OP_DIAG_MASK_ZERO,
    GGML_OP_SOFT_MAX,
    GGML_OP_SOFT_MAX_BACK,
    GGML_OP_ROPE,
    GGML_OP_ROPE_BACK,
    GGML_OP_CLAMP,
    GGML_OP_CONV_TRANSPOSE_1D,
    GGML_OP_IM2COL,
    GGML_OP_IM2COL_BACK,
    GGML_OP_CONV_TRANSPOSE_2D,
    GGML_OP_POOL_1D,
    GGML_OP_POOL_2D,
    GGML_OP_POOL_2D_BACK,
    GGML_OP_UPSCALE,  // nearest interpolate
    GGML_OP_PAD,
    GGML_OP_ARANGE,
    GGML_OP_TIMESTEP_EMBEDDING,
    GGML_OP_ARGSORT,
    GGML_OP_LEAKY_RELU,

    // attention / state-space / positional ops
    GGML_OP_FLASH_ATTN_EXT,
    GGML_OP_FLASH_ATTN_BACK,
    GGML_OP_SSM_CONV,
    GGML_OP_SSM_SCAN,
    GGML_OP_WIN_PART,
    GGML_OP_WIN_UNPART,
    GGML_OP_GET_REL_POS,
    GGML_OP_ADD_REL_POS,
    GGML_OP_RWKV_WKV6,

    // unary ops dispatched via enum ggml_unary_op in op_params
    GGML_OP_UNARY,

    // user-supplied function callbacks (legacy f32-only variants first)
    GGML_OP_MAP_UNARY,
    GGML_OP_MAP_BINARY,

    GGML_OP_MAP_CUSTOM1_F32,
    GGML_OP_MAP_CUSTOM2_F32,
    GGML_OP_MAP_CUSTOM3_F32,

    GGML_OP_MAP_CUSTOM1,
    GGML_OP_MAP_CUSTOM2,
    GGML_OP_MAP_CUSTOM3,

    // training / optimization
    GGML_OP_CROSS_ENTROPY_LOSS,
    GGML_OP_CROSS_ENTROPY_LOSS_BACK,
    GGML_OP_OPT_STEP_ADAMW,

    GGML_OP_COUNT,
};
|
|
|
// Element-wise unary activations, carried in op_params of a GGML_OP_UNARY
// node (see ggml_get_unary_op). Append only before GGML_UNARY_OP_COUNT.
enum ggml_unary_op {
    GGML_UNARY_OP_ABS,
    GGML_UNARY_OP_SGN,
    GGML_UNARY_OP_NEG,
    GGML_UNARY_OP_STEP,
    GGML_UNARY_OP_TANH,
    GGML_UNARY_OP_ELU,
    GGML_UNARY_OP_RELU,
    GGML_UNARY_OP_SIGMOID,
    GGML_UNARY_OP_GELU,
    GGML_UNARY_OP_GELU_QUICK,
    GGML_UNARY_OP_SILU,
    GGML_UNARY_OP_HARDSWISH,
    GGML_UNARY_OP_HARDSIGMOID,
    GGML_UNARY_OP_EXP,

    GGML_UNARY_OP_COUNT,
};
|
|
|
// Kind tag for objects allocated inside a ggml_context.
enum ggml_object_type {
    GGML_OBJECT_TYPE_TENSOR,
    GGML_OBJECT_TYPE_GRAPH,
    GGML_OBJECT_TYPE_WORK_BUFFER
};
|
|
|
// Severity levels passed to log callbacks. CONT continues the previous
// log message (no new level/prefix).
enum ggml_log_level {
    GGML_LOG_LEVEL_NONE = 0,
    GGML_LOG_LEVEL_DEBUG = 1,
    GGML_LOG_LEVEL_INFO = 2,
    GGML_LOG_LEVEL_WARN = 3,
    GGML_LOG_LEVEL_ERROR = 4,
    GGML_LOG_LEVEL_CONT = 5,  // continue previous log
};
|
|
|
|
|
// Bit flags for ggml_tensor.flags, set via ggml_set_input/output/param/loss.
enum ggml_tensor_flag {
    GGML_TENSOR_FLAG_INPUT = 1,   // graph input
    GGML_TENSOR_FLAG_OUTPUT = 2,  // graph output
    GGML_TENSOR_FLAG_PARAM = 4,   // trainable parameter
    GGML_TENSOR_FLAG_LOSS = 8,    // loss value for optimization
};
|
|
|
// Parameters for ggml_init().
struct ggml_init_params {
    // memory pool
    size_t mem_size;    // bytes available to the context
    void * mem_buffer;  // caller-provided buffer, or NULL to let ggml allocate
    bool no_alloc;      // true: create tensor metadata but don't allocate data
};
|
|
|
|
|
// n-dimensional tensor: metadata plus a pointer to (possibly externally
// owned) data. At most GGML_MAX_DIMS dimensions.
struct ggml_tensor {
    enum ggml_type type;  // element/storage type

    GGML_DEPRECATED(enum ggml_backend_type backend, "use the buffer type to find the storage location of the tensor");

    struct ggml_backend_buffer * buffer;  // backend buffer holding `data` (may be NULL)

    int64_t ne[GGML_MAX_DIMS];  // number of elements per dimension
    size_t  nb[GGML_MAX_DIMS];  // stride in bytes per dimension
                                // (assumes nb[0] is the element/block size — confirm in impl)

    // compute graph fields
    enum ggml_op op;  // operation that produces this tensor

    // op-specific parameters, stored inline (allocated as int32_t for alignment)
    int32_t op_params[GGML_MAX_OP_PARAMS / sizeof(int32_t)];

    int32_t flags;  // bitwise OR of enum ggml_tensor_flag

    struct ggml_tensor * src[GGML_MAX_SRC];  // source operands of `op`

    // for views: the tensor whose data this one aliases, and the byte offset
    struct ggml_tensor * view_src;
    size_t               view_offs;

    void * data;  // element data (NULL when no_alloc / not yet allocated)

    char name[GGML_MAX_NAME];  // human-readable name, see ggml_set_name()

    void * extra;  // backend-specific extra data

    char padding[8];  // explicit tail padding to keep sizeof stable — presumably; verify
};

// Size in bytes of the tensor metadata struct itself (not the element data).
static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
|
|
|
|
|
|
|
|
|
// Abort callback: if not NULL, polled during computation; returning true
// aborts the computation in progress (assumption from name — confirm call sites).
typedef bool (*ggml_abort_callback)(void * data);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 128-bit identifier, compared with ggml_guid_matches().
typedef uint8_t ggml_guid[16];
typedef ggml_guid * ggml_guid_t;

GGML_API bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b);
|
|
|
|
|
|
|
GGML_API void ggml_time_init(void); |
|
GGML_API int64_t ggml_time_ms(void); |
|
GGML_API int64_t ggml_time_us(void); |
|
GGML_API int64_t ggml_cycles(void); |
|
GGML_API int64_t ggml_cycles_per_ms(void); |
|
|
|
|
|
GGML_API FILE * ggml_fopen(const char * fname, const char * mode); |
|
|
|
GGML_API void ggml_print_object (const struct ggml_object * obj); |
|
GGML_API void ggml_print_objects(const struct ggml_context * ctx); |
|
|
|
GGML_API int64_t ggml_nelements (const struct ggml_tensor * tensor); |
|
GGML_API int64_t ggml_nrows (const struct ggml_tensor * tensor); |
|
GGML_API size_t ggml_nbytes (const struct ggml_tensor * tensor); |
|
GGML_API size_t ggml_nbytes_pad(const struct ggml_tensor * tensor); |
|
|
|
GGML_API int64_t ggml_blck_size(enum ggml_type type); |
|
GGML_API size_t ggml_type_size(enum ggml_type type); |
|
GGML_API size_t ggml_row_size (enum ggml_type type, int64_t ne); |
|
|
|
GGML_DEPRECATED( |
|
GGML_API double ggml_type_sizef(enum ggml_type type), |
|
"use ggml_row_size() instead"); |
|
|
|
GGML_API const char * ggml_type_name(enum ggml_type type); |
|
GGML_API const char * ggml_op_name (enum ggml_op op); |
|
GGML_API const char * ggml_op_symbol(enum ggml_op op); |
|
|
|
GGML_API const char * ggml_unary_op_name(enum ggml_unary_op op); |
|
GGML_API const char * ggml_op_desc(const struct ggml_tensor * t); |
|
|
|
GGML_API size_t ggml_element_size(const struct ggml_tensor * tensor); |
|
|
|
GGML_API bool ggml_is_quantized(enum ggml_type type); |
|
|
|
|
|
GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype); |
|
|
|
GGML_API bool ggml_is_transposed(const struct ggml_tensor * tensor); |
|
GGML_API bool ggml_is_permuted (const struct ggml_tensor * tensor); |
|
GGML_API bool ggml_is_empty (const struct ggml_tensor * tensor); |
|
GGML_API bool ggml_is_scalar (const struct ggml_tensor * tensor); |
|
GGML_API bool ggml_is_vector (const struct ggml_tensor * tensor); |
|
GGML_API bool ggml_is_matrix (const struct ggml_tensor * tensor); |
|
GGML_API bool ggml_is_3d (const struct ggml_tensor * tensor); |
|
GGML_API int ggml_n_dims (const struct ggml_tensor * tensor); |
|
|
|
GGML_API bool ggml_is_contiguous (const struct ggml_tensor * tensor); |
|
GGML_API bool ggml_is_contiguous_0(const struct ggml_tensor * tensor); |
|
GGML_API bool ggml_is_contiguous_1(const struct ggml_tensor * tensor); |
|
GGML_API bool ggml_is_contiguous_2(const struct ggml_tensor * tensor); |
|
|
|
GGML_API bool ggml_are_same_shape (const struct ggml_tensor * t0, const struct ggml_tensor * t1); |
|
GGML_API bool ggml_are_same_stride(const struct ggml_tensor * t0, const struct ggml_tensor * t1); |
|
|
|
GGML_API bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1); |
|
|
|
|
|
GGML_API size_t ggml_tensor_overhead(void); |
|
|
|
GGML_API bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes); |
|
|
|
|
|
|
|
GGML_API struct ggml_context * ggml_init (struct ggml_init_params params); |
|
GGML_API void ggml_reset(struct ggml_context * ctx); |
|
GGML_API void ggml_free (struct ggml_context * ctx); |
|
|
|
GGML_API size_t ggml_used_mem(const struct ggml_context * ctx); |
|
|
|
GGML_API bool ggml_get_no_alloc(struct ggml_context * ctx); |
|
GGML_API void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc); |
|
|
|
GGML_API void * ggml_get_mem_buffer (const struct ggml_context * ctx); |
|
GGML_API size_t ggml_get_mem_size (const struct ggml_context * ctx); |
|
GGML_API size_t ggml_get_max_tensor_size(const struct ggml_context * ctx); |
|
|
|
GGML_API struct ggml_tensor * ggml_new_tensor( |
|
struct ggml_context * ctx, |
|
enum ggml_type type, |
|
int n_dims, |
|
const int64_t *ne); |
|
|
|
GGML_API struct ggml_tensor * ggml_new_tensor_1d( |
|
struct ggml_context * ctx, |
|
enum ggml_type type, |
|
int64_t ne0); |
|
|
|
GGML_API struct ggml_tensor * ggml_new_tensor_2d( |
|
struct ggml_context * ctx, |
|
enum ggml_type type, |
|
int64_t ne0, |
|
int64_t ne1); |
|
|
|
GGML_API struct ggml_tensor * ggml_new_tensor_3d( |
|
struct ggml_context * ctx, |
|
enum ggml_type type, |
|
int64_t ne0, |
|
int64_t ne1, |
|
int64_t ne2); |
|
|
|
GGML_API struct ggml_tensor * ggml_new_tensor_4d( |
|
struct ggml_context * ctx, |
|
enum ggml_type type, |
|
int64_t ne0, |
|
int64_t ne1, |
|
int64_t ne2, |
|
int64_t ne3); |
|
|
|
GGML_API void * ggml_new_buffer(struct ggml_context * ctx, size_t nbytes); |
|
|
|
GGML_API struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src); |
|
GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, struct ggml_tensor * src); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_get_first_tensor(const struct ggml_context * ctx); |
|
GGML_API struct ggml_tensor * ggml_get_next_tensor (const struct ggml_context * ctx, struct ggml_tensor * tensor); |
|
GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name); |
|
|
|
|
|
GGML_API void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3); |
|
|
|
GGML_API enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor); |
|
|
|
GGML_API void * ggml_get_data (const struct ggml_tensor * tensor); |
|
GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor); |
|
|
|
GGML_API const char * ggml_get_name (const struct ggml_tensor * tensor); |
|
GGML_API struct ggml_tensor * ggml_set_name ( struct ggml_tensor * tensor, const char * name); |
|
GGML_ATTRIBUTE_FORMAT(2, 3) |
|
GGML_API struct ggml_tensor * ggml_format_name( struct ggml_tensor * tensor, const char * fmt, ...); |
|
|
|
|
|
GGML_API void ggml_set_input(struct ggml_tensor * tensor); |
|
GGML_API void ggml_set_output(struct ggml_tensor * tensor); |
|
GGML_API void ggml_set_param(struct ggml_context * ctx, struct ggml_tensor * tensor); |
|
GGML_API void ggml_set_loss(struct ggml_tensor * tensor); |
|
|
|
|
|
|
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_dup( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_dup_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_add( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
GGML_API struct ggml_tensor * ggml_add_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
GGML_API struct ggml_tensor * ggml_add_cast( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b, |
|
enum ggml_type type); |
|
|
|
GGML_API struct ggml_tensor * ggml_add1( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
GGML_API struct ggml_tensor * ggml_add1_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
|
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_acc( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b, |
|
size_t nb1, |
|
size_t nb2, |
|
size_t nb3, |
|
size_t offset); |
|
|
|
GGML_API struct ggml_tensor * ggml_acc_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b, |
|
size_t nb1, |
|
size_t nb2, |
|
size_t nb3, |
|
size_t offset); |
|
|
|
GGML_API struct ggml_tensor * ggml_sub( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
GGML_API struct ggml_tensor * ggml_sub_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
GGML_API struct ggml_tensor * ggml_mul( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
GGML_API struct ggml_tensor * ggml_mul_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
GGML_API struct ggml_tensor * ggml_div( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
GGML_API struct ggml_tensor * ggml_div_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
GGML_API struct ggml_tensor * ggml_sqr( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_sqr_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_sqrt( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_sqrt_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_log( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_log_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_sin( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_sin_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_cos( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_cos_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_sum( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_sum_rows( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_mean( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_argmax( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_count_equal( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_repeat( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_repeat_back( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_concat( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b, |
|
int dim); |
|
|
|
GGML_API struct ggml_tensor * ggml_abs( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_abs_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_sgn( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_sgn_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_neg( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_neg_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_step( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_step_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_tanh( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_tanh_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_elu( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_elu_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_relu( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_leaky_relu( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, float negative_slope, bool inplace); |
|
|
|
GGML_API struct ggml_tensor * ggml_relu_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_sigmoid( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_sigmoid_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_gelu( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_gelu_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_gelu_quick( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_gelu_quick_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_silu( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_silu_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_silu_back( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_hardswish( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_hardsigmoid( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_exp( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
GGML_API struct ggml_tensor * ggml_exp_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_norm( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
float eps); |
|
|
|
GGML_API struct ggml_tensor * ggml_norm_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
float eps); |
|
|
|
GGML_API struct ggml_tensor * ggml_rms_norm( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
float eps); |
|
|
|
GGML_API struct ggml_tensor * ggml_rms_norm_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
float eps); |
|
|
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_group_norm( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
int n_groups, |
|
float eps); |
|
|
|
GGML_API struct ggml_tensor * ggml_group_norm_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
int n_groups, |
|
float eps); |
|
|
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_rms_norm_back( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b, |
|
float eps); |
|
|
|
|
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_mul_mat( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
|
|
|
|
GGML_API void ggml_mul_mat_set_prec( |
|
struct ggml_tensor * a, |
|
enum ggml_prec prec); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_mul_mat_id( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * as, |
|
struct ggml_tensor * b, |
|
struct ggml_tensor * ids); |
|
|
|
|
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_out_prod( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
|
|
|
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_scale( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
float s); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_scale_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
float s); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_set( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b, |
|
size_t nb1, |
|
size_t nb2, |
|
size_t nb3, |
|
size_t offset); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_set_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b, |
|
size_t nb1, |
|
size_t nb2, |
|
size_t nb3, |
|
size_t offset); |
|
|
|
GGML_API struct ggml_tensor * ggml_set_1d( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b, |
|
size_t offset); |
|
|
|
GGML_API struct ggml_tensor * ggml_set_1d_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b, |
|
size_t offset); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_set_2d( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b, |
|
size_t nb1, |
|
size_t offset); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_set_2d_inplace( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b, |
|
size_t nb1, |
|
size_t offset); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_cpy( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
GGML_API struct ggml_tensor * ggml_cast( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
enum ggml_type type); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_cont( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_cont_1d( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
int64_t ne0); |
|
|
|
GGML_API struct ggml_tensor * ggml_cont_2d( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
int64_t ne0, |
|
int64_t ne1); |
|
|
|
GGML_API struct ggml_tensor * ggml_cont_3d( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
int64_t ne0, |
|
int64_t ne1, |
|
int64_t ne2); |
|
|
|
GGML_API struct ggml_tensor * ggml_cont_4d( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
int64_t ne0, |
|
int64_t ne1, |
|
int64_t ne2, |
|
int64_t ne3); |
|
|
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_reshape( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b); |
|
|
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_reshape_1d( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
int64_t ne0); |
|
|
|
GGML_API struct ggml_tensor * ggml_reshape_2d( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
int64_t ne0, |
|
int64_t ne1); |
|
|
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_reshape_3d( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
int64_t ne0, |
|
int64_t ne1, |
|
int64_t ne2); |
|
|
|
GGML_API struct ggml_tensor * ggml_reshape_4d( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
int64_t ne0, |
|
int64_t ne1, |
|
int64_t ne2, |
|
int64_t ne3); |
|
|
|
|
|
GGML_API struct ggml_tensor * ggml_view_1d( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
int64_t ne0, |
|
size_t offset); |
|
|
|
GGML_API struct ggml_tensor * ggml_view_2d( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
int64_t ne0, |
|
int64_t ne1, |
|
size_t nb1, |
|
size_t offset); |
|
|
|
GGML_API struct ggml_tensor * ggml_view_3d( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
int64_t ne0, |
|
int64_t ne1, |
|
int64_t ne2, |
|
size_t nb1, |
|
size_t nb2, |
|
size_t offset); |
|
|
|
GGML_API struct ggml_tensor * ggml_view_4d( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
int64_t ne0, |
|
int64_t ne1, |
|
int64_t ne2, |
|
int64_t ne3, |
|
size_t nb1, |
|
size_t nb2, |
|
size_t nb3, |
|
size_t offset); |
|
|
|
    // permute the dimensions of a; axis0..axis3 give the new position of each
    // source dimension -- NOTE(review): confirm mapping direction against the implementation
    GGML_API struct ggml_tensor * ggml_permute(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            int axis0,
            int axis1,
            int axis2,
            int axis3);

    // swap the first two dimensions of a (a special case of ggml_permute)
    GGML_API struct ggml_tensor * ggml_transpose(
            struct ggml_context * ctx,
            struct ggml_tensor * a);

    // gather rows of a using the integer indices stored in b
    GGML_API struct ggml_tensor * ggml_get_rows(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b);

    // backward pass of ggml_get_rows
    GGML_API struct ggml_tensor * ggml_get_rows_back(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            struct ggml_tensor * c);
|
|
|
    // build a diagonal tensor from a -- assumes a holds the diagonal entries; TODO confirm
    GGML_API struct ggml_tensor * ggml_diag(
            struct ggml_context * ctx,
            struct ggml_tensor * a);

    // mask the elements above the diagonal (beyond n_past) by setting them to -INF
    // (used for causal attention masking)
    GGML_API struct ggml_tensor * ggml_diag_mask_inf(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            int n_past);

    // in-place variant: modifies a and returns a view of it
    GGML_API struct ggml_tensor * ggml_diag_mask_inf_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            int n_past);

    // mask the elements above the diagonal (beyond n_past) by setting them to 0
    GGML_API struct ggml_tensor * ggml_diag_mask_zero(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            int n_past);

    // in-place variant: modifies a and returns a view of it
    GGML_API struct ggml_tensor * ggml_diag_mask_zero_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            int n_past);
|
|
|
    // softmax over the rows of a
    GGML_API struct ggml_tensor * ggml_soft_max(
            struct ggml_context * ctx,
            struct ggml_tensor * a);

    // in-place variant: modifies a and returns a view of it
    GGML_API struct ggml_tensor * ggml_soft_max_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor * a);

    // fused softmax: soft_max(a*scale + mask*bias) -- NOTE(review): max_bias is
    // presumably the ALiBi slope bias (0.0f to disable); mask may be NULL; confirm
    // against the implementation
    GGML_API struct ggml_tensor * ggml_soft_max_ext(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * mask,
            float scale,
            float max_bias);

    // backward pass of ggml_soft_max
    GGML_API struct ggml_tensor * ggml_soft_max_back(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b);

    // in-place backward pass
    GGML_API struct ggml_tensor * ggml_soft_max_back_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b);
|
|
|
|
|
|
|
|
|
|
|
|
|
    // rotary position embedding (RoPE, see RoFormer)
    // a: activations; b: int32 tensor of positions; n_dims: number of dims to rotate;
    // mode: rotation mode flags
    GGML_API struct ggml_tensor * ggml_rope(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            int n_dims,
            int mode);

    // in-place variant: modifies a and returns a view of it
    GGML_API struct ggml_tensor * ggml_rope_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            int n_dims,
            int mode);

    // extended RoPE with frequency scaling / YaRN extrapolation parameters;
    // c: optional per-dimension frequency factors (may be NULL -- TODO confirm)
    // n_ctx_orig: original training context size used for scaling
    GGML_API struct ggml_tensor * ggml_rope_ext(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            struct ggml_tensor * c,
            int n_dims,
            int mode,
            int n_ctx_orig,
            float freq_base,
            float freq_scale,
            float ext_factor,
            float attn_factor,
            float beta_fast,
            float beta_slow);

    // in-place variant of ggml_rope_ext
    GGML_API struct ggml_tensor * ggml_rope_ext_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            struct ggml_tensor * c,
            int n_dims,
            int mode,
            int n_ctx_orig,
            float freq_base,
            float freq_scale,
            float ext_factor,
            float attn_factor,
            float beta_fast,
            float beta_slow);

    // deprecated: same as ggml_rope_ext but without frequency factors
    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_rope_custom(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            int n_dims,
            int mode,
            int n_ctx_orig,
            float freq_base,
            float freq_scale,
            float ext_factor,
            float attn_factor,
            float beta_fast,
            float beta_slow),
        "use ggml_rope_ext instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_rope_custom_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            int n_dims,
            int mode,
            int n_ctx_orig,
            float freq_base,
            float freq_scale,
            float ext_factor,
            float attn_factor,
            float beta_fast,
            float beta_slow),
        "use ggml_rope_ext_inplace instead");

    // compute the YaRN correction dims[2] for the given RoPE parameters
    GGML_API void ggml_rope_yarn_corr_dims(
        int n_dims, int n_ctx_orig, float freq_base, float beta_fast, float beta_slow, float dims[2]);

    // backward pass of RoPE: a here is the gradient dy
    GGML_API struct ggml_tensor * ggml_rope_back(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            struct ggml_tensor * c,
            int n_dims,
            int mode,
            int n_ctx_orig,
            float freq_base,
            float freq_scale,
            float ext_factor,
            float attn_factor,
            float beta_fast,
            float beta_slow);
|
|
|
|
|
|
|
    // clamp each element of a to the range [min, max]
    GGML_API struct ggml_tensor * ggml_clamp(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            float min,
            float max);
|
|
|
|
|
|
|
    // im2col: unfold convolution windows of b (data) for kernel a into columns,
    // so the convolution can be computed as a matrix multiplication
    // s0/s1: stride, p0/p1: padding, d0/d1: dilation; is_2D selects 1-D vs 2-D;
    // dst_type: element type of the result tensor
    GGML_API struct ggml_tensor * ggml_im2col(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            int s0,
            int s1,
            int p0,
            int p1,
            int d0,
            int d1,
            bool is_2D,
            enum ggml_type dst_type);

    // backward pass of ggml_im2col; ne: shape of the gradient result
    GGML_API struct ggml_tensor * ggml_im2col_back(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            int64_t * ne,
            int s0,
            int s1,
            int p0,
            int p1,
            int d0,
            int d1,
            bool is_2D);

    // depthwise 2-D convolution (each input channel convolved with its own filter)
    GGML_API struct ggml_tensor * ggml_conv_depthwise_2d(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            int s0,
            int s1,
            int p0,
            int p1,
            int d0,
            int d1);
|
|
|
    // 1-D convolution of data b with kernel a
    // s0: stride, p0: padding, d0: dilation
    GGML_API struct ggml_tensor * ggml_conv_1d(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            int s0,
            int p0,
            int d0);
|
|
|
|
|
|
|
GGML_API struct ggml_tensor* ggml_conv_1d_ph( |
|
struct ggml_context * ctx, |
|
struct ggml_tensor * a, |
|
struct ggml_tensor * b, |
|
int s, |
|
int d); |
|
|
|
    // transposed (fractionally-strided) 1-D convolution
    // s0: stride, p0: padding, d0: dilation
    GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            int s0,
            int p0,
            int d0);

    // 2-D convolution of data b with kernel a
    // s0/s1: stride, p0/p1: padding, d0/d1: dilation
    GGML_API struct ggml_tensor * ggml_conv_2d(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            int s0,
            int s1,
            int p0,
            int p1,
            int d0,
            int d1);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    // 2-D convolution, stride equal to the kernel size and no padding
    // ("sk_p0" -- per the naming convention; verify against the implementation)
    GGML_API struct ggml_tensor * ggml_conv_2d_sk_p0(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b);

    // 2-D convolution, stride 1 and half padding
    // ("s1_ph" -- per the naming convention; verify against the implementation)
    GGML_API struct ggml_tensor * ggml_conv_2d_s1_ph(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b);

    // transposed 2-D convolution with zero padding and the given stride
    GGML_API struct ggml_tensor * ggml_conv_transpose_2d_p0(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            int stride);
|
|
|
    // pooling operation selector
    enum ggml_op_pool {
        GGML_OP_POOL_MAX,
        GGML_OP_POOL_AVG,
        GGML_OP_POOL_COUNT,   // number of pooling ops (not a valid op)
    };

    // 1-D pooling; k0: kernel size, s0: stride, p0: padding
    GGML_API struct ggml_tensor * ggml_pool_1d(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            enum ggml_op_pool op,
            int k0,
            int s0,
            int p0);

    // 2-D pooling; k0/k1: kernel size, s0/s1: stride, p0/p1: padding
    // (padding is float here -- NOTE(review): presumably to allow fractional
    // padding; confirm against the implementation)
    GGML_API struct ggml_tensor * ggml_pool_2d(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            enum ggml_op_pool op,
            int k0,
            int k1,
            int s0,
            int s1,
            float p0,
            float p1);

    // backward pass of ggml_pool_2d; af: the original (forward) input tensor
    GGML_API struct ggml_tensor * ggml_pool_2d_back(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * af,
            enum ggml_op_pool op,
            int k0,
            int k1,
            int s0,
            int s1,
            float p0,
            float p1);
|
|
|
|
|
|
|
|
|
    // upscale a by an integer factor -- NOTE(review): presumably nearest-neighbor
    // interpolation on the first two dims; confirm against the implementation
    GGML_API struct ggml_tensor * ggml_upscale(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            int scale_factor);

    // upscale a to the explicit target shape ne0..ne3
    GGML_API struct ggml_tensor * ggml_upscale_ext(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            int ne0,
            int ne1,
            int ne2,
            int ne3);

    // pad each dimension of a by p0..p3 elements (zero padding)
    GGML_API struct ggml_tensor * ggml_pad(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            int p0,
            int p1,
            int p2,
            int p3);

    // sinusoidal timestep embedding of the given dimension
    // (as used by diffusion models -- TODO confirm exact formula)
    GGML_API struct ggml_tensor * ggml_timestep_embedding(
            struct ggml_context * ctx,
            struct ggml_tensor * timesteps,
            int dim,
            int max_period);
|
|
|
|
|
    // sort direction for ggml_argsort
    enum ggml_sort_order {
        GGML_SORT_ORDER_ASC,
        GGML_SORT_ORDER_DESC,
    };

    // return the indices that would sort the rows of a in the given order
    GGML_API struct ggml_tensor * ggml_argsort(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            enum ggml_sort_order order);

    // 1-D tensor of evenly spaced values in [start, stop) with the given step
    GGML_API struct ggml_tensor * ggml_arange(
            struct ggml_context * ctx,
            float start,
            float stop,
            float step);

    // top k elements per row
    GGML_API struct ggml_tensor * ggml_top_k(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            int k);
|
|
|
    // required padding (in rows) for the KQ attention mask passed to ggml_flash_attn_ext
    #define GGML_KQ_MASK_PAD 32

    // fused attention: softmax(q*k^T * scale + mask) * v
    // mask may be NULL; max_bias: ALiBi bias slope (0.0f to disable -- TODO confirm);
    // logit_softcap: soft-capping of attention logits (0.0f to disable -- TODO confirm)
    GGML_API struct ggml_tensor * ggml_flash_attn_ext(
            struct ggml_context * ctx,
            struct ggml_tensor * q,
            struct ggml_tensor * k,
            struct ggml_tensor * v,
            struct ggml_tensor * mask,
            float scale,
            float max_bias,
            float logit_softcap);

    // set the compute precision used for a flash-attention tensor
    GGML_API void ggml_flash_attn_ext_set_prec(
            struct ggml_tensor * a,
            enum ggml_prec prec);

    // query the compute precision of a flash-attention tensor
    GGML_API enum ggml_prec ggml_flash_attn_ext_get_prec(
            const struct ggml_tensor * a);

    // backward pass of flash attention; d: gradient of the output
    GGML_API struct ggml_tensor * ggml_flash_attn_back(
            struct ggml_context * ctx,
            struct ggml_tensor * q,
            struct ggml_tensor * k,
            struct ggml_tensor * v,
            struct ggml_tensor * d,
            bool masked);
|
|
|
    // state-space model convolution step (Mamba-style -- TODO confirm)
    // sx: input sequence/state, c: convolution weights
    GGML_API struct ggml_tensor * ggml_ssm_conv(
            struct ggml_context * ctx,
            struct ggml_tensor * sx,
            struct ggml_tensor * c);

    // state-space model selective scan
    // s: state, x: input, dt: time deltas, A/B/C: SSM parameter matrices
    GGML_API struct ggml_tensor * ggml_ssm_scan(
            struct ggml_context * ctx,
            struct ggml_tensor * s,
            struct ggml_tensor * x,
            struct ggml_tensor * dt,
            struct ggml_tensor * A,
            struct ggml_tensor * B,
            struct ggml_tensor * C);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    // partition a into non-overlapping windows of size w
    GGML_API struct ggml_tensor * ggml_win_part(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            int w);

    // reverse of ggml_win_part; w0/h0: original width/height before partitioning
    GGML_API struct ggml_tensor * ggml_win_unpart(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            int w0,
            int h0,
            int w);

    // apply the given unary operator element-wise
    GGML_API struct ggml_tensor * ggml_unary(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            enum ggml_unary_op op);

    // in-place variant: modifies a and returns a view of it
    GGML_API struct ggml_tensor * ggml_unary_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            enum ggml_unary_op op);
|
|
|
|
|
    // extract relative positional encodings from a for query/key sizes qh/kh
    GGML_API struct ggml_tensor * ggml_get_rel_pos(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            int qh,
            int kh);

    // add relative position biases pw (width) and ph (height) to a
    GGML_API struct ggml_tensor * ggml_add_rel_pos(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * pw,
            struct ggml_tensor * ph);

    // in-place variant: modifies a and returns a view of it
    GGML_API struct ggml_tensor * ggml_add_rel_pos_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * pw,
            struct ggml_tensor * ph);

    // RWKV v6 wkv attention kernel
    // k/v/r: key/value/receptance, tf/td: time-first/time-decay, state: recurrent state
    GGML_API struct ggml_tensor * ggml_rwkv_wkv6(
            struct ggml_context * ctx,
            struct ggml_tensor * k,
            struct ggml_tensor * v,
            struct ggml_tensor * r,
            struct ggml_tensor * tf,
            struct ggml_tensor * td,
            struct ggml_tensor * state);
|
|
|
|
|
|
|
    // legacy float32 custom-operator callback types (deprecated API below):
    // (n, dst, src) and (n, dst, src0, src1)
    typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *);
    typedef void (*ggml_binary_op_f32_t)(const int, float *, const float *, const float *);

    // (dst, a[, b[, c]]) whole-tensor callbacks
    typedef void (*ggml_custom1_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *);
    typedef void (*ggml_custom2_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);
    typedef void (*ggml_custom3_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);

    // deprecated f32-only custom map operations -- use the ggml_map_custom* API instead
    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_f32(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            ggml_unary_op_f32_t fun),
        "use ggml_map_custom1 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            ggml_unary_op_f32_t fun),
        "use ggml_map_custom1_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_f32(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            ggml_binary_op_f32_t fun),
        "use ggml_map_custom2 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            ggml_binary_op_f32_t fun),
        "use ggml_map_custom2_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_f32(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            ggml_custom1_op_f32_t fun),
        "use ggml_map_custom1 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            ggml_custom1_op_f32_t fun),
        "use ggml_map_custom1_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_f32(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            ggml_custom2_op_f32_t fun),
        "use ggml_map_custom2 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            ggml_custom2_op_f32_t fun),
        "use ggml_map_custom2_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_f32(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            struct ggml_tensor * c,
            ggml_custom3_op_f32_t fun),
        "use ggml_map_custom3 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            struct ggml_tensor * c,
            ggml_custom3_op_f32_t fun),
        "use ggml_map_custom3_inplace instead");
|
|
|
|
|
|
|
    // custom-operator callbacks: ith/nth identify the calling thread and the
    // total thread count (for manual work partitioning); userdata is passed through
    typedef void (*ggml_custom1_op_t)(struct ggml_tensor * dst , const struct ggml_tensor * a, int ith, int nth, void * userdata);
    typedef void (*ggml_custom2_op_t)(struct ggml_tensor * dst , const struct ggml_tensor * a, const struct ggml_tensor * b, int ith, int nth, void * userdata);
    typedef void (*ggml_custom3_op_t)(struct ggml_tensor * dst , const struct ggml_tensor * a, const struct ggml_tensor * b, const struct ggml_tensor * c, int ith, int nth, void * userdata);

    // pass as n_tasks to let the scheduler pick the number of tasks
    #define GGML_N_TASKS_MAX (-1)

    // apply a user-supplied operator to one tensor
    GGML_API struct ggml_tensor * ggml_map_custom1(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            ggml_custom1_op_t fun,
            int n_tasks,
            void * userdata);

    // in-place variant: dst is a view of a
    GGML_API struct ggml_tensor * ggml_map_custom1_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            ggml_custom1_op_t fun,
            int n_tasks,
            void * userdata);

    // apply a user-supplied operator to two tensors
    GGML_API struct ggml_tensor * ggml_map_custom2(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            ggml_custom2_op_t fun,
            int n_tasks,
            void * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom2_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            ggml_custom2_op_t fun,
            int n_tasks,
            void * userdata);

    // apply a user-supplied operator to three tensors
    GGML_API struct ggml_tensor * ggml_map_custom3(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            struct ggml_tensor * c,
            ggml_custom3_op_t fun,
            int n_tasks,
            void * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom3_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            struct ggml_tensor * c,
            ggml_custom3_op_t fun,
            int n_tasks,
            void * userdata);
|
|
|
|
|
|
|
    // cross-entropy loss between logits a and labels b
    GGML_API struct ggml_tensor * ggml_cross_entropy_loss(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b);

    // backward pass of cross-entropy loss
    GGML_API struct ggml_tensor * ggml_cross_entropy_loss_back(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * b,
            struct ggml_tensor * c);

    // AdamW optimizer step for parameter tensor a
    // grad: gradient, m/v: first/second moment accumulators,
    // adamw_params: tensor holding the optimizer hyper-parameters
    GGML_API struct ggml_tensor * ggml_opt_step_adamw(
            struct ggml_context * ctx,
            struct ggml_tensor * a,
            struct ggml_tensor * grad,
            struct ggml_tensor * m,
            struct ggml_tensor * v,
            struct ggml_tensor * adamw_params);
|
|
|
|
|
|
|
|
|
|
|
    // add tensor (and its dependencies) to the forward compute graph
    GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
    // build the backward (gradient) graph; accumulate: add into existing gradients
    // instead of overwriting them
    GGML_API void ggml_build_backward_expand(
        struct ggml_context * ctx_static,
        struct ggml_context * ctx_compute,
        struct ggml_cgraph * cgraph,
        bool accumulate);

    // graph allocation / duplication / reset
    GGML_API struct ggml_cgraph * ggml_new_graph (struct ggml_context * ctx); // size = GGML_DEFAULT_GRAPH_SIZE -- TODO confirm
    GGML_API struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t size, bool grads);
    GGML_API struct ggml_cgraph * ggml_graph_dup (struct ggml_context * ctx, struct ggml_cgraph * cgraph);
    GGML_API void ggml_graph_cpy (struct ggml_cgraph * src, struct ggml_cgraph * dst);
    GGML_API void ggml_graph_reset (struct ggml_cgraph * cgraph); // zero the gradients -- TODO confirm
    GGML_API void ggml_graph_clear (struct ggml_cgraph * cgraph);

    // graph inspection
    GGML_API int ggml_graph_size (struct ggml_cgraph * cgraph);
    GGML_API struct ggml_tensor * ggml_graph_node (struct ggml_cgraph * cgraph, int i);
    GGML_API struct ggml_tensor ** ggml_graph_nodes (struct ggml_cgraph * cgraph);
    GGML_API int ggml_graph_n_nodes(struct ggml_cgraph * cgraph);

    GGML_API void ggml_graph_add_node(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);

    // memory overhead of a graph object (for sizing contexts)
    GGML_API size_t ggml_graph_overhead(void);
    GGML_API size_t ggml_graph_overhead_custom(size_t size, bool grads);

    // look up a tensor / its gradient / its gradient accumulator in the graph
    GGML_API struct ggml_tensor * ggml_graph_get_tensor (const struct ggml_cgraph * cgraph, const char * name);
    GGML_API struct ggml_tensor * ggml_graph_get_grad (const struct ggml_cgraph * cgraph, const struct ggml_tensor * node);
    GGML_API struct ggml_tensor * ggml_graph_get_grad_acc(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node);

    // serialize / deserialize a graph to/from a file
    GGML_API void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname);
    GGML_API struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval);

    // print graph info to stderr -- NOTE(review): output stream not verifiable from here
    GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph);

    // dump the graph in Graphviz dot format
    GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename);
|
|
|
|
|
    // logging callback; user_data is the pointer registered with ggml_log_set
    typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data);

    // install a global log callback (pass NULL to restore the default -- TODO confirm)
    GGML_API void ggml_log_set(ggml_log_callback log_callback, void * user_data);

    // set all elements of tensor to zero; returns the tensor
    GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    // initialize / free internal tables used by quantization for the given type
    GGML_API void ggml_quantize_init(enum ggml_type type);
    GGML_API void ggml_quantize_free(void);

    // whether quantizing to this type requires an importance matrix
    GGML_API bool ggml_quantize_requires_imatrix(enum ggml_type type);

    // quantize a chunk of float data into dst
    // start: starting element, nrows x n_per_row: layout of the chunk,
    // imatrix: optional importance matrix (may be NULL -- TODO confirm)
    // returns the number of bytes written -- TODO confirm
    GGML_API size_t ggml_quantize_chunk(
            enum ggml_type type,
            const float * src,
            void * dst,
            int64_t start,
            int64_t nrows,
            int64_t n_per_row,
            const float * imatrix);
|
|
|
|
|
|
|
|
|
|
|
    // value types supported by GGUF key-value metadata
    // (explicit values: part of the on-disk GGUF format -- do not reorder)
    enum gguf_type {
        GGUF_TYPE_UINT8 = 0,
        GGUF_TYPE_INT8 = 1,
        GGUF_TYPE_UINT16 = 2,
        GGUF_TYPE_INT16 = 3,
        GGUF_TYPE_UINT32 = 4,
        GGUF_TYPE_INT32 = 5,
        GGUF_TYPE_FLOAT32 = 6,
        GGUF_TYPE_BOOL = 7,
        GGUF_TYPE_STRING = 8,
        GGUF_TYPE_ARRAY = 9,
        GGUF_TYPE_UINT64 = 10,
        GGUF_TYPE_INT64 = 11,
        GGUF_TYPE_FLOAT64 = 12,
        GGUF_TYPE_COUNT,    // marks the end of the enum
    };

    // opaque handle to a loaded GGUF file
    struct gguf_context;

    struct gguf_init_params {
        bool no_alloc;      // if true, do not allocate tensor data

        // if not NULL, create a ggml_context and load the tensor data into it
        struct ggml_context ** ctx;
    };

    // create an empty GGUF context / load one from a file
    GGML_API struct gguf_context * gguf_init_empty(void);
    GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params);

    // free the context (gguf_free(NULL) -- behavior not verifiable from here)
    GGML_API void gguf_free(struct gguf_context * ctx);

    // human-readable name of a GGUF value type
    GGML_API const char * gguf_type_name(enum gguf_type type);
|
|
|
GGML_API const char * gguf_type_name(enum gguf_type type); |
|
|
|
GGML_API int gguf_get_version (const struct gguf_context * ctx); |
|
GGML_API size_t gguf_get_alignment (const struct gguf_context * ctx); |
|
GGML_API size_t gguf_get_data_offset(const struct gguf_context * ctx); |
|
GGML_API void * gguf_get_data (const struct gguf_context * ctx); |
|
|
|
GGML_API int gguf_get_n_kv(const struct gguf_context * ctx); |
|
GGML_API int gguf_find_key(const struct gguf_context * ctx, const char * key); |
|
GGML_API const char * gguf_get_key (const struct gguf_context * ctx, int key_id); |
|
|
|
GGML_API enum gguf_type gguf_get_kv_type (const struct gguf_context * ctx, int key_id); |
|
GGML_API enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id); |
|
|
|
|
|
GGML_API uint8_t gguf_get_val_u8 (const struct gguf_context * ctx, int key_id); |
|
GGML_API int8_t gguf_get_val_i8 (const struct gguf_context * ctx, int key_id); |
|
GGML_API uint16_t gguf_get_val_u16 (const struct gguf_context * ctx, int key_id); |
|
GGML_API int16_t gguf_get_val_i16 (const struct gguf_context * ctx, int key_id); |
|
GGML_API uint32_t gguf_get_val_u32 (const struct gguf_context * ctx, int key_id); |
|
GGML_API int32_t gguf_get_val_i32 (const struct gguf_context * ctx, int key_id); |
|
GGML_API float gguf_get_val_f32 (const struct gguf_context * ctx, int key_id); |
|
GGML_API uint64_t gguf_get_val_u64 (const struct gguf_context * ctx, int key_id); |
|
GGML_API int64_t gguf_get_val_i64 (const struct gguf_context * ctx, int key_id); |
|
GGML_API double gguf_get_val_f64 (const struct gguf_context * ctx, int key_id); |
|
GGML_API bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id); |
|
GGML_API const char * gguf_get_val_str (const struct gguf_context * ctx, int key_id); |
|
GGML_API const void * gguf_get_val_data(const struct gguf_context * ctx, int key_id); |
|
GGML_API int gguf_get_arr_n (const struct gguf_context * ctx, int key_id); |
|
GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id); |
|
GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int key_id, int i); |
|
|
|
GGML_API int gguf_get_n_tensors (const struct gguf_context * ctx); |
|
GGML_API int gguf_find_tensor (const struct gguf_context * ctx, const char * name); |
|
GGML_API size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i); |
|
GGML_API char * gguf_get_tensor_name (const struct gguf_context * ctx, int i); |
|
GGML_API enum ggml_type gguf_get_tensor_type (const struct gguf_context * ctx, int i); |
|
|
|
|
|
    // remove a key from the metadata (no-op if absent -- TODO confirm)
    GGML_API void gguf_remove_key(struct gguf_context * ctx, const char * key);

    // typed setters: overwrite an existing key or add a new one
    GGML_API void gguf_set_val_u8 (struct gguf_context * ctx, const char * key, uint8_t val);
    GGML_API void gguf_set_val_i8 (struct gguf_context * ctx, const char * key, int8_t val);
    GGML_API void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t val);
    GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t val);
    GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t val);
    GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t val);
    GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float val);
    GGML_API void gguf_set_val_u64 (struct gguf_context * ctx, const char * key, uint64_t val);
    GGML_API void gguf_set_val_i64 (struct gguf_context * ctx, const char * key, int64_t val);
    GGML_API void gguf_set_val_f64 (struct gguf_context * ctx, const char * key, double val);
    GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val);
    GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val);
    // array setters: n elements of the given type / n C strings
    GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n);
    GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, int n);

    // copy all key-value pairs from src into ctx
    GGML_API void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src);

    // register a ggml tensor in the GGUF context; type/data can be overridden by name
    GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor);
    GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type);
    GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    // write the GGUF context to a file; only_meta: write metadata without tensor data
    GGML_API void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta);

    // size of the serialized metadata / copy it into a caller-provided buffer
    GGML_API size_t gguf_get_meta_size(const struct gguf_context * ctx);
    GGML_API void gguf_get_meta_data(const struct gguf_context * ctx, void * data);
|
|
|
    // `restrict` is C-only; map it away when compiled as C++
    #ifdef __cplusplus
    #define GGML_RESTRICT
    #else
    #define GGML_RESTRICT restrict
    #endif
    // dequantize / quantize k elements between a (possibly quantized) block and float
    typedef void (*ggml_to_float_t) (const void * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
    typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);

    // per-type metadata and conversion routines
    struct ggml_type_traits {
        const char * type_name;
        int64_t blck_size;              // elements per block
        int64_t blck_size_interleave;   // interleave granularity -- TODO confirm meaning
        size_t type_size;               // bytes per block
        bool is_quantized;
        ggml_to_float_t to_float;
        ggml_from_float_t from_float_ref;  // reference (non-optimized) quantizer
    };

    // look up the traits table entry for a type
    GGML_API const struct ggml_type_traits * ggml_get_type_traits(enum ggml_type type);
|
|
|
|
|
|
|
|
|
|
|
|
|
    // scheduling priority for threadpool worker threads
    enum ggml_sched_priority {
        GGML_SCHED_PRIO_NORMAL,
        GGML_SCHED_PRIO_MEDIUM,
        GGML_SCHED_PRIO_HIGH,
        GGML_SCHED_PRIO_REALTIME
    };

    // threadpool configuration
    struct ggml_threadpool_params {
        bool cpumask[GGML_MAX_N_THREADS];   // CPU affinity mask (one flag per CPU)
        int n_threads;                      // number of worker threads
        enum ggml_sched_priority prio;      // thread priority
        uint32_t poll;                      // polling level (busy-wait tuning -- TODO confirm units)
        bool strict_cpu;                    // enforce strict CPU placement
        bool paused;                        // start the threadpool in a paused state
    };

    // opaque threadpool handle
    struct ggml_threadpool;

    typedef struct ggml_threadpool * ggml_threadpool_t;

    // default-construct / initialize / compare threadpool parameters
    GGML_API struct ggml_threadpool_params ggml_threadpool_params_default(int n_threads);
    GGML_API void ggml_threadpool_params_init (struct ggml_threadpool_params * p, int n_threads);
    GGML_API bool ggml_threadpool_params_match (const struct ggml_threadpool_params * p0, const struct ggml_threadpool_params * p1);
|
|
|
#ifdef __cplusplus |
|
} |
|
#endif |
|
|