|
llama_model_loader: loaded meta data with 28 key-value pairs and 508 tensors from gemma-2-27b-it-IMat-GGUF/gemma-2-27b-it.Q8_0.gguf.hardlink.gguf (version GGUF V3 (latest)) |
|
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output. |
|
llama_model_loader: - kv 0: general.architecture str = gemma2 |
|
llama_model_loader: - kv 1: general.name str = gemma-2-27b-it |
|
llama_model_loader: - kv 2: gemma2.context_length u32 = 8192 |
|
llama_model_loader: - kv 3: gemma2.embedding_length u32 = 4608 |
|
llama_model_loader: - kv 4: gemma2.block_count u32 = 46 |
|
llama_model_loader: - kv 5: gemma2.feed_forward_length u32 = 36864 |
|
llama_model_loader: - kv 6: gemma2.attention.head_count u32 = 32 |
|
llama_model_loader: - kv 7: gemma2.attention.head_count_kv u32 = 16 |
|
llama_model_loader: - kv 8: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001 |
|
llama_model_loader: - kv 9: gemma2.attention.key_length u32 = 128 |
|
llama_model_loader: - kv 10: gemma2.attention.value_length u32 = 128 |
|
llama_model_loader: - kv 11: general.file_type u32 = 7 |
|
llama_model_loader: - kv 12: gemma2.attn_logit_softcapping f32 = 50.000000 |
|
llama_model_loader: - kv 13: gemma2.final_logit_softcapping f32 = 30.000000 |
|
llama_model_loader: - kv 14: tokenizer.ggml.model str = llama |
|
llama_model_loader: - kv 15: tokenizer.ggml.pre str = default |
|
llama_model_loader: - kv 16: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ... |
|
llama_model_loader: - kv 17: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00... |
|
llama_model_loader: - kv 18: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, ... |
|
llama_model_loader: - kv 19: tokenizer.ggml.bos_token_id u32 = 2 |
|
llama_model_loader: - kv 20: tokenizer.ggml.eos_token_id u32 = 1 |
|
llama_model_loader: - kv 21: tokenizer.ggml.unknown_token_id u32 = 3 |
|
llama_model_loader: - kv 22: tokenizer.ggml.padding_token_id u32 = 0 |
|
llama_model_loader: - kv 23: tokenizer.ggml.add_bos_token bool = true |
|
llama_model_loader: - kv 24: tokenizer.ggml.add_eos_token bool = false |
|
llama_model_loader: - kv 25: tokenizer.chat_template str = {{ bos_token }}{% if messages[0]['rol... |
|
llama_model_loader: - kv 26: tokenizer.ggml.add_space_prefix bool = false |
|
llama_model_loader: - kv 27: general.quantization_version u32 = 2 |
|
llama_model_loader: - type f32: 185 tensors |
|
llama_model_loader: - type q8_0: 323 tensors |
|
llm_load_vocab: special tokens cache size = 261 |
|
llm_load_vocab: token to piece cache size = 1.6014 MB |
|
llm_load_print_meta: format = GGUF V3 (latest) |
|
llm_load_print_meta: arch = gemma2 |
|
llm_load_print_meta: vocab type = SPM |
|
llm_load_print_meta: n_vocab = 256000 |
|
llm_load_print_meta: n_merges = 0 |
|
llm_load_print_meta: n_ctx_train = 8192 |
|
llm_load_print_meta: n_embd = 4608 |
|
llm_load_print_meta: n_head = 32 |
|
llm_load_print_meta: n_head_kv = 16 |
|
llm_load_print_meta: n_layer = 46 |
|
llm_load_print_meta: n_rot = 144 |
|
llm_load_print_meta: n_embd_head_k = 128 |
|
llm_load_print_meta: n_embd_head_v = 128 |
|
llm_load_print_meta: n_gqa = 2 |
|
llm_load_print_meta: n_embd_k_gqa = 2048 |
|
llm_load_print_meta: n_embd_v_gqa = 2048 |
|
llm_load_print_meta: f_norm_eps = 0.0e+00 |
|
llm_load_print_meta: f_norm_rms_eps = 1.0e-06 |
|
llm_load_print_meta: f_clamp_kqv = 0.0e+00 |
|
llm_load_print_meta: f_max_alibi_bias = 0.0e+00 |
|
llm_load_print_meta: f_logit_scale = 0.0e+00 |
|
llm_load_print_meta: n_ff = 36864 |
|
llm_load_print_meta: n_expert = 0 |
|
llm_load_print_meta: n_expert_used = 0 |
|
llm_load_print_meta: causal attn = 1 |
|
llm_load_print_meta: pooling type = 0 |
|
llm_load_print_meta: rope type = 2 |
|
llm_load_print_meta: rope scaling = linear |
|
llm_load_print_meta: freq_base_train = 10000.0 |
|
llm_load_print_meta: freq_scale_train = 1 |
|
llm_load_print_meta: n_ctx_orig_yarn = 8192 |
|
llm_load_print_meta: rope_finetuned = unknown |
|
llm_load_print_meta: ssm_d_conv = 0 |
|
llm_load_print_meta: ssm_d_inner = 0 |
|
llm_load_print_meta: ssm_d_state = 0 |
|
llm_load_print_meta: ssm_dt_rank = 0 |
|
llm_load_print_meta: model type = 27B |
|
llm_load_print_meta: model ftype = Q8_0 |
|
llm_load_print_meta: model params = 27.23 B |
|
llm_load_print_meta: model size = 26.94 GiB (8.50 BPW) |
|
llm_load_print_meta: general.name = gemma-2-27b-it |
|
llm_load_print_meta: BOS token = 2 '<bos>' |
|
llm_load_print_meta: EOS token = 1 '<eos>' |
|
llm_load_print_meta: UNK token = 3 '<unk>' |
|
llm_load_print_meta: PAD token = 0 '<pad>' |
|
llm_load_print_meta: LF token = 227 '<0x0A>' |
|
llm_load_print_meta: EOT token = 107 '<end_of_turn>' |
|
llm_load_print_meta: max token length = 93 |
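
As a rough cross-check, the reported 8.50 BPW is consistent with the printed model size and parameter count; a minimal sketch of the arithmetic (values taken from the lines above, variable names illustrative):

```python
size_gib = 26.94          # reported Q8_0 model size
params = 27.23e9          # reported parameter count
bpw = size_gib * 2**30 * 8 / params
print(f"{bpw:.2f} BPW")   # ~8.50, matching the log
```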
|
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no |
|
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no |
|
ggml_cuda_init: found 1 CUDA devices: |
|
Device 0: NVIDIA GeForce RTX 4090, compute capability 8.9, VMM: yes |
|
llm_load_tensors: ggml ctx size = 0.45 MiB |
|
llm_load_tensors: offloading 37 repeating layers to GPU |
|
llm_load_tensors: offloaded 37/47 layers to GPU |
|
llm_load_tensors: CPU buffer size = 27591.06 MiB |
|
llm_load_tensors: CUDA0 buffer size = 21231.35 MiB |
|
.............................................................................................. |
|
llama_new_context_with_model: n_ctx = 512 |
|
llama_new_context_with_model: n_batch = 512 |
|
llama_new_context_with_model: n_ubatch = 512 |
|
llama_new_context_with_model: flash_attn = 0 |
|
llama_new_context_with_model: freq_base = 10000.0 |
|
llama_new_context_with_model: freq_scale = 1 |
|
llama_kv_cache_init: CUDA_Host KV buffer size = 36.00 MiB |
|
llama_kv_cache_init: CUDA0 KV buffer size = 148.00 MiB |
|
llama_new_context_with_model: KV self size = 184.00 MiB, K (f16): 92.00 MiB, V (f16): 92.00 MiB |
|
llama_new_context_with_model: CUDA_Host output buffer size = 0.98 MiB |
|
llama_new_context_with_model: CUDA0 compute buffer size = 1704.31 MiB |
|
llama_new_context_with_model: CUDA_Host compute buffer size = 10.01 MiB |
|
llama_new_context_with_model: graph nodes = 1850 |
|
llama_new_context_with_model: graph splits = 121 |
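
The KV-cache figures above are likewise consistent with the context size and GQA dimensions printed earlier; a minimal sketch of the arithmetic, assuming f16 K and V entries (values taken from the log, names illustrative):

```python
n_ctx, n_layer, n_embd_k_gqa = 512, 46, 2048   # from the context/meta lines above
bytes_per_elem = 2                              # f16
k_mib = n_ctx * n_layer * n_embd_k_gqa * bytes_per_elem / 2**20
print(f"K: {k_mib:.2f} MiB, K+V: {2 * k_mib:.2f} MiB")   # ~92 MiB and ~184 MiB
```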
|
|
|
system_info: n_threads = 25 / 32 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | |
|
compute_imatrix: tokenizing the input .. |
|
compute_imatrix: tokenization took 94.256 ms |
|
compute_imatrix: computing over 128 chunks with batch_size 512 |
|
compute_imatrix: 1.92 seconds per pass - ETA 4.08 minutes |
|
[1]12.2429,[2]6.2081,[3]5.2588,[4]6.2085,[5]6.7166,[6]7.2390,[7]7.6746,[8]8.1610,[9]8.5380, |
|
save_imatrix: stored collected data after 10 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat |
|
[10]7.7409,[11]7.6231,[12]8.3069,[13]8.8175,[14]8.9820,[15]9.5963,[16]9.7448,[17]9.8649,[18]10.2368,[19]10.1102, |
|
save_imatrix: stored collected data after 20 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat |
|
[20]10.2363,[21]10.9791,[22]10.9424,[23]10.8230,[24]11.0184,[25]10.9711,[26]10.8238,[27]11.0156,[28]11.1952,[29]11.2073, |
|
save_imatrix: stored collected data after 30 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat |
|
[30]11.4849,[31]10.7086,[32]10.2419,[33]9.8871,[34]9.6029,[35]9.3962,[36]9.5205,[37]9.7559,[38]9.8816,[39]10.0292, |
|
save_imatrix: stored collected data after 40 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat |
|
[40]10.1462,[41]10.2011,[42]10.6384,[43]10.9116,[44]11.2397,[45]11.4389,[46]11.2550,[47]11.0805,[48]11.2637,[49]11.4301, |
|
save_imatrix: stored collected data after 50 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat |
|
[50]11.2782,[51]11.1894,[52]11.2327,[53]11.4036,[54]11.5967,[55]11.8117,[56]11.9157,[57]11.9185,[58]11.9380,[59]11.7810, |
|
save_imatrix: stored collected data after 60 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat |
|
[60]11.6650,[61]11.5287,[62]11.4071,[63]11.4760,[64]11.5875,[65]11.4612,[66]11.4694,[67]11.4358,[68]11.4182,[69]11.3768, |
|
save_imatrix: stored collected data after 70 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat |
|
[70]11.3167,[71]11.3025,[72]11.2862,[73]11.3380,[74]11.2888,[75]11.1897,[76]11.1629,[77]11.1599,[78]11.1381,[79]11.0667, |
|
save_imatrix: stored collected data after 80 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat |
|
[80]11.1144,[81]11.1690,[82]11.1841,[83]11.2762,[84]11.2937,[85]11.1199,[86]11.0622,[87]10.9503,[88]10.9787,[89]10.9774, |
|
save_imatrix: stored collected data after 90 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat |
|
[90]11.0439,[91]11.0167,[92]10.9757,[93]10.9302,[94]10.8609,[95]10.8306,[96]10.7705,[97]10.7326,[98]10.6794,[99]10.7099, |
|
save_imatrix: stored collected data after 100 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat |
|
[100]10.7050,[101]10.8097,[102]10.8830,[103]10.9458,[104]11.0763,[105]11.1774,[106]11.1839,[107]11.1889,[108]11.1470,[109]11.1662, |
|
save_imatrix: stored collected data after 110 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat |
|
[110]11.0690,[111]10.9087,[112]10.7355,[113]10.7965,[114]10.8346,[115]10.8238,[116]10.8008,[117]10.8403,[118]10.8670,[119]10.8845, |
|
save_imatrix: stored collected data after 120 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat |
|
[120]10.8804,[121]10.8749,[122]10.8379,[123]10.8537,[124]10.9275,[125]11.0106,[126]11.0989,[127]11.1369,[128]11.1788, |
|
save_imatrix: stored collected data after 128 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat |
|
|
|
llama_print_timings: load time = 4035.26 ms |
|
llama_print_timings: sample time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second) |
|
llama_print_timings: prompt eval time = 226486.47 ms / 65536 tokens ( 3.46 ms per token, 289.36 tokens per second) |
|
llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second) |
|
llama_print_timings: total time = 230157.77 ms / 65537 tokens |
|
|
|
Final estimate: PPL = 11.1788 +/- 0.20186 |
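
For context, the bracketed numbers in the chunk output above are running perplexity estimates, and the final line reports perplexity with an error bar. A minimal sketch of how such an estimate can be aggregated from per-token negative log-likelihoods, assuming exp-of-mean-NLL and a delta-method error bar (illustrative only, not llama.cpp's exact code):

```python
import math

def ppl_with_error(nlls):
    """Perplexity and an approximate error bar from per-token NLLs (illustrative)."""
    n = len(nlls)
    mean = sum(nlls) / n
    var = sum((x - mean) ** 2 for x in nlls) / n
    ppl = math.exp(mean)              # PPL = exp(mean NLL)
    err = ppl * math.sqrt(var / n)    # propagate the standard error of the mean
    return ppl, err
```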
|
|