zephyr7b / zephyr_f32_int3.txt
(dream) tb@IBM-PF38WZKF:~/funstreams/AI$ ./llama.cpp/quantize zephyr_f32.gguf Q3_K
main: build = 1798 (128de35)
main: built with cc (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0 for x86_64-linux-gnu
main: quantizing 'zephyr_f32.gguf' to 'ggml-model-Q3_K.gguf' as Q3_K
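
(Note on the invocation: with only an input file and a quant type given, quantize falls back to the default output name, here ggml-model-Q3_K.gguf. A sketch of the fuller command line, assuming the positional usage of llama.cpp builds from this era, where an explicit output path and a thread count are optional:

$ ./llama.cpp/quantize zephyr_f32.gguf zephyr_Q3_K.gguf Q3_K 8
#                      input.gguf      [output.gguf]   type [nthreads]
)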
llama_model_loader: loaded meta data with 22 key-value pairs and 291 tensors from zephyr_f32.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = llama
llama_model_loader: - kv 1: general.name str = .
llama_model_loader: - kv 2: llama.context_length u32 = 32768
llama_model_loader: - kv 3: llama.embedding_length u32 = 4096
llama_model_loader: - kv 4: llama.block_count u32 = 32
llama_model_loader: - kv 5: llama.feed_forward_length u32 = 14336
llama_model_loader: - kv 6: llama.rope.dimension_count u32 = 128
llama_model_loader: - kv 7: llama.attention.head_count u32 = 32
llama_model_loader: - kv 8: llama.attention.head_count_kv u32 = 8
llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
llama_model_loader: - kv 10: llama.rope.freq_base f32 = 10000.000000
llama_model_loader: - kv 11: general.file_type u32 = 0
llama_model_loader: - kv 12: tokenizer.ggml.model str = llama
llama_model_loader: - kv 13: tokenizer.ggml.tokens arr[str,32000] = ["<unk>", "<s>", "</s>", "<0x00>", "<...
llama_model_loader: - kv 14: tokenizer.ggml.scores arr[f32,32000] = [0.000000, 0.000000, 0.000000, 0.0000...
llama_model_loader: - kv 15: tokenizer.ggml.token_type arr[i32,32000] = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...
llama_model_loader: - kv 16: tokenizer.ggml.merges arr[str,58980] = ["▁ t", "i n", "e r", "▁ a", "h e...
llama_model_loader: - kv 17: tokenizer.ggml.bos_token_id u32 = 1
llama_model_loader: - kv 18: tokenizer.ggml.eos_token_id u32 = 2
llama_model_loader: - kv 19: tokenizer.ggml.unknown_token_id u32 = 0
llama_model_loader: - kv 20: tokenizer.ggml.padding_token_id u32 = 2
llama_model_loader: - kv 21: tokenizer.chat_template str = {% for message in messages %}\n{% if m...
llama_model_loader: - type f32: 291 tensors
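
(Two quick consistency checks against the metadata above, as shell arithmetic: the tensor count follows from 9 weight tensors per block times 32 blocks plus the embedding, output, and final norm tensors, and the 1024-wide attn_k/attn_v tensors below follow from 8 KV heads of 128 rope dimensions each, i.e. grouped-query attention:

$ echo $((32*9 + 3))    # 9 tensors/block x 32 blocks + token_embd + output + output_norm
291
$ echo $((8*128))       # head_count_kv x rope.dimension_count = attn_k/attn_v width
1024
)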
llama_model_quantize_internal: meta size = 1671648 bytes
[ 1/ 291] token_embd.weight - [ 4096, 32000, 1, 1], type = f32, quantizing to q3_K .. size = 500.00 MiB -> 53.71 MiB | hist:
[ 2/ 291] blk.0.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 3/ 291] blk.0.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q5_K .. size = 224.00 MiB -> 38.50 MiB | hist:
[ 4/ 291] blk.0.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 5/ 291] blk.0.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 6/ 291] blk.0.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 7/ 291] blk.0.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 8/ 291] blk.0.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 9/ 291] blk.0.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 10/ 291] blk.0.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q5_K .. size = 16.00 MiB -> 2.75 MiB | hist:
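
(The per-tensor mix visible above, q3_K for most weights but q4_K/q5_K for attn_v, attn_output, and ffn_down, and q6_K for the output tensor later in the log, is the usual k-quant mixing llama.cpp applies for this target; as far as I can tell, "Q3_K" on the command line is treated as Q3_K_M. The printed sizes can be reproduced from the super-block sizes of the k-quant formats; a sketch assuming ggml's block layouts, i.e. bytes per 256-weight super-block of q3_K = 110, q4_K = 144, q5_K = 176, q6_K = 210:

$ echo $((4096*32000*110/256))   # token_embd @ q3_K:       56320000 B  = 53.71 MiB
$ echo $((14336*4096*176/256))   # blk.0.ffn_down @ q5_K:   40370176 B  = 38.50 MiB
$ echo $((4096*4096*144/256))    # blk.0.attn_output @ q4_K: 9437184 B  =  9.00 MiB
$ echo $((4096*32000*210/256))   # output @ q6_K:          107520000 B  = 102.54 MiB
)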
[ 11/ 291] blk.1.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 12/ 291] blk.1.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q5_K .. size = 224.00 MiB -> 38.50 MiB | hist:
[ 13/ 291] blk.1.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 14/ 291] blk.1.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 15/ 291] blk.1.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 16/ 291] blk.1.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 17/ 291] blk.1.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 18/ 291] blk.1.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 19/ 291] blk.1.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q5_K .. size = 16.00 MiB -> 2.75 MiB | hist:
[ 20/ 291] blk.2.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 21/ 291] blk.2.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 22/ 291] blk.2.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 23/ 291] blk.2.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 24/ 291] blk.2.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 25/ 291] blk.2.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 26/ 291] blk.2.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 27/ 291] blk.2.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 28/ 291] blk.2.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 29/ 291] blk.3.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 30/ 291] blk.3.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 31/ 291] blk.3.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 32/ 291] blk.3.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 33/ 291] blk.3.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 34/ 291] blk.3.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 35/ 291] blk.3.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 36/ 291] blk.3.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 37/ 291] blk.3.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 38/ 291] blk.4.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 39/ 291] blk.4.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 40/ 291] blk.4.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 41/ 291] blk.4.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 42/ 291] blk.4.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 43/ 291] blk.4.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 44/ 291] blk.4.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 45/ 291] blk.4.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 46/ 291] blk.4.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 47/ 291] blk.5.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 48/ 291] blk.5.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 49/ 291] blk.5.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 50/ 291] blk.5.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 51/ 291] blk.5.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 52/ 291] blk.5.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 53/ 291] blk.5.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 54/ 291] blk.5.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 55/ 291] blk.5.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 56/ 291] blk.6.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 57/ 291] blk.6.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 58/ 291] blk.6.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 59/ 291] blk.6.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 60/ 291] blk.6.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 61/ 291] blk.6.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 62/ 291] blk.6.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 63/ 291] blk.6.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 64/ 291] blk.6.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 65/ 291] blk.7.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 66/ 291] blk.7.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 67/ 291] blk.7.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 68/ 291] blk.7.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 69/ 291] blk.7.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 70/ 291] blk.7.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 71/ 291] blk.7.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 72/ 291] blk.7.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 73/ 291] blk.7.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 74/ 291] blk.8.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 75/ 291] blk.8.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 76/ 291] blk.8.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 77/ 291] blk.8.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 78/ 291] blk.10.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 79/ 291] blk.10.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 80/ 291] blk.10.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 81/ 291] blk.10.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 82/ 291] blk.10.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 83/ 291] blk.10.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 84/ 291] blk.10.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 85/ 291] blk.10.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 86/ 291] blk.10.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 87/ 291] blk.11.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 88/ 291] blk.11.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 89/ 291] blk.11.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 90/ 291] blk.11.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 91/ 291] blk.11.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 92/ 291] blk.11.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 93/ 291] blk.11.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 94/ 291] blk.11.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 95/ 291] blk.11.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 96/ 291] blk.12.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 97/ 291] blk.12.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 98/ 291] blk.12.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 99/ 291] blk.12.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 100/ 291] blk.12.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 101/ 291] blk.12.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 102/ 291] blk.8.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 103/ 291] blk.8.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 104/ 291] blk.8.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 105/ 291] blk.8.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 106/ 291] blk.8.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 107/ 291] blk.9.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 108/ 291] blk.9.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 109/ 291] blk.9.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 110/ 291] blk.9.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 111/ 291] blk.9.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 112/ 291] blk.9.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 113/ 291] blk.9.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 114/ 291] blk.9.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 115/ 291] blk.9.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 116/ 291] blk.12.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 117/ 291] blk.12.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 118/ 291] blk.12.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 119/ 291] blk.13.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 120/ 291] blk.13.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 121/ 291] blk.13.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 122/ 291] blk.13.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 123/ 291] blk.13.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 124/ 291] blk.13.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 125/ 291] blk.13.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 126/ 291] blk.13.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 127/ 291] blk.13.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 128/ 291] blk.14.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 129/ 291] blk.14.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 130/ 291] blk.14.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 131/ 291] blk.14.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 132/ 291] blk.14.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 133/ 291] blk.14.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 134/ 291] blk.14.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 135/ 291] blk.14.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 136/ 291] blk.14.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 137/ 291] blk.15.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 138/ 291] blk.15.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 139/ 291] blk.15.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 140/ 291] blk.15.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 141/ 291] blk.15.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 142/ 291] blk.15.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 143/ 291] blk.15.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 144/ 291] blk.15.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 145/ 291] blk.15.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 146/ 291] blk.16.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 147/ 291] blk.16.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 148/ 291] blk.16.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 149/ 291] blk.16.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 150/ 291] blk.16.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 151/ 291] blk.16.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 152/ 291] blk.16.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 153/ 291] blk.16.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 154/ 291] blk.16.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 155/ 291] blk.17.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 156/ 291] blk.17.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 157/ 291] blk.17.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 158/ 291] blk.17.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 159/ 291] blk.17.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 160/ 291] blk.17.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 161/ 291] blk.17.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 162/ 291] blk.17.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 163/ 291] blk.17.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 164/ 291] blk.18.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 165/ 291] blk.18.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 166/ 291] blk.18.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 167/ 291] blk.18.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 168/ 291] blk.18.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 169/ 291] blk.18.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 170/ 291] blk.18.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 171/ 291] blk.18.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 172/ 291] blk.18.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 173/ 291] blk.19.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 174/ 291] blk.19.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 175/ 291] blk.19.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 176/ 291] blk.19.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 177/ 291] blk.19.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 178/ 291] blk.19.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 179/ 291] blk.19.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 180/ 291] blk.19.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 181/ 291] blk.19.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 182/ 291] blk.20.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 183/ 291] blk.20.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 184/ 291] blk.20.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 185/ 291] blk.20.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 186/ 291] blk.20.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 187/ 291] blk.20.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 188/ 291] blk.20.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 189/ 291] blk.20.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 190/ 291] blk.20.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 191/ 291] blk.21.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 192/ 291] blk.21.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 193/ 291] blk.21.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 194/ 291] blk.21.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 195/ 291] blk.21.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 196/ 291] blk.21.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 197/ 291] blk.21.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 198/ 291] blk.21.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 199/ 291] blk.21.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 200/ 291] blk.22.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 201/ 291] blk.22.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 202/ 291] blk.22.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 203/ 291] blk.22.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 204/ 291] blk.22.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 205/ 291] blk.22.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 206/ 291] blk.22.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 207/ 291] blk.22.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 208/ 291] blk.22.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 209/ 291] blk.23.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 210/ 291] blk.23.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 211/ 291] blk.23.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 212/ 291] blk.23.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 213/ 291] blk.23.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 214/ 291] blk.23.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 215/ 291] blk.23.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 216/ 291] blk.23.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 217/ 291] blk.23.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 218/ 291] blk.24.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 219/ 291] blk.24.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 220/ 291] blk.24.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 221/ 291] blk.24.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 222/ 291] blk.24.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 223/ 291] blk.24.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 224/ 291] blk.24.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 225/ 291] blk.24.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 226/ 291] blk.24.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 227/ 291] blk.25.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 228/ 291] blk.25.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 229/ 291] blk.25.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 230/ 291] blk.25.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 231/ 291] blk.25.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 232/ 291] blk.25.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 233/ 291] blk.25.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 234/ 291] blk.25.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 235/ 291] blk.25.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 236/ 291] blk.26.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 237/ 291] blk.26.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 238/ 291] blk.26.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 239/ 291] blk.26.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 240/ 291] blk.26.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 241/ 291] blk.26.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 242/ 291] blk.26.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 243/ 291] blk.26.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 244/ 291] blk.26.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 245/ 291] blk.27.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 246/ 291] blk.27.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 247/ 291] blk.27.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 248/ 291] blk.27.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 249/ 291] blk.27.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 250/ 291] blk.27.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 251/ 291] blk.27.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 252/ 291] blk.27.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 253/ 291] blk.27.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 254/ 291] blk.28.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 255/ 291] blk.28.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 256/ 291] blk.28.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 257/ 291] blk.28.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 258/ 291] blk.28.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 259/ 291] blk.28.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 260/ 291] blk.28.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 261/ 291] blk.28.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 262/ 291] blk.28.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 263/ 291] blk.29.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 264/ 291] blk.29.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 265/ 291] blk.29.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 266/ 291] blk.29.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 267/ 291] blk.29.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 268/ 291] blk.29.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 269/ 291] blk.29.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 270/ 291] blk.29.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 271/ 291] blk.29.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 272/ 291] blk.30.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 273/ 291] blk.30.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 274/ 291] blk.30.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 275/ 291] blk.30.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 276/ 291] blk.30.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 277/ 291] blk.30.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 278/ 291] output.weight - [ 4096, 32000, 1, 1], type = f32, quantizing to q6_K .. size = 500.00 MiB -> 102.54 MiB | hist:
[ 279/ 291] blk.30.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 280/ 291] blk.30.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 281/ 291] blk.30.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 282/ 291] blk.31.attn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 283/ 291] blk.31.ffn_down.weight - [14336, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 224.00 MiB -> 31.50 MiB | hist:
[ 284/ 291] blk.31.ffn_gate.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 285/ 291] blk.31.ffn_up.weight - [ 4096, 14336, 1, 1], type = f32, quantizing to q3_K .. size = 224.00 MiB -> 24.06 MiB | hist:
[ 286/ 291] blk.31.ffn_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
[ 287/ 291] blk.31.attn_k.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q3_K .. size = 16.00 MiB -> 1.72 MiB | hist:
[ 288/ 291] blk.31.attn_output.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q4_K .. size = 64.00 MiB -> 9.00 MiB | hist:
[ 289/ 291] blk.31.attn_q.weight - [ 4096, 4096, 1, 1], type = f32, quantizing to q3_K .. size = 64.00 MiB -> 6.88 MiB | hist:
[ 290/ 291] blk.31.attn_v.weight - [ 4096, 1024, 1, 1], type = f32, quantizing to q4_K .. size = 16.00 MiB -> 2.25 MiB | hist:
[ 291/ 291] output_norm.weight - [ 4096, 1, 1, 1], type = f32, size = 0.016 MB
llama_model_quantize_internal: model size = 27625.02 MB
llama_model_quantize_internal: quant size = 3355.27 MB
main: quantize time = 368785.09 ms
main: total time = 368785.09 ms
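
(For reference, the final numbers work out to roughly 3.9 bits per weight overall, an ~8.2x reduction from the f32 source; the metadata's general.file_type = 0 marks the input as all-f32, i.e. 32 bpw. The average sits above q3_K's 3.44 bpw because of the q4_K/q5_K/q6_K tensors and the norms kept at f32. A quick check with awk:

$ awk 'BEGIN{printf "%.2f bpw, %.1fx smaller\n", 3355.27/27625.02*32, 27625.02/3355.27}'
3.89 bpw, 8.2x smaller
)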