legraphista committed
Commit 32d83d1
Parent(s): a887c33
Upload imatrix.log with huggingface_hub
Files changed: imatrix.log (+156, -0)
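For context: a log of this shape comes from llama.cpp's imatrix tool. Below is a minimal sketch of the kind of invocation that could have produced this run; the binary path, calibration file name, and flag values are assumptions rather than something recorded in the log (only the output path and the 20 offloaded layers are corroborated by lines further down).

import subprocess

# Hypothetical reproduction of the run logged below; the calibration
# corpus name is assumed, not taken from the log.
subprocess.run([
    "./imatrix",
    "-m", "K2-ckpt_360.Q8_0/K2-ckpt_360.Q8_0-00001-of-00003.gguf",  # first split; loader finds the rest
    "-f", "calibration.txt",                     # assumed calibration corpus
    "-o", "K2-ckpt_360-IMat-GGUF/imatrix.dat",   # output path seen in the log
    "-ngl", "20",                                # matches the 20 GPU-offloaded layers
], check=True)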
imatrix.log
ADDED
@@ -0,0 +1,156 @@
main: build = 3058 (30e238b2)
main: built with cc (Ubuntu 13.2.0-23ubuntu4) 13.2.0 for x86_64-linux-gnu
main: seed = 1717202742
llama_model_loader: additional 2 GGUFs metadata loaded.
llama_model_loader: loaded meta data with 28 key-value pairs and 723 tensors from K2-ckpt_360.Q8_0/K2-ckpt_360.Q8_0-00001-of-00003.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = llama
llama_model_loader: - kv 1: general.name str = K2-ckpt_360
llama_model_loader: - kv 2: llama.block_count u32 = 80
llama_model_loader: - kv 3: llama.context_length u32 = 2048
llama_model_loader: - kv 4: llama.embedding_length u32 = 8192
llama_model_loader: - kv 5: llama.feed_forward_length u32 = 22016
llama_model_loader: - kv 6: llama.attention.head_count u32 = 64
llama_model_loader: - kv 7: llama.attention.head_count_kv u32 = 64
llama_model_loader: - kv 8: llama.rope.freq_base f32 = 10000.000000
llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
llama_model_loader: - kv 10: general.file_type u32 = 7
llama_model_loader: - kv 11: llama.vocab_size u32 = 32032
llama_model_loader: - kv 12: llama.rope.dimension_count u32 = 128
llama_model_loader: - kv 13: tokenizer.ggml.model str = llama
llama_model_loader: - kv 14: tokenizer.ggml.pre str = default
llama_model_loader: - kv 15: tokenizer.ggml.tokens arr[str,32032] = ["<unk>", "<s>", "</s>", "<0x00>", "<...
llama_model_loader: - kv 16: tokenizer.ggml.scores arr[f32,32032] = [0.000000, 0.000000, 0.000000, 0.0000...
llama_model_loader: - kv 17: tokenizer.ggml.token_type arr[i32,32032] = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...
llama_model_loader: - kv 18: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 19: tokenizer.ggml.eos_token_id u32 = 2
llama_model_loader: - kv 20: tokenizer.ggml.unknown_token_id u32 = 0
llama_model_loader: - kv 21: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 22: tokenizer.ggml.add_bos_token bool = false
llama_model_loader: - kv 23: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 24: general.quantization_version u32 = 2
llama_model_loader: - kv 25: split.no u16 = 0
llama_model_loader: - kv 26: split.count u16 = 3
llama_model_loader: - kv 27: split.tensors.count i32 = 723
llama_model_loader: - type f32: 161 tensors
llama_model_loader: - type q8_0: 562 tensors
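The dump above is the model's GGUF key-value table as read by llama_model_loader. As a quick way to inspect such headers directly, here is a small pure-Python sketch that reads the fixed-size GGUF header; this much of the format is stable in GGUF v3. Note the 723-tensor count reported above is the aggregate across all three splits, so an individual split file's own header will list fewer tensors.

import struct

def read_gguf_header(path):
    # Fixed GGUF header: 4-byte magic, u32 version, u64 tensor count, u64 KV count
    with open(path, "rb") as f:
        magic, version = struct.unpack("<4sI", f.read(8))
        n_tensors, n_kv = struct.unpack("<QQ", f.read(16))
    assert magic == b"GGUF", f"not a GGUF file: {magic!r}"
    return version, n_tensors, n_kv

# Path taken from the log; expect version 3 and 28 KV pairs for this split.
print(read_gguf_header("K2-ckpt_360.Q8_0/K2-ckpt_360.Q8_0-00001-of-00003.gguf"))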
llm_load_vocab: special tokens cache size = 291
llm_load_vocab: token to piece cache size = 0.3373 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 32032
llm_load_print_meta: n_merges = 0
llm_load_print_meta: n_ctx_train = 2048
llm_load_print_meta: n_embd = 8192
llm_load_print_meta: n_head = 64
llm_load_print_meta: n_head_kv = 64
llm_load_print_meta: n_layer = 80
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 1
llm_load_print_meta: n_embd_k_gqa = 8192
llm_load_print_meta: n_embd_v_gqa = 8192
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 22016
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 0
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_yarn_orig_ctx = 2048
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: model type = 65B
llm_load_print_meta: model ftype = Q8_0
llm_load_print_meta: model params = 65.29 B
llm_load_print_meta: model size = 64.61 GiB (8.50 BPW)
llm_load_print_meta: general.name = K2-ckpt_360
llm_load_print_meta: BOS token = 2 '</s>'
llm_load_print_meta: EOS token = 2 '</s>'
llm_load_print_meta: UNK token = 0 '<unk>'
llm_load_print_meta: PAD token = 0 '<unk>'
llm_load_print_meta: LF token = 13 '<0x0A>'
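One quick consistency check on the summary above: the reported bits per weight follow directly from the model size and parameter count (both values copied from the log):

# 64.61 GiB at 65.29 B parameters should give the reported 8.50 BPW
size_bits = 64.61 * 1024**3 * 8
params = 65.29e9
print(size_bits / params)  # -> ~8.50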
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: CUDA_USE_TENSOR_CORES: yes
ggml_cuda_init: found 1 CUDA devices:
Device 0: NVIDIA GeForce RTX 4090, compute capability 8.9, VMM: yes
llm_load_tensors: ggml ctx size = 0.74 MiB
llm_load_tensors: offloading 20 repeating layers to GPU
llm_load_tensors: offloaded 20/81 layers to GPU
llm_load_tensors: CPU buffer size = 24509.70 MiB
llm_load_tensors: CPU buffer size = 24426.62 MiB
llm_load_tensors: CPU buffer size = 17220.48 MiB
llm_load_tensors: CUDA0 buffer size = 16406.25 MiB
....................................................................................................
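The buffer sizes above hang together: the three CPU buffers (one per split file) sum to the full Q8_0 model size, and the CUDA0 buffer works out to roughly 820 MiB for each of the 20 offloaded layers (values copied from the log):

cpu_mib = 24509.70 + 24426.62 + 17220.48  # one buffer per split file
print(cpu_mib / 1024)   # -> ~64.61 GiB, the full Q8_0 model
print(16406.25 / 20)    # -> ~820.3 MiB per GPU-offloaded layer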
llama_new_context_with_model: n_ctx = 512
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA_Host KV buffer size = 960.00 MiB
llama_kv_cache_init: CUDA0 KV buffer size = 320.00 MiB
llama_new_context_with_model: KV self size = 1280.00 MiB, K (f16): 640.00 MiB, V (f16): 640.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 0.12 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 344.45 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 33.01 MiB
llama_new_context_with_model: graph nodes = 2566
llama_new_context_with_model: graph splits = 664
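The KV-cache figures follow from the hyperparameters printed earlier: with n_ctx = 512, n_layer = 80, n_embd_k_gqa = n_embd_v_gqa = 8192 and f16 storage (2 bytes per element), each layer needs 16 MiB of K+V, which also explains the 960/320 MiB host/GPU split for 60 vs 20 layers:

n_ctx, n_layer, n_embd_kv = 512, 80, 8192
k_mib = n_ctx * n_layer * n_embd_kv * 2 / 1024**2   # f16 = 2 bytes
print(k_mib)                  # -> 640.0 MiB for K, same again for V (1280 MiB total)
per_layer = 2 * k_mib / n_layer
print(per_layer * 60, per_layer * 20)  # -> 960.0 and 320.0 MiB, as reported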

system_info: n_threads = 32 / 32 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 |
compute_imatrix: tokenizing the input ..
compute_imatrix: tokenization took 90.613 ms
compute_imatrix: computing over 151 chunks with batch_size 512
compute_imatrix: 54.64 seconds per pass - ETA 2 hours 17.50 minutes
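The chunk count and the ETA are straightforward arithmetic over the tokenized input (the 77,312-token figure appears in the timing summary at the end of the log):

tokens, chunk = 77312, 512
print(tokens // chunk)     # -> 151 chunks of 512 tokens
print(54.64 * 151 / 60)    # -> ~137.5 min, i.e. the 2 h 17.50 min ETA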
[1]2.5539,[2]2.1725,[3]2.2401,[4]2.3300,[5]2.6606,[6]2.6351,[7]2.4401,[8]2.7931,[9]2.9649,
save_imatrix: stored collected data after 10 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat
[10]3.1688,[11]3.2642,[12]3.0681,[13]3.1229,[14]3.3242,[15]3.6052,[16]3.7343,[17]3.9537,[18]4.1038,[19]4.2571,
save_imatrix: stored collected data after 20 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat
[20]4.3761,[21]4.4609,[22]4.3091,[23]4.1686,[24]4.1532,[25]4.1627,[26]4.1712,[27]4.1575,[28]4.2221,[29]4.3438,
save_imatrix: stored collected data after 30 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat
[30]4.4658,[31]4.4285,[32]4.4013,[33]4.4066,[34]4.4415,[35]4.4515,[36]4.4150,[37]4.2677,[38]4.1633,[39]4.1454,
save_imatrix: stored collected data after 40 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat
[40]4.1309,[41]4.1052,[42]4.1051,[43]4.0700,[44]4.0656,[45]4.0514,[46]4.0371,[47]4.0569,[48]4.1215,[49]4.1808,
save_imatrix: stored collected data after 50 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat
[50]4.2053,[51]4.2157,[52]4.2462,[53]4.3301,[54]4.3998,[55]4.4450,[56]4.4333,[57]4.4086,[58]4.4343,[59]4.4672,
save_imatrix: stored collected data after 60 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat
[60]4.5250,[61]4.4823,[62]4.5074,[63]4.5355,[64]4.5937,[65]4.6457,[66]4.6640,[67]4.7075,[68]4.7445,[69]4.7587,
save_imatrix: stored collected data after 70 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat
[70]4.7712,[71]4.7811,[72]4.7778,[73]4.7534,[74]4.7181,[75]4.7263,[76]4.7284,[77]4.7388,[78]4.7089,[79]4.7126,
save_imatrix: stored collected data after 80 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat
[80]4.7190,[81]4.6978,[82]4.6902,[83]4.6675,[84]4.6722,[85]4.6693,[86]4.6627,[87]4.6610,[88]4.6630,[89]4.6458,
save_imatrix: stored collected data after 90 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat
[90]4.6360,[91]4.6361,[92]4.6215,[93]4.6015,[94]4.5871,[95]4.5457,[96]4.5578,[97]4.5484,[98]4.5443,[99]4.5292,
save_imatrix: stored collected data after 100 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat
[100]4.5199,[101]4.5321,[102]4.5079,[103]4.4911,[104]4.4818,[105]4.4947,[106]4.5010,[107]4.5164,[108]4.5313,[109]4.4987,
save_imatrix: stored collected data after 110 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat
[110]4.4669,[111]4.4338,[112]4.4028,[113]4.3726,[114]4.3421,[115]4.3136,[116]4.2847,[117]4.2740,[118]4.2853,[119]4.2989,
save_imatrix: stored collected data after 120 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat
[120]4.3347,[121]4.3653,[122]4.4021,[123]4.4374,[124]4.4908,[125]4.5376,[126]4.5504,[127]4.5590,[128]4.5281,[129]4.5314,
save_imatrix: stored collected data after 130 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat
[130]4.5324,[131]4.5179,[132]4.4969,[133]4.4791,[134]4.4977,[135]4.5185,[136]4.5240,[137]4.5240,[138]4.5372,[139]4.5527,
save_imatrix: stored collected data after 140 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat
[140]4.5666,[141]4.5727,[142]4.5798,[143]4.5811,[144]4.5694,[145]4.5811,[146]4.5913,[147]4.5950,[148]4.6098,[149]4.6184,
save_imatrix: stored collected data after 150 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat
[150]4.6283,[151]4.6390,
save_imatrix: stored collected data after 151 chunks in K2-ckpt_360-IMat-GGUF/imatrix.dat

llama_print_timings: load time = 80625.37 ms
llama_print_timings: sample time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_print_timings: prompt eval time = 5710379.48 ms / 77312 tokens ( 73.86 ms per token, 13.54 tokens per second)
llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_print_timings: total time = 5750793.99 ms / 77313 tokens

Final estimate: PPL = 4.6390 +/- 0.05047
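As a final sanity check, the prompt-eval throughput figures agree with each other (values copied from the timing block above):

ms, tokens = 5710379.48, 77312
print(ms / tokens)           # -> ~73.86 ms per token
print(tokens / (ms / 1000))  # -> ~13.54 tokens per second

The imatrix.dat produced here is typically passed on to llama.cpp's quantize tool via its --imatrix option to guide low-bit quantizations of the same model.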