---
license: llama3
pipeline_tag: text-generation
---

# Meta-Llama-3-8B-Instruct-bf16-GGUF

This is [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct), converted to GGUF without changing the tensor data type (the weights remain bf16). In addition, the new, correct `llama-bpe` pre-tokenizer is used ([ref](https://github.com/ggerganov/llama.cpp/pull/6745#issuecomment-2094991999)), and the EOS token is correctly set to `<|eot_id|>` ([ref](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct/commit/a8977699a3d0820e80129fb3c93c20fbd9972c41)).

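Both tokenizer fixes can be verified directly from the file's metadata. As a minimal sketch (not part of the original conversion tooling), the `gguf` Python package that ships with llama.cpp can read these fields without loading any tensors; the file path below is an example, and the low-level `parts`/`data` access follows gguf-py's reader internals, which may change between versions:

```python
# Minimal metadata check using the gguf package (pip install gguf).
# The path is an example; point it at the downloaded GGUF file.
import numpy as np
from gguf import GGUFReader

reader = GGUFReader("Meta-Llama-3-8B-Instruct-bf16.gguf")

for key in ("tokenizer.ggml.pre", "tokenizer.ggml.eos_token_id"):
    field = reader.get_field(key)
    raw = field.parts[field.data[-1]]  # numpy array holding the value itself
    value = bytes(raw).decode("utf-8") if raw.dtype == np.uint8 else raw[0]
    print(f"{key} = {value}")

# Expected output, matching the log below:
#   tokenizer.ggml.pre = llama-bpe
#   tokenizer.ggml.eos_token_id = 128009
```
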
The `llama.cpp` output for this model is shown below for reference.

```
Log start
main: build = 2842 (18e43766)
main: built with cc (Debian 12.2.0-14) 12.2.0 for x86_64-linux-gnu
main: seed = 1715355914
llama_model_loader: loaded meta data with 22 key-value pairs and 291 tensors from /media/dylan/SanDisk/LLMs/Meta-Llama-3-8B-Instruct-bf16.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = llama
llama_model_loader: - kv 1: general.name str = Meta-Llama-3-8B-Instruct
llama_model_loader: - kv 2: llama.block_count u32 = 32
llama_model_loader: - kv 3: llama.context_length u32 = 8192
llama_model_loader: - kv 4: llama.embedding_length u32 = 4096
llama_model_loader: - kv 5: llama.feed_forward_length u32 = 14336
llama_model_loader: - kv 6: llama.attention.head_count u32 = 32
llama_model_loader: - kv 7: llama.attention.head_count_kv u32 = 8
llama_model_loader: - kv 8: llama.rope.freq_base f32 = 500000.000000
llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
llama_model_loader: - kv 10: general.file_type u32 = 32
llama_model_loader: - kv 11: llama.vocab_size u32 = 128256
llama_model_loader: - kv 12: llama.rope.dimension_count u32 = 128
llama_model_loader: - kv 13: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 14: tokenizer.ggml.pre str = llama-bpe
llama_model_loader: - kv 15: tokenizer.ggml.tokens arr[str,128256] = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv 16: tokenizer.ggml.token_type arr[i32,128256] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 17: tokenizer.ggml.merges arr[str,280147] = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "...
llama_model_loader: - kv 18: tokenizer.ggml.bos_token_id u32 = 128000
llama_model_loader: - kv 19: tokenizer.ggml.eos_token_id u32 = 128009
llama_model_loader: - kv 20: tokenizer.chat_template str = {% set loop_messages = messages %}{% ...
llama_model_loader: - kv 21: general.quantization_version u32 = 2
llama_model_loader: - type f32: 65 tensors
llama_model_loader: - type bf16: 226 tensors
llm_load_vocab: special tokens definition check successful ( 256/128256 ).
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = BPE
llm_load_print_meta: n_vocab = 128256
llm_load_print_meta: n_merges = 280147
llm_load_print_meta: n_ctx_train = 8192
llm_load_print_meta: n_embd = 4096
llm_load_print_meta: n_head = 32
llm_load_print_meta: n_head_kv = 8
llm_load_print_meta: n_layer = 32
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 4
llm_load_print_meta: n_embd_k_gqa = 1024
llm_load_print_meta: n_embd_v_gqa = 1024
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 14336
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 0
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 500000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_yarn_orig_ctx = 8192
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: model type = 8B
llm_load_print_meta: model ftype = BF16
llm_load_print_meta: model params = 8.03 B
llm_load_print_meta: model size = 14.96 GiB (16.00 BPW)
llm_load_print_meta: general.name = Meta-Llama-3-8B-Instruct
llm_load_print_meta: BOS token = 128000 '<|begin_of_text|>'
llm_load_print_meta: EOS token = 128009 '<|eot_id|>'
llm_load_print_meta: LF token = 128 'Ä'
llm_load_print_meta: EOT token = 128009 '<|eot_id|>'
```
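
Because the EOS token is set to `<|eot_id|>` and a chat template is embedded in the metadata (`tokenizer.chat_template` above), generation should stop correctly without manual prompt formatting. As a hedged usage sketch (not from the original card), here is how the file might be loaded with llama-cpp-python; the model path is an example:

```python
# Usage sketch, assuming llama-cpp-python (pip install llama-cpp-python).
from llama_cpp import Llama

llm = Llama(
    model_path="Meta-Llama-3-8B-Instruct-bf16.gguf",  # example path
    n_ctx=8192,  # matches llama.context_length in the metadata above
)

# Recent llama-cpp-python releases pick up tokenizer.chat_template from the
# GGUF metadata, so messages are formatted as Llama 3 chat automatically.
response = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Hello! Who are you?"}],
    max_tokens=128,
)
print(response["choices"][0]["message"]["content"])
```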