ddh0 committed
Commit: 3ba8e8d
Parent: e0e725f

Create README.md

Files changed (1):
  1. README.md (+88, -0)
README.md (new file):

---
license: llama3
pipeline_tag: text-generation
---
# Meta-Llama-3-70B-Instruct-bf16-GGUF

This is [meta-llama/Meta-Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct), converted to GGUF without changing the tensor data type (the weights remain in bf16). The conversion uses the correct `llama-bpe` pre-tokenizer ([ref](https://github.com/ggerganov/llama.cpp/pull/6745#issuecomment-2094991999)), and the EOS token is correctly set to `<|eot_id|>` ([ref](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct/commit/a8977699a3d0820e80129fb3c93c20fbd9972c41)).
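For reference, a conversion of this kind would typically be done with llama.cpp's `convert-hf-to-gguf.py` script. The command below is only a rough sketch, not necessarily the exact command used for this repo: the paths are placeholders, and whether `--outtype bf16` is available depends on the llama.cpp version.

```
# Sketch only: script flags vary between llama.cpp versions.
# --outtype bf16 keeps the original bf16 tensor data instead of converting it.
python convert-hf-to-gguf.py /path/to/Meta-Llama-3-70B-Instruct \
    --outtype bf16 \
    --outfile Meta-Llama-3-70B-Instruct-bf16.gguf
```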
The `llama.cpp` output for this model is shown below for reference.

```
Log start
main: build = 2842 (18e43766)
main: built with cc (Debian 12.2.0-14) 12.2.0 for x86_64-linux-gnu
main: seed = 1715361717
llama_model_loader: loaded meta data with 22 key-value pairs and 723 tensors from /media/dylan/SanDisk/LLMs/Meta-Llama-3-70B-Instruct-bf16.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = llama
llama_model_loader: - kv 1: general.name str = Meta-Llama-3-70B-Instruct
llama_model_loader: - kv 2: llama.block_count u32 = 80
llama_model_loader: - kv 3: llama.context_length u32 = 8192
llama_model_loader: - kv 4: llama.embedding_length u32 = 8192
llama_model_loader: - kv 5: llama.feed_forward_length u32 = 28672
llama_model_loader: - kv 6: llama.attention.head_count u32 = 64
llama_model_loader: - kv 7: llama.attention.head_count_kv u32 = 8
llama_model_loader: - kv 8: llama.rope.freq_base f32 = 500000.000000
llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
llama_model_loader: - kv 10: general.file_type u32 = 32
llama_model_loader: - kv 11: llama.vocab_size u32 = 128256
llama_model_loader: - kv 12: llama.rope.dimension_count u32 = 128
llama_model_loader: - kv 13: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 14: tokenizer.ggml.pre str = llama-bpe
llama_model_loader: - kv 15: tokenizer.ggml.tokens arr[str,128256] = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv 16: tokenizer.ggml.token_type arr[i32,128256] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 17: tokenizer.ggml.merges arr[str,280147] = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "...
llama_model_loader: - kv 18: tokenizer.ggml.bos_token_id u32 = 128000
llama_model_loader: - kv 19: tokenizer.ggml.eos_token_id u32 = 128009
llama_model_loader: - kv 20: tokenizer.chat_template str = {% set loop_messages = messages %}{% ...
llama_model_loader: - kv 21: general.quantization_version u32 = 2
llama_model_loader: - type f32: 161 tensors
llama_model_loader: - type bf16: 562 tensors
llm_load_vocab: special tokens definition check successful ( 256/128256 ).
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = BPE
llm_load_print_meta: n_vocab = 128256
llm_load_print_meta: n_merges = 280147
llm_load_print_meta: n_ctx_train = 8192
llm_load_print_meta: n_embd = 8192
llm_load_print_meta: n_head = 64
llm_load_print_meta: n_head_kv = 8
llm_load_print_meta: n_layer = 80
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 8
llm_load_print_meta: n_embd_k_gqa = 1024
llm_load_print_meta: n_embd_v_gqa = 1024
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 28672
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 0
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 500000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_yarn_orig_ctx = 8192
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: model type = 70B
llm_load_print_meta: model ftype = BF16
llm_load_print_meta: model params = 70.55 B
llm_load_print_meta: model size = 131.42 GiB (16.00 BPW)
llm_load_print_meta: general.name = Meta-Llama-3-70B-Instruct
llm_load_print_meta: BOS token = 128000 '<|begin_of_text|>'
llm_load_print_meta: EOS token = 128009 '<|eot_id|>'
llm_load_print_meta: LF token = 128 'Ä'
llm_load_print_meta: EOT token = 128009 '<|eot_id|>'
```
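As a rough usage sketch (not part of the original notes above), the file can be run with llama.cpp's `main` example. The model path, context size, generation length, and prompt below are placeholders; the prompt hand-writes the Llama 3 chat format using the special tokens listed in the log, and `main` normally adds the `<|begin_of_text|>` BOS token on its own, so it is not repeated in the prompt.

```
# Illustrative only: adjust the model path, -c/-n, and GPU offload (-ngl) for your setup.
# -e tells main to interpret the \n escapes in the prompt string.
./main -m /path/to/Meta-Llama-3-70B-Instruct-bf16.gguf \
  -c 8192 -n 256 -e \
  -p "<|start_header_id|>user<|end_header_id|>\n\nWrite a haiku about GGUF.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
```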