Upload folder using huggingface_hub
- .gitattributes +4 -0
- Meta-Llama-3-8B-Instruct.f16.gguf +3 -0
- Meta-Llama-3-8B-Instruct.q5_k.gguf +3 -0
- Meta-Llama-3-8B-Instruct.q6_k.gguf +3 -0
- Meta-Llama-3-8B-Instruct.q8_0.gguf +3 -0
- README.md +16 -0
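
The commit title matches huggingface_hub's upload_folder workflow. As a minimal sketch, this is the kind of call that produces a commit like this one; the local folder path and repo id below are placeholders, not values taken from the commit:

```python
# Sketch, not the author's actual script: upload a local folder of GGUF files
# to a model repo. Requires `pip install huggingface_hub` and an auth token
# (via `huggingface-cli login` or a token= argument).
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="./Meta-Llama-3-8B-Instruct-GGUF",          # placeholder local folder
    repo_id="your-username/Meta-Llama-3-8B-Instruct-GGUF",  # placeholder repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```

The Hub stores the large .gguf files through Git LFS, which is why the commit also extends .gitattributes with an LFS filter rule for each of them, as shown below.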
.gitattributes
CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3-8B-Instruct.f16.gguf filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3-8B-Instruct.q5_k.gguf filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3-8B-Instruct.q6_k.gguf filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3-8B-Instruct.q8_0.gguf filter=lfs diff=lfs merge=lfs -text
Meta-Llama-3-8B-Instruct.f16.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2296999531d6120801529a45b1d103f7370c5970be939ebfc2ba5d0833e9e1e
+size 16068890912
Meta-Llama-3-8B-Instruct.q5_k.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e41764bc2e639fa8dd084b087ec66cf0f9d06d5999a7434ea1dd10f0fca71246
+size 7042224416
Meta-Llama-3-8B-Instruct.q6_k.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89e45f8e321498289c758ded6e443a1a4faeb2ac7595107b3d4102a54f1c988c
+size 7835472160
Meta-Llama-3-8B-Instruct.q8_0.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31ac180019ceefdee35c490698d1f479745722fb9d38ab6feb02a161377672ab
+size 9525776672
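
The four "ADDED" blocks above are Git LFS pointer files, not the weights themselves: each records the LFS spec version, the sha256 of the real file (oid), and its size in bytes. A minimal sketch of checking a downloaded file against its pointer, using the oid and size from the q8_0 block above; the repo id is a placeholder:

```python
# Sketch: download one GGUF file and verify it against the sha256 and size
# recorded in its LFS pointer. The repo id is a placeholder; the expected
# oid and size are copied from the q8_0 pointer in this commit.
import hashlib
from huggingface_hub import hf_hub_download

EXPECTED_OID = "31ac180019ceefdee35c490698d1f479745722fb9d38ab6feb02a161377672ab"
EXPECTED_SIZE = 9525776672

path = hf_hub_download(
    repo_id="your-username/Meta-Llama-3-8B-Instruct-GGUF",  # placeholder repo id
    filename="Meta-Llama-3-8B-Instruct.q8_0.gguf",
)

sha = hashlib.sha256()
size = 0
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size}"
assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("q8_0 file matches its LFS pointer")
```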
README.md
ADDED
@@ -0,0 +1,16 @@
+
+---
+license: mit
+language:
+- en
+---
+
+My own (ZeroWw) quantizations.
+Output and embed tensors are quantized to f16;
+all other tensors are quantized to q5_k or q6_k.
+
+Result:
+both f16.q6 and f16.q5 are smaller than the standard q8_0 quantization,
+and they perform as well as the pure f16.
+
+Updated on: Wed Jul 10, 03:23:42
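
The README describes the recipe but not the command used. As a minimal sketch, mixed-precision GGUF files like these can be produced with llama.cpp's llama-quantize tool, assuming a build recent enough to support the --token-embedding-type and --output-tensor-type overrides; the binary path and file names below are placeholders:

```python
# Sketch of the recipe in the README: keep token-embedding and output tensors
# at f16, quantize all remaining tensors to Q5_K or Q6_K. Assumes a llama.cpp
# build whose llama-quantize binary has the type-override flags; the binary
# path is a placeholder.
import subprocess

SRC = "Meta-Llama-3-8B-Instruct.f16.gguf"  # full-precision source file

for qtype, out in [
    ("Q5_K", "Meta-Llama-3-8B-Instruct.q5_k.gguf"),
    ("Q6_K", "Meta-Llama-3-8B-Instruct.q6_k.gguf"),
]:
    subprocess.run(
        [
            "./llama-quantize",               # placeholder path to the binary
            "--token-embedding-type", "f16",  # keep embed tensors at f16
            "--output-tensor-type", "f16",    # keep the output tensor at f16
            SRC,
            out,
            qtype,                            # base type for all other tensors
        ],
        check=True,
    )
```

The q8_0 file in the commit would then correspond to a plain run of the same tool with Q8_0 as the type and no --*-type overrides.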