ZeroWw committed
Commit ba613c0
1 Parent(s): a94124d

Upload folder using huggingface_hub

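The commit message names the huggingface_hub API. A minimal sketch of how a commit like this is typically produced with `upload_folder`; the repo id and local folder path below are assumptions, not taken from the commit itself:

```python
from huggingface_hub import upload_folder

# Hypothetical repo id and local folder; only the commit message is from this page.
upload_folder(
    repo_id="ZeroWw/Llama-3-8B-Instruct-Gradient-1048k-GGUF",
    folder_path="./Llama-3-8B-Instruct-Gradient-1048k-GGUF",
    commit_message="Upload folder using huggingface_hub",
)
```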
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ Llama-3-8B-Instruct-Gradient-1048k.f16.gguf filter=lfs diff=lfs merge=lfs -text
+ Llama-3-8B-Instruct-Gradient-1048k.q5_k.gguf filter=lfs diff=lfs merge=lfs -text
+ Llama-3-8B-Instruct-Gradient-1048k.q6_k.gguf filter=lfs diff=lfs merge=lfs -text
+ Llama-3-8B-Instruct-Gradient-1048k.q8_0.gguf filter=lfs diff=lfs merge=lfs -text
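Entries like these are what `git lfs track` writes into .gitattributes, routing the large GGUF files through Git LFS. A sketch of the local equivalent, assuming git-lfs is installed and the working directory is the repo root:

```python
import subprocess

# Register each new GGUF file with Git LFS; this appends the
# "filter=lfs diff=lfs merge=lfs -text" lines shown above to .gitattributes.
for name in [
    "Llama-3-8B-Instruct-Gradient-1048k.f16.gguf",
    "Llama-3-8B-Instruct-Gradient-1048k.q5_k.gguf",
    "Llama-3-8B-Instruct-Gradient-1048k.q6_k.gguf",
    "Llama-3-8B-Instruct-Gradient-1048k.q8_0.gguf",
]:
    subprocess.run(["git", "lfs", "track", name], check=True)
```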
Llama-3-8B-Instruct-Gradient-1048k.f16.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:14306135b6d8afa069d3604f0386fe9bf4d2070589a56cd41168145f5e93115e
+ size 16068890912
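Each ADDED entry is a Git LFS pointer rather than the file itself: `version` names the pointer spec, `oid` is the SHA-256 of the actual blob, and `size` is its byte count. A minimal sketch for checking a downloaded file against the pointer above, assuming the file sits in the current directory:

```python
import hashlib
import os

# oid and size are copied from the LFS pointer above; the local path is an assumption.
path = "Llama-3-8B-Instruct-Gradient-1048k.f16.gguf"
expected_oid = "14306135b6d8afa069d3604f0386fe9bf4d2070589a56cd41168145f5e93115e"
expected_size = 16068890912

assert os.path.getsize(path) == expected_size, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    # Hash in 1 MiB chunks so the ~16 GB file never has to fit in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("pointer oid and size match")
```

The same check applies to the other pointer files below, substituting their oid and size.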
Llama-3-8B-Instruct-Gradient-1048k.q5_k.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7a47142439baf7b4c8c58237e352fd91b306a60650fd3e6d748c9053e32ad86
+ size 7042224416
Llama-3-8B-Instruct-Gradient-1048k.q6_k.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:455467581d725df8b947a7b568d1a7f0cf37a22884696d3fa9b2c91a74ae5f09
+ size 7835472160
Llama-3-8B-Instruct-Gradient-1048k.q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea7b17338af5bdaf825cf478ad5b1a69ef32f9a17c8561843afff2f389708ffb
+ size 9525776672
README.md ADDED
@@ -0,0 +1,14 @@
+
+ ---
+ license: mit
+ language:
+ - en
+ ---
+
+ My own (ZeroWw) quantizations.
+ Output and embed tensors are quantized to f16.
+ All other tensors are quantized to q5_k or q6_k.
+
+ Result:
+ Both the f16.q6 and f16.q5 variants are smaller than the standard q8_0 quantization,
+ and they perform as well as the pure f16.
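A sketch of how the mixed quantization described in the README could be reproduced with llama.cpp's `llama-quantize` tool, assuming a build whose quantize tool supports the `--output-tensor-type` and `--token-embedding-type` flags; the binary path is an assumption:

```python
import subprocess

# Keep output.weight and the token embeddings at f16 while quantizing
# everything else to q5_k, matching the recipe described in the README.
subprocess.run(
    [
        "./llama-quantize",                               # assumed path to the llama.cpp tool
        "--output-tensor-type", "f16",
        "--token-embedding-type", "f16",
        "Llama-3-8B-Instruct-Gradient-1048k.f16.gguf",    # f16 source from this commit
        "Llama-3-8B-Instruct-Gradient-1048k.q5_k.gguf",   # mixed-precision output
        "q5_k",                                           # base type for the remaining tensors
    ],
    check=True,
)
```

Repeating the invocation with `q6_k` as the base type would presumably yield the q6_k variant.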