ZeroWw committed on
Commit 0d70ad8 · verified · 1 parent: f8eef0d

Upload folder using huggingface_hub

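The commit message points at huggingface_hub's folder-upload API. Below is a minimal sketch of how a commit like this is typically produced; the local folder path and the target repo_id are assumptions, not taken from this page.

```python
# Minimal sketch: push a folder of GGUF files to the Hub with huggingface_hub.
# folder_path and repo_id are assumptions; substitute your own values.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from HF_TOKEN or the local credential store
api.upload_folder(
    folder_path="./Phi3Unlocked",    # local folder holding the .gguf files and README.md
    repo_id="ZeroWw/Phi3Unlocked",   # hypothetical target model repository
    commit_message="Upload folder using huggingface_hub",
)
```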
.gitattributes CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Phi3Unlocked.f16.gguf filter=lfs diff=lfs merge=lfs -text
+Phi3Unlocked.q5_k.gguf filter=lfs diff=lfs merge=lfs -text
+Phi3Unlocked.q6_k.gguf filter=lfs diff=lfs merge=lfs -text
+Phi3Unlocked.q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+Phi3Unlocked.q8_p.gguf filter=lfs diff=lfs merge=lfs -text
+Phi3Unlocked.q8q4.gguf filter=lfs diff=lfs merge=lfs -text
Phi3Unlocked.f16.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51e0d38e597b5af3a3ae3f21928509cdbc11ecd088d02113d13d07468bb3911e
+size 7643296320
Phi3Unlocked.q5_k.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9c0ecfe10dc569d2df3cb796bdce3454bd8e8ea6fd8f9a63398d1db2b02ab78
+size 3060757056
Phi3Unlocked.q6_k.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a55c95c7d155efedbb03dade1fe5bd7972825cfe77317c55035c12dcad4a45f3
+size 3368251968
Phi3Unlocked.q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:648324136e551711f7486b522a9968f636702c58a4e65715a28b76dd1fce8568
+size 4245910080
Phi3Unlocked.q8_p.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f34bb5c798502e7f80faeb328d941cb0233cf1d106fe840aafc6200ff27a54e4
+size 4061221440
Phi3Unlocked.q8q4.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3c433a0914107e008f34e45d81419f95567ce12ecef5df39ef50ad5e7e238b7
+size 2466337344
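Each file above is stored as a Git LFS pointer rather than as the weights themselves: the three fields give the pointer spec version, the sha256 of the real file, and its size in bytes. A small sketch of checking a locally downloaded copy against the pointer, assuming the f16 file sits in the working directory:

```python
# Sketch: parse the Git LFS pointer shown above and verify a local download.
# The pointer text is copied from the diff; the local file path is an assumption.
import hashlib

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:51e0d38e597b5af3a3ae3f21928509cdbc11ecd088d02113d13d07468bb3911e
size 7643296320"""

fields = dict(line.split(" ", 1) for line in pointer.splitlines())
expected_oid = fields["oid"].split(":", 1)[1]   # hex sha256 of the real file
expected_size = int(fields["size"])             # size of the real file in bytes

sha = hashlib.sha256()
seen = 0
with open("Phi3Unlocked.f16.gguf", "rb") as f:  # hypothetical local path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
        seen += len(chunk)

assert seen == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("local file matches the LFS pointer")
```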
README.md ADDED
@@ -0,0 +1,17 @@
+
+---
+license: mit
+language:
+- en
+pipeline_tag: text-generation
+---
+
+My own (ZeroWw) quantizations.
+Output and embed tensors are quantized to f16;
+all other tensors are quantized to q5_k or q6_k.
+
+Result:
+both f16.q6 and f16.q5 are smaller than the standard q8_0 quantization,
+and they perform as well as the pure f16.
+
+Updated on: Fri Oct 25, 14:14:40
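The README's recipe (output and token-embedding tensors kept at f16, everything else at a k-quant) is the kind of mixed-precision quantization that llama.cpp's llama-quantize tool supports through per-tensor-type overrides. The sketch below drives it from Python; the binary path, input file name, and exact flag spellings are assumptions based on recent llama.cpp builds, not taken from this repository.

```python
# Sketch of the mixed-precision recipe described in the README, assuming a
# llama.cpp build with the llama-quantize tool in the current directory.
import subprocess

def quantize(src: str, dst: str, scheme: str) -> None:
    # Keep the output and token-embedding tensors at f16 and quantize the
    # remaining tensors to `scheme` (flag names as in recent llama.cpp builds).
    subprocess.run(
        [
            "./llama-quantize",
            "--output-tensor-type", "f16",
            "--token-embedding-type", "f16",
            src, dst, scheme,
        ],
        check=True,
    )

# Hypothetical file names mirroring the ones committed above; the scheme
# strings must match names the CLI accepts (e.g. q6_k, q8_0).
quantize("Phi3Unlocked.f16.gguf", "Phi3Unlocked.q6_k.gguf", "q6_k")
quantize("Phi3Unlocked.f16.gguf", "Phi3Unlocked.q8_0.gguf", "q8_0")
```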