ZeroWw committed
Commit 5d079ef
1 Parent(s): 8aa2d72

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ L3-SthenoMaid-8B-V1.f16.gguf filter=lfs diff=lfs merge=lfs -text
+ L3-SthenoMaid-8B-V1.q5_k.gguf filter=lfs diff=lfs merge=lfs -text
+ L3-SthenoMaid-8B-V1.q6_k.gguf filter=lfs diff=lfs merge=lfs -text
+ L3-SthenoMaid-8B-V1.q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+ L3-SthenoMaid-8B-V1.q8_p.gguf filter=lfs diff=lfs merge=lfs -text
L3-SthenoMaid-8B-V1.f16.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bccc5c40e8eb8af9a0c66605796090290e9e974b6377479753af38e26156279c
+ size 16068890944
L3-SthenoMaid-8B-V1.q5_k.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1855b5840e9b06fac33aab89144d708bef8b679fc62958583c76676d548d69e
+ size 7042224448
L3-SthenoMaid-8B-V1.q6_k.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e4abcbc1195fb151d18c45bda3fc306c35a006759da4490d86e98ac67eae607
+ size 7835472192
L3-SthenoMaid-8B-V1.q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2005012eee8053e4112dc24d733a67b4c62f2e6e1cdb8e1dcd07f0e66163c3e
+ size 9525776704
L3-SthenoMaid-8B-V1.q8_p.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbf5607b480d0968d245a6f0d9f90a35e26506ddd4ceb0e18755aeb0f80ca875
+ size 8540770624
README.md ADDED
@@ -0,0 +1,16 @@
+
+ ---
+ license: mit
+ language:
+ - en
+ ---
+
+ My own (ZeroWw) quantizations.
+ Output and embed tensors are quantized to f16.
+ All other tensors are quantized to q5_k or q6_k.
+
+ Result:
+ both the f16.q6 and f16.q5 variants are smaller than the standard q8_0 quantization,
+ and they perform as well as pure f16.
+
+ Updated on: Thu Jul 18, 10:07:00
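
For reference, below is a minimal sketch of how a mixed-precision set of GGUF files like this could be produced with llama.cpp's `llama-quantize` tool, assuming a recent build that exposes the `--output-tensor-type` and `--token-embedding-type` flags. The binary path, the suffix-to-type mapping, and the use of Python as a driver are illustrative assumptions, not taken from this commit.

```python
import subprocess

# Assumed path to the llama.cpp quantization binary.
QUANTIZE_BIN = "./llama-quantize"
# Full-precision f16 source GGUF (matches the file name used in this repo).
SRC = "L3-SthenoMaid-8B-V1.f16.gguf"

# Assumed mapping from the repo's file suffixes to llama-quantize type names.
VARIANTS = {
    "q5_k": "Q5_K_M",
    "q6_k": "Q6_K",
    "q8_0": "Q8_0",
}

for suffix, qtype in VARIANTS.items():
    dst = f"L3-SthenoMaid-8B-V1.{suffix}.gguf"
    # Keep output and token-embedding tensors at f16; quantize everything else
    # to the requested k-quant / q8_0 type.
    subprocess.run(
        [
            QUANTIZE_BIN,
            "--output-tensor-type", "f16",
            "--token-embedding-type", "f16",
            SRC,
            dst,
            qtype,
        ],
        check=True,
    )
```

Keeping the output and embedding tensors at f16 while quantizing the remaining tensors is what allows the q5_k/q6_k variants to stay smaller than a plain q8_0 file while, per the README above, performing on par with the pure f16 model.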