Upload folder using huggingface_hub
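The commit title refers to the `upload_folder` method of the `huggingface_hub` Python library. A minimal sketch of how an upload like this one is typically performed; the local path and repo id below are placeholders, not values taken from this commit:

```python
from huggingface_hub import HfApi

api = HfApi()  # authenticates with the token from `huggingface-cli login`

# Upload every file in the local folder in a single commit.
# Large binaries such as .gguf files are stored on the Hub via Git LFS,
# which is why the .gitattributes diff below gains one rule per model file.
api.upload_folder(
    folder_path="./NeuralDaredevil-8B-abliterated-GGUF",   # placeholder path
    repo_id="ZeroWw/NeuralDaredevil-8B-abliterated-GGUF",  # placeholder repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```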
.gitattributes
CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+NeuralDaredevil-8B-abliterated.f16.gguf filter=lfs diff=lfs merge=lfs -text
+NeuralDaredevil-8B-abliterated.q5_k.gguf filter=lfs diff=lfs merge=lfs -text
+NeuralDaredevil-8B-abliterated.q6_k.gguf filter=lfs diff=lfs merge=lfs -text
+NeuralDaredevil-8B-abliterated.q8_0.gguf filter=lfs diff=lfs merge=lfs -text
NeuralDaredevil-8B-abliterated.f16.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6fa29cd7ba09817d08545d2376fc8575919a80657b4e39e2f29b967724e22e7
+size 16068890912
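Each of the ADDED `.gguf` entries in this commit is a Git LFS pointer file: the three lines (spec version, sha256 oid, byte size) are what git actually stores, while the multi-gigabyte blob itself lives in LFS storage. An illustrative parser for this pointer format (the helper name is my own, not from any library):

```python
def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; split on the first space.
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:b6fa29cd7ba09817d08545d2376fc8575919a80657b4e39e2f29b967724e22e7
size 16068890912"""

info = parse_lfs_pointer(pointer)
print(info["oid"])               # sha256:b6fa29cd...
print(int(info["size"]) / 1e9)   # ~16.07, i.e. the f16 file is ~16 GB
```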
NeuralDaredevil-8B-abliterated.q5_k.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7418562c9947e2bb38159252cf2659690ea99bd8c7ac912d652cd23bd8888559
+size 7042224416
NeuralDaredevil-8B-abliterated.q6_k.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e772a22defa217c39613c98dad2d39164089ed49fb210ff245e03bcfc0ddcda4
+size 7835472160
NeuralDaredevil-8B-abliterated.q8_0.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:436c3da46625fecd7c9aa6c27944f38866d6f1ad4a72e2fa16aaa15b1f93b127
+size 9525776672
README.md
ADDED
@@ -0,0 +1,14 @@
+
+---
+license: mit
+language:
+- en
+---
+
+My own (ZeroWw) quantizations.
+Output and embed tensors are quantized to f16;
+all other tensors are quantized to q5_k or q6_k.
+
+Result: both the f16.q6 and f16.q5 variants are smaller than the
+standard q8_0 quantization, and they perform as well as the pure
+f16.
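For completeness, a quantized file from a repo like this is usually fetched with `hf_hub_download` and run with a GGUF-capable runtime such as llama-cpp-python. A sketch; the repo id is inferred from the README's "ZeroWw" attribution and is an assumption, not stated in the commit:

```python
from huggingface_hub import hf_hub_download
from llama_cpp import Llama  # pip install llama-cpp-python

# Assumed repo id; substitute the actual repository this commit belongs to.
model_path = hf_hub_download(
    repo_id="ZeroWw/NeuralDaredevil-8B-abliterated-GGUF",
    filename="NeuralDaredevil-8B-abliterated.q5_k.gguf",
)

llm = Llama(model_path=model_path, n_ctx=4096)
out = llm("Q: What is a GGUF file? A:", max_tokens=64)
print(out["choices"][0]["text"])
```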