bartowski committed
Commit eaab160 (0 parents)

Duplicate from bartowski/stable-code-instruct-3b-GGUF
.gitattributes ADDED
@@ -0,0 +1,51 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-IQ3_M.gguf filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-IQ3_S.gguf filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-IQ4_NL.gguf filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ stable-code-instruct-3b-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,105 @@
+ ---
+ license: other
+ language:
+ - en
+ tags:
+ - causal-lm
+ - code
+ metrics:
+ - code_eval
+ library_name: transformers
+ model-index:
+ - name: stabilityai/stable-code-instruct-3b
+   results:
+   - task:
+       type: text-generation
+     dataset:
+       type: nuprl/MultiPL-E
+       name: MultiPL-HumanEval (Python)
+     metrics:
+     - name: pass@1
+       type: pass@1
+       value: 32.4
+       verified: false
+   - task:
+       type: text-generation
+     dataset:
+       type: nuprl/MultiPL-E
+       name: MultiPL-HumanEval (C++)
+     metrics:
+     - name: pass@1
+       type: pass@1
+       value: 30.9
+       verified: false
+   - task:
+       type: text-generation
+     dataset:
+       type: nuprl/MultiPL-E
+       name: MultiPL-HumanEval (Java)
+     metrics:
+     - name: pass@1
+       type: pass@1
+       value: 32.1
+       verified: false
+   - task:
+       type: text-generation
+     dataset:
+       type: nuprl/MultiPL-E
+       name: MultiPL-HumanEval (JavaScript)
+     metrics:
+     - name: pass@1
+       type: pass@1
+       value: 32.1
+       verified: false
+   - task:
+       type: text-generation
+     dataset:
+       type: nuprl/MultiPL-E
+       name: MultiPL-HumanEval (PHP)
+     metrics:
+     - name: pass@1
+       type: pass@1
+       value: 24.2
+       verified: false
+   - task:
+       type: text-generation
+     dataset:
+       type: nuprl/MultiPL-E
+       name: MultiPL-HumanEval (Rust)
+     metrics:
+     - name: pass@1
+       type: pass@1
+       value: 23.0
+       verified: false
+ quantized_by: bartowski
+ pipeline_tag: text-generation
+ ---
+
+ ## Llamacpp Quantizations of stable-code-instruct-3b
+
+ Using <a href="https://github.com/ggerganov/llama.cpp/">llama.cpp</a> release <a href="https://github.com/ggerganov/llama.cpp/releases/tag/b2440">b2440</a> for quantization.
+
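For context, quantizing with llama.cpp usually means converting the original Hugging Face checkpoint to a full-precision GGUF and then quantizing that file to each target type. The sketch below is illustrative only and not taken from this repo: it assumes the original model has been cloned to a local `./stable-code-instruct-3b` directory and that the `convert-hf-to-gguf.py` script and `quantize` binary from a llama.cpp checkout around release b2440 are available in the working directory (names may differ in other releases).

```python
# Illustrative llama.cpp convert + quantize flow (assumed workflow, not from this repo).
import subprocess

# 1) Convert the HF checkpoint to an fp16 GGUF file.
subprocess.run(
    [
        "python", "convert-hf-to-gguf.py", "./stable-code-instruct-3b",
        "--outtype", "f16",
        "--outfile", "stable-code-instruct-3b-f16.gguf",
    ],
    check=True,
)

# 2) Quantize the fp16 GGUF to one of the types listed in this repo (Q4_K_M shown here).
subprocess.run(
    ["./quantize", "stable-code-instruct-3b-f16.gguf",
     "stable-code-instruct-3b-Q4_K_M.gguf", "Q4_K_M"],
    check=True,
)
```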
+ Original model: https://huggingface.co/stabilityai/stable-code-instruct-3b
+
+ Download a file (not the whole branch) from below:
+
+ | Filename | Quant type | File Size | Description |
+ | -------- | ---------- | --------- | ----------- |
+ | [stable-code-instruct-3b-Q8_0.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-Q8_0.gguf) | Q8_0 | 2.97GB | Extremely high quality, generally unneeded but max available quant. |
+ | [stable-code-instruct-3b-Q6_K.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-Q6_K.gguf) | Q6_K | 2.29GB | Very high quality, near perfect, *recommended*. |
+ | [stable-code-instruct-3b-Q5_K_M.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-Q5_K_M.gguf) | Q5_K_M | 1.99GB | High quality, very usable. |
+ | [stable-code-instruct-3b-Q5_K_S.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-Q5_K_S.gguf) | Q5_K_S | 1.94GB | High quality, very usable. |
+ | [stable-code-instruct-3b-Q5_0.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-Q5_0.gguf) | Q5_0 | 1.94GB | High quality, older format, generally not recommended. |
+ | [stable-code-instruct-3b-Q4_K_M.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-Q4_K_M.gguf) | Q4_K_M | 1.70GB | Good quality, similar to 4.25 bpw. |
+ | [stable-code-instruct-3b-Q4_K_S.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-Q4_K_S.gguf) | Q4_K_S | 1.62GB | Slightly lower quality with small space savings. |
+ | [stable-code-instruct-3b-IQ4_NL.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-IQ4_NL.gguf) | IQ4_NL | 1.61GB | Good quality, similar to Q4_K_S, uses a newer quantization method. |
+ | [stable-code-instruct-3b-IQ4_XS.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-IQ4_XS.gguf) | IQ4_XS | 1.53GB | Decent quality, new method with similar performance to Q4. |
+ | [stable-code-instruct-3b-Q4_0.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-Q4_0.gguf) | Q4_0 | 1.60GB | Decent quality, older format, generally not recommended. |
+ | [stable-code-instruct-3b-IQ3_M.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-IQ3_M.gguf) | IQ3_M | 1.31GB | Medium-low quality, new method with decent performance. |
+ | [stable-code-instruct-3b-IQ3_S.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-IQ3_S.gguf) | IQ3_S | 1.25GB | Lower quality, new method with decent performance, recommended over Q3 quants. |
+ | [stable-code-instruct-3b-Q3_K_L.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-Q3_K_L.gguf) | Q3_K_L | 1.50GB | Lower quality but usable, good for low RAM availability. |
+ | [stable-code-instruct-3b-Q3_K_M.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-Q3_K_M.gguf) | Q3_K_M | 1.39GB | Even lower quality. |
+ | [stable-code-instruct-3b-Q3_K_S.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-Q3_K_S.gguf) | Q3_K_S | 1.25GB | Low quality, not recommended. |
+ | [stable-code-instruct-3b-Q2_K.gguf](https://huggingface.co/bartowski/stable-code-instruct-3b-GGUF/blob/main/stable-code-instruct-3b-Q2_K.gguf) | Q2_K | 1.08GB | Extremely low quality, *not* recommended. |
+
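The card above asks you to download a single file rather than the whole branch. A minimal sketch of doing that with the `huggingface_hub` Python client follows; the repo id and filename are taken from the table above, and the chosen quant is only an example:

```python
# Minimal sketch: download one quant file from this repo with huggingface_hub.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="bartowski/stable-code-instruct-3b-GGUF",
    filename="stable-code-instruct-3b-Q4_K_M.gguf",  # pick any filename from the table
)
print(path)  # local path to the downloaded GGUF
```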
+ Want to support my work? Visit my ko-fi page here: https://ko-fi.com/bartowski
stable-code-instruct-3b-IQ3_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77523c943a759b64f7f48a2687f80f0865e20ccc9ea7a2579fb2e17077e8dc85
+ size 1319555296
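Each of the `*.gguf` entries in this commit is a Git LFS pointer recording the blob's `sha256` and byte size rather than the weights themselves. A quick way to check a downloaded file against the values in one of these pointers is sketched below; the local path is a placeholder, and the expected values are copied from the IQ3_M pointer above:

```python
# Sketch: verify a downloaded GGUF against the sha256/size recorded in its LFS pointer.
import hashlib
import os

path = "stable-code-instruct-3b-IQ3_M.gguf"  # placeholder local path
expected_sha256 = "77523c943a759b64f7f48a2687f80f0865e20ccc9ea7a2579fb2e17077e8dc85"
expected_size = 1319555296

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_sha256, "sha256 mismatch"
print("OK: file matches the LFS pointer")
```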
stable-code-instruct-3b-IQ3_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7eee007d3241e95ca4996e1a29f1f327f8427a8f7877e39486a3aa2c10df2eaa
+ size 1254449376
stable-code-instruct-3b-IQ4_NL.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f926d21d9a6e86d49ac93916b54ee6d2f6962c23bd6c9509ee6814237ca897df
+ size 1617418976
stable-code-instruct-3b-IQ4_XS.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d4b586e0d2674e2aa905dcb91674f101aa737e09cf654e4fdddebfde1117963
+ size 1536307936
stable-code-instruct-3b-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8f3dc6d820f71ddcb2bf0749abb1017cc6753f7e252e8b7751a58e64002c2e3
+ size 1083756256
stable-code-instruct-3b-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f9b7e7d477a756d2cbc7a6313843afa510111e6a64abc9c72b8d91d2cd50e3b
+ size 1508565216
stable-code-instruct-3b-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0abf49a249ce89be933c2f089b587859af8fb5d274f089441c76482ec36eb90
+ size 1391419616
stable-code-instruct-3b-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d920545957af897bafe514fefa0709e67db3d5daf6c82a8412cf76704f4b2744
+ size 1254449376
stable-code-instruct-3b-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f40640058302d69b949aa0521f78e28fee6d28ef3042723a6c12d85ef993708e
+ size 1608571616
stable-code-instruct-3b-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a3a4ca7e7b81a7dddb139c39817e0f2f7636d4d7c0b2087368b9c2eaf417434
+ size 1708595936
stable-code-instruct-3b-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f901215ade142d933591fbe50320e6b11e81840ca1ecd6d4753ca0f380e1d4ea
+ size 1620695776
stable-code-instruct-3b-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:226f3cc1e11c45dda8283a409b271a4d8bffadb274e980e07556949bb5f52b3e
+ size 1941863136
stable-code-instruct-3b-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f952b22a1682bb0c2862c7fb391833e0122d7192ce4f9270840f95a63cf403b8
+ size 1993390816
stable-code-instruct-3b-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c123f8cff87c3fd21ddf1a11d3c6aa15983fdfc32f936ca0fbf1d3f73de7397b
+ size 1941863136
stable-code-instruct-3b-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf5a4fa7220ccef6f4d58faf9bbf04c574101aed056aec5563d65115974d01d4
+ size 2295985376
stable-code-instruct-3b-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ffc06aacad9b90fe633c3920d3784618d7419e5704151e9ab7087a5958a3c63
+ size 2972926176