rpand002 committed on
Commit
2657bb0
1 Parent(s): 509c560

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +242 -242
README.md CHANGED
@@ -1,242 +1,242 @@
1
- ---
2
- pipeline_tag: text-generation
3
- inference: false
4
- license: apache-2.0
5
- datasets:
6
- - codeparrot/github-code-clean
7
- - bigcode/starcoderdata
8
- # - Stackexchange
9
- # - CommonCrawl
10
- - open-web-math/open-web-math
11
- - math-ai/StackMathQA
12
- # - Arxiv
13
- # - Wikipedia
14
- # - conceptofmind/FLAN_2022 # Original link is broken, we used IBM's filtered version
15
- metrics:
16
- - code_eval
17
- library_name: transformers
18
- tags:
19
- - code
20
- - granite
21
- model-index:
22
- - name: granite-3b-code-base
23
- results:
24
- - task:
25
- type: text-generation
26
- dataset:
27
- type: mbpp
28
- name: MBPP
29
- metrics:
30
- - name: pass@1
31
- type: pass@1
32
- value: 36.0
33
- veriefied: false
34
- - task:
35
- type: text-generation
36
- dataset:
37
- type: evalplus/mbppplus
38
- name: MBPP+
39
- metrics:
40
- - name: pass@1
41
- type: pass@1
42
- value: 45.1
43
- veriefied: false
44
- - task:
45
- type: text-generation
46
- dataset:
47
- type: bigcode/humanevalpack
48
- name: HumanEvalSynthesis(Python)
49
- metrics:
50
- - name: pass@1
51
- type: pass@1
52
- value: 36.6
53
- veriefied: false
54
- - task:
55
- type: text-generation
56
- dataset:
57
- type: bigcode/humanevalpack
58
- name: HumanEvalSynthesis(JavaScript)
59
- metrics:
60
- - name: pass@1
61
- type: pass@1
62
- value: 37.2
63
- veriefied: false
64
- - task:
65
- type: text-generation
66
- dataset:
67
- type: bigcode/humanevalpack
68
- name: HumanEvalSynthesis(Java)
69
- metrics:
70
- - name: pass@1
71
- type: pass@1
72
- value: 40.9
73
- veriefied: false
74
- - task:
75
- type: text-generation
76
- dataset:
77
- type: bigcode/humanevalpack
78
- name: HumanEvalSynthesis(Go)
79
- metrics:
80
- - name: pass@1
81
- type: pass@1
82
- value: 26.2
83
- veriefied: false
84
- - task:
85
- type: text-generation
86
- dataset:
87
- type: bigcode/humanevalpack
88
- name: HumanEvalSynthesis(C++)
89
- metrics:
90
- - name: pass@1
91
- type: pass@1
92
- value: 35.4
93
- veriefied: false
94
- - task:
95
- type: text-generation
96
- dataset:
97
- type: bigcode/humanevalpack
98
- name: HumanEvalSynthesis(Rust)
99
- metrics:
100
- - name: pass@1
101
- type: pass@1
102
- value: 22.0
103
- veriefied: false
104
- - task:
105
- type: text-generation
106
- dataset:
107
- type: bigcode/humanevalpack
108
- name: HumanEvalExplain(Python)
109
- metrics:
110
- - name: pass@1
111
- type: pass@1
112
- value: 25.0
113
- veriefied: false
114
- - task:
115
- type: text-generation
116
- dataset:
117
- type: bigcode/humanevalpack
118
- name: HumanEvalExplain(JavaScript)
119
- metrics:
120
- - name: pass@1
121
- type: pass@1
122
- value: 18.9
123
- veriefied: false
124
- - task:
125
- type: text-generation
126
- dataset:
127
- type: bigcode/humanevalpack
128
- name: HumanEvalExplain(Java)
129
- metrics:
130
- - name: pass@1
131
- type: pass@1
132
- value: 29.9
133
- veriefied: false
134
- - task:
135
- type: text-generation
136
- dataset:
137
- type: bigcode/humanevalpack
138
- name: HumanEvalExplain(Go)
139
- metrics:
140
- - name: pass@1
141
- type: pass@1
142
- value: 17.1
143
- veriefied: false
144
- - task:
145
- type: text-generation
146
- dataset:
147
- type: bigcode/humanevalpack
148
- name: HumanEvalExplain(C++)
149
- metrics:
150
- - name: pass@1
151
- type: pass@1
152
- value: 26.8
153
- veriefied: false
154
- - task:
155
- type: text-generation
156
- dataset:
157
- type: bigcode/humanevalpack
158
- name: HumanEvalExplain(Rust)
159
- metrics:
160
- - name: pass@1
161
- type: pass@1
162
- value: 14.0
163
- veriefied: false
164
- - task:
165
- type: text-generation
166
- dataset:
167
- type: bigcode/humanevalpack
168
- name: HumanEvalFix(Python)
169
- metrics:
170
- - name: pass@1
171
- type: pass@1
172
- value: 18.3
173
- veriefied: false
174
- - task:
175
- type: text-generation
176
- dataset:
177
- type: bigcode/humanevalpack
178
- name: HumanEvalFix(JavaScript)
179
- metrics:
180
- - name: pass@1
181
- type: pass@1
182
- value: 23.2
183
- veriefied: false
184
- - task:
185
- type: text-generation
186
- dataset:
187
- type: bigcode/humanevalpack
188
- name: HumanEvalFix(Java)
189
- metrics:
190
- - name: pass@1
191
- type: pass@1
192
- value: 29.9
193
- veriefied: false
194
- - task:
195
- type: text-generation
196
- dataset:
197
- type: bigcode/humanevalpack
198
- name: HumanEvalFix(Go)
199
- metrics:
200
- - name: pass@1
201
- type: pass@1
202
- value: 24.4
203
- veriefied: false
204
- - task:
205
- type: text-generation
206
- dataset:
207
- type: bigcode/humanevalpack
208
- name: HumanEvalFix(C++)
209
- metrics:
210
- - name: pass@1
211
- type: pass@1
212
- value: 16.5
213
- veriefied: false
214
- - task:
215
- type: text-generation
216
- dataset:
217
- type: bigcode/humanevalpack
218
- name: HumanEvalFix(Rust)
219
- metrics:
220
- - name: pass@1
221
- type: pass@1
222
- value: 3.7
223
- veriefied: false
224
- ---
225
-
226
- ![image/png](https://cdn-uploads.huggingface.co/production/uploads/62cd5057674cdb524450093d/1hzxoPwqkBJXshKVVe6_9.png)
227
-
228
- # ibm-granite/granite-3b-code-base-GGUF
229
- This is the Q4_K_M converted version of the original [`ibm-granite/granite-3b-code-base`](https://huggingface.co/ibm-granite/granite-3b-code-base).
230
- Refer to the [original model card](https://huggingface.co/ibm-granite/granite-3b-code-base) for more details.
231
-
232
- ## Use with llama.cpp
233
- ```shell
234
- git clone https://github.com/ggerganov/llama.cpp
235
- cd llama.cpp
236
-
237
- # install
238
- make
239
-
240
- # run generation
241
- ./main -m granite-3b-code-base-GGUF/granite-3b-code-base.Q4_K_M.gguf -n 128 -p "def generate_random(x: int):" --color
242
- ```
 
1
+ ---
2
+ pipeline_tag: text-generation
3
+ inference: false
4
+ license: apache-2.0
5
+ datasets:
6
+ - codeparrot/github-code-clean
7
+ - bigcode/starcoderdata
8
+ # - Stackexchange
9
+ # - CommonCrawl
10
+ - open-web-math/open-web-math
11
+ - math-ai/StackMathQA
12
+ # - Arxiv
13
+ # - Wikipedia
14
+ # - conceptofmind/FLAN_2022 # Original link is broken, we used IBM's filtered version
15
+ metrics:
16
+ - code_eval
17
+ library_name: transformers
18
+ tags:
19
+ - code
20
+ - granite
21
+ model-index:
22
+ - name: granite-3b-code-base-2k
23
+ results:
24
+ - task:
25
+ type: text-generation
26
+ dataset:
27
+ type: mbpp
28
+ name: MBPP
29
+ metrics:
30
+ - name: pass@1
31
+ type: pass@1
32
+ value: 36.0
33
+ verified: false
34
+ - task:
35
+ type: text-generation
36
+ dataset:
37
+ type: evalplus/mbppplus
38
+ name: MBPP+
39
+ metrics:
40
+ - name: pass@1
41
+ type: pass@1
42
+ value: 45.1
43
+ verified: false
44
+ - task:
45
+ type: text-generation
46
+ dataset:
47
+ type: bigcode/humanevalpack
48
+ name: HumanEvalSynthesis(Python)
49
+ metrics:
50
+ - name: pass@1
51
+ type: pass@1
52
+ value: 36.6
53
+ verified: false
54
+ - task:
55
+ type: text-generation
56
+ dataset:
57
+ type: bigcode/humanevalpack
58
+ name: HumanEvalSynthesis(JavaScript)
59
+ metrics:
60
+ - name: pass@1
61
+ type: pass@1
62
+ value: 37.2
63
+ verified: false
64
+ - task:
65
+ type: text-generation
66
+ dataset:
67
+ type: bigcode/humanevalpack
68
+ name: HumanEvalSynthesis(Java)
69
+ metrics:
70
+ - name: pass@1
71
+ type: pass@1
72
+ value: 40.9
73
+ verified: false
74
+ - task:
75
+ type: text-generation
76
+ dataset:
77
+ type: bigcode/humanevalpack
78
+ name: HumanEvalSynthesis(Go)
79
+ metrics:
80
+ - name: pass@1
81
+ type: pass@1
82
+ value: 26.2
83
+ verified: false
84
+ - task:
85
+ type: text-generation
86
+ dataset:
87
+ type: bigcode/humanevalpack
88
+ name: HumanEvalSynthesis(C++)
89
+ metrics:
90
+ - name: pass@1
91
+ type: pass@1
92
+ value: 35.4
93
+ verified: false
94
+ - task:
95
+ type: text-generation
96
+ dataset:
97
+ type: bigcode/humanevalpack
98
+ name: HumanEvalSynthesis(Rust)
99
+ metrics:
100
+ - name: pass@1
101
+ type: pass@1
102
+ value: 22.0
103
+ verified: false
104
+ - task:
105
+ type: text-generation
106
+ dataset:
107
+ type: bigcode/humanevalpack
108
+ name: HumanEvalExplain(Python)
109
+ metrics:
110
+ - name: pass@1
111
+ type: pass@1
112
+ value: 25.0
113
+ verified: false
114
+ - task:
115
+ type: text-generation
116
+ dataset:
117
+ type: bigcode/humanevalpack
118
+ name: HumanEvalExplain(JavaScript)
119
+ metrics:
120
+ - name: pass@1
121
+ type: pass@1
122
+ value: 18.9
123
+ verified: false
124
+ - task:
125
+ type: text-generation
126
+ dataset:
127
+ type: bigcode/humanevalpack
128
+ name: HumanEvalExplain(Java)
129
+ metrics:
130
+ - name: pass@1
131
+ type: pass@1
132
+ value: 29.9
133
+ verified: false
134
+ - task:
135
+ type: text-generation
136
+ dataset:
137
+ type: bigcode/humanevalpack
138
+ name: HumanEvalExplain(Go)
139
+ metrics:
140
+ - name: pass@1
141
+ type: pass@1
142
+ value: 17.1
143
+ verified: false
144
+ - task:
145
+ type: text-generation
146
+ dataset:
147
+ type: bigcode/humanevalpack
148
+ name: HumanEvalExplain(C++)
149
+ metrics:
150
+ - name: pass@1
151
+ type: pass@1
152
+ value: 26.8
153
+ verified: false
154
+ - task:
155
+ type: text-generation
156
+ dataset:
157
+ type: bigcode/humanevalpack
158
+ name: HumanEvalExplain(Rust)
159
+ metrics:
160
+ - name: pass@1
161
+ type: pass@1
162
+ value: 14.0
163
+ verified: false
164
+ - task:
165
+ type: text-generation
166
+ dataset:
167
+ type: bigcode/humanevalpack
168
+ name: HumanEvalFix(Python)
169
+ metrics:
170
+ - name: pass@1
171
+ type: pass@1
172
+ value: 18.3
173
+ verified: false
174
+ - task:
175
+ type: text-generation
176
+ dataset:
177
+ type: bigcode/humanevalpack
178
+ name: HumanEvalFix(JavaScript)
179
+ metrics:
180
+ - name: pass@1
181
+ type: pass@1
182
+ value: 23.2
183
+ verified: false
184
+ - task:
185
+ type: text-generation
186
+ dataset:
187
+ type: bigcode/humanevalpack
188
+ name: HumanEvalFix(Java)
189
+ metrics:
190
+ - name: pass@1
191
+ type: pass@1
192
+ value: 29.9
193
+ verified: false
194
+ - task:
195
+ type: text-generation
196
+ dataset:
197
+ type: bigcode/humanevalpack
198
+ name: HumanEvalFix(Go)
199
+ metrics:
200
+ - name: pass@1
201
+ type: pass@1
202
+ value: 24.4
203
+ verified: false
204
+ - task:
205
+ type: text-generation
206
+ dataset:
207
+ type: bigcode/humanevalpack
208
+ name: HumanEvalFix(C++)
209
+ metrics:
210
+ - name: pass@1
211
+ type: pass@1
212
+ value: 16.5
213
+ verified: false
214
+ - task:
215
+ type: text-generation
216
+ dataset:
217
+ type: bigcode/humanevalpack
218
+ name: HumanEvalFix(Rust)
219
+ metrics:
220
+ - name: pass@1
221
+ type: pass@1
222
+ value: 3.7
223
+ verified: false
224
+ ---
225
+
226
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/62cd5057674cdb524450093d/1hzxoPwqkBJXshKVVe6_9.png)
227
+
228
+ # ibm-granite/granite-3b-code-base-2k-GGUF
229
+ This is the Q4_K_M converted version of the original [`ibm-granite/granite-3b-code-base-2k`](https://huggingface.co/ibm-granite/granite-3b-code-base-2k).
230
+ Refer to the [original model card](https://huggingface.co/ibm-granite/granite-3b-code-base-2k) for more details.
231
+
232
+ ## Use with llama.cpp
233
+ ```shell
234
+ git clone https://github.com/ggerganov/llama.cpp
235
+ cd llama.cpp
236
+
237
+ # install
238
+ make
239
+
240
+ # run generation
241
+ ./main -m granite-3b-code-base-2k-GGUF/granite-3b-code-base-2k.Q4_K_M.gguf -n 128 -p "def generate_random(x: int):" --color
242
+ ```