morriszms committed on
Commit
7d462b0
1 Parent(s): 2617d43

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ RoMistral-7b-Instruct-2024-05-17-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
37
+ RoMistral-7b-Instruct-2024-05-17-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
38
+ RoMistral-7b-Instruct-2024-05-17-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
39
+ RoMistral-7b-Instruct-2024-05-17-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
40
+ RoMistral-7b-Instruct-2024-05-17-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
41
+ RoMistral-7b-Instruct-2024-05-17-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
42
+ RoMistral-7b-Instruct-2024-05-17-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
43
+ RoMistral-7b-Instruct-2024-05-17-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
44
+ RoMistral-7b-Instruct-2024-05-17-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
45
+ RoMistral-7b-Instruct-2024-05-17-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
46
+ RoMistral-7b-Instruct-2024-05-17-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
47
+ RoMistral-7b-Instruct-2024-05-17-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,476 @@
1
+ ---
2
+ license: cc-by-nc-4.0
3
+ language:
4
+ - ro
5
+ base_model: OpenLLM-Ro/RoMistral-7b-Instruct-2024-05-17
6
+ datasets:
7
+ - OpenLLM-Ro/ro_sft_alpaca
8
+ - OpenLLM-Ro/ro_sft_alpaca_gpt4
9
+ - OpenLLM-Ro/ro_sft_dolly
10
+ - OpenLLM-Ro/ro_sft_selfinstruct_gpt4
11
+ - OpenLLM-Ro/ro_sft_norobots
12
+ - OpenLLM-Ro/ro_sft_orca
13
+ - OpenLLM-Ro/ro_sft_camel
14
+ tags:
15
+ - TensorBlock
16
+ - GGUF
17
+ model-index:
18
+ - name: OpenLLM-Ro/RoMistral-7b-Instruct-2024-05-17
19
+ results:
20
+ - task:
21
+ type: text-generation
22
+ dataset:
23
+ name: RoMT-Bench
24
+ type: RoMT-Bench
25
+ metrics:
26
+ - type: Score
27
+ value: 4.99
28
+ name: Score
29
+ - type: Score
30
+ value: 5.46
31
+ name: First turn
32
+ - type: Score
33
+ value: 4.53
34
+ name: Second turn
35
+ - task:
36
+ type: text-generation
37
+ dataset:
38
+ name: RoCulturaBench
39
+ type: RoCulturaBench
40
+ metrics:
41
+ - type: Score
42
+ value: 3.38
43
+ name: Score
44
+ - task:
45
+ type: text-generation
46
+ dataset:
47
+ name: Romanian_Academic_Benchmarks
48
+ type: Romanian_Academic_Benchmarks
49
+ metrics:
50
+ - type: accuracy
51
+ value: 52.54
52
+ name: Average accuracy
53
+ - task:
54
+ type: text-generation
55
+ dataset:
56
+ name: OpenLLM-Ro/ro_arc_challenge
57
+ type: OpenLLM-Ro/ro_arc_challenge
58
+ metrics:
59
+ - type: accuracy
60
+ value: 50.41
61
+ name: Average accuracy
62
+ - type: accuracy
63
+ value: 47.47
64
+ name: 0-shot
65
+ - type: accuracy
66
+ value: 48.59
67
+ name: 1-shot
68
+ - type: accuracy
69
+ value: 50.3
70
+ name: 3-shot
71
+ - type: accuracy
72
+ value: 51.33
73
+ name: 5-shot
74
+ - type: accuracy
75
+ value: 52.36
76
+ name: 10-shot
77
+ - type: accuracy
78
+ value: 52.44
79
+ name: 25-shot
80
+ - task:
81
+ type: text-generation
82
+ dataset:
83
+ name: OpenLLM-Ro/ro_mmlu
84
+ type: OpenLLM-Ro/ro_mmlu
85
+ metrics:
86
+ - type: accuracy
87
+ value: 51.61
88
+ name: Average accuracy
89
+ - type: accuracy
90
+ value: 50.01
91
+ name: 0-shot
92
+ - type: accuracy
93
+ value: 50.18
94
+ name: 1-shot
95
+ - type: accuracy
96
+ value: 53.13
97
+ name: 3-shot
98
+ - type: accuracy
99
+ value: 53.12
100
+ name: 5-shot
101
+ - task:
102
+ type: text-generation
103
+ dataset:
104
+ name: OpenLLM-Ro/ro_winogrande
105
+ type: OpenLLM-Ro/ro_winogrande
106
+ metrics:
107
+ - type: accuracy
108
+ value: 66.48
109
+ name: Average accuracy
110
+ - type: accuracy
111
+ value: 64.96
112
+ name: 0-shot
113
+ - type: accuracy
114
+ value: 67.09
115
+ name: 1-shot
116
+ - type: accuracy
117
+ value: 67.01
118
+ name: 3-shot
119
+ - type: accuracy
120
+ value: 66.85
121
+ name: 5-shot
122
+ - task:
123
+ type: text-generation
124
+ dataset:
125
+ name: OpenLLM-Ro/ro_hellaswag
126
+ type: OpenLLM-Ro/ro_hellaswag
127
+ metrics:
128
+ - type: accuracy
129
+ value: 60.27
130
+ name: Average accuracy
131
+ - type: accuracy
132
+ value: 59.99
133
+ name: 0-shot
134
+ - type: accuracy
135
+ value: 59.48
136
+ name: 1-shot
137
+ - type: accuracy
138
+ value: 60.14
139
+ name: 3-shot
140
+ - type: accuracy
141
+ value: 60.61
142
+ name: 5-shot
143
+ - type: accuracy
144
+ value: 61.12
145
+ name: 10-shot
146
+ - task:
147
+ type: text-generation
148
+ dataset:
149
+ name: OpenLLM-Ro/ro_gsm8k
150
+ type: OpenLLM-Ro/ro_gsm8k
151
+ metrics:
152
+ - type: accuracy
153
+ value: 34.19
154
+ name: Average accuracy
155
+ - type: accuracy
156
+ value: 21.68
157
+ name: 1-shot
158
+ - type: accuracy
159
+ value: 38.21
160
+ name: 3-shot
161
+ - type: accuracy
162
+ value: 42.68
163
+ name: 5-shot
164
+ - task:
165
+ type: text-generation
166
+ dataset:
167
+ name: OpenLLM-Ro/ro_truthfulqa
168
+ type: OpenLLM-Ro/ro_truthfulqa
169
+ metrics:
170
+ - type: accuracy
171
+ value: 52.3
172
+ name: Average accuracy
173
+ - task:
174
+ type: text-generation
175
+ dataset:
176
+ name: LaRoSeDa_binary
177
+ type: LaRoSeDa_binary
178
+ metrics:
179
+ - type: macro-f1
180
+ value: 97.36
181
+ name: Average macro-f1
182
+ - type: macro-f1
183
+ value: 97.27
184
+ name: 0-shot
185
+ - type: macro-f1
186
+ value: 96.37
187
+ name: 1-shot
188
+ - type: macro-f1
189
+ value: 97.97
190
+ name: 3-shot
191
+ - type: macro-f1
192
+ value: 97.83
193
+ name: 5-shot
194
+ - task:
195
+ type: text-generation
196
+ dataset:
197
+ name: LaRoSeDa_multiclass
198
+ type: LaRoSeDa_multiclass
199
+ metrics:
200
+ - type: macro-f1
201
+ value: 67.55
202
+ name: Average macro-f1
203
+ - type: macro-f1
204
+ value: 63.95
205
+ name: 0-shot
206
+ - type: macro-f1
207
+ value: 66.89
208
+ name: 1-shot
209
+ - type: macro-f1
210
+ value: 68.16
211
+ name: 3-shot
212
+ - type: macro-f1
213
+ value: 71.19
214
+ name: 5-shot
215
+ - task:
216
+ type: text-generation
217
+ dataset:
218
+ name: LaRoSeDa_binary_finetuned
219
+ type: LaRoSeDa_binary_finetuned
220
+ metrics:
221
+ - type: macro-f1
222
+ value: 98.8
223
+ name: Average macro-f1
224
+ - task:
225
+ type: text-generation
226
+ dataset:
227
+ name: LaRoSeDa_multiclass_finetuned
228
+ type: LaRoSeDa_multiclass_finetuned
229
+ metrics:
230
+ - type: macro-f1
231
+ value: 88.28
232
+ name: Average macro-f1
233
+ - task:
234
+ type: text-generation
235
+ dataset:
236
+ name: WMT_EN-RO
237
+ type: WMT_EN-RO
238
+ metrics:
239
+ - type: bleu
240
+ value: 27.93
241
+ name: Average bleu
242
+ - type: bleu
243
+ value: 24.87
244
+ name: 0-shot
245
+ - type: bleu
246
+ value: 28.3
247
+ name: 1-shot
248
+ - type: bleu
249
+ value: 29.26
250
+ name: 3-shot
251
+ - type: bleu
252
+ value: 29.27
253
+ name: 5-shot
254
+ - task:
255
+ type: text-generation
256
+ dataset:
257
+ name: WMT_RO-EN
258
+ type: WMT_RO-EN
259
+ metrics:
260
+ - type: bleu
261
+ value: 13.21
262
+ name: Average bleu
263
+ - type: bleu
264
+ value: 3.69
265
+ name: 0-shot
266
+ - type: bleu
267
+ value: 5.45
268
+ name: 1-shot
269
+ - type: bleu
270
+ value: 19.92
271
+ name: 3-shot
272
+ - type: bleu
273
+ value: 23.8
274
+ name: 5-shot
275
+ - task:
276
+ type: text-generation
277
+ dataset:
278
+ name: WMT_EN-RO_finetuned
279
+ type: WMT_EN-RO_finetuned
280
+ metrics:
281
+ - type: bleu
282
+ value: 28.72
283
+ name: Average bleu
284
+ - task:
285
+ type: text-generation
286
+ dataset:
287
+ name: WMT_RO-EN_finetuned
288
+ type: WMT_RO-EN_finetuned
289
+ metrics:
290
+ - type: bleu
291
+ value: 40.86
292
+ name: Average bleu
293
+ - task:
294
+ type: text-generation
295
+ dataset:
296
+ name: XQuAD
297
+ type: XQuAD
298
+ metrics:
299
+ - type: exact_match
300
+ value: 43.66
301
+ name: Average exact_match
302
+ - type: f1
303
+ value: 63.7
304
+ name: Average f1
305
+ - task:
306
+ type: text-generation
307
+ dataset:
308
+ name: XQuAD_finetuned
309
+ type: XQuAD_finetuned
310
+ metrics:
311
+ - type: exact_match
312
+ value: 55.04
313
+ name: Average exact_match
314
+ - type: f1
315
+ value: 72.31
316
+ name: Average f1
317
+ - task:
318
+ type: text-generation
319
+ dataset:
320
+ name: STS
321
+ type: STS
322
+ metrics:
323
+ - type: spearman
324
+ value: 77.43
325
+ name: Average spearman
326
+ - type: pearson
327
+ value: 78.43
328
+ name: Average pearson
329
+ - task:
330
+ type: text-generation
331
+ dataset:
332
+ name: STS_finetuned
333
+ type: STS_finetuned
334
+ metrics:
335
+ - type: spearman
336
+ value: 87.25
337
+ name: Average spearman
338
+ - type: pearson
339
+ value: 87.79
340
+ name: Average pearson
341
+ - task:
342
+ type: text-generation
343
+ dataset:
344
+ name: XQuAD_EM
345
+ type: XQuAD_EM
346
+ metrics:
347
+ - type: exact_match
348
+ value: 23.36
349
+ name: 0-shot
350
+ - type: exact_match
351
+ value: 47.98
352
+ name: 1-shot
353
+ - type: exact_match
354
+ value: 51.85
355
+ name: 3-shot
356
+ - type: exact_match
357
+ value: 51.43
358
+ name: 5-shot
359
+ - task:
360
+ type: text-generation
361
+ dataset:
362
+ name: XQuAD_F1
363
+ type: XQuAD_F1
364
+ metrics:
365
+ - type: f1
366
+ value: 46.29
367
+ name: 0-shot
368
+ - type: f1
369
+ value: 67.4
370
+ name: 1-shot
371
+ - type: f1
372
+ value: 70.58
373
+ name: 3-shot
374
+ - type: f1
375
+ value: 70.53
376
+ name: 5-shot
377
+ - task:
378
+ type: text-generation
379
+ dataset:
380
+ name: STS_Spearman
381
+ type: STS_Spearman
382
+ metrics:
383
+ - type: spearman
384
+ value: 77.91
385
+ name: 1-shot
386
+ - type: spearman
387
+ value: 77.73
388
+ name: 3-shot
389
+ - type: spearman
390
+ value: 76.65
391
+ name: 5-shot
392
+ - task:
393
+ type: text-generation
394
+ dataset:
395
+ name: STS_Pearson
396
+ type: STS_Pearson
397
+ metrics:
398
+ - type: pearson
399
+ value: 78.03
400
+ name: 1-shot
401
+ - type: pearson
402
+ value: 78.74
403
+ name: 3-shot
404
+ - type: pearson
405
+ value: 78.53
406
+ name: 5-shot
407
+ ---
408
+
409
+ <div style="width: auto; margin-left: auto; margin-right: auto">
410
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
411
+ </div>
412
+ <div style="display: flex; justify-content: space-between; width: 100%;">
413
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
414
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
415
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
416
+ </p>
417
+ </div>
418
+ </div>
419
+
420
+ ## OpenLLM-Ro/RoMistral-7b-Instruct-2024-05-17 - GGUF
421
+
422
+ This repo contains GGUF format model files for [OpenLLM-Ro/RoMistral-7b-Instruct-2024-05-17](https://huggingface.co/OpenLLM-Ro/RoMistral-7b-Instruct-2024-05-17).
423
+
424
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4242](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
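+
+ If you do not already have a llama.cpp build, one option is to build it from source. The sketch below is illustrative rather than part of the upstream card: it assumes a Unix-like machine with git and CMake installed, and pins the commit referenced above (any build at or after that commit should also work).
+
+ ```shell
+ # Fetch llama.cpp and check out the commit this repo was tested against
+ git clone https://github.com/ggerganov/llama.cpp
+ cd llama.cpp
+ git checkout a6744e43e80f4be6398fc7733a01642c846dce1d
+
+ # Standard CMake release build; the binaries end up in build/bin/
+ cmake -B build
+ cmake --build build --config Release
+ ```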
425
+
426
+ <div style="text-align: left; margin: 20px 0;">
427
+ <a href="https://tensorblock.co/waitlist/client" style="display: inline-block; padding: 10px 20px; background-color: #007bff; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
428
+ Run them on the TensorBlock client using your local machine ↗
429
+ </a>
430
+ </div>
431
+
432
+ ## Prompt template
433
+
434
+ ```
435
+ <s>{system_prompt} [INST] {prompt} [/INST]
436
+ ```
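+
+ For illustration (not part of the upstream model card), the template can be filled in and passed to llama.cpp's `llama-cli`. The model file, system prompt, and question below are placeholders; note that llama.cpp normally prepends the `<s>` BOS token itself, so only the text portion of the template is supplied.
+
+ ```shell
+ # Romanian system prompt ("You are a helpful assistant.") followed by a question in the [INST] block
+ ./build/bin/llama-cli \
+   -m RoMistral-7b-Instruct-2024-05-17-Q4_K_M.gguf \
+   -p "Ești un asistent util. [INST] Care este capitala României? [/INST]" \
+   -n 256
+ ```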
437
+
438
+ ## Model file specification
439
+
440
+ | Filename | Quant type | File Size | Description |
441
+ | -------- | ---------- | --------- | ----------- |
442
+ | [RoMistral-7b-Instruct-2024-05-17-Q2_K.gguf](https://huggingface.co/tensorblock/RoMistral-7b-Instruct-2024-05-17-GGUF/blob/main/RoMistral-7b-Instruct-2024-05-17-Q2_K.gguf) | Q2_K | 2.719 GB | smallest, significant quality loss - not recommended for most purposes |
443
+ | [RoMistral-7b-Instruct-2024-05-17-Q3_K_S.gguf](https://huggingface.co/tensorblock/RoMistral-7b-Instruct-2024-05-17-GGUF/blob/main/RoMistral-7b-Instruct-2024-05-17-Q3_K_S.gguf) | Q3_K_S | 3.165 GB | very small, high quality loss |
444
+ | [RoMistral-7b-Instruct-2024-05-17-Q3_K_M.gguf](https://huggingface.co/tensorblock/RoMistral-7b-Instruct-2024-05-17-GGUF/blob/main/RoMistral-7b-Instruct-2024-05-17-Q3_K_M.gguf) | Q3_K_M | 3.519 GB | very small, high quality loss |
445
+ | [RoMistral-7b-Instruct-2024-05-17-Q3_K_L.gguf](https://huggingface.co/tensorblock/RoMistral-7b-Instruct-2024-05-17-GGUF/blob/main/RoMistral-7b-Instruct-2024-05-17-Q3_K_L.gguf) | Q3_K_L | 3.822 GB | small, substantial quality loss |
446
+ | [RoMistral-7b-Instruct-2024-05-17-Q4_0.gguf](https://huggingface.co/tensorblock/RoMistral-7b-Instruct-2024-05-17-GGUF/blob/main/RoMistral-7b-Instruct-2024-05-17-Q4_0.gguf) | Q4_0 | 4.109 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
447
+ | [RoMistral-7b-Instruct-2024-05-17-Q4_K_S.gguf](https://huggingface.co/tensorblock/RoMistral-7b-Instruct-2024-05-17-GGUF/blob/main/RoMistral-7b-Instruct-2024-05-17-Q4_K_S.gguf) | Q4_K_S | 4.140 GB | small, greater quality loss |
448
+ | [RoMistral-7b-Instruct-2024-05-17-Q4_K_M.gguf](https://huggingface.co/tensorblock/RoMistral-7b-Instruct-2024-05-17-GGUF/blob/main/RoMistral-7b-Instruct-2024-05-17-Q4_K_M.gguf) | Q4_K_M | 4.368 GB | medium, balanced quality - recommended |
449
+ | [RoMistral-7b-Instruct-2024-05-17-Q5_0.gguf](https://huggingface.co/tensorblock/RoMistral-7b-Instruct-2024-05-17-GGUF/blob/main/RoMistral-7b-Instruct-2024-05-17-Q5_0.gguf) | Q5_0 | 4.998 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
450
+ | [RoMistral-7b-Instruct-2024-05-17-Q5_K_S.gguf](https://huggingface.co/tensorblock/RoMistral-7b-Instruct-2024-05-17-GGUF/blob/main/RoMistral-7b-Instruct-2024-05-17-Q5_K_S.gguf) | Q5_K_S | 4.998 GB | large, low quality loss - recommended |
451
+ | [RoMistral-7b-Instruct-2024-05-17-Q5_K_M.gguf](https://huggingface.co/tensorblock/RoMistral-7b-Instruct-2024-05-17-GGUF/blob/main/RoMistral-7b-Instruct-2024-05-17-Q5_K_M.gguf) | Q5_K_M | 5.131 GB | large, very low quality loss - recommended |
452
+ | [RoMistral-7b-Instruct-2024-05-17-Q6_K.gguf](https://huggingface.co/tensorblock/RoMistral-7b-Instruct-2024-05-17-GGUF/blob/main/RoMistral-7b-Instruct-2024-05-17-Q6_K.gguf) | Q6_K | 5.942 GB | very large, extremely low quality loss |
453
+ | [RoMistral-7b-Instruct-2024-05-17-Q8_0.gguf](https://huggingface.co/tensorblock/RoMistral-7b-Instruct-2024-05-17-GGUF/blob/main/RoMistral-7b-Instruct-2024-05-17-Q8_0.gguf) | Q8_0 | 7.696 GB | very large, extremely low quality loss - not recommended |
454
+
455
+
456
+ ## Downloading instructions
457
+
458
+ ### Command line
459
+
460
+ First, install the Hugging Face CLI:
461
+
462
+ ```shell
463
+ pip install -U "huggingface_hub[cli]"
464
+ ```
465
+
466
+ Then, download an individual model file to a local directory:
467
+
468
+ ```shell
469
+ huggingface-cli download tensorblock/RoMistral-7b-Instruct-2024-05-17-GGUF --include "RoMistral-7b-Instruct-2024-05-17-Q2_K.gguf" --local-dir MY_LOCAL_DIR
470
+ ```
471
+
472
+ If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:
473
+
474
+ ```shell
475
+ huggingface-cli download tensorblock/RoMistral-7b-Instruct-2024-05-17-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
476
+ ```
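+
+ Once a file is downloaded, one way to use it locally is llama.cpp's `llama-server`, which exposes an OpenAI-compatible HTTP endpoint. This is only a sketch; the path, context size, and port are placeholders.
+
+ ```shell
+ # Serve the downloaded GGUF file over HTTP on port 8080 with a 4096-token context window
+ ./build/bin/llama-server \
+   -m MY_LOCAL_DIR/RoMistral-7b-Instruct-2024-05-17-Q4_K_M.gguf \
+   -c 4096 --port 8080
+ ```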
RoMistral-7b-Instruct-2024-05-17-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8bbc2acd00ffb81fa5ab6f0fb65371dd4ab9a99f89848cd24d875e084ff88bbd
3
+ size 2719244640
RoMistral-7b-Instruct-2024-05-17-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:42d5dceef4d1e280d4153ae1924757907fd564d915afb4057c84a7a61795a03e
3
+ size 3822027104
RoMistral-7b-Instruct-2024-05-17-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bafee3c00ce1bdc6a3195fd2c3d90fc973b3be0e6d9cb1b373040f198f00cd66
3
+ size 3518988640
RoMistral-7b-Instruct-2024-05-17-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:813cb2a8c5a7373e69c15b941dc21a9b029e1ce4eaafa6b49a1d6f9c1923f8d1
3
+ size 3164569952
RoMistral-7b-Instruct-2024-05-17-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dff49b0cf32cdc2a32ca975dbe87e514acc54c304c27fdb0ec38b6dda802754
3
+ size 4108919136
RoMistral-7b-Instruct-2024-05-17-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1915c626eb9e9b9b1c88c29f070f644571fc8cf92fd58977e48adf15679c0973
3
+ size 4368441696
RoMistral-7b-Instruct-2024-05-17-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:530558a0cf420ee668fcb20acdbd203402966b1ff14dd909dd4d0eb8b5699dc7
3
+ size 4140376416
RoMistral-7b-Instruct-2024-05-17-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf4193cf182d655c62c566a7189f202e3805697e62b78f13dfef8f034a6aa621
3
+ size 4997718368
RoMistral-7b-Instruct-2024-05-17-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ee10112397106a1a536859541463c6b33db91d3c5e1ec5d5a427d9035b84dd2
3
+ size 5131411808
RoMistral-7b-Instruct-2024-05-17-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf122d15e432ddc2d9f5af8228cc2c84fca3264bfa9caab6b5c289a5a7d2f7a0
3
+ size 4997718368
RoMistral-7b-Instruct-2024-05-17-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:23344dd1d3c4bd058eb534aed420adee2fbd2142a20bd2fe8704f15871cb14de
3
+ size 5942067552
RoMistral-7b-Instruct-2024-05-17-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:497a3ef7861d2ad06a8b46d2f10a0d58ccf02221e05733018bb0423b14a116c7
3
+ size 7695860064