eduagarcia committed
Commit
fbb22d2
1 Parent(s): c8bb220

Uploading raw results for huggyllama/llama-7b

huggyllama/llama-7b/raw_2024-02-05T23-45-55.633251/results.json ADDED
@@ -0,0 +1,1259 @@
1
+ {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.3333333333333333,
5
+ "acc,all": 0.5,
6
+ "alias": "assin2_rte"
7
+ },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.07703722378109175,
10
+ "mse,all": 2.5286274509803923,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.2614742698191933,
15
+ "acc,exam_id__USP_2021": 0.23076923076923078,
16
+ "acc,exam_id__UNICAMP_2018": 0.2777777777777778,
17
+ "acc,exam_id__UNICAMP_2021_1": 0.32608695652173914,
18
+ "acc,exam_id__UNICAMP_2022": 0.3076923076923077,
19
+ "acc,exam_id__USP_2020": 0.30357142857142855,
20
+ "acc,exam_id__UNICAMP_2021_2": 0.29411764705882354,
21
+ "acc,exam_id__UNICAMP_2023": 0.4883720930232558,
22
+ "acc,exam_id__UNICAMP_2020": 0.18181818181818182,
23
+ "acc,exam_id__UNICAMP_2019": 0.2,
24
+ "acc,exam_id__USP_2019": 0.325,
25
+ "acc,exam_id__USP_2022": 0.24489795918367346,
26
+ "acc,exam_id__USP_2024": 0.14634146341463414,
27
+ "acc,exam_id__USP_2023": 0.18181818181818182,
28
+ "acc,exam_id__USP_2018": 0.18518518518518517,
29
+ "acc,exam_id__UNICAMP_2024": 0.26666666666666666,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.2365290412876137,
35
+ "acc,exam_id__2016": 0.23140495867768596,
36
+ "acc,exam_id__2015": 0.19327731092436976,
37
+ "acc,exam_id__2017": 0.2413793103448276,
38
+ "acc,exam_id__2010": 0.1794871794871795,
39
+ "acc,exam_id__2023": 0.25925925925925924,
40
+ "acc,exam_id__2013": 0.2222222222222222,
41
+ "acc,exam_id__2009": 0.2782608695652174,
42
+ "acc,exam_id__2022": 0.24812030075187969,
43
+ "acc,exam_id__2014": 0.1926605504587156,
44
+ "acc,exam_id__2016_2": 0.22764227642276422,
45
+ "acc,exam_id__2011": 0.2222222222222222,
46
+ "acc,exam_id__2012": 0.33620689655172414
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.5548892542799111,
50
+ "acc,all": 0.7276923076923076,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "oab_exams": {
54
+ "acc,all": 0.2742596810933941,
55
+ "acc,exam_id__2012-08": 0.2875,
56
+ "acc,exam_id__2013-10": 0.2125,
57
+ "acc,exam_id__2012-06a": 0.2,
58
+ "acc,exam_id__2012-06": 0.2875,
59
+ "acc,exam_id__2015-17": 0.3333333333333333,
60
+ "acc,exam_id__2016-21": 0.3,
61
+ "acc,exam_id__2011-03": 0.2727272727272727,
62
+ "acc,exam_id__2017-23": 0.325,
63
+ "acc,exam_id__2012-09": 0.15584415584415584,
64
+ "acc,exam_id__2013-11": 0.3,
65
+ "acc,exam_id__2017-24": 0.2875,
66
+ "acc,exam_id__2011-04": 0.3125,
67
+ "acc,exam_id__2014-15": 0.3333333333333333,
68
+ "acc,exam_id__2013-12": 0.2,
69
+ "acc,exam_id__2010-02": 0.34,
70
+ "acc,exam_id__2016-20": 0.2375,
71
+ "acc,exam_id__2017-22": 0.3,
72
+ "acc,exam_id__2015-18": 0.275,
73
+ "acc,exam_id__2010-01": 0.27058823529411763,
74
+ "acc,exam_id__2018-25": 0.3,
75
+ "acc,exam_id__2014-14": 0.2375,
76
+ "acc,exam_id__2014-13": 0.275,
77
+ "acc,exam_id__2012-07": 0.25,
78
+ "acc,exam_id__2016-19": 0.3076923076923077,
79
+ "acc,exam_id__2016-20a": 0.25,
80
+ "acc,exam_id__2015-16": 0.2625,
81
+ "acc,exam_id__2011-05": 0.275,
82
+ "alias": "oab_exams"
83
+ },
84
+ "sparrow_emotion-2021-cortiz-por": {
85
+ "alias": "emotion-2021-cortiz-por",
86
+ "f1_macro,all": 0.08365150307457563,
87
+ "acc,all": 0.142
88
+ },
89
+ "sparrow_hate-2019-fortuna-por": {
90
+ "alias": "hate-2019-fortuna-por",
91
+ "f1_macro,all": 0.5377762837213211,
92
+ "acc,all": 0.538
93
+ },
94
+ "sparrow_sentiment-2016-mozetic-por": {
95
+ "alias": "sentiment-2016-mozetic-por",
96
+ "f1_macro,all": 0.42328641755398966,
97
+ "acc,all": 0.638
98
+ },
99
+ "sparrow_sentiment-2018-brum-por": {
100
+ "alias": "sentiment-2018-brum-por",
101
+ "f1_macro,all": 0.32817916935199615,
102
+ "acc,all": 0.372
103
+ }
104
+ },
105
+ "configs": {
106
+ "assin2_rte": {
107
+ "task": "assin2_rte",
108
+ "group": [
109
+ "pt_benchmark",
110
+ "assin2"
111
+ ],
112
+ "dataset_path": "assin2",
113
+ "test_split": "test",
114
+ "fewshot_split": "train",
115
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa?\nResposta:",
116
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
117
+ "description": "Abaixo contém pares de premissa e hipótese, para cada par você deve julgar se a hipótese pode ser inferida a partir da premissa, responda apenas com Sim ou Não.\n\n",
118
+ "target_delimiter": " ",
119
+ "fewshot_delimiter": "\n\n",
120
+ "fewshot_config": {
121
+ "sampler": "id_sampler",
122
+ "sampler_config": {
123
+ "id_list": [
124
+ 1,
125
+ 3251,
126
+ 2,
127
+ 3252,
128
+ 3,
129
+ 4,
130
+ 5,
131
+ 6,
132
+ 3253,
133
+ 7,
134
+ 3254,
135
+ 3255,
136
+ 3256,
137
+ 8,
138
+ 9,
139
+ 10,
140
+ 3257,
141
+ 11,
142
+ 3258,
143
+ 12,
144
+ 13,
145
+ 14,
146
+ 15,
147
+ 3259,
148
+ 3260,
149
+ 3261,
150
+ 3262,
151
+ 3263,
152
+ 16,
153
+ 17,
154
+ 3264,
155
+ 18,
156
+ 3265,
157
+ 3266,
158
+ 3267,
159
+ 19,
160
+ 20,
161
+ 3268,
162
+ 3269,
163
+ 21,
164
+ 3270,
165
+ 3271,
166
+ 22,
167
+ 3272,
168
+ 3273,
169
+ 23,
170
+ 3274,
171
+ 24,
172
+ 25,
173
+ 3275
174
+ ],
175
+ "id_column": "sentence_pair_id"
176
+ }
177
+ },
178
+ "num_fewshot": 15,
179
+ "metric_list": [
180
+ {
181
+ "metric": "f1_macro",
182
+ "aggregation": "f1_macro",
183
+ "higher_is_better": true
184
+ },
185
+ {
186
+ "metric": "acc",
187
+ "aggregation": "acc",
188
+ "higher_is_better": true
189
+ }
190
+ ],
191
+ "output_type": "generate_until",
192
+ "generation_kwargs": {
193
+ "max_gen_toks": 32,
194
+ "do_sample": false,
195
+ "temperature": 0.0,
196
+ "top_k": null,
197
+ "top_p": null,
198
+ "until": [
199
+ "\n\n"
200
+ ]
201
+ },
202
+ "repeats": 1,
203
+ "filter_list": [
204
+ {
205
+ "name": "all",
206
+ "filter": [
207
+ {
208
+ "function": "find_similar_label",
209
+ "labels": [
210
+ "Sim",
211
+ "Não"
212
+ ]
213
+ },
214
+ {
215
+ "function": "take_first"
216
+ }
217
+ ]
218
+ }
219
+ ],
220
+ "should_decontaminate": false,
221
+ "metadata": {
222
+ "version": 1.0
223
+ }
224
+ },
225
+ "assin2_sts": {
226
+ "task": "assin2_sts",
227
+ "group": [
228
+ "pt_benchmark",
229
+ "assin2"
230
+ ],
231
+ "dataset_path": "assin2",
232
+ "test_split": "test",
233
+ "fewshot_split": "train",
234
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Qual o grau de similaridade entre as duas frases de 1,0 a 5,0?\nResposta:",
235
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f43b5621760>",
236
+ "description": "Abaixo contém pares de frases, para cada par você deve julgar o grau de similaridade de 1,0 a 5,0, responda apenas com o número.\n\n",
237
+ "target_delimiter": " ",
238
+ "fewshot_delimiter": "\n\n",
239
+ "fewshot_config": {
240
+ "sampler": "id_sampler",
241
+ "sampler_config": {
242
+ "id_list": [
243
+ 1,
244
+ 3251,
245
+ 2,
246
+ 3252,
247
+ 3,
248
+ 4,
249
+ 5,
250
+ 6,
251
+ 3253,
252
+ 7,
253
+ 3254,
254
+ 3255,
255
+ 3256,
256
+ 8,
257
+ 9,
258
+ 10,
259
+ 3257,
260
+ 11,
261
+ 3258,
262
+ 12,
263
+ 13,
264
+ 14,
265
+ 15,
266
+ 3259,
267
+ 3260,
268
+ 3261,
269
+ 3262,
270
+ 3263,
271
+ 16,
272
+ 17,
273
+ 3264,
274
+ 18,
275
+ 3265,
276
+ 3266,
277
+ 3267,
278
+ 19,
279
+ 20,
280
+ 3268,
281
+ 3269,
282
+ 21,
283
+ 3270,
284
+ 3271,
285
+ 22,
286
+ 3272,
287
+ 3273,
288
+ 23,
289
+ 3274,
290
+ 24,
291
+ 25,
292
+ 3275
293
+ ],
294
+ "id_column": "sentence_pair_id"
295
+ }
296
+ },
297
+ "num_fewshot": 15,
298
+ "metric_list": [
299
+ {
300
+ "metric": "pearson",
301
+ "aggregation": "pearsonr",
302
+ "higher_is_better": true
303
+ },
304
+ {
305
+ "metric": "mse",
306
+ "aggregation": "mean_squared_error",
307
+ "higher_is_better": false
308
+ }
309
+ ],
310
+ "output_type": "generate_until",
311
+ "generation_kwargs": {
312
+ "max_gen_toks": 32,
313
+ "do_sample": false,
314
+ "temperature": 0.0,
315
+ "top_k": null,
316
+ "top_p": null,
317
+ "until": [
318
+ "\n\n"
319
+ ]
320
+ },
321
+ "repeats": 1,
322
+ "filter_list": [
323
+ {
324
+ "name": "all",
325
+ "filter": [
326
+ {
327
+ "function": "number_filter",
328
+ "type": "float",
329
+ "range_min": 1.0,
330
+ "range_max": 5.0,
331
+ "on_outside_range": "clip",
332
+ "fallback": 5.0
333
+ },
334
+ {
335
+ "function": "take_first"
336
+ }
337
+ ]
338
+ }
339
+ ],
340
+ "should_decontaminate": false,
341
+ "metadata": {
342
+ "version": 1.0
343
+ }
344
+ },
345
+ "bluex": {
346
+ "task": "bluex",
347
+ "group": [
348
+ "pt_benchmark",
349
+ "vestibular"
350
+ ],
351
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
352
+ "test_split": "train",
353
+ "fewshot_split": "train",
354
+ "doc_to_text": "<function enem_doc_to_text at 0x7f43b5621120>",
355
+ "doc_to_target": "{{answerKey}}",
356
+ "description": "As perguntas a seguir são questões de multipla escolha de provas de vestibular de Universidades Brasileiras, reponda apenas com as letras A, B, C, D ou E.\n\n",
357
+ "target_delimiter": " ",
358
+ "fewshot_delimiter": "\n\n",
359
+ "fewshot_config": {
360
+ "sampler": "id_sampler",
361
+ "sampler_config": {
362
+ "id_list": [
363
+ "USP_2018_3",
364
+ "UNICAMP_2018_2",
365
+ "USP_2018_35",
366
+ "UNICAMP_2018_16",
367
+ "USP_2018_89"
368
+ ],
369
+ "id_column": "id",
370
+ "exclude_from_task": true
371
+ }
372
+ },
373
+ "num_fewshot": 3,
374
+ "metric_list": [
375
+ {
376
+ "metric": "acc",
377
+ "aggregation": "acc",
378
+ "higher_is_better": true
379
+ }
380
+ ],
381
+ "output_type": "generate_until",
382
+ "generation_kwargs": {
383
+ "max_gen_toks": 32,
384
+ "do_sample": false,
385
+ "temperature": 0.0,
386
+ "top_k": null,
387
+ "top_p": null,
388
+ "until": [
389
+ "\n\n"
390
+ ]
391
+ },
392
+ "repeats": 1,
393
+ "filter_list": [
394
+ {
395
+ "name": "all",
396
+ "filter": [
397
+ {
398
+ "function": "normalize_spaces"
399
+ },
400
+ {
401
+ "function": "remove_accents"
402
+ },
403
+ {
404
+ "function": "find_choices",
405
+ "choices": [
406
+ "A",
407
+ "B",
408
+ "C",
409
+ "D",
410
+ "E"
411
+ ],
412
+ "regex_patterns": [
413
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta[Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
414
+ "\\b([ABCDE])\\.",
415
+ "\\b([ABCDE]) ?[.):-]",
416
+ "\\b([ABCDE])$",
417
+ "\\b([ABCDE])\\b"
418
+ ]
419
+ },
420
+ {
421
+ "function": "take_first"
422
+ }
423
+ ],
424
+ "group_by": {
425
+ "column": "exam_id"
426
+ }
427
+ }
428
+ ],
429
+ "should_decontaminate": true,
430
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f43b56213a0>",
431
+ "metadata": {
432
+ "version": 1.0
433
+ }
434
+ },
435
+ "enem_challenge": {
436
+ "task": "enem_challenge",
437
+ "task_alias": "enem",
438
+ "group": [
439
+ "pt_benchmark",
440
+ "vestibular"
441
+ ],
442
+ "dataset_path": "eduagarcia/enem_challenge",
443
+ "test_split": "train",
444
+ "fewshot_split": "train",
445
+ "doc_to_text": "<function enem_doc_to_text at 0x7f43b5621940>",
446
+ "doc_to_target": "{{answerKey}}",
447
+ "description": "As perguntas a seguir são questões de multipla escolha do Exame Nacional do Ensino Médio (ENEM), reponda apenas com as letras A, B, C, D ou E.\n\n",
448
+ "target_delimiter": " ",
449
+ "fewshot_delimiter": "\n\n",
450
+ "fewshot_config": {
451
+ "sampler": "id_sampler",
452
+ "sampler_config": {
453
+ "id_list": [
454
+ "2022_21",
455
+ "2022_88",
456
+ "2022_143"
457
+ ],
458
+ "id_column": "id",
459
+ "exclude_from_task": true
460
+ }
461
+ },
462
+ "num_fewshot": 3,
463
+ "metric_list": [
464
+ {
465
+ "metric": "acc",
466
+ "aggregation": "acc",
467
+ "higher_is_better": true
468
+ }
469
+ ],
470
+ "output_type": "generate_until",
471
+ "generation_kwargs": {
472
+ "max_gen_toks": 32,
473
+ "do_sample": false,
474
+ "temperature": 0.0,
475
+ "top_k": null,
476
+ "top_p": null,
477
+ "until": [
478
+ "\n\n"
479
+ ]
480
+ },
481
+ "repeats": 1,
482
+ "filter_list": [
483
+ {
484
+ "name": "all",
485
+ "filter": [
486
+ {
487
+ "function": "normalize_spaces"
488
+ },
489
+ {
490
+ "function": "remove_accents"
491
+ },
492
+ {
493
+ "function": "find_choices",
494
+ "choices": [
495
+ "A",
496
+ "B",
497
+ "C",
498
+ "D",
499
+ "E"
500
+ ],
501
+ "regex_patterns": [
502
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta[Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
503
+ "\\b([ABCDE])\\.",
504
+ "\\b([ABCDE]) ?[.):-]",
505
+ "\\b([ABCDE])$",
506
+ "\\b([ABCDE])\\b"
507
+ ]
508
+ },
509
+ {
510
+ "function": "take_first"
511
+ }
512
+ ],
513
+ "group_by": {
514
+ "column": "exam_id"
515
+ }
516
+ }
517
+ ],
518
+ "should_decontaminate": true,
519
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f43b5621bc0>",
520
+ "metadata": {
521
+ "version": 1.0
522
+ }
523
+ },
524
+ "faquad_nli": {
525
+ "task": "faquad_nli",
526
+ "group": [
527
+ "pt_benchmark"
528
+ ],
529
+ "dataset_path": "ruanchaves/faquad-nli",
530
+ "test_split": "test",
531
+ "fewshot_split": "train",
532
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta satisfaz a pergunta? Sim ou Não?",
533
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
534
+ "description": "Abaixo contém pares de pergunta e reposta, para cada par você deve julgar resposta responde a pergunta de maneira satisfatória e aparenta estar correta, escreva apenas Sim ou Não.\n\n",
535
+ "target_delimiter": " ",
536
+ "fewshot_delimiter": "\n\n",
537
+ "fewshot_config": {
538
+ "sampler": "first_n",
539
+ "sampler_config": {
540
+ "fewshot_indices": [
541
+ 1893,
542
+ 949,
543
+ 663,
544
+ 105,
545
+ 1169,
546
+ 2910,
547
+ 2227,
548
+ 2813,
549
+ 974,
550
+ 558,
551
+ 1503,
552
+ 1958,
553
+ 2918,
554
+ 601,
555
+ 1560,
556
+ 984,
557
+ 2388,
558
+ 995,
559
+ 2233,
560
+ 1982,
561
+ 165,
562
+ 2788,
563
+ 1312,
564
+ 2285,
565
+ 522,
566
+ 1113,
567
+ 1670,
568
+ 323,
569
+ 236,
570
+ 1263,
571
+ 1562,
572
+ 2519,
573
+ 1049,
574
+ 432,
575
+ 1167,
576
+ 1394,
577
+ 2022,
578
+ 2551,
579
+ 2194,
580
+ 2187,
581
+ 2282,
582
+ 2816,
583
+ 108,
584
+ 301,
585
+ 1185,
586
+ 1315,
587
+ 1420,
588
+ 2436,
589
+ 2322,
590
+ 766
591
+ ]
592
+ }
593
+ },
594
+ "num_fewshot": 15,
595
+ "metric_list": [
596
+ {
597
+ "metric": "f1_macro",
598
+ "aggregation": "f1_macro",
599
+ "higher_is_better": true
600
+ },
601
+ {
602
+ "metric": "acc",
603
+ "aggregation": "acc",
604
+ "higher_is_better": true
605
+ }
606
+ ],
607
+ "output_type": "generate_until",
608
+ "generation_kwargs": {
609
+ "max_gen_toks": 32,
610
+ "do_sample": false,
611
+ "temperature": 0.0,
612
+ "top_k": null,
613
+ "top_p": null,
614
+ "until": [
615
+ "\n\n"
616
+ ]
617
+ },
618
+ "repeats": 1,
619
+ "filter_list": [
620
+ {
621
+ "name": "all",
622
+ "filter": [
623
+ {
624
+ "function": "find_similar_label",
625
+ "labels": [
626
+ "Sim",
627
+ "Não"
628
+ ]
629
+ },
630
+ {
631
+ "function": "take_first"
632
+ }
633
+ ]
634
+ }
635
+ ],
636
+ "should_decontaminate": false,
637
+ "metadata": {
638
+ "version": 1.0
639
+ }
640
+ },
641
+ "oab_exams": {
642
+ "task": "oab_exams",
643
+ "group": [
644
+ "legal_benchmark",
645
+ "pt_benchmark"
646
+ ],
647
+ "dataset_path": "eduagarcia/oab_exams",
648
+ "test_split": "train",
649
+ "fewshot_split": "train",
650
+ "doc_to_text": "<function doc_to_text at 0x7f43b5620ae0>",
651
+ "doc_to_target": "{{answerKey}}",
652
+ "description": "As perguntas a seguir são questões de multipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), reponda apenas com as letras A, B, C ou D.\n\n",
653
+ "target_delimiter": " ",
654
+ "fewshot_delimiter": "\n\n",
655
+ "fewshot_config": {
656
+ "sampler": "id_sampler",
657
+ "sampler_config": {
658
+ "id_list": [
659
+ "2010-01_1",
660
+ "2010-01_11",
661
+ "2010-01_13",
662
+ "2010-01_23",
663
+ "2010-01_26",
664
+ "2010-01_28",
665
+ "2010-01_38",
666
+ "2010-01_48",
667
+ "2010-01_58",
668
+ "2010-01_68",
669
+ "2010-01_76",
670
+ "2010-01_83",
671
+ "2010-01_85",
672
+ "2010-01_91",
673
+ "2010-01_99"
674
+ ],
675
+ "id_column": "id",
676
+ "exclude_from_task": true
677
+ }
678
+ },
679
+ "num_fewshot": 3,
680
+ "metric_list": [
681
+ {
682
+ "metric": "acc",
683
+ "aggregation": "acc",
684
+ "higher_is_better": true
685
+ }
686
+ ],
687
+ "output_type": "generate_until",
688
+ "generation_kwargs": {
689
+ "max_gen_toks": 32,
690
+ "do_sample": false,
691
+ "temperature": 0.0,
692
+ "top_k": null,
693
+ "top_p": null,
694
+ "until": [
695
+ "\n\n"
696
+ ]
697
+ },
698
+ "repeats": 1,
699
+ "filter_list": [
700
+ {
701
+ "name": "all",
702
+ "filter": [
703
+ {
704
+ "function": "normalize_spaces"
705
+ },
706
+ {
707
+ "function": "remove_accents"
708
+ },
709
+ {
710
+ "function": "find_choices",
711
+ "choices": [
712
+ "A",
713
+ "B",
714
+ "C",
715
+ "D"
716
+ ],
717
+ "regex_patterns": [
718
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta[Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
719
+ "\\b([ABCD])\\)",
720
+ "\\b([ABCD]) ?[.):-]",
721
+ "\\b([ABCD])$",
722
+ "\\b([ABCD])\\b"
723
+ ]
724
+ },
725
+ {
726
+ "function": "take_first"
727
+ }
728
+ ],
729
+ "group_by": {
730
+ "column": "exam_id"
731
+ }
732
+ }
733
+ ],
734
+ "should_decontaminate": true,
735
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f43b5620d60>",
736
+ "metadata": {
737
+ "version": 1.4
738
+ }
739
+ },
740
+ "sparrow_emotion-2021-cortiz-por": {
741
+ "task": "sparrow_emotion-2021-cortiz-por",
742
+ "task_alias": "emotion-2021-cortiz-por",
743
+ "group": [
744
+ "pt_benchmark",
745
+ "sparrow"
746
+ ],
747
+ "dataset_path": "UBC-NLP/sparrow",
748
+ "dataset_name": "emotion-2021-cortiz-por",
749
+ "test_split": "validation",
750
+ "fewshot_split": "train",
751
+ "doc_to_text": "Texto: {{content}}\nPergunta: Qual a principal emoção apresentada no texto?\nResposta:",
752
+ "doc_to_target": "<function sparrow_emotion_por_trans_label at 0x7f43b5620fe0>",
753
+ "description": "Abaixo contém o conteúdo de tweets de usuarios do Twitter em português, sua tarefa é extrair qual a principal emoção dos textos. Responda com apenas uma das seguintes opções:\n Admiração, Diversão, Raiva, Aborrecimento, Aprovação, Compaixão, Confusão, Curiosidade, Desejo, Decepção, Desaprovação, Nojo, Vergonha, Inveja, Entusiasmo, Medo, Gratidão, Luto, Alegria, Saudade, Amor, Nervosismo, Otimismo, Orgulho, Alívio, Remorso, Tristeza ou Surpresa.\n\n",
754
+ "target_delimiter": " ",
755
+ "fewshot_delimiter": "\n\n",
756
+ "fewshot_config": {
757
+ "sampler": "first_n"
758
+ },
759
+ "num_fewshot": 15,
760
+ "metric_list": [
761
+ {
762
+ "metric": "f1_macro",
763
+ "aggregation": "f1_macro",
764
+ "higher_is_better": true
765
+ },
766
+ {
767
+ "metric": "acc",
768
+ "aggregation": "acc",
769
+ "higher_is_better": true
770
+ }
771
+ ],
772
+ "output_type": "generate_until",
773
+ "generation_kwargs": {
774
+ "max_gen_toks": 32,
775
+ "do_sample": false,
776
+ "temperature": 0.0,
777
+ "top_k": null,
778
+ "top_p": null,
779
+ "until": [
780
+ "\n\n"
781
+ ]
782
+ },
783
+ "repeats": 1,
784
+ "filter_list": [
785
+ {
786
+ "name": "all",
787
+ "filter": [
788
+ {
789
+ "function": "find_similar_label",
790
+ "labels": [
791
+ "Admiração",
792
+ "Diversão",
793
+ "Raiva",
794
+ "Aborrecimento",
795
+ "Aprovação",
796
+ "Compaixão",
797
+ "Confusão",
798
+ "Curiosidade",
799
+ "Desejo",
800
+ "Decepção",
801
+ "Desaprovação",
802
+ "Nojo",
803
+ " Vergonha",
804
+ "Inveja",
805
+ "Entusiasmo",
806
+ "Medo",
807
+ "Gratidão",
808
+ "Luto",
809
+ "Alegria",
810
+ "Saudade",
811
+ "Amor",
812
+ "Nervosismo",
813
+ "Otimismo",
814
+ "Orgulho",
815
+ "Alívio",
816
+ "Remorso",
817
+ "Tristeza",
818
+ "Surpresa"
819
+ ]
820
+ },
821
+ {
822
+ "function": "take_first"
823
+ }
824
+ ]
825
+ }
826
+ ],
827
+ "should_decontaminate": false,
828
+ "limit": 500,
829
+ "metadata": {
830
+ "version": 1.0
831
+ }
832
+ },
833
+ "sparrow_hate-2019-fortuna-por": {
834
+ "task": "sparrow_hate-2019-fortuna-por",
835
+ "task_alias": "hate-2019-fortuna-por",
836
+ "group": [
837
+ "pt_benchmark",
838
+ "sparrow"
839
+ ],
840
+ "dataset_path": "UBC-NLP/sparrow",
841
+ "dataset_name": "hate-2019-fortuna-por",
842
+ "test_split": "validation",
843
+ "fewshot_split": "train",
844
+ "doc_to_text": "Texto: {{content}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
845
+ "doc_to_target": "{{'Sim' if label == 'Hate' else 'Não'}}",
846
+ "description": "Abaixo contém o conteúdo de tweets de usuarios do Twitter em português, sua tarefa é classificar se o texto contem discurso de ódio our não. Responda apenas com Sim ou Não.\n\n",
847
+ "target_delimiter": " ",
848
+ "fewshot_delimiter": "\n\n",
849
+ "fewshot_config": {
850
+ "sampler": "first_n"
851
+ },
852
+ "num_fewshot": 15,
853
+ "metric_list": [
854
+ {
855
+ "metric": "f1_macro",
856
+ "aggregation": "f1_macro",
857
+ "higher_is_better": true
858
+ },
859
+ {
860
+ "metric": "acc",
861
+ "aggregation": "acc",
862
+ "higher_is_better": true
863
+ }
864
+ ],
865
+ "output_type": "generate_until",
866
+ "generation_kwargs": {
867
+ "max_gen_toks": 32,
868
+ "do_sample": false,
869
+ "temperature": 0.0,
870
+ "top_k": null,
871
+ "top_p": null,
872
+ "until": [
873
+ "\n\n"
874
+ ]
875
+ },
876
+ "repeats": 1,
877
+ "filter_list": [
878
+ {
879
+ "name": "all",
880
+ "filter": [
881
+ {
882
+ "function": "find_similar_label",
883
+ "labels": [
884
+ "Sim",
885
+ "Não"
886
+ ]
887
+ },
888
+ {
889
+ "function": "take_first"
890
+ }
891
+ ]
892
+ }
893
+ ],
894
+ "should_decontaminate": false,
895
+ "limit": 500,
896
+ "metadata": {
897
+ "version": 1.0
898
+ }
899
+ },
900
+ "sparrow_sentiment-2016-mozetic-por": {
901
+ "task": "sparrow_sentiment-2016-mozetic-por",
902
+ "task_alias": "sentiment-2016-mozetic-por",
903
+ "group": [
904
+ "pt_benchmark",
905
+ "sparrow"
906
+ ],
907
+ "dataset_path": "UBC-NLP/sparrow",
908
+ "dataset_name": "sentiment-2016-mozetic-por",
909
+ "test_split": "validation",
910
+ "fewshot_split": "train",
911
+ "doc_to_text": "Texto: {{content}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
912
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
913
+ "description": "Abaixo contém o conteúdo de tweets de usuarios do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
914
+ "target_delimiter": " ",
915
+ "fewshot_delimiter": "\n\n",
916
+ "fewshot_config": {
917
+ "sampler": "first_n"
918
+ },
919
+ "num_fewshot": 15,
920
+ "metric_list": [
921
+ {
922
+ "metric": "f1_macro",
923
+ "aggregation": "f1_macro",
924
+ "higher_is_better": true
925
+ },
926
+ {
927
+ "metric": "acc",
928
+ "aggregation": "acc",
929
+ "higher_is_better": true
930
+ }
931
+ ],
932
+ "output_type": "generate_until",
933
+ "generation_kwargs": {
934
+ "max_gen_toks": 32,
935
+ "do_sample": false,
936
+ "temperature": 0.0,
937
+ "top_k": null,
938
+ "top_p": null,
939
+ "until": [
940
+ "\n\n"
941
+ ]
942
+ },
943
+ "repeats": 1,
944
+ "filter_list": [
945
+ {
946
+ "name": "all",
947
+ "filter": [
948
+ {
949
+ "function": "find_similar_label",
950
+ "labels": [
951
+ "Positivo",
952
+ "Neutro",
953
+ "Negativo"
954
+ ]
955
+ },
956
+ {
957
+ "function": "take_first"
958
+ }
959
+ ]
960
+ }
961
+ ],
962
+ "should_decontaminate": false,
963
+ "limit": 500,
964
+ "metadata": {
965
+ "version": 1.0
966
+ }
967
+ },
968
+ "sparrow_sentiment-2018-brum-por": {
969
+ "task": "sparrow_sentiment-2018-brum-por",
970
+ "task_alias": "sentiment-2018-brum-por",
971
+ "group": [
972
+ "pt_benchmark",
973
+ "sparrow"
974
+ ],
975
+ "dataset_path": "UBC-NLP/sparrow",
976
+ "dataset_name": "sentiment-2018-brum-por",
977
+ "test_split": "validation",
978
+ "fewshot_split": "train",
979
+ "doc_to_text": "Texto: {{content}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
980
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
981
+ "description": "Abaixo contém o conteúdo de tweets de usuarios do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
982
+ "target_delimiter": " ",
983
+ "fewshot_delimiter": "\n\n",
984
+ "fewshot_config": {
985
+ "sampler": "first_n"
986
+ },
987
+ "num_fewshot": 15,
988
+ "metric_list": [
989
+ {
990
+ "metric": "f1_macro",
991
+ "aggregation": "f1_macro",
992
+ "higher_is_better": true
993
+ },
994
+ {
995
+ "metric": "acc",
996
+ "aggregation": "acc",
997
+ "higher_is_better": true
998
+ }
999
+ ],
1000
+ "output_type": "generate_until",
1001
+ "generation_kwargs": {
1002
+ "max_gen_toks": 32,
1003
+ "do_sample": false,
1004
+ "temperature": 0.0,
1005
+ "top_k": null,
1006
+ "top_p": null,
1007
+ "until": [
1008
+ "\n\n"
1009
+ ]
1010
+ },
1011
+ "repeats": 1,
1012
+ "filter_list": [
1013
+ {
1014
+ "name": "all",
1015
+ "filter": [
1016
+ {
1017
+ "function": "find_similar_label",
1018
+ "labels": [
1019
+ "Positivo",
1020
+ "Neutro",
1021
+ "Negativo"
1022
+ ]
1023
+ },
1024
+ {
1025
+ "function": "take_first"
1026
+ }
1027
+ ]
1028
+ }
1029
+ ],
1030
+ "should_decontaminate": false,
1031
+ "limit": 500,
1032
+ "metadata": {
1033
+ "version": 1.0
1034
+ }
1035
+ }
1036
+ },
1037
+ "versions": {
1038
+ "assin2_rte": 1.0,
1039
+ "assin2_sts": 1.0,
1040
+ "bluex": 1.0,
1041
+ "enem_challenge": 1.0,
1042
+ "faquad_nli": 1.0,
1043
+ "oab_exams": 1.4,
1044
+ "sparrow_emotion-2021-cortiz-por": 1.0,
1045
+ "sparrow_hate-2019-fortuna-por": 1.0,
1046
+ "sparrow_sentiment-2016-mozetic-por": 1.0,
1047
+ "sparrow_sentiment-2018-brum-por": 1.0
1048
+ },
1049
+ "n-shot": {
1050
+ "assin2_rte": 15,
1051
+ "assin2_sts": 15,
1052
+ "bluex": 3,
1053
+ "enem_challenge": 3,
1054
+ "faquad_nli": 15,
1055
+ "oab_exams": 3,
1056
+ "sparrow_emotion-2021-cortiz-por": 15,
1057
+ "sparrow_hate-2019-fortuna-por": 15,
1058
+ "sparrow_sentiment-2016-mozetic-por": 15,
1059
+ "sparrow_sentiment-2018-brum-por": 15
1060
+ },
1061
+ "model_meta": {
1062
+ "truncated": 16,
1063
+ "non_truncated": 11873,
1064
+ "padded": 0,
1065
+ "non_padded": 11889,
1066
+ "fewshots_truncated": 18,
1067
+ "has_chat_template": false,
1068
+ "chat_type": null,
1069
+ "n_gpus": 1,
1070
+ "accelerate_num_process": null,
1071
+ "model_sha": "8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16",
1072
+ "model_dtype": "torch.float16",
1073
+ "model_memory_footprint": 13510393856,
1074
+ "model_num_parameters": 6738415616,
1075
+ "model_is_loaded_in_4bit": false,
1076
+ "model_is_loaded_in_8bit": false,
1077
+ "model_is_quantized": null,
1078
+ "model_device": "cuda:2",
1079
+ "batch_size": 32,
1080
+ "max_length": 2048,
1081
+ "max_ctx_length": 2016,
1082
+ "max_gen_toks": 32
1083
+ },
1084
+ "task_model_meta": {
1085
+ "assin2_rte": {
1086
+ "sample_size": 2448,
1087
+ "truncated": 0,
1088
+ "non_truncated": 2448,
1089
+ "padded": 0,
1090
+ "non_padded": 2448,
1091
+ "fewshots_truncated": 0,
1092
+ "mean_seq_length": 1182.9889705882354,
1093
+ "min_seq_length": 1160,
1094
+ "max_seq_length": 1249,
1095
+ "max_ctx_length": 2016,
1096
+ "max_gen_toks": 32,
1097
+ "mean_original_fewshots_size": 15.0,
1098
+ "mean_effective_fewshot_size": 15.0
1099
+ },
1100
+ "assin2_sts": {
1101
+ "sample_size": 2448,
1102
+ "truncated": 0,
1103
+ "non_truncated": 2448,
1104
+ "padded": 0,
1105
+ "non_padded": 2448,
1106
+ "fewshots_truncated": 0,
1107
+ "mean_seq_length": 1415.9889705882354,
1108
+ "min_seq_length": 1393,
1109
+ "max_seq_length": 1482,
1110
+ "max_ctx_length": 2016,
1111
+ "max_gen_toks": 32,
1112
+ "mean_original_fewshots_size": 15.0,
1113
+ "mean_effective_fewshot_size": 15.0
1114
+ },
1115
+ "bluex": {
1116
+ "sample_size": 719,
1117
+ "truncated": 4,
1118
+ "non_truncated": 715,
1119
+ "padded": 0,
1120
+ "non_padded": 719,
1121
+ "fewshots_truncated": 4,
1122
+ "mean_seq_length": 1364.7426981919332,
1123
+ "min_seq_length": 998,
1124
+ "max_seq_length": 2124,
1125
+ "max_ctx_length": 2016,
1126
+ "max_gen_toks": 32,
1127
+ "mean_original_fewshots_size": 3.0,
1128
+ "mean_effective_fewshot_size": 2.9944367176634215
1129
+ },
1130
+ "enem_challenge": {
1131
+ "sample_size": 1429,
1132
+ "truncated": 12,
1133
+ "non_truncated": 1417,
1134
+ "padded": 0,
1135
+ "non_padded": 1429,
1136
+ "fewshots_truncated": 14,
1137
+ "mean_seq_length": 1532.9881035689293,
1138
+ "min_seq_length": 1280,
1139
+ "max_seq_length": 2572,
1140
+ "max_ctx_length": 2016,
1141
+ "max_gen_toks": 32,
1142
+ "mean_original_fewshots_size": 3.0,
1143
+ "mean_effective_fewshot_size": 2.9902029391182645
1144
+ },
1145
+ "faquad_nli": {
1146
+ "sample_size": 650,
1147
+ "truncated": 0,
1148
+ "non_truncated": 650,
1149
+ "padded": 0,
1150
+ "non_padded": 650,
1151
+ "fewshots_truncated": 0,
1152
+ "mean_seq_length": 1473.1184615384616,
1153
+ "min_seq_length": 1421,
1154
+ "max_seq_length": 1580,
1155
+ "max_ctx_length": 2016,
1156
+ "max_gen_toks": 32,
1157
+ "mean_original_fewshots_size": 15.0,
1158
+ "mean_effective_fewshot_size": 15.0
1159
+ },
1160
+ "oab_exams": {
1161
+ "sample_size": 2195,
1162
+ "truncated": 0,
1163
+ "non_truncated": 2195,
1164
+ "padded": 0,
1165
+ "non_padded": 2195,
1166
+ "fewshots_truncated": 0,
1167
+ "mean_seq_length": 1306.4145785876992,
1168
+ "min_seq_length": 1051,
1169
+ "max_seq_length": 1788,
1170
+ "max_ctx_length": 2016,
1171
+ "max_gen_toks": 32,
1172
+ "mean_original_fewshots_size": 3.0,
1173
+ "mean_effective_fewshot_size": 3.0
1174
+ },
1175
+ "sparrow_emotion-2021-cortiz-por": {
1176
+ "sample_size": 500,
1177
+ "truncated": 0,
1178
+ "non_truncated": 500,
1179
+ "padded": 0,
1180
+ "non_padded": 500,
1181
+ "fewshots_truncated": 0,
1182
+ "mean_seq_length": 1132.204,
1183
+ "min_seq_length": 1110,
1184
+ "max_seq_length": 1180,
1185
+ "max_ctx_length": 2016,
1186
+ "max_gen_toks": 32,
1187
+ "mean_original_fewshots_size": 15.0,
1188
+ "mean_effective_fewshot_size": 15.0
1189
+ },
1190
+ "sparrow_hate-2019-fortuna-por": {
1191
+ "sample_size": 500,
1192
+ "truncated": 0,
1193
+ "non_truncated": 500,
1194
+ "padded": 0,
1195
+ "non_padded": 500,
1196
+ "fewshots_truncated": 0,
1197
+ "mean_seq_length": 1044.588,
1198
+ "min_seq_length": 1020,
1199
+ "max_seq_length": 1103,
1200
+ "max_ctx_length": 2016,
1201
+ "max_gen_toks": 32,
1202
+ "mean_original_fewshots_size": 15.0,
1203
+ "mean_effective_fewshot_size": 15.0
1204
+ },
1205
+ "sparrow_sentiment-2016-mozetic-por": {
1206
+ "sample_size": 500,
1207
+ "truncated": 0,
1208
+ "non_truncated": 500,
1209
+ "padded": 0,
1210
+ "non_padded": 500,
1211
+ "fewshots_truncated": 0,
1212
+ "mean_seq_length": 916.44,
1213
+ "min_seq_length": 899,
1214
+ "max_seq_length": 952,
1215
+ "max_ctx_length": 2016,
1216
+ "max_gen_toks": 32,
1217
+ "mean_original_fewshots_size": 15.0,
1218
+ "mean_effective_fewshot_size": 15.0
1219
+ },
1220
+ "sparrow_sentiment-2018-brum-por": {
1221
+ "sample_size": 500,
1222
+ "truncated": 0,
1223
+ "non_truncated": 500,
1224
+ "padded": 0,
1225
+ "non_padded": 500,
1226
+ "fewshots_truncated": 0,
1227
+ "mean_seq_length": 1011.854,
1228
+ "min_seq_length": 994,
1229
+ "max_seq_length": 1043,
1230
+ "max_ctx_length": 2016,
1231
+ "max_gen_toks": 32,
1232
+ "mean_original_fewshots_size": 15.0,
1233
+ "mean_effective_fewshot_size": 15.0
1234
+ }
1235
+ },
1236
+ "config": {
1237
+ "model": "huggingface",
1238
+ "model_args": "pretrained=huggyllama/llama-7b,dtype=float16,device=cuda:2,revision=main,trust_remote_code=True,starting_max_length=4096",
1239
+ "batch_size": "auto",
1240
+ "batch_sizes": [],
1241
+ "device": null,
1242
+ "use_cache": null,
1243
+ "limit": [
1244
+ null,
1245
+ null,
1246
+ null,
1247
+ null,
1248
+ null,
1249
+ null,
1250
+ 500.0,
1251
+ 500.0,
1252
+ 500.0,
1253
+ 500.0
1254
+ ],
1255
+ "bootstrap_iters": 0,
1256
+ "gen_kwargs": null
1257
+ },
1258
+ "git_hash": "637ac6b"
1259
+ }
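
For quick inspection, the aggregated scores in the file above can be read back with a short script. This is a minimal sketch (assumed usage, not part of the upload); the path is the one added in this commit, and the "<metric>,all" keys are the ones shown in the results block:

import json

# Path of the raw results file added in this commit.
path = "huggyllama/llama-7b/raw_2024-02-05T23-45-55.633251/results.json"

with open(path, encoding="utf-8") as f:
    data = json.load(f)

# Each task under "results" carries one or more "<metric>,all" aggregates
# (e.g. "acc,all", "f1_macro,all") plus a display alias.
for task, scores in data["results"].items():
    headline = {k: v for k, v in scores.items() if k.endswith(",all")}
    print(task, headline)
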
huggyllama/llama-7b/raw_2024-02-06T03-05-10.639848/results.json ADDED
@@ -0,0 +1,1255 @@
1
+ {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.3333333333333333,
5
+ "acc,all": 0.5,
6
+ "alias": "assin2_rte"
7
+ },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.07703722378109175,
10
+ "mse,all": 2.5286274509803923,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.2614742698191933,
15
+ "acc,exam_id__USP_2024": 0.14634146341463414,
16
+ "acc,exam_id__UNICAMP_2020": 0.18181818181818182,
17
+ "acc,exam_id__UNICAMP_2019": 0.2,
18
+ "acc,exam_id__UNICAMP_2023": 0.4883720930232558,
19
+ "acc,exam_id__UNICAMP_2018": 0.2777777777777778,
20
+ "acc,exam_id__UNICAMP_2024": 0.26666666666666666,
21
+ "acc,exam_id__USP_2022": 0.24489795918367346,
22
+ "acc,exam_id__UNICAMP_2022": 0.3076923076923077,
23
+ "acc,exam_id__UNICAMP_2021_1": 0.32608695652173914,
24
+ "acc,exam_id__USP_2023": 0.18181818181818182,
25
+ "acc,exam_id__UNICAMP_2021_2": 0.29411764705882354,
26
+ "acc,exam_id__USP_2018": 0.18518518518518517,
27
+ "acc,exam_id__USP_2019": 0.325,
28
+ "acc,exam_id__USP_2021": 0.23076923076923078,
29
+ "acc,exam_id__USP_2020": 0.30357142857142855,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.2365290412876137,
35
+ "acc,exam_id__2016_2": 0.22764227642276422,
36
+ "acc,exam_id__2023": 0.25925925925925924,
37
+ "acc,exam_id__2011": 0.2222222222222222,
38
+ "acc,exam_id__2015": 0.19327731092436976,
39
+ "acc,exam_id__2010": 0.1794871794871795,
40
+ "acc,exam_id__2012": 0.33620689655172414,
41
+ "acc,exam_id__2009": 0.2782608695652174,
42
+ "acc,exam_id__2022": 0.24812030075187969,
43
+ "acc,exam_id__2014": 0.1926605504587156,
44
+ "acc,exam_id__2017": 0.2413793103448276,
45
+ "acc,exam_id__2016": 0.23140495867768596,
46
+ "acc,exam_id__2013": 0.2222222222222222
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.5548892542799111,
50
+ "acc,all": 0.7276923076923076,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "oab_exams": {
54
+ "acc,all": 0.2742596810933941,
55
+ "acc,exam_id__2017-23": 0.325,
56
+ "acc,exam_id__2013-10": 0.2125,
57
+ "acc,exam_id__2016-20a": 0.25,
58
+ "acc,exam_id__2016-20": 0.2375,
59
+ "acc,exam_id__2014-14": 0.2375,
60
+ "acc,exam_id__2010-02": 0.34,
61
+ "acc,exam_id__2014-15": 0.3333333333333333,
62
+ "acc,exam_id__2010-01": 0.27058823529411763,
63
+ "acc,exam_id__2011-03": 0.2727272727272727,
64
+ "acc,exam_id__2016-19": 0.3076923076923077,
65
+ "acc,exam_id__2016-21": 0.3,
66
+ "acc,exam_id__2018-25": 0.3,
67
+ "acc,exam_id__2013-12": 0.2,
68
+ "acc,exam_id__2011-04": 0.3125,
69
+ "acc,exam_id__2011-05": 0.275,
70
+ "acc,exam_id__2012-07": 0.25,
71
+ "acc,exam_id__2012-06": 0.2875,
72
+ "acc,exam_id__2012-09": 0.15584415584415584,
73
+ "acc,exam_id__2013-11": 0.3,
74
+ "acc,exam_id__2012-06a": 0.2,
75
+ "acc,exam_id__2012-08": 0.2875,
76
+ "acc,exam_id__2015-18": 0.275,
77
+ "acc,exam_id__2017-22": 0.3,
78
+ "acc,exam_id__2015-17": 0.3333333333333333,
79
+ "acc,exam_id__2017-24": 0.2875,
80
+ "acc,exam_id__2014-13": 0.275,
81
+ "acc,exam_id__2015-16": 0.2625,
82
+ "alias": "oab_exams"
83
+ },
84
+ "sparrow_emotion-2021-cortiz-por": {
85
+ "alias": "emotion-2021-cortiz-por",
86
+ "f1_macro,all": 0.06400860793605274,
87
+ "acc,all": 0.166
88
+ },
89
+ "sparrow_hate-2019-fortuna-por": {
90
+ "alias": "hate-2019-fortuna-por",
91
+ "f1_macro,all": 0.4300911854103343,
92
+ "acc,all": 0.652
93
+ },
94
+ "sparrow_sentiment-2016-mozetic-por": {
95
+ "alias": "sentiment-2016-mozetic-por",
96
+ "f1_macro,all": 0.4465537433018192,
97
+ "acc,all": 0.6
98
+ },
99
+ "sparrow_sentiment-2018-brum-por": {
100
+ "alias": "sentiment-2018-brum-por",
101
+ "f1_macro,all": 0.30253419320943437,
102
+ "acc,all": 0.376
103
+ }
104
+ },
105
+ "configs": {
106
+ "assin2_rte": {
107
+ "task": "assin2_rte",
108
+ "group": [
109
+ "pt_benchmark",
110
+ "assin2"
111
+ ],
112
+ "dataset_path": "assin2",
113
+ "test_split": "test",
114
+ "fewshot_split": "train",
115
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa?\nResposta:",
116
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
117
+ "description": "Abaixo contém pares de premissa e hipótese, para cada par você deve julgar se a hipótese pode ser inferida a partir da premissa, responda apenas com Sim ou Não.\n\n",
118
+ "target_delimiter": " ",
119
+ "fewshot_delimiter": "\n\n",
120
+ "fewshot_config": {
121
+ "sampler": "id_sampler",
122
+ "sampler_config": {
123
+ "id_list": [
124
+ 1,
125
+ 3251,
126
+ 2,
127
+ 3252,
128
+ 3,
129
+ 4,
130
+ 5,
131
+ 6,
132
+ 3253,
133
+ 7,
134
+ 3254,
135
+ 3255,
136
+ 3256,
137
+ 8,
138
+ 9,
139
+ 10,
140
+ 3257,
141
+ 11,
142
+ 3258,
143
+ 12,
144
+ 13,
145
+ 14,
146
+ 15,
147
+ 3259,
148
+ 3260,
149
+ 3261,
150
+ 3262,
151
+ 3263,
152
+ 16,
153
+ 17,
154
+ 3264,
155
+ 18,
156
+ 3265,
157
+ 3266,
158
+ 3267,
159
+ 19,
160
+ 20,
161
+ 3268,
162
+ 3269,
163
+ 21,
164
+ 3270,
165
+ 3271,
166
+ 22,
167
+ 3272,
168
+ 3273,
169
+ 23,
170
+ 3274,
171
+ 24,
172
+ 25,
173
+ 3275
174
+ ],
175
+ "id_column": "sentence_pair_id"
176
+ }
177
+ },
178
+ "num_fewshot": 15,
179
+ "metric_list": [
180
+ {
181
+ "metric": "f1_macro",
182
+ "aggregation": "f1_macro",
183
+ "higher_is_better": true
184
+ },
185
+ {
186
+ "metric": "acc",
187
+ "aggregation": "acc",
188
+ "higher_is_better": true
189
+ }
190
+ ],
191
+ "output_type": "generate_until",
192
+ "generation_kwargs": {
193
+ "max_gen_toks": 32,
194
+ "do_sample": false,
195
+ "temperature": 0.0,
196
+ "top_k": null,
197
+ "top_p": null,
198
+ "until": [
199
+ "\n\n"
200
+ ]
201
+ },
202
+ "repeats": 1,
203
+ "filter_list": [
204
+ {
205
+ "name": "all",
206
+ "filter": [
207
+ {
208
+ "function": "find_similar_label",
209
+ "labels": [
210
+ "Sim",
211
+ "Não"
212
+ ]
213
+ },
214
+ {
215
+ "function": "take_first"
216
+ }
217
+ ]
218
+ }
219
+ ],
220
+ "should_decontaminate": false,
221
+ "metadata": {
222
+ "version": 1.0
223
+ }
224
+ },
225
+ "assin2_sts": {
226
+ "task": "assin2_sts",
227
+ "group": [
228
+ "pt_benchmark",
229
+ "assin2"
230
+ ],
231
+ "dataset_path": "assin2",
232
+ "test_split": "test",
233
+ "fewshot_split": "train",
234
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Qual o grau de similaridade entre as duas frases de 1,0 a 5,0?\nResposta:",
235
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7fa487821760>",
236
+ "description": "Abaixo contém pares de frases, para cada par você deve julgar o grau de similaridade de 1,0 a 5,0, responda apenas com o número.\n\n",
237
+ "target_delimiter": " ",
238
+ "fewshot_delimiter": "\n\n",
239
+ "fewshot_config": {
240
+ "sampler": "id_sampler",
241
+ "sampler_config": {
242
+ "id_list": [
243
+ 1,
244
+ 3251,
245
+ 2,
246
+ 3252,
247
+ 3,
248
+ 4,
249
+ 5,
250
+ 6,
251
+ 3253,
252
+ 7,
253
+ 3254,
254
+ 3255,
255
+ 3256,
256
+ 8,
257
+ 9,
258
+ 10,
259
+ 3257,
260
+ 11,
261
+ 3258,
262
+ 12,
263
+ 13,
264
+ 14,
265
+ 15,
266
+ 3259,
267
+ 3260,
268
+ 3261,
269
+ 3262,
270
+ 3263,
271
+ 16,
272
+ 17,
273
+ 3264,
274
+ 18,
275
+ 3265,
276
+ 3266,
277
+ 3267,
278
+ 19,
279
+ 20,
280
+ 3268,
281
+ 3269,
282
+ 21,
283
+ 3270,
284
+ 3271,
285
+ 22,
286
+ 3272,
287
+ 3273,
288
+ 23,
289
+ 3274,
290
+ 24,
291
+ 25,
292
+ 3275
293
+ ],
294
+ "id_column": "sentence_pair_id"
295
+ }
296
+ },
297
+ "num_fewshot": 15,
298
+ "metric_list": [
299
+ {
300
+ "metric": "pearson",
301
+ "aggregation": "pearsonr",
302
+ "higher_is_better": true
303
+ },
304
+ {
305
+ "metric": "mse",
306
+ "aggregation": "mean_squared_error",
307
+ "higher_is_better": false
308
+ }
309
+ ],
310
+ "output_type": "generate_until",
311
+ "generation_kwargs": {
312
+ "max_gen_toks": 32,
313
+ "do_sample": false,
314
+ "temperature": 0.0,
315
+ "top_k": null,
316
+ "top_p": null,
317
+ "until": [
318
+ "\n\n"
319
+ ]
320
+ },
321
+ "repeats": 1,
322
+ "filter_list": [
323
+ {
324
+ "name": "all",
325
+ "filter": [
326
+ {
327
+ "function": "number_filter",
328
+ "type": "float",
329
+ "range_min": 1.0,
330
+ "range_max": 5.0,
331
+ "on_outside_range": "clip",
332
+ "fallback": 5.0
333
+ },
334
+ {
335
+ "function": "take_first"
336
+ }
337
+ ]
338
+ }
339
+ ],
340
+ "should_decontaminate": false,
341
+ "metadata": {
342
+ "version": 1.0
343
+ }
344
+ },
345
+ "bluex": {
346
+ "task": "bluex",
347
+ "group": [
348
+ "pt_benchmark",
349
+ "vestibular"
350
+ ],
351
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
352
+ "test_split": "train",
353
+ "fewshot_split": "train",
354
+ "doc_to_text": "<function enem_doc_to_text at 0x7fa487821120>",
355
+ "doc_to_target": "{{answerKey}}",
356
+ "description": "As perguntas a seguir são questões de multipla escolha de provas de vestibular de Universidades Brasileiras, reponda apenas com as letras A, B, C, D ou E.\n\n",
357
+ "target_delimiter": " ",
358
+ "fewshot_delimiter": "\n\n",
359
+ "fewshot_config": {
360
+ "sampler": "id_sampler",
361
+ "sampler_config": {
362
+ "id_list": [
363
+ "USP_2018_3",
364
+ "UNICAMP_2018_2",
365
+ "USP_2018_35",
366
+ "UNICAMP_2018_16",
367
+ "USP_2018_89"
368
+ ],
369
+ "id_column": "id",
370
+ "exclude_from_task": true
371
+ }
372
+ },
373
+ "num_fewshot": 3,
374
+ "metric_list": [
375
+ {
376
+ "metric": "acc",
377
+ "aggregation": "acc",
378
+ "higher_is_better": true
379
+ }
380
+ ],
381
+ "output_type": "generate_until",
382
+ "generation_kwargs": {
383
+ "max_gen_toks": 32,
384
+ "do_sample": false,
385
+ "temperature": 0.0,
386
+ "top_k": null,
387
+ "top_p": null,
388
+ "until": [
389
+ "\n\n"
390
+ ]
391
+ },
392
+ "repeats": 1,
393
+ "filter_list": [
394
+ {
395
+ "name": "all",
396
+ "filter": [
397
+ {
398
+ "function": "normalize_spaces"
399
+ },
400
+ {
401
+ "function": "remove_accents"
402
+ },
403
+ {
404
+ "function": "find_choices",
405
+ "choices": [
406
+ "A",
407
+ "B",
408
+ "C",
409
+ "D",
410
+ "E"
411
+ ],
412
+ "regex_patterns": [
413
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta[Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
414
+ "\\b([ABCDE])\\.",
415
+ "\\b([ABCDE]) ?[.):-]",
416
+ "\\b([ABCDE])$",
417
+ "\\b([ABCDE])\\b"
418
+ ]
419
+ },
420
+ {
421
+ "function": "take_first"
422
+ }
423
+ ],
424
+ "group_by": {
425
+ "column": "exam_id"
426
+ }
427
+ }
428
+ ],
429
+ "should_decontaminate": true,
430
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fa4878213a0>",
431
+ "metadata": {
432
+ "version": 1.0
433
+ }
434
+ },
435
+ "enem_challenge": {
436
+ "task": "enem_challenge",
437
+ "task_alias": "enem",
438
+ "group": [
439
+ "pt_benchmark",
440
+ "vestibular"
441
+ ],
442
+ "dataset_path": "eduagarcia/enem_challenge",
443
+ "test_split": "train",
444
+ "fewshot_split": "train",
445
+ "doc_to_text": "<function enem_doc_to_text at 0x7fa487821940>",
446
+ "doc_to_target": "{{answerKey}}",
447
+ "description": "As perguntas a seguir são questões de multipla escolha do Exame Nacional do Ensino Médio (ENEM), reponda apenas com as letras A, B, C, D ou E.\n\n",
448
+ "target_delimiter": " ",
449
+ "fewshot_delimiter": "\n\n",
450
+ "fewshot_config": {
451
+ "sampler": "id_sampler",
452
+ "sampler_config": {
453
+ "id_list": [
454
+ "2022_21",
455
+ "2022_88",
456
+ "2022_143"
457
+ ],
458
+ "id_column": "id",
459
+ "exclude_from_task": true
460
+ }
461
+ },
462
+ "num_fewshot": 3,
463
+ "metric_list": [
464
+ {
465
+ "metric": "acc",
466
+ "aggregation": "acc",
467
+ "higher_is_better": true
468
+ }
469
+ ],
470
+ "output_type": "generate_until",
471
+ "generation_kwargs": {
472
+ "max_gen_toks": 32,
473
+ "do_sample": false,
474
+ "temperature": 0.0,
475
+ "top_k": null,
476
+ "top_p": null,
477
+ "until": [
478
+ "\n\n"
479
+ ]
480
+ },
481
+ "repeats": 1,
482
+ "filter_list": [
483
+ {
484
+ "name": "all",
485
+ "filter": [
486
+ {
487
+ "function": "normalize_spaces"
488
+ },
489
+ {
490
+ "function": "remove_accents"
491
+ },
492
+ {
493
+ "function": "find_choices",
494
+ "choices": [
495
+ "A",
496
+ "B",
497
+ "C",
498
+ "D",
499
+ "E"
500
+ ],
501
+ "regex_patterns": [
502
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta[Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
503
+ "\\b([ABCDE])\\.",
504
+ "\\b([ABCDE]) ?[.):-]",
505
+ "\\b([ABCDE])$",
506
+ "\\b([ABCDE])\\b"
507
+ ]
508
+ },
509
+ {
510
+ "function": "take_first"
511
+ }
512
+ ],
513
+ "group_by": {
514
+ "column": "exam_id"
515
+ }
516
+ }
517
+ ],
518
+ "should_decontaminate": true,
519
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fa487821bc0>",
520
+ "metadata": {
521
+ "version": 1.0
522
+ }
523
+ },
524
+ "faquad_nli": {
525
+ "task": "faquad_nli",
526
+ "group": [
527
+ "pt_benchmark"
528
+ ],
529
+ "dataset_path": "ruanchaves/faquad-nli",
530
+ "test_split": "test",
531
+ "fewshot_split": "train",
532
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta satisfaz a pergunta? Sim ou Não?",
533
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
534
+ "description": "Abaixo contém pares de pergunta e reposta, para cada par você deve julgar resposta responde a pergunta de maneira satisfatória e aparenta estar correta, escreva apenas Sim ou Não.\n\n",
535
+ "target_delimiter": " ",
536
+ "fewshot_delimiter": "\n\n",
537
+ "fewshot_config": {
538
+ "sampler": "first_n",
539
+ "sampler_config": {
540
+ "fewshot_indices": [
541
+ 1893,
542
+ 949,
543
+ 663,
544
+ 105,
545
+ 1169,
546
+ 2910,
547
+ 2227,
548
+ 2813,
549
+ 974,
550
+ 558,
551
+ 1503,
552
+ 1958,
553
+ 2918,
554
+ 601,
555
+ 1560,
556
+ 984,
557
+ 2388,
558
+ 995,
559
+ 2233,
560
+ 1982,
561
+ 165,
562
+ 2788,
563
+ 1312,
564
+ 2285,
565
+ 522,
566
+ 1113,
567
+ 1670,
568
+ 323,
569
+ 236,
570
+ 1263,
571
+ 1562,
572
+ 2519,
573
+ 1049,
574
+ 432,
575
+ 1167,
576
+ 1394,
577
+ 2022,
578
+ 2551,
579
+ 2194,
580
+ 2187,
581
+ 2282,
582
+ 2816,
583
+ 108,
584
+ 301,
585
+ 1185,
586
+ 1315,
587
+ 1420,
588
+ 2436,
589
+ 2322,
590
+ 766
591
+ ]
592
+ }
593
+ },
594
+ "num_fewshot": 15,
595
+ "metric_list": [
596
+ {
597
+ "metric": "f1_macro",
598
+ "aggregation": "f1_macro",
599
+ "higher_is_better": true
600
+ },
601
+ {
602
+ "metric": "acc",
603
+ "aggregation": "acc",
604
+ "higher_is_better": true
605
+ }
606
+ ],
607
+ "output_type": "generate_until",
608
+ "generation_kwargs": {
609
+ "max_gen_toks": 32,
610
+ "do_sample": false,
611
+ "temperature": 0.0,
612
+ "top_k": null,
613
+ "top_p": null,
614
+ "until": [
615
+ "\n\n"
616
+ ]
617
+ },
618
+ "repeats": 1,
619
+ "filter_list": [
620
+ {
621
+ "name": "all",
622
+ "filter": [
623
+ {
624
+ "function": "find_similar_label",
625
+ "labels": [
626
+ "Sim",
627
+ "Não"
628
+ ]
629
+ },
630
+ {
631
+ "function": "take_first"
632
+ }
633
+ ]
634
+ }
635
+ ],
636
+ "should_decontaminate": false,
637
+ "metadata": {
638
+ "version": 1.0
639
+ }
640
+ },
641
+ "oab_exams": {
642
+ "task": "oab_exams",
643
+ "group": [
644
+ "legal_benchmark",
645
+ "pt_benchmark"
646
+ ],
647
+ "dataset_path": "eduagarcia/oab_exams",
648
+ "test_split": "train",
649
+ "fewshot_split": "train",
650
+ "doc_to_text": "<function doc_to_text at 0x7fa487820ae0>",
651
+ "doc_to_target": "{{answerKey}}",
652
+ "description": "As perguntas a seguir são questões de multipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), reponda apenas com as letras A, B, C ou D.\n\n",
653
+ "target_delimiter": " ",
654
+ "fewshot_delimiter": "\n\n",
655
+ "fewshot_config": {
656
+ "sampler": "id_sampler",
657
+ "sampler_config": {
658
+ "id_list": [
659
+ "2010-01_1",
660
+ "2010-01_11",
661
+ "2010-01_13",
662
+ "2010-01_23",
663
+ "2010-01_26",
664
+ "2010-01_28",
665
+ "2010-01_38",
666
+ "2010-01_48",
667
+ "2010-01_58",
668
+ "2010-01_68",
669
+ "2010-01_76",
670
+ "2010-01_83",
671
+ "2010-01_85",
672
+ "2010-01_91",
673
+ "2010-01_99"
674
+ ],
675
+ "id_column": "id",
676
+ "exclude_from_task": true
677
+ }
678
+ },
679
+ "num_fewshot": 3,
680
+ "metric_list": [
681
+ {
682
+ "metric": "acc",
683
+ "aggregation": "acc",
684
+ "higher_is_better": true
685
+ }
686
+ ],
687
+ "output_type": "generate_until",
688
+ "generation_kwargs": {
689
+ "max_gen_toks": 32,
690
+ "do_sample": false,
691
+ "temperature": 0.0,
692
+ "top_k": null,
693
+ "top_p": null,
694
+ "until": [
695
+ "\n\n"
696
+ ]
697
+ },
698
+ "repeats": 1,
699
+ "filter_list": [
700
+ {
701
+ "name": "all",
702
+ "filter": [
703
+ {
704
+ "function": "normalize_spaces"
705
+ },
706
+ {
707
+ "function": "remove_accents"
708
+ },
709
+ {
710
+ "function": "find_choices",
711
+ "choices": [
712
+ "A",
713
+ "B",
714
+ "C",
715
+ "D"
716
+ ],
717
+ "regex_patterns": [
718
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta[Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
719
+ "\\b([ABCD])\\)",
720
+ "\\b([ABCD]) ?[.):-]",
721
+ "\\b([ABCD])$",
722
+ "\\b([ABCD])\\b"
723
+ ]
724
+ },
725
+ {
726
+ "function": "take_first"
727
+ }
728
+ ],
729
+ "group_by": {
730
+ "column": "exam_id"
731
+ }
732
+ }
733
+ ],
734
+ "should_decontaminate": true,
735
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7fa487820d60>",
736
+ "metadata": {
737
+ "version": 1.4
738
+ }
739
+ },
740
+ "sparrow_emotion-2021-cortiz-por": {
741
+ "task": "sparrow_emotion-2021-cortiz-por",
742
+ "task_alias": "emotion-2021-cortiz-por",
743
+ "group": [
744
+ "pt_benchmark",
745
+ "sparrow"
746
+ ],
747
+ "dataset_path": "UBC-NLP/sparrow",
748
+ "dataset_name": "emotion-2021-cortiz-por",
749
+ "test_split": "validation",
750
+ "fewshot_split": "train",
751
+ "doc_to_text": "Texto: {{content}}\nPergunta: Qual a principal emoção apresentada no texto?\nResposta:",
752
+ "doc_to_target": "<function sparrow_emotion_por_trans_label at 0x7fa487820fe0>",
753
+ "description": "Abaixo contém o conteúdo de tweets de usuarios do Twitter em português, sua tarefa é extrair qual a principal emoção dos textos. Responda com apenas uma das seguintes opções:\n Admiração, Diversão, Raiva, Aborrecimento, Aprovação, Compaixão, Confusão, Curiosidade, Desejo, Decepção, Desaprovação, Nojo, Vergonha, Inveja, Entusiasmo, Medo, Gratidão, Luto, Alegria, Saudade, Amor, Nervosismo, Otimismo, Orgulho, Alívio, Remorso, Tristeza ou Surpresa.\n\n",
754
+ "target_delimiter": " ",
755
+ "fewshot_delimiter": "\n\n",
756
+ "fewshot_config": {
757
+ "sampler": "first_n"
758
+ },
759
+ "num_fewshot": 25,
760
+ "metric_list": [
761
+ {
762
+ "metric": "f1_macro",
763
+ "aggregation": "f1_macro",
764
+ "higher_is_better": true
765
+ },
766
+ {
767
+ "metric": "acc",
768
+ "aggregation": "acc",
769
+ "higher_is_better": true
770
+ }
771
+ ],
772
+ "output_type": "generate_until",
773
+ "generation_kwargs": {
774
+ "max_gen_toks": 32,
775
+ "do_sample": false,
776
+ "temperature": 0.0,
777
+ "top_k": null,
778
+ "top_p": null,
779
+ "until": [
780
+ "\n\n"
781
+ ]
782
+ },
783
+ "repeats": 1,
784
+ "filter_list": [
785
+ {
786
+ "name": "all",
787
+ "filter": [
788
+ {
789
+ "function": "find_similar_label",
790
+ "labels": [
791
+ "Admiração",
792
+ "Diversão",
793
+ "Raiva",
794
+ "Aborrecimento",
795
+ "Aprovação",
796
+ "Compaixão",
797
+ "Confusão",
798
+ "Curiosidade",
799
+ "Desejo",
800
+ "Decepção",
801
+ "Desaprovação",
802
+ "Nojo",
803
+ " Vergonha",
804
+ "Inveja",
805
+ "Entusiasmo",
806
+ "Medo",
807
+ "Gratidão",
808
+ "Luto",
809
+ "Alegria",
810
+ "Saudade",
811
+ "Amor",
812
+ "Nervosismo",
813
+ "Otimismo",
814
+ "Orgulho",
815
+ "Alívio",
816
+ "Remorso",
817
+ "Tristeza",
818
+ "Surpresa"
819
+ ]
820
+ },
821
+ {
822
+ "function": "take_first"
823
+ }
824
+ ]
825
+ }
826
+ ],
827
+ "should_decontaminate": false,
828
+ "metadata": {
829
+ "version": 1.0
830
+ }
831
+ },
832
+ "sparrow_hate-2019-fortuna-por": {
833
+ "task": "sparrow_hate-2019-fortuna-por",
834
+ "task_alias": "hate-2019-fortuna-por",
835
+ "group": [
836
+ "pt_benchmark",
837
+ "sparrow"
838
+ ],
839
+ "dataset_path": "UBC-NLP/sparrow",
840
+ "dataset_name": "hate-2019-fortuna-por",
841
+ "test_split": "validation",
842
+ "fewshot_split": "train",
843
+ "doc_to_text": "Texto: {{content}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
844
+ "doc_to_target": "{{'Sim' if label == 'Hate' else 'Não'}}",
845
+ "description": "Abaixo contém o conteúdo de tweets de usuarios do Twitter em português, sua tarefa é classificar se o texto contem discurso de ódio our não. Responda apenas com Sim ou Não.\n\n",
846
+ "target_delimiter": " ",
847
+ "fewshot_delimiter": "\n\n",
848
+ "fewshot_config": {
849
+ "sampler": "first_n"
850
+ },
851
+ "num_fewshot": 25,
852
+ "metric_list": [
853
+ {
854
+ "metric": "f1_macro",
855
+ "aggregation": "f1_macro",
856
+ "higher_is_better": true
857
+ },
858
+ {
859
+ "metric": "acc",
860
+ "aggregation": "acc",
861
+ "higher_is_better": true
862
+ }
863
+ ],
864
+ "output_type": "generate_until",
865
+ "generation_kwargs": {
866
+ "max_gen_toks": 32,
867
+ "do_sample": false,
868
+ "temperature": 0.0,
869
+ "top_k": null,
870
+ "top_p": null,
871
+ "until": [
872
+ "\n\n"
873
+ ]
874
+ },
875
+ "repeats": 1,
876
+ "filter_list": [
877
+ {
878
+ "name": "all",
879
+ "filter": [
880
+ {
881
+ "function": "find_similar_label",
882
+ "labels": [
883
+ "Sim",
884
+ "Não"
885
+ ]
886
+ },
887
+ {
888
+ "function": "take_first"
889
+ }
890
+ ]
891
+ }
892
+ ],
893
+ "should_decontaminate": false,
894
+ "metadata": {
895
+ "version": 1.0
896
+ }
897
+ },
898
+ "sparrow_sentiment-2016-mozetic-por": {
899
+ "task": "sparrow_sentiment-2016-mozetic-por",
900
+ "task_alias": "sentiment-2016-mozetic-por",
901
+ "group": [
902
+ "pt_benchmark",
903
+ "sparrow"
904
+ ],
905
+ "dataset_path": "UBC-NLP/sparrow",
906
+ "dataset_name": "sentiment-2016-mozetic-por",
907
+ "test_split": "validation",
908
+ "fewshot_split": "train",
909
+ "doc_to_text": "Texto: {{content}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
910
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
911
+ "description": "Abaixo contém o conteúdo de tweets de usuarios do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
912
+ "target_delimiter": " ",
913
+ "fewshot_delimiter": "\n\n",
914
+ "fewshot_config": {
915
+ "sampler": "first_n"
916
+ },
917
+ "num_fewshot": 25,
918
+ "metric_list": [
919
+ {
920
+ "metric": "f1_macro",
921
+ "aggregation": "f1_macro",
922
+ "higher_is_better": true
923
+ },
924
+ {
925
+ "metric": "acc",
926
+ "aggregation": "acc",
927
+ "higher_is_better": true
928
+ }
929
+ ],
930
+ "output_type": "generate_until",
931
+ "generation_kwargs": {
932
+ "max_gen_toks": 32,
933
+ "do_sample": false,
934
+ "temperature": 0.0,
935
+ "top_k": null,
936
+ "top_p": null,
937
+ "until": [
938
+ "\n\n"
939
+ ]
940
+ },
941
+ "repeats": 1,
942
+ "filter_list": [
943
+ {
944
+ "name": "all",
945
+ "filter": [
946
+ {
947
+ "function": "find_similar_label",
948
+ "labels": [
949
+ "Positivo",
950
+ "Neutro",
951
+ "Negativo"
952
+ ]
953
+ },
954
+ {
955
+ "function": "take_first"
956
+ }
957
+ ]
958
+ }
959
+ ],
960
+ "should_decontaminate": false,
961
+ "metadata": {
962
+ "version": 1.0
963
+ }
964
+ },
965
+ "sparrow_sentiment-2018-brum-por": {
966
+ "task": "sparrow_sentiment-2018-brum-por",
967
+ "task_alias": "sentiment-2018-brum-por",
968
+ "group": [
969
+ "pt_benchmark",
970
+ "sparrow"
971
+ ],
972
+ "dataset_path": "UBC-NLP/sparrow",
973
+ "dataset_name": "sentiment-2018-brum-por",
974
+ "test_split": "validation",
975
+ "fewshot_split": "train",
976
+ "doc_to_text": "Texto: {{content}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
977
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
978
+ "description": "Abaixo contém o conteúdo de tweets de usuarios do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
979
+ "target_delimiter": " ",
980
+ "fewshot_delimiter": "\n\n",
981
+ "fewshot_config": {
982
+ "sampler": "first_n"
983
+ },
984
+ "num_fewshot": 25,
985
+ "metric_list": [
986
+ {
987
+ "metric": "f1_macro",
988
+ "aggregation": "f1_macro",
989
+ "higher_is_better": true
990
+ },
991
+ {
992
+ "metric": "acc",
993
+ "aggregation": "acc",
994
+ "higher_is_better": true
995
+ }
996
+ ],
997
+ "output_type": "generate_until",
998
+ "generation_kwargs": {
999
+ "max_gen_toks": 32,
1000
+ "do_sample": false,
1001
+ "temperature": 0.0,
1002
+ "top_k": null,
1003
+ "top_p": null,
1004
+ "until": [
1005
+ "\n\n"
1006
+ ]
1007
+ },
1008
+ "repeats": 1,
1009
+ "filter_list": [
1010
+ {
1011
+ "name": "all",
1012
+ "filter": [
1013
+ {
1014
+ "function": "find_similar_label",
1015
+ "labels": [
1016
+ "Positivo",
1017
+ "Neutro",
1018
+ "Negativo"
1019
+ ]
1020
+ },
1021
+ {
1022
+ "function": "take_first"
1023
+ }
1024
+ ]
1025
+ }
1026
+ ],
1027
+ "should_decontaminate": false,
1028
+ "metadata": {
1029
+ "version": 1.0
1030
+ }
1031
+ }
1032
+ },
1033
+ "versions": {
1034
+ "assin2_rte": 1.0,
1035
+ "assin2_sts": 1.0,
1036
+ "bluex": 1.0,
1037
+ "enem_challenge": 1.0,
1038
+ "faquad_nli": 1.0,
1039
+ "oab_exams": 1.4,
1040
+ "sparrow_emotion-2021-cortiz-por": 1.0,
1041
+ "sparrow_hate-2019-fortuna-por": 1.0,
1042
+ "sparrow_sentiment-2016-mozetic-por": 1.0,
1043
+ "sparrow_sentiment-2018-brum-por": 1.0
1044
+ },
1045
+ "n-shot": {
1046
+ "assin2_rte": 15,
1047
+ "assin2_sts": 15,
1048
+ "bluex": 3,
1049
+ "enem_challenge": 3,
1050
+ "faquad_nli": 15,
1051
+ "oab_exams": 3,
1052
+ "sparrow_emotion-2021-cortiz-por": 25,
1053
+ "sparrow_hate-2019-fortuna-por": 25,
1054
+ "sparrow_sentiment-2016-mozetic-por": 25,
1055
+ "sparrow_sentiment-2018-brum-por": 25
1056
+ },
1057
+ "model_meta": {
1058
+ "truncated": 16,
1059
+ "non_truncated": 11873,
1060
+ "padded": 0,
1061
+ "non_padded": 11889,
1062
+ "fewshots_truncated": 18,
1063
+ "has_chat_template": false,
1064
+ "chat_type": null,
1065
+ "n_gpus": 1,
1066
+ "accelerate_num_process": null,
1067
+ "model_sha": "8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16",
1068
+ "model_dtype": "torch.float16",
1069
+ "model_memory_footprint": 13510393856,
1070
+ "model_num_parameters": 6738415616,
1071
+ "model_is_loaded_in_4bit": false,
1072
+ "model_is_loaded_in_8bit": false,
1073
+ "model_is_quantized": null,
1074
+ "model_device": "cuda:0",
1075
+ "batch_size": 16,
1076
+ "max_length": 2048,
1077
+ "max_ctx_length": 2016,
1078
+ "max_gen_toks": 32
1079
+ },
1080
+ "task_model_meta": {
1081
+ "assin2_rte": {
1082
+ "sample_size": 2448,
1083
+ "truncated": 0,
1084
+ "non_truncated": 2448,
1085
+ "padded": 0,
1086
+ "non_padded": 2448,
1087
+ "fewshots_truncated": 0,
1088
+ "mean_seq_length": 1182.9889705882354,
1089
+ "min_seq_length": 1160,
1090
+ "max_seq_length": 1249,
1091
+ "max_ctx_length": 2016,
1092
+ "max_gen_toks": 32,
1093
+ "mean_original_fewshots_size": 15.0,
1094
+ "mean_effective_fewshot_size": 15.0
1095
+ },
1096
+ "assin2_sts": {
1097
+ "sample_size": 2448,
1098
+ "truncated": 0,
1099
+ "non_truncated": 2448,
1100
+ "padded": 0,
1101
+ "non_padded": 2448,
1102
+ "fewshots_truncated": 0,
1103
+ "mean_seq_length": 1415.9889705882354,
1104
+ "min_seq_length": 1393,
1105
+ "max_seq_length": 1482,
1106
+ "max_ctx_length": 2016,
1107
+ "max_gen_toks": 32,
1108
+ "mean_original_fewshots_size": 15.0,
1109
+ "mean_effective_fewshot_size": 15.0
1110
+ },
1111
+ "bluex": {
1112
+ "sample_size": 719,
1113
+ "truncated": 4,
1114
+ "non_truncated": 715,
1115
+ "padded": 0,
1116
+ "non_padded": 719,
1117
+ "fewshots_truncated": 4,
1118
+ "mean_seq_length": 1364.7426981919332,
1119
+ "min_seq_length": 998,
1120
+ "max_seq_length": 2124,
1121
+ "max_ctx_length": 2016,
1122
+ "max_gen_toks": 32,
1123
+ "mean_original_fewshots_size": 3.0,
1124
+ "mean_effective_fewshot_size": 2.9944367176634215
1125
+ },
1126
+ "enem_challenge": {
1127
+ "sample_size": 1429,
1128
+ "truncated": 12,
1129
+ "non_truncated": 1417,
1130
+ "padded": 0,
1131
+ "non_padded": 1429,
1132
+ "fewshots_truncated": 14,
1133
+ "mean_seq_length": 1532.9881035689293,
1134
+ "min_seq_length": 1280,
1135
+ "max_seq_length": 2572,
1136
+ "max_ctx_length": 2016,
1137
+ "max_gen_toks": 32,
1138
+ "mean_original_fewshots_size": 3.0,
1139
+ "mean_effective_fewshot_size": 2.9902029391182645
1140
+ },
1141
+ "faquad_nli": {
1142
+ "sample_size": 650,
1143
+ "truncated": 0,
1144
+ "non_truncated": 650,
1145
+ "padded": 0,
1146
+ "non_padded": 650,
1147
+ "fewshots_truncated": 0,
1148
+ "mean_seq_length": 1473.1184615384616,
1149
+ "min_seq_length": 1421,
1150
+ "max_seq_length": 1580,
1151
+ "max_ctx_length": 2016,
1152
+ "max_gen_toks": 32,
1153
+ "mean_original_fewshots_size": 15.0,
1154
+ "mean_effective_fewshot_size": 15.0
1155
+ },
1156
+ "oab_exams": {
1157
+ "sample_size": 2195,
1158
+ "truncated": 0,
1159
+ "non_truncated": 2195,
1160
+ "padded": 0,
1161
+ "non_padded": 2195,
1162
+ "fewshots_truncated": 0,
1163
+ "mean_seq_length": 1306.4145785876992,
1164
+ "min_seq_length": 1051,
1165
+ "max_seq_length": 1788,
1166
+ "max_ctx_length": 2016,
1167
+ "max_gen_toks": 32,
1168
+ "mean_original_fewshots_size": 3.0,
1169
+ "mean_effective_fewshot_size": 3.0
1170
+ },
1171
+ "sparrow_emotion-2021-cortiz-por": {
1172
+ "sample_size": 500,
1173
+ "truncated": 0,
1174
+ "non_truncated": 500,
1175
+ "padded": 0,
1176
+ "non_padded": 500,
1177
+ "fewshots_truncated": 0,
1178
+ "mean_seq_length": 1696.204,
1179
+ "min_seq_length": 1674,
1180
+ "max_seq_length": 1744,
1181
+ "max_ctx_length": 2016,
1182
+ "max_gen_toks": 32,
1183
+ "mean_original_fewshots_size": 25.0,
1184
+ "mean_effective_fewshot_size": 25.0
1185
+ },
1186
+ "sparrow_hate-2019-fortuna-por": {
1187
+ "sample_size": 500,
1188
+ "truncated": 0,
1189
+ "non_truncated": 500,
1190
+ "padded": 0,
1191
+ "non_padded": 500,
1192
+ "fewshots_truncated": 0,
1193
+ "mean_seq_length": 1708.588,
1194
+ "min_seq_length": 1684,
1195
+ "max_seq_length": 1767,
1196
+ "max_ctx_length": 2016,
1197
+ "max_gen_toks": 32,
1198
+ "mean_original_fewshots_size": 25.0,
1199
+ "mean_effective_fewshot_size": 25.0
1200
+ },
1201
+ "sparrow_sentiment-2016-mozetic-por": {
1202
+ "sample_size": 500,
1203
+ "truncated": 0,
1204
+ "non_truncated": 500,
1205
+ "padded": 0,
1206
+ "non_padded": 500,
1207
+ "fewshots_truncated": 0,
1208
+ "mean_seq_length": 1422.44,
1209
+ "min_seq_length": 1405,
1210
+ "max_seq_length": 1458,
1211
+ "max_ctx_length": 2016,
1212
+ "max_gen_toks": 32,
1213
+ "mean_original_fewshots_size": 25.0,
1214
+ "mean_effective_fewshot_size": 25.0
1215
+ },
1216
+ "sparrow_sentiment-2018-brum-por": {
1217
+ "sample_size": 500,
1218
+ "truncated": 0,
1219
+ "non_truncated": 500,
1220
+ "padded": 0,
1221
+ "non_padded": 500,
1222
+ "fewshots_truncated": 0,
1223
+ "mean_seq_length": 1573.854,
1224
+ "min_seq_length": 1556,
1225
+ "max_seq_length": 1605,
1226
+ "max_ctx_length": 2016,
1227
+ "max_gen_toks": 32,
1228
+ "mean_original_fewshots_size": 25.0,
1229
+ "mean_effective_fewshot_size": 25.0
1230
+ }
1231
+ },
1232
+ "config": {
1233
+ "model": "huggingface",
1234
+ "model_args": "pretrained=huggyllama/llama-7b,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096",
1235
+ "batch_size": "auto",
1236
+ "batch_sizes": [],
1237
+ "device": null,
1238
+ "use_cache": null,
1239
+ "limit": [
1240
+ null,
1241
+ null,
1242
+ null,
1243
+ null,
1244
+ null,
1245
+ null,
1246
+ 500.0,
1247
+ 500.0,
1248
+ 500.0,
1249
+ 500.0
1250
+ ],
1251
+ "bootstrap_iters": 0,
1252
+ "gen_kwargs": null
1253
+ },
1254
+ "git_hash": "637ac6b"
1255
+ }
huggyllama/llama-7b/results_2024-02-05T23-45-55.633251.json ADDED
@@ -0,0 +1,345 @@
1
+ {
2
+ "config_general": {
3
+ "start_date": "2024-02-05T23-45-55.633251",
4
+ "start_time": 1707176779.2900355,
5
+ "end_time": 1707181201.1673322,
6
+ "total_evaluation_time_seconds": 4421.8772966861725,
7
+ "has_chat_template": false,
8
+ "chat_type": null,
9
+ "n_gpus": 1,
10
+ "accelerate_num_process": null,
11
+ "model_sha": "8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16",
12
+ "model_dtype": "float16",
13
+ "model_memory_footprint": 13510393856,
14
+ "model_num_parameters": 6738415616,
15
+ "model_is_loaded_in_4bit": false,
16
+ "model_is_loaded_in_8bit": false,
17
+ "model_is_quantized": null,
18
+ "model_device": "cuda:2",
19
+ "batch_size": 32,
20
+ "max_length": 2048,
21
+ "max_ctx_length": 2016,
22
+ "max_gen_toks": 32,
23
+ "model_name": "huggyllama/llama-7b",
24
+ "job_id": 2,
25
+ "model_id": "huggyllama/llama-7b_eval_request_False_float16_Original",
26
+ "model_base_model": "",
27
+ "model_weight_type": "Original",
28
+ "model_revision": "main",
29
+ "model_private": false,
30
+ "model_type": "🟢 : pretrained",
31
+ "model_architectures": "LlamaForCausalLM",
32
+ "submitted_time": "2024-02-05T22:59:55Z",
33
+ "lm_eval_model_type": "huggingface",
34
+ "eval_version": "1.0.0"
35
+ },
36
+ "results": {
37
+ "all_grouped_average": 0.29724944957428684,
38
+ "all_grouped": {
39
+ "enem_challenge": 0.2365290412876137,
40
+ "bluex": 0.2614742698191933,
41
+ "oab_exams": 0.2742596810933941,
42
+ "assin2_rte": 0.3333333333333333,
43
+ "assin2_sts": 0.07703722378109175,
44
+ "faquad_nli": 0.5548892542799111,
45
+ "sparrow_pt": 0.3432233434254706
46
+ },
47
+ "all": {
48
+ "harness|enem_challenge|enem_challenge|None|3": 0.2365290412876137,
49
+ "harness|bluex|bluex|None|3": 0.2614742698191933,
50
+ "harness|oab_exams|oab_exams|None|3": 0.2742596810933941,
51
+ "harness|assin2_rte|assin2_rte|None|15": 0.3333333333333333,
52
+ "harness|assin2_sts|assin2_sts|None|15": 0.07703722378109175,
53
+ "harness|faquad_nli|faquad_nli|None|15": 0.5548892542799111,
54
+ "harness|sparrow_pt|sparrow_emotion-2021-cortiz-por|500|15": 0.08365150307457563,
55
+ "harness|sparrow_pt|sparrow_hate-2019-fortuna-por|500|15": 0.5377762837213211,
56
+ "harness|sparrow_pt|sparrow_sentiment-2016-mozetic-por|500|15": 0.42328641755398966,
57
+ "harness|sparrow_pt|sparrow_sentiment-2018-brum-por|500|15": 0.32817916935199615
58
+ },
59
+ "harness|enem_challenge|enem_challenge|None|3": {
60
+ "acc,all": 0.2365290412876137,
61
+ "acc,exam_id__2016": 0.23140495867768596,
62
+ "acc,exam_id__2015": 0.19327731092436976,
63
+ "acc,exam_id__2017": 0.2413793103448276,
64
+ "acc,exam_id__2010": 0.1794871794871795,
65
+ "acc,exam_id__2023": 0.25925925925925924,
66
+ "acc,exam_id__2013": 0.2222222222222222,
67
+ "acc,exam_id__2009": 0.2782608695652174,
68
+ "acc,exam_id__2022": 0.24812030075187969,
69
+ "acc,exam_id__2014": 0.1926605504587156,
70
+ "acc,exam_id__2016_2": 0.22764227642276422,
71
+ "acc,exam_id__2011": 0.2222222222222222,
72
+ "acc,exam_id__2012": 0.33620689655172414,
73
+ "main_score": 0.2365290412876137
74
+ },
75
+ "harness|bluex|bluex|None|3": {
76
+ "acc,all": 0.2614742698191933,
77
+ "acc,exam_id__USP_2021": 0.23076923076923078,
78
+ "acc,exam_id__UNICAMP_2018": 0.2777777777777778,
79
+ "acc,exam_id__UNICAMP_2021_1": 0.32608695652173914,
80
+ "acc,exam_id__UNICAMP_2022": 0.3076923076923077,
81
+ "acc,exam_id__USP_2020": 0.30357142857142855,
82
+ "acc,exam_id__UNICAMP_2021_2": 0.29411764705882354,
83
+ "acc,exam_id__UNICAMP_2023": 0.4883720930232558,
84
+ "acc,exam_id__UNICAMP_2020": 0.18181818181818182,
85
+ "acc,exam_id__UNICAMP_2019": 0.2,
86
+ "acc,exam_id__USP_2019": 0.325,
87
+ "acc,exam_id__USP_2022": 0.24489795918367346,
88
+ "acc,exam_id__USP_2024": 0.14634146341463414,
89
+ "acc,exam_id__USP_2023": 0.18181818181818182,
90
+ "acc,exam_id__USP_2018": 0.18518518518518517,
91
+ "acc,exam_id__UNICAMP_2024": 0.26666666666666666,
92
+ "main_score": 0.2614742698191933
93
+ },
94
+ "harness|oab_exams|oab_exams|None|3": {
95
+ "acc,all": 0.2742596810933941,
96
+ "acc,exam_id__2012-08": 0.2875,
97
+ "acc,exam_id__2013-10": 0.2125,
98
+ "acc,exam_id__2012-06a": 0.2,
99
+ "acc,exam_id__2012-06": 0.2875,
100
+ "acc,exam_id__2015-17": 0.3333333333333333,
101
+ "acc,exam_id__2016-21": 0.3,
102
+ "acc,exam_id__2011-03": 0.2727272727272727,
103
+ "acc,exam_id__2017-23": 0.325,
104
+ "acc,exam_id__2012-09": 0.15584415584415584,
105
+ "acc,exam_id__2013-11": 0.3,
106
+ "acc,exam_id__2017-24": 0.2875,
107
+ "acc,exam_id__2011-04": 0.3125,
108
+ "acc,exam_id__2014-15": 0.3333333333333333,
109
+ "acc,exam_id__2013-12": 0.2,
110
+ "acc,exam_id__2010-02": 0.34,
111
+ "acc,exam_id__2016-20": 0.2375,
112
+ "acc,exam_id__2017-22": 0.3,
113
+ "acc,exam_id__2015-18": 0.275,
114
+ "acc,exam_id__2010-01": 0.27058823529411763,
115
+ "acc,exam_id__2018-25": 0.3,
116
+ "acc,exam_id__2014-14": 0.2375,
117
+ "acc,exam_id__2014-13": 0.275,
118
+ "acc,exam_id__2012-07": 0.25,
119
+ "acc,exam_id__2016-19": 0.3076923076923077,
120
+ "acc,exam_id__2016-20a": 0.25,
121
+ "acc,exam_id__2015-16": 0.2625,
122
+ "acc,exam_id__2011-05": 0.275,
123
+ "main_score": 0.2742596810933941
124
+ },
125
+ "harness|assin2_rte|assin2_rte|None|15": {
126
+ "f1_macro,all": 0.3333333333333333,
127
+ "acc,all": 0.5,
128
+ "main_score": 0.3333333333333333
129
+ },
130
+ "harness|assin2_sts|assin2_sts|None|15": {
131
+ "pearson,all": 0.07703722378109175,
132
+ "mse,all": 2.5286274509803923,
133
+ "main_score": 0.07703722378109175
134
+ },
135
+ "harness|faquad_nli|faquad_nli|None|15": {
136
+ "f1_macro,all": 0.5548892542799111,
137
+ "acc,all": 0.7276923076923076,
138
+ "main_score": 0.5548892542799111
139
+ },
140
+ "harness|sparrow_pt|sparrow_emotion-2021-cortiz-por|500|15": {
141
+ "f1_macro,all": 0.08365150307457563,
142
+ "acc,all": 0.142,
143
+ "main_score": 0.08365150307457563
144
+ },
145
+ "harness|sparrow_pt|sparrow_hate-2019-fortuna-por|500|15": {
146
+ "f1_macro,all": 0.5377762837213211,
147
+ "acc,all": 0.538,
148
+ "main_score": 0.5377762837213211
149
+ },
150
+ "harness|sparrow_pt|sparrow_sentiment-2016-mozetic-por|500|15": {
151
+ "f1_macro,all": 0.42328641755398966,
152
+ "acc,all": 0.638,
153
+ "main_score": 0.42328641755398966
154
+ },
155
+ "harness|sparrow_pt|sparrow_sentiment-2018-brum-por|500|15": {
156
+ "f1_macro,all": 0.32817916935199615,
157
+ "acc,all": 0.372,
158
+ "main_score": 0.32817916935199615
159
+ }
160
+ },
161
+ "config_tasks": {
162
+ "harness|enem_challenge|enem_challenge": "LM Harness task",
163
+ "harness|bluex|bluex": "LM Harness task",
164
+ "harness|oab_exams|oab_exams": "LM Harness task",
165
+ "harness|assin2_rte|assin2_rte": "LM Harness task",
166
+ "harness|assin2_sts|assin2_sts": "LM Harness task",
167
+ "harness|faquad_nli|faquad_nli": "LM Harness task",
168
+ "harness|sparrow_pt|sparrow_emotion-2021-cortiz-por": "LM Harness task",
169
+ "harness|sparrow_pt|sparrow_hate-2019-fortuna-por": "LM Harness task",
170
+ "harness|sparrow_pt|sparrow_sentiment-2016-mozetic-por": "LM Harness task",
171
+ "harness|sparrow_pt|sparrow_sentiment-2018-brum-por": "LM Harness task"
172
+ },
173
+ "versions": {
174
+ "all": 0,
175
+ "harness|enem_challenge|enem_challenge": 1.0,
176
+ "harness|bluex|bluex": 1.0,
177
+ "harness|oab_exams|oab_exams": 1.4,
178
+ "harness|assin2_rte|assin2_rte": 1.0,
179
+ "harness|assin2_sts|assin2_sts": 1.0,
180
+ "harness|faquad_nli|faquad_nli": 1.0,
181
+ "harness|sparrow_pt|sparrow_emotion-2021-cortiz-por": 1.0,
182
+ "harness|sparrow_pt|sparrow_hate-2019-fortuna-por": 1.0,
183
+ "harness|sparrow_pt|sparrow_sentiment-2016-mozetic-por": 1.0,
184
+ "harness|sparrow_pt|sparrow_sentiment-2018-brum-por": 1.0
185
+ },
186
+ "summary_tasks": {
187
+ "harness|enem_challenge|enem_challenge|None|3": {
188
+ "sample_size": 1429,
189
+ "truncated": 12,
190
+ "non_truncated": 1417,
191
+ "padded": 0,
192
+ "non_padded": 1429,
193
+ "fewshots_truncated": 14,
194
+ "mean_seq_length": 1532.9881035689293,
195
+ "min_seq_length": 1280,
196
+ "max_seq_length": 2572,
197
+ "max_ctx_length": 2016,
198
+ "max_gen_toks": 32,
199
+ "mean_original_fewshots_size": 3.0,
200
+ "mean_effective_fewshot_size": 2.9902029391182645
201
+ },
202
+ "harness|bluex|bluex|None|3": {
203
+ "sample_size": 719,
204
+ "truncated": 4,
205
+ "non_truncated": 715,
206
+ "padded": 0,
207
+ "non_padded": 719,
208
+ "fewshots_truncated": 4,
209
+ "mean_seq_length": 1364.7426981919332,
210
+ "min_seq_length": 998,
211
+ "max_seq_length": 2124,
212
+ "max_ctx_length": 2016,
213
+ "max_gen_toks": 32,
214
+ "mean_original_fewshots_size": 3.0,
215
+ "mean_effective_fewshot_size": 2.9944367176634215
216
+ },
217
+ "harness|oab_exams|oab_exams|None|3": {
218
+ "sample_size": 2195,
219
+ "truncated": 0,
220
+ "non_truncated": 2195,
221
+ "padded": 0,
222
+ "non_padded": 2195,
223
+ "fewshots_truncated": 0,
224
+ "mean_seq_length": 1306.4145785876992,
225
+ "min_seq_length": 1051,
226
+ "max_seq_length": 1788,
227
+ "max_ctx_length": 2016,
228
+ "max_gen_toks": 32,
229
+ "mean_original_fewshots_size": 3.0,
230
+ "mean_effective_fewshot_size": 3.0
231
+ },
232
+ "harness|assin2_rte|assin2_rte|None|15": {
233
+ "sample_size": 2448,
234
+ "truncated": 0,
235
+ "non_truncated": 2448,
236
+ "padded": 0,
237
+ "non_padded": 2448,
238
+ "fewshots_truncated": 0,
239
+ "mean_seq_length": 1182.9889705882354,
240
+ "min_seq_length": 1160,
241
+ "max_seq_length": 1249,
242
+ "max_ctx_length": 2016,
243
+ "max_gen_toks": 32,
244
+ "mean_original_fewshots_size": 15.0,
245
+ "mean_effective_fewshot_size": 15.0
246
+ },
247
+ "harness|assin2_sts|assin2_sts|None|15": {
248
+ "sample_size": 2448,
249
+ "truncated": 0,
250
+ "non_truncated": 2448,
251
+ "padded": 0,
252
+ "non_padded": 2448,
253
+ "fewshots_truncated": 0,
254
+ "mean_seq_length": 1415.9889705882354,
255
+ "min_seq_length": 1393,
256
+ "max_seq_length": 1482,
257
+ "max_ctx_length": 2016,
258
+ "max_gen_toks": 32,
259
+ "mean_original_fewshots_size": 15.0,
260
+ "mean_effective_fewshot_size": 15.0
261
+ },
262
+ "harness|faquad_nli|faquad_nli|None|15": {
263
+ "sample_size": 650,
264
+ "truncated": 0,
265
+ "non_truncated": 650,
266
+ "padded": 0,
267
+ "non_padded": 650,
268
+ "fewshots_truncated": 0,
269
+ "mean_seq_length": 1473.1184615384616,
270
+ "min_seq_length": 1421,
271
+ "max_seq_length": 1580,
272
+ "max_ctx_length": 2016,
273
+ "max_gen_toks": 32,
274
+ "mean_original_fewshots_size": 15.0,
275
+ "mean_effective_fewshot_size": 15.0
276
+ },
277
+ "harness|sparrow_pt|sparrow_emotion-2021-cortiz-por|500|15": {
278
+ "sample_size": 500,
279
+ "truncated": 0,
280
+ "non_truncated": 500,
281
+ "padded": 0,
282
+ "non_padded": 500,
283
+ "fewshots_truncated": 0,
284
+ "mean_seq_length": 1132.204,
285
+ "min_seq_length": 1110,
286
+ "max_seq_length": 1180,
287
+ "max_ctx_length": 2016,
288
+ "max_gen_toks": 32,
289
+ "mean_original_fewshots_size": 15.0,
290
+ "mean_effective_fewshot_size": 15.0
291
+ },
292
+ "harness|sparrow_pt|sparrow_hate-2019-fortuna-por|500|15": {
293
+ "sample_size": 500,
294
+ "truncated": 0,
295
+ "non_truncated": 500,
296
+ "padded": 0,
297
+ "non_padded": 500,
298
+ "fewshots_truncated": 0,
299
+ "mean_seq_length": 1044.588,
300
+ "min_seq_length": 1020,
301
+ "max_seq_length": 1103,
302
+ "max_ctx_length": 2016,
303
+ "max_gen_toks": 32,
304
+ "mean_original_fewshots_size": 15.0,
305
+ "mean_effective_fewshot_size": 15.0
306
+ },
307
+ "harness|sparrow_pt|sparrow_sentiment-2016-mozetic-por|500|15": {
308
+ "sample_size": 500,
309
+ "truncated": 0,
310
+ "non_truncated": 500,
311
+ "padded": 0,
312
+ "non_padded": 500,
313
+ "fewshots_truncated": 0,
314
+ "mean_seq_length": 916.44,
315
+ "min_seq_length": 899,
316
+ "max_seq_length": 952,
317
+ "max_ctx_length": 2016,
318
+ "max_gen_toks": 32,
319
+ "mean_original_fewshots_size": 15.0,
320
+ "mean_effective_fewshot_size": 15.0
321
+ },
322
+ "harness|sparrow_pt|sparrow_sentiment-2018-brum-por|500|15": {
323
+ "sample_size": 500,
324
+ "truncated": 0,
325
+ "non_truncated": 500,
326
+ "padded": 0,
327
+ "non_padded": 500,
328
+ "fewshots_truncated": 0,
329
+ "mean_seq_length": 1011.854,
330
+ "min_seq_length": 994,
331
+ "max_seq_length": 1043,
332
+ "max_ctx_length": 2016,
333
+ "max_gen_toks": 32,
334
+ "mean_original_fewshots_size": 15.0,
335
+ "mean_effective_fewshot_size": 15.0
336
+ }
337
+ },
338
+ "summary_general": {
339
+ "truncated": 16,
340
+ "non_truncated": 11873,
341
+ "padded": 0,
342
+ "non_padded": 11889,
343
+ "fewshots_truncated": 18
344
+ }
345
+ }
huggyllama/llama-7b/results_2024-02-06T03-05-10.639848.json ADDED
@@ -0,0 +1,345 @@
1
+ {
2
+ "config_general": {
3
+ "start_date": "2024-02-06T03-05-10.639848",
4
+ "start_time": 1707188711.8299422,
5
+ "end_time": 1707194323.3227339,
6
+ "total_evaluation_time_seconds": 5611.492791652679,
7
+ "has_chat_template": false,
8
+ "chat_type": null,
9
+ "n_gpus": 1,
10
+ "accelerate_num_process": null,
11
+ "model_sha": "8416d3fefb0cb3ff5775a7b13c1692d10ff1aa16",
12
+ "model_dtype": "float16",
13
+ "model_memory_footprint": 13510393856,
14
+ "model_num_parameters": 6738415616,
15
+ "model_is_loaded_in_4bit": false,
16
+ "model_is_loaded_in_8bit": false,
17
+ "model_is_quantized": null,
18
+ "model_device": "cuda:0",
19
+ "batch_size": 16,
20
+ "max_length": 2048,
21
+ "max_ctx_length": 2016,
22
+ "max_gen_toks": 32,
23
+ "model_name": "huggyllama/llama-7b",
24
+ "job_id": 5,
25
+ "model_id": "huggyllama/llama-7b_eval_request_False_float16_Original",
26
+ "model_base_model": "",
27
+ "model_weight_type": "Original",
28
+ "model_revision": "main",
29
+ "model_private": false,
30
+ "model_type": "🟢 : pretrained",
31
+ "model_architectures": "LlamaForCausalLM",
32
+ "submitted_time": "2024-02-05T22:59:55Z",
33
+ "lm_eval_model_type": "huggingface",
34
+ "eval_version": "1.0.0"
35
+ },
36
+ "results": {
37
+ "all_grouped_average": 0.2926171051512782,
38
+ "all_grouped": {
39
+ "enem_challenge": 0.2365290412876137,
40
+ "bluex": 0.2614742698191933,
41
+ "oab_exams": 0.2742596810933941,
42
+ "assin2_rte": 0.3333333333333333,
43
+ "assin2_sts": 0.07703722378109175,
44
+ "faquad_nli": 0.5548892542799111,
45
+ "sparrow_pt": 0.31079693246441015
46
+ },
47
+ "all": {
48
+ "harness|enem_challenge|enem_challenge|None|3": 0.2365290412876137,
49
+ "harness|bluex|bluex|None|3": 0.2614742698191933,
50
+ "harness|oab_exams|oab_exams|None|3": 0.2742596810933941,
51
+ "harness|assin2_rte|assin2_rte|None|15": 0.3333333333333333,
52
+ "harness|assin2_sts|assin2_sts|None|15": 0.07703722378109175,
53
+ "harness|faquad_nli|faquad_nli|None|15": 0.5548892542799111,
54
+ "harness|sparrow_pt|sparrow_emotion-2021-cortiz-por|500|25": 0.06400860793605274,
55
+ "harness|sparrow_pt|sparrow_hate-2019-fortuna-por|500|25": 0.4300911854103343,
56
+ "harness|sparrow_pt|sparrow_sentiment-2016-mozetic-por|500|25": 0.4465537433018192,
57
+ "harness|sparrow_pt|sparrow_sentiment-2018-brum-por|500|25": 0.30253419320943437
58
+ },
59
+ "harness|enem_challenge|enem_challenge|None|3": {
60
+ "acc,all": 0.2365290412876137,
61
+ "acc,exam_id__2016_2": 0.22764227642276422,
62
+ "acc,exam_id__2023": 0.25925925925925924,
63
+ "acc,exam_id__2011": 0.2222222222222222,
64
+ "acc,exam_id__2015": 0.19327731092436976,
65
+ "acc,exam_id__2010": 0.1794871794871795,
66
+ "acc,exam_id__2012": 0.33620689655172414,
67
+ "acc,exam_id__2009": 0.2782608695652174,
68
+ "acc,exam_id__2022": 0.24812030075187969,
69
+ "acc,exam_id__2014": 0.1926605504587156,
70
+ "acc,exam_id__2017": 0.2413793103448276,
71
+ "acc,exam_id__2016": 0.23140495867768596,
72
+ "acc,exam_id__2013": 0.2222222222222222,
73
+ "main_score": 0.2365290412876137
74
+ },
75
+ "harness|bluex|bluex|None|3": {
76
+ "acc,all": 0.2614742698191933,
77
+ "acc,exam_id__USP_2024": 0.14634146341463414,
78
+ "acc,exam_id__UNICAMP_2020": 0.18181818181818182,
79
+ "acc,exam_id__UNICAMP_2019": 0.2,
80
+ "acc,exam_id__UNICAMP_2023": 0.4883720930232558,
81
+ "acc,exam_id__UNICAMP_2018": 0.2777777777777778,
82
+ "acc,exam_id__UNICAMP_2024": 0.26666666666666666,
83
+ "acc,exam_id__USP_2022": 0.24489795918367346,
84
+ "acc,exam_id__UNICAMP_2022": 0.3076923076923077,
85
+ "acc,exam_id__UNICAMP_2021_1": 0.32608695652173914,
86
+ "acc,exam_id__USP_2023": 0.18181818181818182,
87
+ "acc,exam_id__UNICAMP_2021_2": 0.29411764705882354,
88
+ "acc,exam_id__USP_2018": 0.18518518518518517,
89
+ "acc,exam_id__USP_2019": 0.325,
90
+ "acc,exam_id__USP_2021": 0.23076923076923078,
91
+ "acc,exam_id__USP_2020": 0.30357142857142855,
92
+ "main_score": 0.2614742698191933
93
+ },
94
+ "harness|oab_exams|oab_exams|None|3": {
95
+ "acc,all": 0.2742596810933941,
96
+ "acc,exam_id__2017-23": 0.325,
97
+ "acc,exam_id__2013-10": 0.2125,
98
+ "acc,exam_id__2016-20a": 0.25,
99
+ "acc,exam_id__2016-20": 0.2375,
100
+ "acc,exam_id__2014-14": 0.2375,
101
+ "acc,exam_id__2010-02": 0.34,
102
+ "acc,exam_id__2014-15": 0.3333333333333333,
103
+ "acc,exam_id__2010-01": 0.27058823529411763,
104
+ "acc,exam_id__2011-03": 0.2727272727272727,
105
+ "acc,exam_id__2016-19": 0.3076923076923077,
106
+ "acc,exam_id__2016-21": 0.3,
107
+ "acc,exam_id__2018-25": 0.3,
108
+ "acc,exam_id__2013-12": 0.2,
109
+ "acc,exam_id__2011-04": 0.3125,
110
+ "acc,exam_id__2011-05": 0.275,
111
+ "acc,exam_id__2012-07": 0.25,
112
+ "acc,exam_id__2012-06": 0.2875,
113
+ "acc,exam_id__2012-09": 0.15584415584415584,
114
+ "acc,exam_id__2013-11": 0.3,
115
+ "acc,exam_id__2012-06a": 0.2,
116
+ "acc,exam_id__2012-08": 0.2875,
117
+ "acc,exam_id__2015-18": 0.275,
118
+ "acc,exam_id__2017-22": 0.3,
119
+ "acc,exam_id__2015-17": 0.3333333333333333,
120
+ "acc,exam_id__2017-24": 0.2875,
121
+ "acc,exam_id__2014-13": 0.275,
122
+ "acc,exam_id__2015-16": 0.2625,
123
+ "main_score": 0.2742596810933941
124
+ },
125
+ "harness|assin2_rte|assin2_rte|None|15": {
126
+ "f1_macro,all": 0.3333333333333333,
127
+ "acc,all": 0.5,
128
+ "main_score": 0.3333333333333333
129
+ },
130
+ "harness|assin2_sts|assin2_sts|None|15": {
131
+ "pearson,all": 0.07703722378109175,
132
+ "mse,all": 2.5286274509803923,
133
+ "main_score": 0.07703722378109175
134
+ },
135
+ "harness|faquad_nli|faquad_nli|None|15": {
136
+ "f1_macro,all": 0.5548892542799111,
137
+ "acc,all": 0.7276923076923076,
138
+ "main_score": 0.5548892542799111
139
+ },
140
+ "harness|sparrow_pt|sparrow_emotion-2021-cortiz-por|500|25": {
141
+ "f1_macro,all": 0.06400860793605274,
142
+ "acc,all": 0.166,
143
+ "main_score": 0.06400860793605274
144
+ },
145
+ "harness|sparrow_pt|sparrow_hate-2019-fortuna-por|500|25": {
146
+ "f1_macro,all": 0.4300911854103343,
147
+ "acc,all": 0.652,
148
+ "main_score": 0.4300911854103343
149
+ },
150
+ "harness|sparrow_pt|sparrow_sentiment-2016-mozetic-por|500|25": {
151
+ "f1_macro,all": 0.4465537433018192,
152
+ "acc,all": 0.6,
153
+ "main_score": 0.4465537433018192
154
+ },
155
+ "harness|sparrow_pt|sparrow_sentiment-2018-brum-por|500|25": {
156
+ "f1_macro,all": 0.30253419320943437,
157
+ "acc,all": 0.376,
158
+ "main_score": 0.30253419320943437
159
+ }
160
+ },
161
+ "config_tasks": {
162
+ "harness|enem_challenge|enem_challenge": "LM Harness task",
163
+ "harness|bluex|bluex": "LM Harness task",
164
+ "harness|oab_exams|oab_exams": "LM Harness task",
165
+ "harness|assin2_rte|assin2_rte": "LM Harness task",
166
+ "harness|assin2_sts|assin2_sts": "LM Harness task",
167
+ "harness|faquad_nli|faquad_nli": "LM Harness task",
168
+ "harness|sparrow_pt|sparrow_emotion-2021-cortiz-por": "LM Harness task",
169
+ "harness|sparrow_pt|sparrow_hate-2019-fortuna-por": "LM Harness task",
170
+ "harness|sparrow_pt|sparrow_sentiment-2016-mozetic-por": "LM Harness task",
171
+ "harness|sparrow_pt|sparrow_sentiment-2018-brum-por": "LM Harness task"
172
+ },
173
+ "versions": {
174
+ "all": 0,
175
+ "harness|enem_challenge|enem_challenge": 1.0,
176
+ "harness|bluex|bluex": 1.0,
177
+ "harness|oab_exams|oab_exams": 1.4,
178
+ "harness|assin2_rte|assin2_rte": 1.0,
179
+ "harness|assin2_sts|assin2_sts": 1.0,
180
+ "harness|faquad_nli|faquad_nli": 1.0,
181
+ "harness|sparrow_pt|sparrow_emotion-2021-cortiz-por": 1.0,
182
+ "harness|sparrow_pt|sparrow_hate-2019-fortuna-por": 1.0,
183
+ "harness|sparrow_pt|sparrow_sentiment-2016-mozetic-por": 1.0,
184
+ "harness|sparrow_pt|sparrow_sentiment-2018-brum-por": 1.0
185
+ },
186
+ "summary_tasks": {
187
+ "harness|enem_challenge|enem_challenge|None|3": {
188
+ "sample_size": 1429,
189
+ "truncated": 12,
190
+ "non_truncated": 1417,
191
+ "padded": 0,
192
+ "non_padded": 1429,
193
+ "fewshots_truncated": 14,
194
+ "mean_seq_length": 1532.9881035689293,
195
+ "min_seq_length": 1280,
196
+ "max_seq_length": 2572,
197
+ "max_ctx_length": 2016,
198
+ "max_gen_toks": 32,
199
+ "mean_original_fewshots_size": 3.0,
200
+ "mean_effective_fewshot_size": 2.9902029391182645
201
+ },
202
+ "harness|bluex|bluex|None|3": {
203
+ "sample_size": 719,
204
+ "truncated": 4,
205
+ "non_truncated": 715,
206
+ "padded": 0,
207
+ "non_padded": 719,
208
+ "fewshots_truncated": 4,
209
+ "mean_seq_length": 1364.7426981919332,
210
+ "min_seq_length": 998,
211
+ "max_seq_length": 2124,
212
+ "max_ctx_length": 2016,
213
+ "max_gen_toks": 32,
214
+ "mean_original_fewshots_size": 3.0,
215
+ "mean_effective_fewshot_size": 2.9944367176634215
216
+ },
217
+ "harness|oab_exams|oab_exams|None|3": {
218
+ "sample_size": 2195,
219
+ "truncated": 0,
220
+ "non_truncated": 2195,
221
+ "padded": 0,
222
+ "non_padded": 2195,
223
+ "fewshots_truncated": 0,
224
+ "mean_seq_length": 1306.4145785876992,
225
+ "min_seq_length": 1051,
226
+ "max_seq_length": 1788,
227
+ "max_ctx_length": 2016,
228
+ "max_gen_toks": 32,
229
+ "mean_original_fewshots_size": 3.0,
230
+ "mean_effective_fewshot_size": 3.0
231
+ },
232
+ "harness|assin2_rte|assin2_rte|None|15": {
233
+ "sample_size": 2448,
234
+ "truncated": 0,
235
+ "non_truncated": 2448,
236
+ "padded": 0,
237
+ "non_padded": 2448,
238
+ "fewshots_truncated": 0,
239
+ "mean_seq_length": 1182.9889705882354,
240
+ "min_seq_length": 1160,
241
+ "max_seq_length": 1249,
242
+ "max_ctx_length": 2016,
243
+ "max_gen_toks": 32,
244
+ "mean_original_fewshots_size": 15.0,
245
+ "mean_effective_fewshot_size": 15.0
246
+ },
247
+ "harness|assin2_sts|assin2_sts|None|15": {
248
+ "sample_size": 2448,
249
+ "truncated": 0,
250
+ "non_truncated": 2448,
251
+ "padded": 0,
252
+ "non_padded": 2448,
253
+ "fewshots_truncated": 0,
254
+ "mean_seq_length": 1415.9889705882354,
255
+ "min_seq_length": 1393,
256
+ "max_seq_length": 1482,
257
+ "max_ctx_length": 2016,
258
+ "max_gen_toks": 32,
259
+ "mean_original_fewshots_size": 15.0,
260
+ "mean_effective_fewshot_size": 15.0
261
+ },
262
+ "harness|faquad_nli|faquad_nli|None|15": {
263
+ "sample_size": 650,
264
+ "truncated": 0,
265
+ "non_truncated": 650,
266
+ "padded": 0,
267
+ "non_padded": 650,
268
+ "fewshots_truncated": 0,
269
+ "mean_seq_length": 1473.1184615384616,
270
+ "min_seq_length": 1421,
271
+ "max_seq_length": 1580,
272
+ "max_ctx_length": 2016,
273
+ "max_gen_toks": 32,
274
+ "mean_original_fewshots_size": 15.0,
275
+ "mean_effective_fewshot_size": 15.0
276
+ },
277
+ "harness|sparrow_pt|sparrow_emotion-2021-cortiz-por|500|25": {
278
+ "sample_size": 500,
279
+ "truncated": 0,
280
+ "non_truncated": 500,
281
+ "padded": 0,
282
+ "non_padded": 500,
283
+ "fewshots_truncated": 0,
284
+ "mean_seq_length": 1696.204,
285
+ "min_seq_length": 1674,
286
+ "max_seq_length": 1744,
287
+ "max_ctx_length": 2016,
288
+ "max_gen_toks": 32,
289
+ "mean_original_fewshots_size": 25.0,
290
+ "mean_effective_fewshot_size": 25.0
291
+ },
292
+ "harness|sparrow_pt|sparrow_hate-2019-fortuna-por|500|25": {
293
+ "sample_size": 500,
294
+ "truncated": 0,
295
+ "non_truncated": 500,
296
+ "padded": 0,
297
+ "non_padded": 500,
298
+ "fewshots_truncated": 0,
299
+ "mean_seq_length": 1708.588,
300
+ "min_seq_length": 1684,
301
+ "max_seq_length": 1767,
302
+ "max_ctx_length": 2016,
303
+ "max_gen_toks": 32,
304
+ "mean_original_fewshots_size": 25.0,
305
+ "mean_effective_fewshot_size": 25.0
306
+ },
307
+ "harness|sparrow_pt|sparrow_sentiment-2016-mozetic-por|500|25": {
308
+ "sample_size": 500,
309
+ "truncated": 0,
310
+ "non_truncated": 500,
311
+ "padded": 0,
312
+ "non_padded": 500,
313
+ "fewshots_truncated": 0,
314
+ "mean_seq_length": 1422.44,
315
+ "min_seq_length": 1405,
316
+ "max_seq_length": 1458,
317
+ "max_ctx_length": 2016,
318
+ "max_gen_toks": 32,
319
+ "mean_original_fewshots_size": 25.0,
320
+ "mean_effective_fewshot_size": 25.0
321
+ },
322
+ "harness|sparrow_pt|sparrow_sentiment-2018-brum-por|500|25": {
323
+ "sample_size": 500,
324
+ "truncated": 0,
325
+ "non_truncated": 500,
326
+ "padded": 0,
327
+ "non_padded": 500,
328
+ "fewshots_truncated": 0,
329
+ "mean_seq_length": 1573.854,
330
+ "min_seq_length": 1556,
331
+ "max_seq_length": 1605,
332
+ "max_ctx_length": 2016,
333
+ "max_gen_toks": 32,
334
+ "mean_original_fewshots_size": 25.0,
335
+ "mean_effective_fewshot_size": 25.0
336
+ }
337
+ },
338
+ "summary_general": {
339
+ "truncated": 16,
340
+ "non_truncated": 11873,
341
+ "padded": 0,
342
+ "non_padded": 11889,
343
+ "fewshots_truncated": 18
344
+ }
345
+ }