eduagarcia committed on
Commit
d89686b
1 Parent(s): a5a0ca7

Uploading raw results for bigscience/bloom-7b1

Files changed (12)
  1. .gitattributes +8 -0
  2. bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_assin2_rte.jsonl +3 -0
  3. bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_assin2_sts.jsonl +3 -0
  4. bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_bluex.jsonl +3 -0
  5. bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_enem_challenge.jsonl +3 -0
  6. bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_faquad_nli.jsonl +0 -0
  7. bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_hatebr_offensive.jsonl +3 -0
  8. bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_oab_exams.jsonl +3 -0
  9. bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_portuguese_hate_speech.jsonl +3 -0
  10. bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_tweetsentbr.jsonl +3 -0
  11. bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/results.json +1324 -0
  12. bigscience/bloom-7b1/results_2024-02-22T11-33-09.451361.json +325 -0
.gitattributes CHANGED
@@ -1191,3 +1191,11 @@ bigscience/bloom-1b7/raw_2024-02-22T11-06-26.653643/pretrained__bigscience__bloo
1191
  bigscience/bloom-1b7/raw_2024-02-22T11-06-26.653643/pretrained__bigscience__bloom-1b7,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_oab_exams.jsonl filter=lfs diff=lfs merge=lfs -text
1192
  bigscience/bloom-1b7/raw_2024-02-22T11-06-26.653643/pretrained__bigscience__bloom-1b7,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_portuguese_hate_speech.jsonl filter=lfs diff=lfs merge=lfs -text
1193
  bigscience/bloom-1b7/raw_2024-02-22T11-06-26.653643/pretrained__bigscience__bloom-1b7,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_tweetsentbr.jsonl filter=lfs diff=lfs merge=lfs -text
1194
+ bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_assin2_rte.jsonl filter=lfs diff=lfs merge=lfs -text
1195
+ bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_assin2_sts.jsonl filter=lfs diff=lfs merge=lfs -text
1196
+ bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_bluex.jsonl filter=lfs diff=lfs merge=lfs -text
1197
+ bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_enem_challenge.jsonl filter=lfs diff=lfs merge=lfs -text
1198
+ bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_hatebr_offensive.jsonl filter=lfs diff=lfs merge=lfs -text
1199
+ bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_oab_exams.jsonl filter=lfs diff=lfs merge=lfs -text
1200
+ bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_portuguese_hate_speech.jsonl filter=lfs diff=lfs merge=lfs -text
1201
+ bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_tweetsentbr.jsonl filter=lfs diff=lfs merge=lfs -text
bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_assin2_rte.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3905825e42ee169f3131329f69bc21943ad9a88ed3a6eae8f781a415b394ef58
3
+ size 31780365
bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_assin2_sts.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d1089865c64b98a0b95d1a19f3edd0a4233093ffdfb2084680907266e5cf664
3
+ size 33201357
bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_bluex.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74370cdf073230236863ea2766fc7eab56a7c81a97333387400b774cbe711b89
3
+ size 11686367
bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_enem_challenge.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb32f73234e5764662fd4296c1db779f5ca03b751bb118ec7f9982c9fc8b52f9
3
+ size 23553645
bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_faquad_nli.jsonl ADDED
The diff for this file is too large to render. See raw diff
bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_hatebr_offensive.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a2547f23851c73ff8b01163b5f60189d7839b90f32e4181d22cdec1fa73c8749
3
+ size 15144143
bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_oab_exams.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fb81b60f734e981e4416836ce18584f021ae7a1b64a9176f70dba43e4d75a0d
3
+ size 32925418
bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_portuguese_hate_speech.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6757174c5655b21fbc04a29286b76813f8594d2b6c3d8a35d515eea3711cb829
3
+ size 12370030
bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/pretrained__bigscience__bloom-7b1,dtype__float16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__4096_tweetsentbr.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b406a510ac1a05ee79a1b332512fe5a9957c1aabf1ae9103146f6e4a3ae1c16
3
+ size 27284146
bigscience/bloom-7b1/raw_2024-02-22T11-33-09.451361/results.json ADDED
@@ -0,0 +1,1324 @@
1
+ {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.3483892974147847,
5
+ "acc,all": 0.5065359477124183,
6
+ "alias": "assin2_rte"
7
+ },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.0940150443614523,
10
+ "mse,all": 2.323558006535948,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.2364394993045897,
15
+ "acc,exam_id__USP_2019": 0.2,
16
+ "acc,exam_id__USP_2021": 0.17307692307692307,
17
+ "acc,exam_id__UNICAMP_2023": 0.3488372093023256,
18
+ "acc,exam_id__USP_2023": 0.20454545454545456,
19
+ "acc,exam_id__UNICAMP_2020": 0.3090909090909091,
20
+ "acc,exam_id__USP_2024": 0.0975609756097561,
21
+ "acc,exam_id__UNICAMP_2021_1": 0.1956521739130435,
22
+ "acc,exam_id__UNICAMP_2021_2": 0.29411764705882354,
23
+ "acc,exam_id__UNICAMP_2018": 0.2777777777777778,
24
+ "acc,exam_id__USP_2018": 0.14814814814814814,
25
+ "acc,exam_id__USP_2020": 0.21428571428571427,
26
+ "acc,exam_id__UNICAMP_2022": 0.23076923076923078,
27
+ "acc,exam_id__USP_2022": 0.24489795918367346,
28
+ "acc,exam_id__UNICAMP_2024": 0.3333333333333333,
29
+ "acc,exam_id__UNICAMP_2019": 0.26,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.19454163750874737,
35
+ "acc,exam_id__2016": 0.1652892561983471,
36
+ "acc,exam_id__2012": 0.29310344827586204,
37
+ "acc,exam_id__2013": 0.1574074074074074,
38
+ "acc,exam_id__2010": 0.19658119658119658,
39
+ "acc,exam_id__2017": 0.22413793103448276,
40
+ "acc,exam_id__2022": 0.21052631578947367,
41
+ "acc,exam_id__2009": 0.20869565217391303,
42
+ "acc,exam_id__2011": 0.18803418803418803,
43
+ "acc,exam_id__2023": 0.13333333333333333,
44
+ "acc,exam_id__2014": 0.1743119266055046,
45
+ "acc,exam_id__2015": 0.20168067226890757,
46
+ "acc,exam_id__2016_2": 0.18699186991869918
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.4518479958355024,
50
+ "acc,all": 0.7507692307692307,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.4930101744884911,
56
+ "acc,all": 0.575
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.2542141230068337,
60
+ "acc,exam_id__2011-03": 0.18181818181818182,
61
+ "acc,exam_id__2014-15": 0.1794871794871795,
62
+ "acc,exam_id__2011-05": 0.1625,
63
+ "acc,exam_id__2014-14": 0.2875,
64
+ "acc,exam_id__2018-25": 0.25,
65
+ "acc,exam_id__2015-17": 0.23076923076923078,
66
+ "acc,exam_id__2015-18": 0.2375,
67
+ "acc,exam_id__2012-07": 0.2625,
68
+ "acc,exam_id__2014-13": 0.3,
69
+ "acc,exam_id__2017-24": 0.35,
70
+ "acc,exam_id__2012-06a": 0.25,
71
+ "acc,exam_id__2013-11": 0.275,
72
+ "acc,exam_id__2015-16": 0.2375,
73
+ "acc,exam_id__2017-22": 0.2125,
74
+ "acc,exam_id__2010-02": 0.3,
75
+ "acc,exam_id__2016-21": 0.2875,
76
+ "acc,exam_id__2011-04": 0.275,
77
+ "acc,exam_id__2016-19": 0.2564102564102564,
78
+ "acc,exam_id__2013-12": 0.275,
79
+ "acc,exam_id__2017-23": 0.225,
80
+ "acc,exam_id__2013-10": 0.3,
81
+ "acc,exam_id__2012-09": 0.2727272727272727,
82
+ "acc,exam_id__2010-01": 0.2235294117647059,
83
+ "acc,exam_id__2012-06": 0.25,
84
+ "acc,exam_id__2016-20": 0.3125,
85
+ "acc,exam_id__2016-20a": 0.2375,
86
+ "acc,exam_id__2012-08": 0.2375,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.2374735398826133,
92
+ "acc,all": 0.299647473560517
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.2884430449383611,
96
+ "acc,all": 0.4746268656716418,
97
+ "alias": "tweetsentbr"
98
+ }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
+ },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
+ }
219
+ },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7fc649311120>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
+ },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
+ },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7fc649310ae0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
+ },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
+ },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fc649310d60>",
426
+ "metadata": {
427
+ "version": 1.1
428
+ }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7fc649311300>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
+ },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
+ },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fc649311580>",
515
+ "metadata": {
516
+ "version": 1.1
517
+ }
518
+ },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
+ },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
+ }
635
+ },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
+ },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
+ },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7fc6493104a0>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
+ },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
+ },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7fc649310720>",
851
+ "metadata": {
852
+ "version": 1.5
853
+ }
854
+ },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
+ },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
+ }
974
+ },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia-temp/tweetsentbr",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "id_sampler",
990
+ "sampler_config": {
991
+ "id_list": [
992
+ "862006098672459776",
993
+ "861612241703063552",
994
+ "861833257087848448",
995
+ "861283345476571138",
996
+ "861283000335695873",
997
+ "862139461274152962",
998
+ "862139468702265344",
999
+ "862006107702734848",
1000
+ "862004354458537984",
1001
+ "861833322925883392",
1002
+ "861603063190171648",
1003
+ "862139462716989440",
1004
+ "862005877355810818",
1005
+ "861751885862244353",
1006
+ "862045180261695489",
1007
+ "862004252499226630",
1008
+ "862023970828292097",
1009
+ "862041752127107074",
1010
+ "862034961863503872",
1011
+ "861293756548608001",
1012
+ "861993527575695360",
1013
+ "862003099355021315",
1014
+ "862002404086206467",
1015
+ "861282989602463744",
1016
+ "862139454399668229",
1017
+ "862139463769743361",
1018
+ "862054906689138688",
1019
+ "862139446535360513",
1020
+ "861997363744911361",
1021
+ "862057988898648065",
1022
+ "861329080083521536",
1023
+ "861286289034838016",
1024
+ "861833050526806017",
1025
+ "861300658565255169",
1026
+ "861989003821813760",
1027
+ "861682750398631938",
1028
+ "861283275716907008",
1029
+ "861283402523267072",
1030
+ "861873108147466240",
1031
+ "862139462138171392",
1032
+ "861284090271715333",
1033
+ "862139446149427201",
1034
+ "861629109331525633",
1035
+ "861721698609098753",
1036
+ "862139453124612096",
1037
+ "861283339482914816",
1038
+ "861282466291748867",
1039
+ "862055346759749632",
1040
+ "862003019860389891",
1041
+ "862140698346344449",
1042
+ "862084376280092672",
1043
+ "862003058708017152",
1044
+ "862000677345787904",
1045
+ "862029129310502913",
1046
+ "862005822376882178",
1047
+ "861969836297134085",
1048
+ "861302955361927168",
1049
+ "862064949451005953",
1050
+ "861282589541355520",
1051
+ "862005476858486784",
1052
+ "862004684411850757",
1053
+ "862139471101349890",
1054
+ "862139467146170368",
1055
+ "862139475098558465",
1056
+ "862140706550403072",
1057
+ "861282777001537536",
1058
+ "862003184147079169",
1059
+ "861283410656059394",
1060
+ "861283417857691649",
1061
+ "861888778922856448",
1062
+ "861655860812099585",
1063
+ "861834248063504384",
1064
+ "862005210935382017",
1065
+ "861282716930760704",
1066
+ "861287082433622022"
1067
+ ],
1068
+ "id_column": "id"
1069
+ }
1070
+ },
1071
+ "num_fewshot": 25,
1072
+ "metric_list": [
1073
+ {
1074
+ "metric": "f1_macro",
1075
+ "aggregation": "f1_macro",
1076
+ "higher_is_better": true
1077
+ },
1078
+ {
1079
+ "metric": "acc",
1080
+ "aggregation": "acc",
1081
+ "higher_is_better": true
1082
+ }
1083
+ ],
1084
+ "output_type": "generate_until",
1085
+ "generation_kwargs": {
1086
+ "max_gen_toks": 32,
1087
+ "do_sample": false,
1088
+ "temperature": 0.0,
1089
+ "top_k": null,
1090
+ "top_p": null,
1091
+ "until": [
1092
+ "\n\n"
1093
+ ]
1094
+ },
1095
+ "repeats": 1,
1096
+ "filter_list": [
1097
+ {
1098
+ "name": "all",
1099
+ "filter": [
1100
+ {
1101
+ "function": "find_similar_label",
1102
+ "labels": [
1103
+ "Positivo",
1104
+ "Neutro",
1105
+ "Negativo"
1106
+ ]
1107
+ },
1108
+ {
1109
+ "function": "take_first"
1110
+ }
1111
+ ]
1112
+ }
1113
+ ],
1114
+ "should_decontaminate": false,
1115
+ "metadata": {
1116
+ "version": 1.0
1117
+ }
1118
+ }
1119
+ },
1120
+ "versions": {
1121
+ "assin2_rte": 1.1,
1122
+ "assin2_sts": 1.1,
1123
+ "bluex": 1.1,
1124
+ "enem_challenge": 1.1,
1125
+ "faquad_nli": 1.1,
1126
+ "hatebr_offensive": 1.0,
1127
+ "oab_exams": 1.5,
1128
+ "portuguese_hate_speech": 1.0,
1129
+ "tweetsentbr": 1.0
1130
+ },
1131
+ "n-shot": {
1132
+ "assin2_rte": 15,
1133
+ "assin2_sts": 15,
1134
+ "bluex": 3,
1135
+ "enem_challenge": 3,
1136
+ "faquad_nli": 15,
1137
+ "hatebr_offensive": 25,
1138
+ "oab_exams": 3,
1139
+ "portuguese_hate_speech": 25,
1140
+ "tweetsentbr": 25
1141
+ },
1142
+ "model_meta": {
1143
+ "truncated": 1,
1144
+ "non_truncated": 14149,
1145
+ "padded": 0,
1146
+ "non_padded": 14150,
1147
+ "fewshots_truncated": 1,
1148
+ "has_chat_template": false,
1149
+ "chat_type": null,
1150
+ "n_gpus": 1,
1151
+ "accelerate_num_process": null,
1152
+ "model_sha": "6232703e399354503377bf59dfbb8397fd569e4a",
1153
+ "model_dtype": "torch.float16",
1154
+ "model_memory_footprint": 14138032128,
1155
+ "model_num_parameters": 7069016064,
1156
+ "model_is_loaded_in_4bit": null,
1157
+ "model_is_loaded_in_8bit": null,
1158
+ "model_is_quantized": null,
1159
+ "model_device": "cuda:0",
1160
+ "batch_size": 8,
1161
+ "max_length": 2048,
1162
+ "max_ctx_length": 2016,
1163
+ "max_gen_toks": 32
1164
+ },
1165
+ "task_model_meta": {
1166
+ "assin2_rte": {
1167
+ "sample_size": 2448,
1168
+ "truncated": 0,
1169
+ "non_truncated": 2448,
1170
+ "padded": 0,
1171
+ "non_padded": 2448,
1172
+ "fewshots_truncated": 0,
1173
+ "mean_seq_length": 937.9178921568628,
1174
+ "min_seq_length": 923,
1175
+ "max_seq_length": 984,
1176
+ "max_ctx_length": 2016,
1177
+ "max_gen_toks": 32,
1178
+ "mean_original_fewshots_size": 15.0,
1179
+ "mean_effective_fewshot_size": 15.0
1180
+ },
1181
+ "assin2_sts": {
1182
+ "sample_size": 2448,
1183
+ "truncated": 0,
1184
+ "non_truncated": 2448,
1185
+ "padded": 0,
1186
+ "non_padded": 2448,
1187
+ "fewshots_truncated": 0,
1188
+ "mean_seq_length": 1100.9178921568628,
1189
+ "min_seq_length": 1086,
1190
+ "max_seq_length": 1147,
1191
+ "max_ctx_length": 2016,
1192
+ "max_gen_toks": 32,
1193
+ "mean_original_fewshots_size": 15.0,
1194
+ "mean_effective_fewshot_size": 15.0
1195
+ },
1196
+ "bluex": {
1197
+ "sample_size": 719,
1198
+ "truncated": 0,
1199
+ "non_truncated": 719,
1200
+ "padded": 0,
1201
+ "non_padded": 719,
1202
+ "fewshots_truncated": 0,
1203
+ "mean_seq_length": 1189.6175243393602,
1204
+ "min_seq_length": 934,
1205
+ "max_seq_length": 1715,
1206
+ "max_ctx_length": 2016,
1207
+ "max_gen_toks": 32,
1208
+ "mean_original_fewshots_size": 3.0,
1209
+ "mean_effective_fewshot_size": 3.0
1210
+ },
1211
+ "enem_challenge": {
1212
+ "sample_size": 1429,
1213
+ "truncated": 1,
1214
+ "non_truncated": 1428,
1215
+ "padded": 0,
1216
+ "non_padded": 1429,
1217
+ "fewshots_truncated": 1,
1218
+ "mean_seq_length": 1085.372988103569,
1219
+ "min_seq_length": 910,
1220
+ "max_seq_length": 2035,
1221
+ "max_ctx_length": 2016,
1222
+ "max_gen_toks": 32,
1223
+ "mean_original_fewshots_size": 3.0,
1224
+ "mean_effective_fewshot_size": 2.9993002099370187
1225
+ },
1226
+ "faquad_nli": {
1227
+ "sample_size": 650,
1228
+ "truncated": 0,
1229
+ "non_truncated": 650,
1230
+ "padded": 0,
1231
+ "non_padded": 650,
1232
+ "fewshots_truncated": 0,
1233
+ "mean_seq_length": 1015.5061538461539,
1234
+ "min_seq_length": 980,
1235
+ "max_seq_length": 1086,
1236
+ "max_ctx_length": 2016,
1237
+ "max_gen_toks": 32,
1238
+ "mean_original_fewshots_size": 15.0,
1239
+ "mean_effective_fewshot_size": 15.0
1240
+ },
1241
+ "hatebr_offensive": {
1242
+ "sample_size": 1400,
1243
+ "truncated": 0,
1244
+ "non_truncated": 1400,
1245
+ "padded": 0,
1246
+ "non_padded": 1400,
1247
+ "fewshots_truncated": 0,
1248
+ "mean_seq_length": 898.7235714285714,
1249
+ "min_seq_length": 882,
1250
+ "max_seq_length": 1080,
1251
+ "max_ctx_length": 2016,
1252
+ "max_gen_toks": 32,
1253
+ "mean_original_fewshots_size": 25.0,
1254
+ "mean_effective_fewshot_size": 25.0
1255
+ },
1256
+ "oab_exams": {
1257
+ "sample_size": 2195,
1258
+ "truncated": 0,
1259
+ "non_truncated": 2195,
1260
+ "padded": 0,
1261
+ "non_padded": 2195,
1262
+ "fewshots_truncated": 0,
1263
+ "mean_seq_length": 873.5266514806378,
1264
+ "min_seq_length": 694,
1265
+ "max_seq_length": 1159,
1266
+ "max_ctx_length": 2016,
1267
+ "max_gen_toks": 32,
1268
+ "mean_original_fewshots_size": 3.0,
1269
+ "mean_effective_fewshot_size": 3.0
1270
+ },
1271
+ "portuguese_hate_speech": {
1272
+ "sample_size": 851,
1273
+ "truncated": 0,
1274
+ "non_truncated": 851,
1275
+ "padded": 0,
1276
+ "non_padded": 851,
1277
+ "fewshots_truncated": 0,
1278
+ "mean_seq_length": 1197.1927144535841,
1279
+ "min_seq_length": 1172,
1280
+ "max_seq_length": 1233,
1281
+ "max_ctx_length": 2016,
1282
+ "max_gen_toks": 32,
1283
+ "mean_original_fewshots_size": 25.0,
1284
+ "mean_effective_fewshot_size": 25.0
1285
+ },
1286
+ "tweetsentbr": {
1287
+ "sample_size": 2010,
1288
+ "truncated": 0,
1289
+ "non_truncated": 2010,
1290
+ "padded": 0,
1291
+ "non_padded": 2010,
1292
+ "fewshots_truncated": 0,
1293
+ "mean_seq_length": 1027.7278606965174,
1294
+ "min_seq_length": 1012,
1295
+ "max_seq_length": 1078,
1296
+ "max_ctx_length": 2016,
1297
+ "max_gen_toks": 32,
1298
+ "mean_original_fewshots_size": 25.0,
1299
+ "mean_effective_fewshot_size": 25.0
1300
+ }
1301
+ },
1302
+ "config": {
1303
+ "model": "huggingface",
1304
+ "model_args": "pretrained=bigscience/bloom-7b1,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096",
1305
+ "batch_size": "auto",
1306
+ "batch_sizes": [],
1307
+ "device": null,
1308
+ "use_cache": null,
1309
+ "limit": [
1310
+ null,
1311
+ null,
1312
+ null,
1313
+ null,
1314
+ null,
1315
+ null,
1316
+ null,
1317
+ null,
1318
+ null
1319
+ ],
1320
+ "bootstrap_iters": 0,
1321
+ "gen_kwargs": null
1322
+ },
1323
+ "git_hash": "804df15"
1324
+ }
bigscience/bloom-7b1/results_2024-02-22T11-33-09.451361.json ADDED
@@ -0,0 +1,325 @@
1
+ {
2
+ "config_general": {
3
+ "start_date": "2024-02-22T11-33-09.451361",
4
+ "start_time": 1708601590.0709443,
5
+ "end_time": 1708604397.9782293,
6
+ "total_evaluation_time_seconds": 2807.907284975052,
7
+ "has_chat_template": false,
8
+ "chat_type": null,
9
+ "n_gpus": 1,
10
+ "accelerate_num_process": null,
11
+ "model_sha": "6232703e399354503377bf59dfbb8397fd569e4a",
12
+ "model_dtype": "float16",
13
+ "model_memory_footprint": 14138032128,
14
+ "model_num_parameters": 7069016064,
15
+ "model_is_loaded_in_4bit": null,
16
+ "model_is_loaded_in_8bit": null,
17
+ "model_is_quantized": null,
18
+ "model_device": "cuda:0",
19
+ "batch_size": 8,
20
+ "max_length": 2048,
21
+ "max_ctx_length": 2016,
22
+ "max_gen_toks": 32,
23
+ "model_name": "bigscience/bloom-7b1",
24
+ "job_id": 253,
25
+ "model_id": "bigscience/bloom-7b1_eval_request_False_float16_Original",
26
+ "model_base_model": "",
27
+ "model_weight_type": "Original",
28
+ "model_revision": "main",
29
+ "model_private": false,
30
+ "model_type": "🟢 : pretrained",
31
+ "model_architectures": "BloomForCausalLM",
32
+ "submitted_time": "2024-02-05T23:04:44Z",
33
+ "lm_eval_model_type": "huggingface",
34
+ "eval_version": "1.1.0"
35
+ },
36
+ "results": {
37
+ "all_grouped_average": 0.2887082618601528,
38
+ "all_grouped_npm": -0.08218986934474465,
39
+ "all_grouped": {
40
+ "enem_challenge": 0.19454163750874737,
41
+ "bluex": 0.2364394993045897,
42
+ "oab_exams": 0.2542141230068337,
43
+ "assin2_rte": 0.3483892974147847,
44
+ "assin2_sts": 0.0940150443614523,
45
+ "faquad_nli": 0.4518479958355024,
46
+ "hatebr_offensive": 0.4930101744884911,
47
+ "portuguese_hate_speech": 0.2374735398826133,
48
+ "tweetsentbr": 0.2884430449383611
49
+ },
50
+ "all": {
51
+ "harness|enem_challenge|enem_challenge|None|3": 0.19454163750874737,
52
+ "harness|bluex|bluex|None|3": 0.2364394993045897,
53
+ "harness|oab_exams|oab_exams|None|3": 0.2542141230068337,
54
+ "harness|assin2_rte|assin2_rte|None|15": 0.3483892974147847,
55
+ "harness|assin2_sts|assin2_sts|None|15": 0.0940150443614523,
56
+ "harness|faquad_nli|faquad_nli|None|15": 0.4518479958355024,
57
+ "harness|hatebr_offensive|hatebr_offensive|None|25": 0.4930101744884911,
58
+ "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.2374735398826133,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.2884430449383611
60
+ },
61
+ "harness|enem_challenge|enem_challenge|None|3": {
62
+ "acc,all": 0.19454163750874737,
63
+ "acc,exam_id__2016": 0.1652892561983471,
64
+ "acc,exam_id__2012": 0.29310344827586204,
65
+ "acc,exam_id__2013": 0.1574074074074074,
66
+ "acc,exam_id__2010": 0.19658119658119658,
67
+ "acc,exam_id__2017": 0.22413793103448276,
68
+ "acc,exam_id__2022": 0.21052631578947367,
69
+ "acc,exam_id__2009": 0.20869565217391303,
70
+ "acc,exam_id__2011": 0.18803418803418803,
71
+ "acc,exam_id__2023": 0.13333333333333333,
72
+ "acc,exam_id__2014": 0.1743119266055046,
73
+ "acc,exam_id__2015": 0.20168067226890757,
74
+ "acc,exam_id__2016_2": 0.18699186991869918,
75
+ "main_score": 0.19454163750874737
76
+ },
77
+ "harness|bluex|bluex|None|3": {
78
+ "acc,all": 0.2364394993045897,
79
+ "acc,exam_id__USP_2019": 0.2,
80
+ "acc,exam_id__USP_2021": 0.17307692307692307,
81
+ "acc,exam_id__UNICAMP_2023": 0.3488372093023256,
82
+ "acc,exam_id__USP_2023": 0.20454545454545456,
83
+ "acc,exam_id__UNICAMP_2020": 0.3090909090909091,
84
+ "acc,exam_id__USP_2024": 0.0975609756097561,
85
+ "acc,exam_id__UNICAMP_2021_1": 0.1956521739130435,
86
+ "acc,exam_id__UNICAMP_2021_2": 0.29411764705882354,
87
+ "acc,exam_id__UNICAMP_2018": 0.2777777777777778,
88
+ "acc,exam_id__USP_2018": 0.14814814814814814,
89
+ "acc,exam_id__USP_2020": 0.21428571428571427,
90
+ "acc,exam_id__UNICAMP_2022": 0.23076923076923078,
91
+ "acc,exam_id__USP_2022": 0.24489795918367346,
92
+ "acc,exam_id__UNICAMP_2024": 0.3333333333333333,
93
+ "acc,exam_id__UNICAMP_2019": 0.26,
94
+ "main_score": 0.2364394993045897
95
+ },
96
+ "harness|oab_exams|oab_exams|None|3": {
97
+ "acc,all": 0.2542141230068337,
98
+ "acc,exam_id__2011-03": 0.18181818181818182,
99
+ "acc,exam_id__2014-15": 0.1794871794871795,
100
+ "acc,exam_id__2011-05": 0.1625,
101
+ "acc,exam_id__2014-14": 0.2875,
102
+ "acc,exam_id__2018-25": 0.25,
103
+ "acc,exam_id__2015-17": 0.23076923076923078,
104
+ "acc,exam_id__2015-18": 0.2375,
105
+ "acc,exam_id__2012-07": 0.2625,
106
+ "acc,exam_id__2014-13": 0.3,
107
+ "acc,exam_id__2017-24": 0.35,
108
+ "acc,exam_id__2012-06a": 0.25,
109
+ "acc,exam_id__2013-11": 0.275,
110
+ "acc,exam_id__2015-16": 0.2375,
111
+ "acc,exam_id__2017-22": 0.2125,
112
+ "acc,exam_id__2010-02": 0.3,
113
+ "acc,exam_id__2016-21": 0.2875,
114
+ "acc,exam_id__2011-04": 0.275,
115
+ "acc,exam_id__2016-19": 0.2564102564102564,
116
+ "acc,exam_id__2013-12": 0.275,
117
+ "acc,exam_id__2017-23": 0.225,
118
+ "acc,exam_id__2013-10": 0.3,
119
+ "acc,exam_id__2012-09": 0.2727272727272727,
120
+ "acc,exam_id__2010-01": 0.2235294117647059,
121
+ "acc,exam_id__2012-06": 0.25,
122
+ "acc,exam_id__2016-20": 0.3125,
123
+ "acc,exam_id__2016-20a": 0.2375,
124
+ "acc,exam_id__2012-08": 0.2375,
125
+ "main_score": 0.2542141230068337
126
+ },
127
+ "harness|assin2_rte|assin2_rte|None|15": {
128
+ "f1_macro,all": 0.3483892974147847,
129
+ "acc,all": 0.5065359477124183,
130
+ "main_score": 0.3483892974147847
131
+ },
132
+ "harness|assin2_sts|assin2_sts|None|15": {
133
+ "pearson,all": 0.0940150443614523,
134
+ "mse,all": 2.323558006535948,
135
+ "main_score": 0.0940150443614523
136
+ },
137
+ "harness|faquad_nli|faquad_nli|None|15": {
138
+ "f1_macro,all": 0.4518479958355024,
139
+ "acc,all": 0.7507692307692307,
140
+ "main_score": 0.4518479958355024
141
+ },
142
+ "harness|hatebr_offensive|hatebr_offensive|None|25": {
143
+ "f1_macro,all": 0.4930101744884911,
144
+ "acc,all": 0.575,
145
+ "main_score": 0.4930101744884911
146
+ },
147
+ "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
148
+ "f1_macro,all": 0.2374735398826133,
149
+ "acc,all": 0.299647473560517,
150
+ "main_score": 0.2374735398826133
151
+ },
152
+ "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.2884430449383611,
154
+ "acc,all": 0.4746268656716418,
155
+ "main_score": 0.2884430449383611
156
+ }
157
+ },
158
+ "config_tasks": {
159
+ "harness|enem_challenge|enem_challenge": "LM Harness task",
160
+ "harness|bluex|bluex": "LM Harness task",
161
+ "harness|oab_exams|oab_exams": "LM Harness task",
162
+ "harness|assin2_rte|assin2_rte": "LM Harness task",
163
+ "harness|assin2_sts|assin2_sts": "LM Harness task",
164
+ "harness|faquad_nli|faquad_nli": "LM Harness task",
165
+ "harness|hatebr_offensive|hatebr_offensive": "LM Harness task",
166
+ "harness|portuguese_hate_speech|portuguese_hate_speech": "LM Harness task",
167
+ "harness|tweetsentbr|tweetsentbr": "LM Harness task"
168
+ },
169
+ "versions": {
170
+ "all": 0,
171
+ "harness|enem_challenge|enem_challenge": 1.1,
172
+ "harness|bluex|bluex": 1.1,
173
+ "harness|oab_exams|oab_exams": 1.5,
174
+ "harness|assin2_rte|assin2_rte": 1.1,
175
+ "harness|assin2_sts|assin2_sts": 1.1,
176
+ "harness|faquad_nli|faquad_nli": 1.1,
177
+ "harness|hatebr_offensive|hatebr_offensive": 1.0,
178
+ "harness|portuguese_hate_speech|portuguese_hate_speech": 1.0,
179
+ "harness|tweetsentbr|tweetsentbr": 1.0
180
+ },
181
+ "summary_tasks": {
182
+ "harness|enem_challenge|enem_challenge|None|3": {
183
+ "sample_size": 1429,
184
+ "truncated": 1,
185
+ "non_truncated": 1428,
186
+ "padded": 0,
187
+ "non_padded": 1429,
188
+ "fewshots_truncated": 1,
189
+ "mean_seq_length": 1085.372988103569,
190
+ "min_seq_length": 910,
191
+ "max_seq_length": 2035,
192
+ "max_ctx_length": 2016,
193
+ "max_gen_toks": 32,
194
+ "mean_original_fewshots_size": 3.0,
195
+ "mean_effective_fewshot_size": 2.9993002099370187
196
+ },
197
+ "harness|bluex|bluex|None|3": {
198
+ "sample_size": 719,
199
+ "truncated": 0,
200
+ "non_truncated": 719,
201
+ "padded": 0,
202
+ "non_padded": 719,
203
+ "fewshots_truncated": 0,
204
+ "mean_seq_length": 1189.6175243393602,
205
+ "min_seq_length": 934,
206
+ "max_seq_length": 1715,
207
+ "max_ctx_length": 2016,
208
+ "max_gen_toks": 32,
209
+ "mean_original_fewshots_size": 3.0,
210
+ "mean_effective_fewshot_size": 3.0
211
+ },
212
+ "harness|oab_exams|oab_exams|None|3": {
213
+ "sample_size": 2195,
214
+ "truncated": 0,
215
+ "non_truncated": 2195,
216
+ "padded": 0,
217
+ "non_padded": 2195,
218
+ "fewshots_truncated": 0,
219
+ "mean_seq_length": 873.5266514806378,
220
+ "min_seq_length": 694,
221
+ "max_seq_length": 1159,
222
+ "max_ctx_length": 2016,
223
+ "max_gen_toks": 32,
224
+ "mean_original_fewshots_size": 3.0,
225
+ "mean_effective_fewshot_size": 3.0
226
+ },
227
+ "harness|assin2_rte|assin2_rte|None|15": {
228
+ "sample_size": 2448,
229
+ "truncated": 0,
230
+ "non_truncated": 2448,
231
+ "padded": 0,
232
+ "non_padded": 2448,
233
+ "fewshots_truncated": 0,
234
+ "mean_seq_length": 937.9178921568628,
235
+ "min_seq_length": 923,
236
+ "max_seq_length": 984,
237
+ "max_ctx_length": 2016,
238
+ "max_gen_toks": 32,
239
+ "mean_original_fewshots_size": 15.0,
240
+ "mean_effective_fewshot_size": 15.0
241
+ },
242
+ "harness|assin2_sts|assin2_sts|None|15": {
243
+ "sample_size": 2448,
244
+ "truncated": 0,
245
+ "non_truncated": 2448,
246
+ "padded": 0,
247
+ "non_padded": 2448,
248
+ "fewshots_truncated": 0,
249
+ "mean_seq_length": 1100.9178921568628,
250
+ "min_seq_length": 1086,
251
+ "max_seq_length": 1147,
252
+ "max_ctx_length": 2016,
253
+ "max_gen_toks": 32,
254
+ "mean_original_fewshots_size": 15.0,
255
+ "mean_effective_fewshot_size": 15.0
256
+ },
257
+ "harness|faquad_nli|faquad_nli|None|15": {
258
+ "sample_size": 650,
259
+ "truncated": 0,
260
+ "non_truncated": 650,
261
+ "padded": 0,
262
+ "non_padded": 650,
263
+ "fewshots_truncated": 0,
264
+ "mean_seq_length": 1015.5061538461539,
265
+ "min_seq_length": 980,
266
+ "max_seq_length": 1086,
267
+ "max_ctx_length": 2016,
268
+ "max_gen_toks": 32,
269
+ "mean_original_fewshots_size": 15.0,
270
+ "mean_effective_fewshot_size": 15.0
271
+ },
272
+ "harness|hatebr_offensive|hatebr_offensive|None|25": {
273
+ "sample_size": 1400,
274
+ "truncated": 0,
275
+ "non_truncated": 1400,
276
+ "padded": 0,
277
+ "non_padded": 1400,
278
+ "fewshots_truncated": 0,
279
+ "mean_seq_length": 898.7235714285714,
280
+ "min_seq_length": 882,
281
+ "max_seq_length": 1080,
282
+ "max_ctx_length": 2016,
283
+ "max_gen_toks": 32,
284
+ "mean_original_fewshots_size": 25.0,
285
+ "mean_effective_fewshot_size": 25.0
286
+ },
287
+ "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
288
+ "sample_size": 851,
289
+ "truncated": 0,
290
+ "non_truncated": 851,
291
+ "padded": 0,
292
+ "non_padded": 851,
293
+ "fewshots_truncated": 0,
294
+ "mean_seq_length": 1197.1927144535841,
295
+ "min_seq_length": 1172,
296
+ "max_seq_length": 1233,
297
+ "max_ctx_length": 2016,
298
+ "max_gen_toks": 32,
299
+ "mean_original_fewshots_size": 25.0,
300
+ "mean_effective_fewshot_size": 25.0
301
+ },
302
+ "harness|tweetsentbr|tweetsentbr|None|25": {
303
+ "sample_size": 2010,
304
+ "truncated": 0,
305
+ "non_truncated": 2010,
306
+ "padded": 0,
307
+ "non_padded": 2010,
308
+ "fewshots_truncated": 0,
309
+ "mean_seq_length": 1027.7278606965174,
310
+ "min_seq_length": 1012,
311
+ "max_seq_length": 1078,
312
+ "max_ctx_length": 2016,
313
+ "max_gen_toks": 32,
314
+ "mean_original_fewshots_size": 25.0,
315
+ "mean_effective_fewshot_size": 25.0
316
+ }
317
+ },
318
+ "summary_general": {
319
+ "truncated": 1,
320
+ "non_truncated": 14149,
321
+ "padded": 0,
322
+ "non_padded": 14150,
323
+ "fewshots_truncated": 1
324
+ }
325
+ }