picocreator committed
Commit
3e67663
1 Parent(s): ec8e005

datapack updates

Files changed (32)
  1. lm-eval-output/rwkv-x-dev/R4-1B5-With-Mask/pythia/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +0 -0
  2. lm-eval-output/rwkv-x-dev/R4-1B5-With-Mask/pythia/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  3. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/ai2_arc/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +132 -0
  4. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/ai2_arc/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  5. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/anli/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +161 -0
  6. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/anli/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  7. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/blimp/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +2249 -0
  8. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/blimp/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  9. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/cmmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +0 -0
  10. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/cmmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  11. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/hellaswag/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +67 -0
  12. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/hellaswag/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  13. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/lambada/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +126 -0
  14. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/lambada/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  15. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/lambada_multilingual/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +252 -0
  16. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/lambada_multilingual/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  17. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/logiqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +66 -0
  18. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/logiqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  19. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/mmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +2594 -0
  20. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/mmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  21. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/openbookqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +66 -0
  22. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/openbookqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  23. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/pawsx/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +283 -0
  24. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/pawsx/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  25. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/piqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +64 -0
  26. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/piqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  27. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/sciq/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +65 -0
  28. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/sciq/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  29. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/winogrande/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +58 -0
  30. lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/winogrande/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  31. lm-eval-output/rwkv-x-dev/R4-no-shuffle-rwkv-53/record/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +67 -0
  32. lm-eval-output/rwkv-x-dev/R4-no-shuffle-rwkv-53/record/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
lm-eval-output/rwkv-x-dev/R4-1B5-With-Mask/pythia/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
The diff for this file is too large to render.
 
lm-eval-output/rwkv-x-dev/R4-1B5-With-Mask/pythia/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d060507c1028d5988048b82d93480f5b4107ce93d813c29dd07707bf36596cd
+ size 407135
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/ai2_arc/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,132 @@
+ {
+ "results": {
+ "ai2_arc": {
+ "acc,none": 0.6169673055242391,
+ "acc_stderr,none": 0.10976273750365907,
+ "acc_norm,none": 0.6073844419391207,
+ "acc_norm_stderr,none": 0.09214502715899463,
+ "alias": "ai2_arc"
+ },
+ "arc_challenge": {
+ "acc,none": 0.3848122866894198,
+ "acc_stderr,none": 0.014218371065251104,
+ "acc_norm,none": 0.4129692832764505,
+ "acc_norm_stderr,none": 0.014388344935398326,
+ "alias": " - arc_challenge"
+ },
+ "arc_easy": {
+ "acc,none": 0.7314814814814815,
+ "acc_stderr,none": 0.009094042554994854,
+ "acc_norm,none": 0.7032828282828283,
+ "acc_norm_stderr,none": 0.009373559492986853,
+ "alias": " - arc_easy"
+ }
+ },
+ "groups": {
+ "ai2_arc": {
+ "acc,none": 0.6169673055242391,
+ "acc_stderr,none": 0.10976273750365907,
+ "acc_norm,none": 0.6073844419391207,
+ "acc_norm_stderr,none": 0.09214502715899463,
+ "alias": "ai2_arc"
+ }
+ },
+ "configs": {
+ "arc_challenge": {
+ "task": "arc_challenge",
+ "group": [
+ "ai2_arc"
+ ],
+ "dataset_path": "allenai/ai2_arc",
+ "dataset_name": "ARC-Challenge",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "doc_to_text": "Question: {{question}}\nAnswer:",
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
+ "doc_to_choice": "{{choices.text}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "arc_easy": {
+ "task": "arc_easy",
+ "group": [
+ "ai2_arc"
+ ],
+ "dataset_path": "allenai/ai2_arc",
+ "dataset_name": "ARC-Easy",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "doc_to_text": "Question: {{question}}\nAnswer:",
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
+ "doc_to_choice": "{{choices.text}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "ai2_arc": "N/A",
+ "arc_challenge": 1.0,
+ "arc_easy": 1.0
+ },
+ "n-shot": {
+ "ai2_arc": 0,
+ "arc_challenge": 0,
+ "arc_easy": 0
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=./rwkv-x-dev/R4-7B-15t-No-Mask_pth,dtype=bfloat16,trust_remote_code=True",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null
+ },
+ "git_hash": "8281e96"
+ }
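
Each results.json in this datapack follows the lm-evaluation-harness output layout shown above: a "results" object keyed by task, with "acc,none" and "acc_stderr,none" entries per task. A minimal sketch for pulling the headline numbers out of one of these files; the path below is the ai2_arc file added in this commit and should be adjusted to your local checkout:

import json

# results.json path as committed in this datapack (assumes a local checkout).
path = (
    "lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/ai2_arc/"
    "dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json"
)

with open(path) as f:
    report = json.load(f)

# Each task entry carries the metric keys seen in the diff above.
for task, metrics in report["results"].items():
    print(f'{task}: acc={metrics["acc,none"]:.4f} '
          f'(stderr {metrics["acc_stderr,none"]:.4f})')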
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/ai2_arc/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c1a773212ab17a1eedeb744cc0df9fddb28e91d40d95ff63dda014d8710d9ac
+ size 48084
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/anli/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,161 @@
+ {
+ "results": {
+ "anli": {
+ "acc,none": 0.36125,
+ "acc_stderr,none": 0.014724861303290397,
+ "alias": "anli"
+ },
+ "anli_r1": {
+ "acc,none": 0.36,
+ "acc_stderr,none": 0.015186527932040127,
+ "alias": " - anli_r1"
+ },
+ "anli_r2": {
+ "acc,none": 0.361,
+ "acc_stderr,none": 0.015195720118175115,
+ "alias": " - anli_r2"
+ },
+ "anli_r3": {
+ "acc,none": 0.3625,
+ "acc_stderr,none": 0.013883037874225516,
+ "alias": " - anli_r3"
+ }
+ },
+ "groups": {
+ "anli": {
+ "acc,none": 0.36125,
+ "acc_stderr,none": 0.014724861303290397,
+ "alias": "anli"
+ }
+ },
+ "configs": {
+ "anli_r1": {
+ "task": "anli_r1",
+ "group": [
+ "anli"
+ ],
+ "dataset_path": "anli",
+ "training_split": "train_r1",
+ "validation_split": "dev_r1",
+ "test_split": "test_r1",
+ "doc_to_text": "{{premise}}\nQuestion: {{hypothesis}} True, False, or Neither?\nAnswer:",
+ "doc_to_target": "{{['True', 'Neither', 'False'][label]}}",
+ "doc_to_choice": [
+ "True",
+ "Neither",
+ "False"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "premise",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "anli_r2": {
+ "task": "anli_r2",
+ "group": [
+ "anli"
+ ],
+ "dataset_path": "anli",
+ "training_split": "train_r2",
+ "validation_split": "dev_r2",
+ "test_split": "test_r2",
+ "doc_to_text": "{{premise}}\nQuestion: {{hypothesis}} True, False, or Neither?\nAnswer:",
+ "doc_to_target": "{{['True', 'Neither', 'False'][label]}}",
+ "doc_to_choice": [
+ "True",
+ "Neither",
+ "False"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "premise",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "anli_r3": {
+ "task": "anli_r3",
+ "group": [
+ "anli"
+ ],
+ "dataset_path": "anli",
+ "training_split": "train_r3",
+ "validation_split": "dev_r3",
+ "test_split": "test_r3",
+ "doc_to_text": "{{premise}}\nQuestion: {{hypothesis}} True, False, or Neither?\nAnswer:",
+ "doc_to_target": "{{['True', 'Neither', 'False'][label]}}",
+ "doc_to_choice": [
+ "True",
+ "Neither",
+ "False"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "premise",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "anli": "N/A",
+ "anli_r1": 1.0,
+ "anli_r2": 1.0,
+ "anli_r3": 1.0
+ },
+ "n-shot": {
+ "anli": 0,
+ "anli_r1": 0,
+ "anli_r2": 0,
+ "anli_r3": 0
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=./rwkv-x-dev/R4-7B-15t-No-Mask_pth,dtype=bfloat16,trust_remote_code=True",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null
+ },
+ "git_hash": "8281e96"
+ }
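
The "config" block above records how these numbers were produced: the hf model backend, the local R4-7B-15t-No-Mask_pth checkpoint, bfloat16, and an auto-selected batch size. A sketch of the equivalent call through the harness's Python API, assuming lm_eval.simple_evaluate as exposed by recent lm-evaluation-harness releases; argument names may differ between versions, so treat this as illustrative rather than the exact invocation used for this commit:

import json
import lm_eval

# Mirrors the "config" block of the anli results.json committed here
# (simple_evaluate signature is an assumption for this harness version).
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=./rwkv-x-dev/R4-7B-15t-No-Mask_pth,"
               "dtype=bfloat16,trust_remote_code=True",
    tasks=["anli"],
    batch_size="auto",
)

print(json.dumps(results["results"], indent=2))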
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/anli/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6fe8e5ae65dd7cb1ae0ad057e1c1ffab76cb0846c146bec8e6db4ad60327971
+ size 47935
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/blimp/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,2249 @@
1
+ {
2
+ "results": {
3
+ "blimp": {
4
+ "acc,none": 0.8223731343283582,
5
+ "acc_stderr,none": 0.16119006470239358,
6
+ "alias": "blimp"
7
+ },
8
+ "blimp_adjunct_island": {
9
+ "acc,none": 0.906,
10
+ "acc_stderr,none": 0.009233052000787735,
11
+ "alias": " - blimp_adjunct_island"
12
+ },
13
+ "blimp_anaphor_gender_agreement": {
14
+ "acc,none": 0.989,
15
+ "acc_stderr,none": 0.0032999833166078166,
16
+ "alias": " - blimp_anaphor_gender_agreement"
17
+ },
18
+ "blimp_anaphor_number_agreement": {
19
+ "acc,none": 0.998,
20
+ "acc_stderr,none": 0.0014135055705577929,
21
+ "alias": " - blimp_anaphor_number_agreement"
22
+ },
23
+ "blimp_animate_subject_passive": {
24
+ "acc,none": 0.829,
25
+ "acc_stderr,none": 0.0119122164562646,
26
+ "alias": " - blimp_animate_subject_passive"
27
+ },
28
+ "blimp_animate_subject_trans": {
29
+ "acc,none": 0.894,
30
+ "acc_stderr,none": 0.009739551265785136,
31
+ "alias": " - blimp_animate_subject_trans"
32
+ },
33
+ "blimp_causative": {
34
+ "acc,none": 0.75,
35
+ "acc_stderr,none": 0.013699915608779773,
36
+ "alias": " - blimp_causative"
37
+ },
38
+ "blimp_complex_NP_island": {
39
+ "acc,none": 0.58,
40
+ "acc_stderr,none": 0.015615500115072956,
41
+ "alias": " - blimp_complex_NP_island"
42
+ },
43
+ "blimp_coordinate_structure_constraint_complex_left_branch": {
44
+ "acc,none": 0.798,
45
+ "acc_stderr,none": 0.012702651587655147,
46
+ "alias": " - blimp_coordinate_structure_constraint_complex_left_branch"
47
+ },
48
+ "blimp_coordinate_structure_constraint_object_extraction": {
49
+ "acc,none": 0.87,
50
+ "acc_stderr,none": 0.01064016979249935,
51
+ "alias": " - blimp_coordinate_structure_constraint_object_extraction"
52
+ },
53
+ "blimp_determiner_noun_agreement_1": {
54
+ "acc,none": 0.996,
55
+ "acc_stderr,none": 0.001996994739098728,
56
+ "alias": " - blimp_determiner_noun_agreement_1"
57
+ },
58
+ "blimp_determiner_noun_agreement_2": {
59
+ "acc,none": 0.988,
60
+ "acc_stderr,none": 0.0034449771940998617,
61
+ "alias": " - blimp_determiner_noun_agreement_2"
62
+ },
63
+ "blimp_determiner_noun_agreement_irregular_1": {
64
+ "acc,none": 0.962,
65
+ "acc_stderr,none": 0.006049181150584948,
66
+ "alias": " - blimp_determiner_noun_agreement_irregular_1"
67
+ },
68
+ "blimp_determiner_noun_agreement_irregular_2": {
69
+ "acc,none": 0.953,
70
+ "acc_stderr,none": 0.006695956678163041,
71
+ "alias": " - blimp_determiner_noun_agreement_irregular_2"
72
+ },
73
+ "blimp_determiner_noun_agreement_with_adj_2": {
74
+ "acc,none": 0.96,
75
+ "acc_stderr,none": 0.0061998740663370775,
76
+ "alias": " - blimp_determiner_noun_agreement_with_adj_2"
77
+ },
78
+ "blimp_determiner_noun_agreement_with_adj_irregular_1": {
79
+ "acc,none": 0.927,
80
+ "acc_stderr,none": 0.008230354715244057,
81
+ "alias": " - blimp_determiner_noun_agreement_with_adj_irregular_1"
82
+ },
83
+ "blimp_determiner_noun_agreement_with_adj_irregular_2": {
84
+ "acc,none": 0.932,
85
+ "acc_stderr,none": 0.007964887911291603,
86
+ "alias": " - blimp_determiner_noun_agreement_with_adj_irregular_2"
87
+ },
88
+ "blimp_determiner_noun_agreement_with_adjective_1": {
89
+ "acc,none": 0.982,
90
+ "acc_stderr,none": 0.004206387249611444,
91
+ "alias": " - blimp_determiner_noun_agreement_with_adjective_1"
92
+ },
93
+ "blimp_distractor_agreement_relational_noun": {
94
+ "acc,none": 0.893,
95
+ "acc_stderr,none": 0.009779910359847165,
96
+ "alias": " - blimp_distractor_agreement_relational_noun"
97
+ },
98
+ "blimp_distractor_agreement_relative_clause": {
99
+ "acc,none": 0.717,
100
+ "acc_stderr,none": 0.014251810906481735,
101
+ "alias": " - blimp_distractor_agreement_relative_clause"
102
+ },
103
+ "blimp_drop_argument": {
104
+ "acc,none": 0.747,
105
+ "acc_stderr,none": 0.01375427861358708,
106
+ "alias": " - blimp_drop_argument"
107
+ },
108
+ "blimp_ellipsis_n_bar_1": {
109
+ "acc,none": 0.768,
110
+ "acc_stderr,none": 0.01335493745228157,
111
+ "alias": " - blimp_ellipsis_n_bar_1"
112
+ },
113
+ "blimp_ellipsis_n_bar_2": {
114
+ "acc,none": 0.939,
115
+ "acc_stderr,none": 0.007572076091557418,
116
+ "alias": " - blimp_ellipsis_n_bar_2"
117
+ },
118
+ "blimp_existential_there_object_raising": {
119
+ "acc,none": 0.858,
120
+ "acc_stderr,none": 0.011043457699378234,
121
+ "alias": " - blimp_existential_there_object_raising"
122
+ },
123
+ "blimp_existential_there_quantifiers_1": {
124
+ "acc,none": 0.984,
125
+ "acc_stderr,none": 0.00396985639031942,
126
+ "alias": " - blimp_existential_there_quantifiers_1"
127
+ },
128
+ "blimp_existential_there_quantifiers_2": {
129
+ "acc,none": 0.274,
130
+ "acc_stderr,none": 0.014111099288259585,
131
+ "alias": " - blimp_existential_there_quantifiers_2"
132
+ },
133
+ "blimp_existential_there_subject_raising": {
134
+ "acc,none": 0.91,
135
+ "acc_stderr,none": 0.009054390204866435,
136
+ "alias": " - blimp_existential_there_subject_raising"
137
+ },
138
+ "blimp_expletive_it_object_raising": {
139
+ "acc,none": 0.794,
140
+ "acc_stderr,none": 0.01279561361278654,
141
+ "alias": " - blimp_expletive_it_object_raising"
142
+ },
143
+ "blimp_inchoative": {
144
+ "acc,none": 0.617,
145
+ "acc_stderr,none": 0.015380102325652715,
146
+ "alias": " - blimp_inchoative"
147
+ },
148
+ "blimp_intransitive": {
149
+ "acc,none": 0.777,
150
+ "acc_stderr,none": 0.013169830843425679,
151
+ "alias": " - blimp_intransitive"
152
+ },
153
+ "blimp_irregular_past_participle_adjectives": {
154
+ "acc,none": 0.929,
155
+ "acc_stderr,none": 0.008125578442487914,
156
+ "alias": " - blimp_irregular_past_participle_adjectives"
157
+ },
158
+ "blimp_irregular_past_participle_verbs": {
159
+ "acc,none": 0.891,
160
+ "acc_stderr,none": 0.009859828407037188,
161
+ "alias": " - blimp_irregular_past_participle_verbs"
162
+ },
163
+ "blimp_irregular_plural_subject_verb_agreement_1": {
164
+ "acc,none": 0.926,
165
+ "acc_stderr,none": 0.008282064512704156,
166
+ "alias": " - blimp_irregular_plural_subject_verb_agreement_1"
167
+ },
168
+ "blimp_irregular_plural_subject_verb_agreement_2": {
169
+ "acc,none": 0.915,
170
+ "acc_stderr,none": 0.008823426366942314,
171
+ "alias": " - blimp_irregular_plural_subject_verb_agreement_2"
172
+ },
173
+ "blimp_left_branch_island_echo_question": {
174
+ "acc,none": 0.623,
175
+ "acc_stderr,none": 0.015333170125779843,
176
+ "alias": " - blimp_left_branch_island_echo_question"
177
+ },
178
+ "blimp_left_branch_island_simple_question": {
179
+ "acc,none": 0.917,
180
+ "acc_stderr,none": 0.008728527206074796,
181
+ "alias": " - blimp_left_branch_island_simple_question"
182
+ },
183
+ "blimp_matrix_question_npi_licensor_present": {
184
+ "acc,none": 0.438,
185
+ "acc_stderr,none": 0.01569721001969469,
186
+ "alias": " - blimp_matrix_question_npi_licensor_present"
187
+ },
188
+ "blimp_npi_present_1": {
189
+ "acc,none": 0.606,
190
+ "acc_stderr,none": 0.015459721957493375,
191
+ "alias": " - blimp_npi_present_1"
192
+ },
193
+ "blimp_npi_present_2": {
194
+ "acc,none": 0.568,
195
+ "acc_stderr,none": 0.015672320237336213,
196
+ "alias": " - blimp_npi_present_2"
197
+ },
198
+ "blimp_only_npi_licensor_present": {
199
+ "acc,none": 0.867,
200
+ "acc_stderr,none": 0.010743669132397344,
201
+ "alias": " - blimp_only_npi_licensor_present"
202
+ },
203
+ "blimp_only_npi_scope": {
204
+ "acc,none": 0.586,
205
+ "acc_stderr,none": 0.015583544104177527,
206
+ "alias": " - blimp_only_npi_scope"
207
+ },
208
+ "blimp_passive_1": {
209
+ "acc,none": 0.891,
210
+ "acc_stderr,none": 0.009859828407037188,
211
+ "alias": " - blimp_passive_1"
212
+ },
213
+ "blimp_passive_2": {
214
+ "acc,none": 0.896,
215
+ "acc_stderr,none": 0.009658016218524294,
216
+ "alias": " - blimp_passive_2"
217
+ },
218
+ "blimp_principle_A_c_command": {
219
+ "acc,none": 0.766,
220
+ "acc_stderr,none": 0.01339490288966001,
221
+ "alias": " - blimp_principle_A_c_command"
222
+ },
223
+ "blimp_principle_A_case_1": {
224
+ "acc,none": 1.0,
225
+ "acc_stderr,none": 0.0,
226
+ "alias": " - blimp_principle_A_case_1"
227
+ },
228
+ "blimp_principle_A_case_2": {
229
+ "acc,none": 0.942,
230
+ "acc_stderr,none": 0.007395315455792926,
231
+ "alias": " - blimp_principle_A_case_2"
232
+ },
233
+ "blimp_principle_A_domain_1": {
234
+ "acc,none": 0.997,
235
+ "acc_stderr,none": 0.001730316154346939,
236
+ "alias": " - blimp_principle_A_domain_1"
237
+ },
238
+ "blimp_principle_A_domain_2": {
239
+ "acc,none": 0.904,
240
+ "acc_stderr,none": 0.009320454434783243,
241
+ "alias": " - blimp_principle_A_domain_2"
242
+ },
243
+ "blimp_principle_A_domain_3": {
244
+ "acc,none": 0.831,
245
+ "acc_stderr,none": 0.01185662597789013,
246
+ "alias": " - blimp_principle_A_domain_3"
247
+ },
248
+ "blimp_principle_A_reconstruction": {
249
+ "acc,none": 0.485,
250
+ "acc_stderr,none": 0.015812179641814895,
251
+ "alias": " - blimp_principle_A_reconstruction"
252
+ },
253
+ "blimp_regular_plural_subject_verb_agreement_1": {
254
+ "acc,none": 0.948,
255
+ "acc_stderr,none": 0.007024624213817139,
256
+ "alias": " - blimp_regular_plural_subject_verb_agreement_1"
257
+ },
258
+ "blimp_regular_plural_subject_verb_agreement_2": {
259
+ "acc,none": 0.884,
260
+ "acc_stderr,none": 0.010131468138756967,
261
+ "alias": " - blimp_regular_plural_subject_verb_agreement_2"
262
+ },
263
+ "blimp_sentential_negation_npi_licensor_present": {
264
+ "acc,none": 0.991,
265
+ "acc_stderr,none": 0.0029879638431426513,
266
+ "alias": " - blimp_sentential_negation_npi_licensor_present"
267
+ },
268
+ "blimp_sentential_negation_npi_scope": {
269
+ "acc,none": 0.744,
270
+ "acc_stderr,none": 0.013807775152234206,
271
+ "alias": " - blimp_sentential_negation_npi_scope"
272
+ },
273
+ "blimp_sentential_subject_island": {
274
+ "acc,none": 0.452,
275
+ "acc_stderr,none": 0.015746235865880677,
276
+ "alias": " - blimp_sentential_subject_island"
277
+ },
278
+ "blimp_superlative_quantifiers_1": {
279
+ "acc,none": 0.911,
280
+ "acc_stderr,none": 0.009008893392651523,
281
+ "alias": " - blimp_superlative_quantifiers_1"
282
+ },
283
+ "blimp_superlative_quantifiers_2": {
284
+ "acc,none": 0.956,
285
+ "acc_stderr,none": 0.006488921798427425,
286
+ "alias": " - blimp_superlative_quantifiers_2"
287
+ },
288
+ "blimp_tough_vs_raising_1": {
289
+ "acc,none": 0.617,
290
+ "acc_stderr,none": 0.015380102325652699,
291
+ "alias": " - blimp_tough_vs_raising_1"
292
+ },
293
+ "blimp_tough_vs_raising_2": {
294
+ "acc,none": 0.875,
295
+ "acc_stderr,none": 0.010463483381956722,
296
+ "alias": " - blimp_tough_vs_raising_2"
297
+ },
298
+ "blimp_transitive": {
299
+ "acc,none": 0.88,
300
+ "acc_stderr,none": 0.01028132801274739,
301
+ "alias": " - blimp_transitive"
302
+ },
303
+ "blimp_wh_island": {
304
+ "acc,none": 0.817,
305
+ "acc_stderr,none": 0.012233587399477825,
306
+ "alias": " - blimp_wh_island"
307
+ },
308
+ "blimp_wh_questions_object_gap": {
309
+ "acc,none": 0.857,
310
+ "acc_stderr,none": 0.01107581480856704,
311
+ "alias": " - blimp_wh_questions_object_gap"
312
+ },
313
+ "blimp_wh_questions_subject_gap": {
314
+ "acc,none": 0.943,
315
+ "acc_stderr,none": 0.007335175853706826,
316
+ "alias": " - blimp_wh_questions_subject_gap"
317
+ },
318
+ "blimp_wh_questions_subject_gap_long_distance": {
319
+ "acc,none": 0.934,
320
+ "acc_stderr,none": 0.0078552979386976,
321
+ "alias": " - blimp_wh_questions_subject_gap_long_distance"
322
+ },
323
+ "blimp_wh_vs_that_no_gap": {
324
+ "acc,none": 0.984,
325
+ "acc_stderr,none": 0.003969856390319412,
326
+ "alias": " - blimp_wh_vs_that_no_gap"
327
+ },
328
+ "blimp_wh_vs_that_no_gap_long_distance": {
329
+ "acc,none": 0.973,
330
+ "acc_stderr,none": 0.00512808904927529,
331
+ "alias": " - blimp_wh_vs_that_no_gap_long_distance"
332
+ },
333
+ "blimp_wh_vs_that_with_gap": {
334
+ "acc,none": 0.41,
335
+ "acc_stderr,none": 0.015560917136921662,
336
+ "alias": " - blimp_wh_vs_that_with_gap"
337
+ },
338
+ "blimp_wh_vs_that_with_gap_long_distance": {
339
+ "acc,none": 0.353,
340
+ "acc_stderr,none": 0.015120172605483694,
341
+ "alias": " - blimp_wh_vs_that_with_gap_long_distance"
342
+ }
343
+ },
344
+ "groups": {
345
+ "blimp": {
346
+ "acc,none": 0.8223731343283582,
347
+ "acc_stderr,none": 0.16119006470239358,
348
+ "alias": "blimp"
349
+ }
350
+ },
351
+ "configs": {
352
+ "blimp_adjunct_island": {
353
+ "task": "blimp_adjunct_island",
354
+ "group": "blimp",
355
+ "dataset_path": "blimp",
356
+ "dataset_name": "adjunct_island",
357
+ "validation_split": "train",
358
+ "doc_to_text": "",
359
+ "doc_to_target": 0,
360
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
361
+ "description": "",
362
+ "target_delimiter": " ",
363
+ "fewshot_delimiter": "\n\n",
364
+ "num_fewshot": 0,
365
+ "metric_list": [
366
+ {
367
+ "metric": "acc"
368
+ }
369
+ ],
370
+ "output_type": "multiple_choice",
371
+ "repeats": 1,
372
+ "should_decontaminate": true,
373
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
374
+ "metadata": {
375
+ "version": 1.0
376
+ }
377
+ },
378
+ "blimp_anaphor_gender_agreement": {
379
+ "task": "blimp_anaphor_gender_agreement",
380
+ "group": "blimp",
381
+ "dataset_path": "blimp",
382
+ "dataset_name": "anaphor_gender_agreement",
383
+ "validation_split": "train",
384
+ "doc_to_text": "",
385
+ "doc_to_target": 0,
386
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
387
+ "description": "",
388
+ "target_delimiter": " ",
389
+ "fewshot_delimiter": "\n\n",
390
+ "num_fewshot": 0,
391
+ "metric_list": [
392
+ {
393
+ "metric": "acc"
394
+ }
395
+ ],
396
+ "output_type": "multiple_choice",
397
+ "repeats": 1,
398
+ "should_decontaminate": true,
399
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
400
+ "metadata": {
401
+ "version": 1.0
402
+ }
403
+ },
404
+ "blimp_anaphor_number_agreement": {
405
+ "task": "blimp_anaphor_number_agreement",
406
+ "group": "blimp",
407
+ "dataset_path": "blimp",
408
+ "dataset_name": "anaphor_number_agreement",
409
+ "validation_split": "train",
410
+ "doc_to_text": "",
411
+ "doc_to_target": 0,
412
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
413
+ "description": "",
414
+ "target_delimiter": " ",
415
+ "fewshot_delimiter": "\n\n",
416
+ "num_fewshot": 0,
417
+ "metric_list": [
418
+ {
419
+ "metric": "acc"
420
+ }
421
+ ],
422
+ "output_type": "multiple_choice",
423
+ "repeats": 1,
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
426
+ "metadata": {
427
+ "version": 1.0
428
+ }
429
+ },
430
+ "blimp_animate_subject_passive": {
431
+ "task": "blimp_animate_subject_passive",
432
+ "group": "blimp",
433
+ "dataset_path": "blimp",
434
+ "dataset_name": "animate_subject_passive",
435
+ "validation_split": "train",
436
+ "doc_to_text": "",
437
+ "doc_to_target": 0,
438
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
439
+ "description": "",
440
+ "target_delimiter": " ",
441
+ "fewshot_delimiter": "\n\n",
442
+ "num_fewshot": 0,
443
+ "metric_list": [
444
+ {
445
+ "metric": "acc"
446
+ }
447
+ ],
448
+ "output_type": "multiple_choice",
449
+ "repeats": 1,
450
+ "should_decontaminate": true,
451
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
452
+ "metadata": {
453
+ "version": 1.0
454
+ }
455
+ },
456
+ "blimp_animate_subject_trans": {
457
+ "task": "blimp_animate_subject_trans",
458
+ "group": "blimp",
459
+ "dataset_path": "blimp",
460
+ "dataset_name": "animate_subject_trans",
461
+ "validation_split": "train",
462
+ "doc_to_text": "",
463
+ "doc_to_target": 0,
464
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
465
+ "description": "",
466
+ "target_delimiter": " ",
467
+ "fewshot_delimiter": "\n\n",
468
+ "num_fewshot": 0,
469
+ "metric_list": [
470
+ {
471
+ "metric": "acc"
472
+ }
473
+ ],
474
+ "output_type": "multiple_choice",
475
+ "repeats": 1,
476
+ "should_decontaminate": true,
477
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
478
+ "metadata": {
479
+ "version": 1.0
480
+ }
481
+ },
482
+ "blimp_causative": {
483
+ "task": "blimp_causative",
484
+ "group": "blimp",
485
+ "dataset_path": "blimp",
486
+ "dataset_name": "causative",
487
+ "validation_split": "train",
488
+ "doc_to_text": "",
489
+ "doc_to_target": 0,
490
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
491
+ "description": "",
492
+ "target_delimiter": " ",
493
+ "fewshot_delimiter": "\n\n",
494
+ "num_fewshot": 0,
495
+ "metric_list": [
496
+ {
497
+ "metric": "acc"
498
+ }
499
+ ],
500
+ "output_type": "multiple_choice",
501
+ "repeats": 1,
502
+ "should_decontaminate": true,
503
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
504
+ "metadata": {
505
+ "version": 1.0
506
+ }
507
+ },
508
+ "blimp_complex_NP_island": {
509
+ "task": "blimp_complex_NP_island",
510
+ "group": "blimp",
511
+ "dataset_path": "blimp",
512
+ "dataset_name": "complex_NP_island",
513
+ "validation_split": "train",
514
+ "doc_to_text": "",
515
+ "doc_to_target": 0,
516
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
517
+ "description": "",
518
+ "target_delimiter": " ",
519
+ "fewshot_delimiter": "\n\n",
520
+ "num_fewshot": 0,
521
+ "metric_list": [
522
+ {
523
+ "metric": "acc"
524
+ }
525
+ ],
526
+ "output_type": "multiple_choice",
527
+ "repeats": 1,
528
+ "should_decontaminate": true,
529
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
530
+ "metadata": {
531
+ "version": 1.0
532
+ }
533
+ },
534
+ "blimp_coordinate_structure_constraint_complex_left_branch": {
535
+ "task": "blimp_coordinate_structure_constraint_complex_left_branch",
536
+ "group": "blimp",
537
+ "dataset_path": "blimp",
538
+ "dataset_name": "coordinate_structure_constraint_complex_left_branch",
539
+ "validation_split": "train",
540
+ "doc_to_text": "",
541
+ "doc_to_target": 0,
542
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
543
+ "description": "",
544
+ "target_delimiter": " ",
545
+ "fewshot_delimiter": "\n\n",
546
+ "num_fewshot": 0,
547
+ "metric_list": [
548
+ {
549
+ "metric": "acc"
550
+ }
551
+ ],
552
+ "output_type": "multiple_choice",
553
+ "repeats": 1,
554
+ "should_decontaminate": true,
555
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
556
+ "metadata": {
557
+ "version": 1.0
558
+ }
559
+ },
560
+ "blimp_coordinate_structure_constraint_object_extraction": {
561
+ "task": "blimp_coordinate_structure_constraint_object_extraction",
562
+ "group": "blimp",
563
+ "dataset_path": "blimp",
564
+ "dataset_name": "coordinate_structure_constraint_object_extraction",
565
+ "validation_split": "train",
566
+ "doc_to_text": "",
567
+ "doc_to_target": 0,
568
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
569
+ "description": "",
570
+ "target_delimiter": " ",
571
+ "fewshot_delimiter": "\n\n",
572
+ "num_fewshot": 0,
573
+ "metric_list": [
574
+ {
575
+ "metric": "acc"
576
+ }
577
+ ],
578
+ "output_type": "multiple_choice",
579
+ "repeats": 1,
580
+ "should_decontaminate": true,
581
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
582
+ "metadata": {
583
+ "version": 1.0
584
+ }
585
+ },
586
+ "blimp_determiner_noun_agreement_1": {
587
+ "task": "blimp_determiner_noun_agreement_1",
588
+ "group": "blimp",
589
+ "dataset_path": "blimp",
590
+ "dataset_name": "determiner_noun_agreement_1",
591
+ "validation_split": "train",
592
+ "doc_to_text": "",
593
+ "doc_to_target": 0,
594
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
595
+ "description": "",
596
+ "target_delimiter": " ",
597
+ "fewshot_delimiter": "\n\n",
598
+ "num_fewshot": 0,
599
+ "metric_list": [
600
+ {
601
+ "metric": "acc"
602
+ }
603
+ ],
604
+ "output_type": "multiple_choice",
605
+ "repeats": 1,
606
+ "should_decontaminate": true,
607
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
608
+ "metadata": {
609
+ "version": 1.0
610
+ }
611
+ },
612
+ "blimp_determiner_noun_agreement_2": {
613
+ "task": "blimp_determiner_noun_agreement_2",
614
+ "group": "blimp",
615
+ "dataset_path": "blimp",
616
+ "dataset_name": "determiner_noun_agreement_2",
617
+ "validation_split": "train",
618
+ "doc_to_text": "",
619
+ "doc_to_target": 0,
620
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
621
+ "description": "",
622
+ "target_delimiter": " ",
623
+ "fewshot_delimiter": "\n\n",
624
+ "num_fewshot": 0,
625
+ "metric_list": [
626
+ {
627
+ "metric": "acc"
628
+ }
629
+ ],
630
+ "output_type": "multiple_choice",
631
+ "repeats": 1,
632
+ "should_decontaminate": true,
633
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
634
+ "metadata": {
635
+ "version": 1.0
636
+ }
637
+ },
638
+ "blimp_determiner_noun_agreement_irregular_1": {
639
+ "task": "blimp_determiner_noun_agreement_irregular_1",
640
+ "group": "blimp",
641
+ "dataset_path": "blimp",
642
+ "dataset_name": "determiner_noun_agreement_irregular_1",
643
+ "validation_split": "train",
644
+ "doc_to_text": "",
645
+ "doc_to_target": 0,
646
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
647
+ "description": "",
648
+ "target_delimiter": " ",
649
+ "fewshot_delimiter": "\n\n",
650
+ "num_fewshot": 0,
651
+ "metric_list": [
652
+ {
653
+ "metric": "acc"
654
+ }
655
+ ],
656
+ "output_type": "multiple_choice",
657
+ "repeats": 1,
658
+ "should_decontaminate": true,
659
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
660
+ "metadata": {
661
+ "version": 1.0
662
+ }
663
+ },
664
+ "blimp_determiner_noun_agreement_irregular_2": {
665
+ "task": "blimp_determiner_noun_agreement_irregular_2",
666
+ "group": "blimp",
667
+ "dataset_path": "blimp",
668
+ "dataset_name": "determiner_noun_agreement_irregular_2",
669
+ "validation_split": "train",
670
+ "doc_to_text": "",
671
+ "doc_to_target": 0,
672
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
673
+ "description": "",
674
+ "target_delimiter": " ",
675
+ "fewshot_delimiter": "\n\n",
676
+ "num_fewshot": 0,
677
+ "metric_list": [
678
+ {
679
+ "metric": "acc"
680
+ }
681
+ ],
682
+ "output_type": "multiple_choice",
683
+ "repeats": 1,
684
+ "should_decontaminate": true,
685
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
686
+ "metadata": {
687
+ "version": 1.0
688
+ }
689
+ },
690
+ "blimp_determiner_noun_agreement_with_adj_2": {
691
+ "task": "blimp_determiner_noun_agreement_with_adj_2",
692
+ "group": "blimp",
693
+ "dataset_path": "blimp",
694
+ "dataset_name": "determiner_noun_agreement_with_adj_2",
695
+ "validation_split": "train",
696
+ "doc_to_text": "",
697
+ "doc_to_target": 0,
698
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
699
+ "description": "",
700
+ "target_delimiter": " ",
701
+ "fewshot_delimiter": "\n\n",
702
+ "num_fewshot": 0,
703
+ "metric_list": [
704
+ {
705
+ "metric": "acc"
706
+ }
707
+ ],
708
+ "output_type": "multiple_choice",
709
+ "repeats": 1,
710
+ "should_decontaminate": true,
711
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
712
+ "metadata": {
713
+ "version": 1.0
714
+ }
715
+ },
716
+ "blimp_determiner_noun_agreement_with_adj_irregular_1": {
717
+ "task": "blimp_determiner_noun_agreement_with_adj_irregular_1",
718
+ "group": "blimp",
719
+ "dataset_path": "blimp",
720
+ "dataset_name": "determiner_noun_agreement_with_adj_irregular_1",
721
+ "validation_split": "train",
722
+ "doc_to_text": "",
723
+ "doc_to_target": 0,
724
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
725
+ "description": "",
726
+ "target_delimiter": " ",
727
+ "fewshot_delimiter": "\n\n",
728
+ "num_fewshot": 0,
729
+ "metric_list": [
730
+ {
731
+ "metric": "acc"
732
+ }
733
+ ],
734
+ "output_type": "multiple_choice",
735
+ "repeats": 1,
736
+ "should_decontaminate": true,
737
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
738
+ "metadata": {
739
+ "version": 1.0
740
+ }
741
+ },
742
+ "blimp_determiner_noun_agreement_with_adj_irregular_2": {
743
+ "task": "blimp_determiner_noun_agreement_with_adj_irregular_2",
744
+ "group": "blimp",
745
+ "dataset_path": "blimp",
746
+ "dataset_name": "determiner_noun_agreement_with_adj_irregular_2",
747
+ "validation_split": "train",
748
+ "doc_to_text": "",
749
+ "doc_to_target": 0,
750
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
751
+ "description": "",
752
+ "target_delimiter": " ",
753
+ "fewshot_delimiter": "\n\n",
754
+ "num_fewshot": 0,
755
+ "metric_list": [
756
+ {
757
+ "metric": "acc"
758
+ }
759
+ ],
760
+ "output_type": "multiple_choice",
761
+ "repeats": 1,
762
+ "should_decontaminate": true,
763
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
764
+ "metadata": {
765
+ "version": 1.0
766
+ }
767
+ },
768
+ "blimp_determiner_noun_agreement_with_adjective_1": {
769
+ "task": "blimp_determiner_noun_agreement_with_adjective_1",
770
+ "group": "blimp",
771
+ "dataset_path": "blimp",
772
+ "dataset_name": "determiner_noun_agreement_with_adjective_1",
773
+ "validation_split": "train",
774
+ "doc_to_text": "",
775
+ "doc_to_target": 0,
776
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
777
+ "description": "",
778
+ "target_delimiter": " ",
779
+ "fewshot_delimiter": "\n\n",
780
+ "num_fewshot": 0,
781
+ "metric_list": [
782
+ {
783
+ "metric": "acc"
784
+ }
785
+ ],
786
+ "output_type": "multiple_choice",
787
+ "repeats": 1,
788
+ "should_decontaminate": true,
789
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
790
+ "metadata": {
791
+ "version": 1.0
792
+ }
793
+ },
794
+ "blimp_distractor_agreement_relational_noun": {
795
+ "task": "blimp_distractor_agreement_relational_noun",
796
+ "group": "blimp",
797
+ "dataset_path": "blimp",
798
+ "dataset_name": "distractor_agreement_relational_noun",
799
+ "validation_split": "train",
800
+ "doc_to_text": "",
801
+ "doc_to_target": 0,
802
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
803
+ "description": "",
804
+ "target_delimiter": " ",
805
+ "fewshot_delimiter": "\n\n",
806
+ "num_fewshot": 0,
807
+ "metric_list": [
808
+ {
809
+ "metric": "acc"
810
+ }
811
+ ],
812
+ "output_type": "multiple_choice",
813
+ "repeats": 1,
814
+ "should_decontaminate": true,
815
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
816
+ "metadata": {
817
+ "version": 1.0
818
+ }
819
+ },
820
+ "blimp_distractor_agreement_relative_clause": {
821
+ "task": "blimp_distractor_agreement_relative_clause",
822
+ "group": "blimp",
823
+ "dataset_path": "blimp",
824
+ "dataset_name": "distractor_agreement_relative_clause",
825
+ "validation_split": "train",
826
+ "doc_to_text": "",
827
+ "doc_to_target": 0,
828
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
829
+ "description": "",
830
+ "target_delimiter": " ",
831
+ "fewshot_delimiter": "\n\n",
832
+ "num_fewshot": 0,
833
+ "metric_list": [
834
+ {
835
+ "metric": "acc"
836
+ }
837
+ ],
838
+ "output_type": "multiple_choice",
839
+ "repeats": 1,
840
+ "should_decontaminate": true,
841
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
842
+ "metadata": {
843
+ "version": 1.0
844
+ }
845
+ },
846
+ "blimp_drop_argument": {
847
+ "task": "blimp_drop_argument",
848
+ "group": "blimp",
849
+ "dataset_path": "blimp",
850
+ "dataset_name": "drop_argument",
851
+ "validation_split": "train",
852
+ "doc_to_text": "",
853
+ "doc_to_target": 0,
854
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
855
+ "description": "",
856
+ "target_delimiter": " ",
857
+ "fewshot_delimiter": "\n\n",
858
+ "num_fewshot": 0,
859
+ "metric_list": [
860
+ {
861
+ "metric": "acc"
862
+ }
863
+ ],
864
+ "output_type": "multiple_choice",
865
+ "repeats": 1,
866
+ "should_decontaminate": true,
867
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
868
+ "metadata": {
869
+ "version": 1.0
870
+ }
871
+ },
872
+ "blimp_ellipsis_n_bar_1": {
873
+ "task": "blimp_ellipsis_n_bar_1",
874
+ "group": "blimp",
875
+ "dataset_path": "blimp",
876
+ "dataset_name": "ellipsis_n_bar_1",
877
+ "validation_split": "train",
878
+ "doc_to_text": "",
879
+ "doc_to_target": 0,
880
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
881
+ "description": "",
882
+ "target_delimiter": " ",
883
+ "fewshot_delimiter": "\n\n",
884
+ "num_fewshot": 0,
885
+ "metric_list": [
886
+ {
887
+ "metric": "acc"
888
+ }
889
+ ],
890
+ "output_type": "multiple_choice",
891
+ "repeats": 1,
892
+ "should_decontaminate": true,
893
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
894
+ "metadata": {
895
+ "version": 1.0
896
+ }
897
+ },
898
+ "blimp_ellipsis_n_bar_2": {
899
+ "task": "blimp_ellipsis_n_bar_2",
900
+ "group": "blimp",
901
+ "dataset_path": "blimp",
902
+ "dataset_name": "ellipsis_n_bar_2",
903
+ "validation_split": "train",
904
+ "doc_to_text": "",
905
+ "doc_to_target": 0,
906
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
907
+ "description": "",
908
+ "target_delimiter": " ",
909
+ "fewshot_delimiter": "\n\n",
910
+ "num_fewshot": 0,
911
+ "metric_list": [
912
+ {
913
+ "metric": "acc"
914
+ }
915
+ ],
916
+ "output_type": "multiple_choice",
917
+ "repeats": 1,
918
+ "should_decontaminate": true,
919
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
920
+ "metadata": {
921
+ "version": 1.0
922
+ }
923
+ },
924
+ "blimp_existential_there_object_raising": {
925
+ "task": "blimp_existential_there_object_raising",
926
+ "group": "blimp",
927
+ "dataset_path": "blimp",
928
+ "dataset_name": "existential_there_object_raising",
929
+ "validation_split": "train",
930
+ "doc_to_text": "",
931
+ "doc_to_target": 0,
932
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
933
+ "description": "",
934
+ "target_delimiter": " ",
935
+ "fewshot_delimiter": "\n\n",
936
+ "num_fewshot": 0,
937
+ "metric_list": [
938
+ {
939
+ "metric": "acc"
940
+ }
941
+ ],
942
+ "output_type": "multiple_choice",
943
+ "repeats": 1,
944
+ "should_decontaminate": true,
945
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
946
+ "metadata": {
947
+ "version": 1.0
948
+ }
949
+ },
950
+ "blimp_existential_there_quantifiers_1": {
951
+ "task": "blimp_existential_there_quantifiers_1",
952
+ "group": "blimp",
953
+ "dataset_path": "blimp",
954
+ "dataset_name": "existential_there_quantifiers_1",
955
+ "validation_split": "train",
956
+ "doc_to_text": "",
957
+ "doc_to_target": 0,
958
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
959
+ "description": "",
960
+ "target_delimiter": " ",
961
+ "fewshot_delimiter": "\n\n",
962
+ "num_fewshot": 0,
963
+ "metric_list": [
964
+ {
965
+ "metric": "acc"
966
+ }
967
+ ],
968
+ "output_type": "multiple_choice",
969
+ "repeats": 1,
970
+ "should_decontaminate": true,
971
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
972
+ "metadata": {
973
+ "version": 1.0
974
+ }
975
+ },
976
+ "blimp_existential_there_quantifiers_2": {
977
+ "task": "blimp_existential_there_quantifiers_2",
978
+ "group": "blimp",
979
+ "dataset_path": "blimp",
980
+ "dataset_name": "existential_there_quantifiers_2",
981
+ "validation_split": "train",
982
+ "doc_to_text": "",
983
+ "doc_to_target": 0,
984
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
985
+ "description": "",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "num_fewshot": 0,
989
+ "metric_list": [
990
+ {
991
+ "metric": "acc"
992
+ }
993
+ ],
994
+ "output_type": "multiple_choice",
995
+ "repeats": 1,
996
+ "should_decontaminate": true,
997
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
998
+ "metadata": {
999
+ "version": 1.0
1000
+ }
1001
+ },
1002
+ "blimp_existential_there_subject_raising": {
1003
+ "task": "blimp_existential_there_subject_raising",
1004
+ "group": "blimp",
1005
+ "dataset_path": "blimp",
1006
+ "dataset_name": "existential_there_subject_raising",
1007
+ "validation_split": "train",
1008
+ "doc_to_text": "",
1009
+ "doc_to_target": 0,
1010
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1011
+ "description": "",
1012
+ "target_delimiter": " ",
1013
+ "fewshot_delimiter": "\n\n",
1014
+ "num_fewshot": 0,
1015
+ "metric_list": [
1016
+ {
1017
+ "metric": "acc"
1018
+ }
1019
+ ],
1020
+ "output_type": "multiple_choice",
1021
+ "repeats": 1,
1022
+ "should_decontaminate": true,
1023
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1024
+ "metadata": {
1025
+ "version": 1.0
1026
+ }
1027
+ },
1028
+ "blimp_expletive_it_object_raising": {
1029
+ "task": "blimp_expletive_it_object_raising",
1030
+ "group": "blimp",
1031
+ "dataset_path": "blimp",
1032
+ "dataset_name": "expletive_it_object_raising",
1033
+ "validation_split": "train",
1034
+ "doc_to_text": "",
1035
+ "doc_to_target": 0,
1036
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1037
+ "description": "",
1038
+ "target_delimiter": " ",
1039
+ "fewshot_delimiter": "\n\n",
1040
+ "num_fewshot": 0,
1041
+ "metric_list": [
1042
+ {
1043
+ "metric": "acc"
1044
+ }
1045
+ ],
1046
+ "output_type": "multiple_choice",
1047
+ "repeats": 1,
1048
+ "should_decontaminate": true,
1049
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1050
+ "metadata": {
1051
+ "version": 1.0
1052
+ }
1053
+ },
1054
+ "blimp_inchoative": {
1055
+ "task": "blimp_inchoative",
1056
+ "group": "blimp",
1057
+ "dataset_path": "blimp",
1058
+ "dataset_name": "inchoative",
1059
+ "validation_split": "train",
1060
+ "doc_to_text": "",
1061
+ "doc_to_target": 0,
1062
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1063
+ "description": "",
1064
+ "target_delimiter": " ",
1065
+ "fewshot_delimiter": "\n\n",
1066
+ "num_fewshot": 0,
1067
+ "metric_list": [
1068
+ {
1069
+ "metric": "acc"
1070
+ }
1071
+ ],
1072
+ "output_type": "multiple_choice",
1073
+ "repeats": 1,
1074
+ "should_decontaminate": true,
1075
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1076
+ "metadata": {
1077
+ "version": 1.0
1078
+ }
1079
+ },
1080
+ "blimp_intransitive": {
1081
+ "task": "blimp_intransitive",
1082
+ "group": "blimp",
1083
+ "dataset_path": "blimp",
1084
+ "dataset_name": "intransitive",
1085
+ "validation_split": "train",
1086
+ "doc_to_text": "",
1087
+ "doc_to_target": 0,
1088
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1089
+ "description": "",
1090
+ "target_delimiter": " ",
1091
+ "fewshot_delimiter": "\n\n",
1092
+ "num_fewshot": 0,
1093
+ "metric_list": [
1094
+ {
1095
+ "metric": "acc"
1096
+ }
1097
+ ],
1098
+ "output_type": "multiple_choice",
1099
+ "repeats": 1,
1100
+ "should_decontaminate": true,
1101
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1102
+ "metadata": {
1103
+ "version": 1.0
1104
+ }
1105
+ },
1106
+ "blimp_irregular_past_participle_adjectives": {
1107
+ "task": "blimp_irregular_past_participle_adjectives",
1108
+ "group": "blimp",
1109
+ "dataset_path": "blimp",
1110
+ "dataset_name": "irregular_past_participle_adjectives",
1111
+ "validation_split": "train",
1112
+ "doc_to_text": "",
1113
+ "doc_to_target": 0,
1114
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1115
+ "description": "",
1116
+ "target_delimiter": " ",
1117
+ "fewshot_delimiter": "\n\n",
1118
+ "num_fewshot": 0,
1119
+ "metric_list": [
1120
+ {
1121
+ "metric": "acc"
1122
+ }
1123
+ ],
1124
+ "output_type": "multiple_choice",
1125
+ "repeats": 1,
1126
+ "should_decontaminate": true,
1127
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1128
+ "metadata": {
1129
+ "version": 1.0
1130
+ }
1131
+ },
1132
+ "blimp_irregular_past_participle_verbs": {
1133
+ "task": "blimp_irregular_past_participle_verbs",
1134
+ "group": "blimp",
1135
+ "dataset_path": "blimp",
1136
+ "dataset_name": "irregular_past_participle_verbs",
1137
+ "validation_split": "train",
1138
+ "doc_to_text": "",
1139
+ "doc_to_target": 0,
1140
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1141
+ "description": "",
1142
+ "target_delimiter": " ",
1143
+ "fewshot_delimiter": "\n\n",
1144
+ "num_fewshot": 0,
1145
+ "metric_list": [
1146
+ {
1147
+ "metric": "acc"
1148
+ }
1149
+ ],
1150
+ "output_type": "multiple_choice",
1151
+ "repeats": 1,
1152
+ "should_decontaminate": true,
1153
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1154
+ "metadata": {
1155
+ "version": 1.0
1156
+ }
1157
+ },
1158
+ "blimp_irregular_plural_subject_verb_agreement_1": {
1159
+ "task": "blimp_irregular_plural_subject_verb_agreement_1",
1160
+ "group": "blimp",
1161
+ "dataset_path": "blimp",
1162
+ "dataset_name": "irregular_plural_subject_verb_agreement_1",
1163
+ "validation_split": "train",
1164
+ "doc_to_text": "",
1165
+ "doc_to_target": 0,
1166
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1167
+ "description": "",
1168
+ "target_delimiter": " ",
1169
+ "fewshot_delimiter": "\n\n",
1170
+ "num_fewshot": 0,
1171
+ "metric_list": [
1172
+ {
1173
+ "metric": "acc"
1174
+ }
1175
+ ],
1176
+ "output_type": "multiple_choice",
1177
+ "repeats": 1,
1178
+ "should_decontaminate": true,
1179
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1180
+ "metadata": {
1181
+ "version": 1.0
1182
+ }
1183
+ },
1184
+ "blimp_irregular_plural_subject_verb_agreement_2": {
1185
+ "task": "blimp_irregular_plural_subject_verb_agreement_2",
1186
+ "group": "blimp",
1187
+ "dataset_path": "blimp",
1188
+ "dataset_name": "irregular_plural_subject_verb_agreement_2",
1189
+ "validation_split": "train",
1190
+ "doc_to_text": "",
1191
+ "doc_to_target": 0,
1192
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1193
+ "description": "",
1194
+ "target_delimiter": " ",
1195
+ "fewshot_delimiter": "\n\n",
1196
+ "num_fewshot": 0,
1197
+ "metric_list": [
1198
+ {
1199
+ "metric": "acc"
1200
+ }
1201
+ ],
1202
+ "output_type": "multiple_choice",
1203
+ "repeats": 1,
1204
+ "should_decontaminate": true,
1205
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1206
+ "metadata": {
1207
+ "version": 1.0
1208
+ }
1209
+ },
1210
+ "blimp_left_branch_island_echo_question": {
1211
+ "task": "blimp_left_branch_island_echo_question",
1212
+ "group": "blimp",
1213
+ "dataset_path": "blimp",
1214
+ "dataset_name": "left_branch_island_echo_question",
1215
+ "validation_split": "train",
1216
+ "doc_to_text": "",
1217
+ "doc_to_target": 0,
1218
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1219
+ "description": "",
1220
+ "target_delimiter": " ",
1221
+ "fewshot_delimiter": "\n\n",
1222
+ "num_fewshot": 0,
1223
+ "metric_list": [
1224
+ {
1225
+ "metric": "acc"
1226
+ }
1227
+ ],
1228
+ "output_type": "multiple_choice",
1229
+ "repeats": 1,
1230
+ "should_decontaminate": true,
1231
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1232
+ "metadata": {
1233
+ "version": 1.0
1234
+ }
1235
+ },
1236
+ "blimp_left_branch_island_simple_question": {
1237
+ "task": "blimp_left_branch_island_simple_question",
1238
+ "group": "blimp",
1239
+ "dataset_path": "blimp",
1240
+ "dataset_name": "left_branch_island_simple_question",
1241
+ "validation_split": "train",
1242
+ "doc_to_text": "",
1243
+ "doc_to_target": 0,
1244
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1245
+ "description": "",
1246
+ "target_delimiter": " ",
1247
+ "fewshot_delimiter": "\n\n",
1248
+ "num_fewshot": 0,
1249
+ "metric_list": [
1250
+ {
1251
+ "metric": "acc"
1252
+ }
1253
+ ],
1254
+ "output_type": "multiple_choice",
1255
+ "repeats": 1,
1256
+ "should_decontaminate": true,
1257
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1258
+ "metadata": {
1259
+ "version": 1.0
1260
+ }
1261
+ },
1262
+ "blimp_matrix_question_npi_licensor_present": {
1263
+ "task": "blimp_matrix_question_npi_licensor_present",
1264
+ "group": "blimp",
1265
+ "dataset_path": "blimp",
1266
+ "dataset_name": "matrix_question_npi_licensor_present",
1267
+ "validation_split": "train",
1268
+ "doc_to_text": "",
1269
+ "doc_to_target": 0,
1270
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1271
+ "description": "",
1272
+ "target_delimiter": " ",
1273
+ "fewshot_delimiter": "\n\n",
1274
+ "num_fewshot": 0,
1275
+ "metric_list": [
1276
+ {
1277
+ "metric": "acc"
1278
+ }
1279
+ ],
1280
+ "output_type": "multiple_choice",
1281
+ "repeats": 1,
1282
+ "should_decontaminate": true,
1283
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1284
+ "metadata": {
1285
+ "version": 1.0
1286
+ }
1287
+ },
1288
+ "blimp_npi_present_1": {
1289
+ "task": "blimp_npi_present_1",
1290
+ "group": "blimp",
1291
+ "dataset_path": "blimp",
1292
+ "dataset_name": "npi_present_1",
1293
+ "validation_split": "train",
1294
+ "doc_to_text": "",
1295
+ "doc_to_target": 0,
1296
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1297
+ "description": "",
1298
+ "target_delimiter": " ",
1299
+ "fewshot_delimiter": "\n\n",
1300
+ "num_fewshot": 0,
1301
+ "metric_list": [
1302
+ {
1303
+ "metric": "acc"
1304
+ }
1305
+ ],
1306
+ "output_type": "multiple_choice",
1307
+ "repeats": 1,
1308
+ "should_decontaminate": true,
1309
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1310
+ "metadata": {
1311
+ "version": 1.0
1312
+ }
1313
+ },
1314
+ "blimp_npi_present_2": {
1315
+ "task": "blimp_npi_present_2",
1316
+ "group": "blimp",
1317
+ "dataset_path": "blimp",
1318
+ "dataset_name": "npi_present_2",
1319
+ "validation_split": "train",
1320
+ "doc_to_text": "",
1321
+ "doc_to_target": 0,
1322
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1323
+ "description": "",
1324
+ "target_delimiter": " ",
1325
+ "fewshot_delimiter": "\n\n",
1326
+ "num_fewshot": 0,
1327
+ "metric_list": [
1328
+ {
1329
+ "metric": "acc"
1330
+ }
1331
+ ],
1332
+ "output_type": "multiple_choice",
1333
+ "repeats": 1,
1334
+ "should_decontaminate": true,
1335
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1336
+ "metadata": {
1337
+ "version": 1.0
1338
+ }
1339
+ },
1340
+ "blimp_only_npi_licensor_present": {
1341
+ "task": "blimp_only_npi_licensor_present",
1342
+ "group": "blimp",
1343
+ "dataset_path": "blimp",
1344
+ "dataset_name": "only_npi_licensor_present",
1345
+ "validation_split": "train",
1346
+ "doc_to_text": "",
1347
+ "doc_to_target": 0,
1348
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1349
+ "description": "",
1350
+ "target_delimiter": " ",
1351
+ "fewshot_delimiter": "\n\n",
1352
+ "num_fewshot": 0,
1353
+ "metric_list": [
1354
+ {
1355
+ "metric": "acc"
1356
+ }
1357
+ ],
1358
+ "output_type": "multiple_choice",
1359
+ "repeats": 1,
1360
+ "should_decontaminate": true,
1361
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1362
+ "metadata": {
1363
+ "version": 1.0
1364
+ }
1365
+ },
1366
+ "blimp_only_npi_scope": {
1367
+ "task": "blimp_only_npi_scope",
1368
+ "group": "blimp",
1369
+ "dataset_path": "blimp",
1370
+ "dataset_name": "only_npi_scope",
1371
+ "validation_split": "train",
1372
+ "doc_to_text": "",
1373
+ "doc_to_target": 0,
1374
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1375
+ "description": "",
1376
+ "target_delimiter": " ",
1377
+ "fewshot_delimiter": "\n\n",
1378
+ "num_fewshot": 0,
1379
+ "metric_list": [
1380
+ {
1381
+ "metric": "acc"
1382
+ }
1383
+ ],
1384
+ "output_type": "multiple_choice",
1385
+ "repeats": 1,
1386
+ "should_decontaminate": true,
1387
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1388
+ "metadata": {
1389
+ "version": 1.0
1390
+ }
1391
+ },
1392
+ "blimp_passive_1": {
1393
+ "task": "blimp_passive_1",
1394
+ "group": "blimp",
1395
+ "dataset_path": "blimp",
1396
+ "dataset_name": "passive_1",
1397
+ "validation_split": "train",
1398
+ "doc_to_text": "",
1399
+ "doc_to_target": 0,
1400
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1401
+ "description": "",
1402
+ "target_delimiter": " ",
1403
+ "fewshot_delimiter": "\n\n",
1404
+ "num_fewshot": 0,
1405
+ "metric_list": [
1406
+ {
1407
+ "metric": "acc"
1408
+ }
1409
+ ],
1410
+ "output_type": "multiple_choice",
1411
+ "repeats": 1,
1412
+ "should_decontaminate": true,
1413
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1414
+ "metadata": {
1415
+ "version": 1.0
1416
+ }
1417
+ },
1418
+ "blimp_passive_2": {
1419
+ "task": "blimp_passive_2",
1420
+ "group": "blimp",
1421
+ "dataset_path": "blimp",
1422
+ "dataset_name": "passive_2",
1423
+ "validation_split": "train",
1424
+ "doc_to_text": "",
1425
+ "doc_to_target": 0,
1426
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1427
+ "description": "",
1428
+ "target_delimiter": " ",
1429
+ "fewshot_delimiter": "\n\n",
1430
+ "num_fewshot": 0,
1431
+ "metric_list": [
1432
+ {
1433
+ "metric": "acc"
1434
+ }
1435
+ ],
1436
+ "output_type": "multiple_choice",
1437
+ "repeats": 1,
1438
+ "should_decontaminate": true,
1439
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1440
+ "metadata": {
1441
+ "version": 1.0
1442
+ }
1443
+ },
1444
+ "blimp_principle_A_c_command": {
1445
+ "task": "blimp_principle_A_c_command",
1446
+ "group": "blimp",
1447
+ "dataset_path": "blimp",
1448
+ "dataset_name": "principle_A_c_command",
1449
+ "validation_split": "train",
1450
+ "doc_to_text": "",
1451
+ "doc_to_target": 0,
1452
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1453
+ "description": "",
1454
+ "target_delimiter": " ",
1455
+ "fewshot_delimiter": "\n\n",
1456
+ "num_fewshot": 0,
1457
+ "metric_list": [
1458
+ {
1459
+ "metric": "acc"
1460
+ }
1461
+ ],
1462
+ "output_type": "multiple_choice",
1463
+ "repeats": 1,
1464
+ "should_decontaminate": true,
1465
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1466
+ "metadata": {
1467
+ "version": 1.0
1468
+ }
1469
+ },
1470
+ "blimp_principle_A_case_1": {
1471
+ "task": "blimp_principle_A_case_1",
1472
+ "group": "blimp",
1473
+ "dataset_path": "blimp",
1474
+ "dataset_name": "principle_A_case_1",
1475
+ "validation_split": "train",
1476
+ "doc_to_text": "",
1477
+ "doc_to_target": 0,
1478
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1479
+ "description": "",
1480
+ "target_delimiter": " ",
1481
+ "fewshot_delimiter": "\n\n",
1482
+ "num_fewshot": 0,
1483
+ "metric_list": [
1484
+ {
1485
+ "metric": "acc"
1486
+ }
1487
+ ],
1488
+ "output_type": "multiple_choice",
1489
+ "repeats": 1,
1490
+ "should_decontaminate": true,
1491
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1492
+ "metadata": {
1493
+ "version": 1.0
1494
+ }
1495
+ },
1496
+ "blimp_principle_A_case_2": {
1497
+ "task": "blimp_principle_A_case_2",
1498
+ "group": "blimp",
1499
+ "dataset_path": "blimp",
1500
+ "dataset_name": "principle_A_case_2",
1501
+ "validation_split": "train",
1502
+ "doc_to_text": "",
1503
+ "doc_to_target": 0,
1504
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1505
+ "description": "",
1506
+ "target_delimiter": " ",
1507
+ "fewshot_delimiter": "\n\n",
1508
+ "num_fewshot": 0,
1509
+ "metric_list": [
1510
+ {
1511
+ "metric": "acc"
1512
+ }
1513
+ ],
1514
+ "output_type": "multiple_choice",
1515
+ "repeats": 1,
1516
+ "should_decontaminate": true,
1517
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1518
+ "metadata": {
1519
+ "version": 1.0
1520
+ }
1521
+ },
1522
+ "blimp_principle_A_domain_1": {
1523
+ "task": "blimp_principle_A_domain_1",
1524
+ "group": "blimp",
1525
+ "dataset_path": "blimp",
1526
+ "dataset_name": "principle_A_domain_1",
1527
+ "validation_split": "train",
1528
+ "doc_to_text": "",
1529
+ "doc_to_target": 0,
1530
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1531
+ "description": "",
1532
+ "target_delimiter": " ",
1533
+ "fewshot_delimiter": "\n\n",
1534
+ "num_fewshot": 0,
1535
+ "metric_list": [
1536
+ {
1537
+ "metric": "acc"
1538
+ }
1539
+ ],
1540
+ "output_type": "multiple_choice",
1541
+ "repeats": 1,
1542
+ "should_decontaminate": true,
1543
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1544
+ "metadata": {
1545
+ "version": 1.0
1546
+ }
1547
+ },
1548
+ "blimp_principle_A_domain_2": {
1549
+ "task": "blimp_principle_A_domain_2",
1550
+ "group": "blimp",
1551
+ "dataset_path": "blimp",
1552
+ "dataset_name": "principle_A_domain_2",
1553
+ "validation_split": "train",
1554
+ "doc_to_text": "",
1555
+ "doc_to_target": 0,
1556
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1557
+ "description": "",
1558
+ "target_delimiter": " ",
1559
+ "fewshot_delimiter": "\n\n",
1560
+ "num_fewshot": 0,
1561
+ "metric_list": [
1562
+ {
1563
+ "metric": "acc"
1564
+ }
1565
+ ],
1566
+ "output_type": "multiple_choice",
1567
+ "repeats": 1,
1568
+ "should_decontaminate": true,
1569
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1570
+ "metadata": {
1571
+ "version": 1.0
1572
+ }
1573
+ },
1574
+ "blimp_principle_A_domain_3": {
1575
+ "task": "blimp_principle_A_domain_3",
1576
+ "group": "blimp",
1577
+ "dataset_path": "blimp",
1578
+ "dataset_name": "principle_A_domain_3",
1579
+ "validation_split": "train",
1580
+ "doc_to_text": "",
1581
+ "doc_to_target": 0,
1582
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1583
+ "description": "",
1584
+ "target_delimiter": " ",
1585
+ "fewshot_delimiter": "\n\n",
1586
+ "num_fewshot": 0,
1587
+ "metric_list": [
1588
+ {
1589
+ "metric": "acc"
1590
+ }
1591
+ ],
1592
+ "output_type": "multiple_choice",
1593
+ "repeats": 1,
1594
+ "should_decontaminate": true,
1595
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1596
+ "metadata": {
1597
+ "version": 1.0
1598
+ }
1599
+ },
1600
+ "blimp_principle_A_reconstruction": {
1601
+ "task": "blimp_principle_A_reconstruction",
1602
+ "group": "blimp",
1603
+ "dataset_path": "blimp",
1604
+ "dataset_name": "principle_A_reconstruction",
1605
+ "validation_split": "train",
1606
+ "doc_to_text": "",
1607
+ "doc_to_target": 0,
1608
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1609
+ "description": "",
1610
+ "target_delimiter": " ",
1611
+ "fewshot_delimiter": "\n\n",
1612
+ "num_fewshot": 0,
1613
+ "metric_list": [
1614
+ {
1615
+ "metric": "acc"
1616
+ }
1617
+ ],
1618
+ "output_type": "multiple_choice",
1619
+ "repeats": 1,
1620
+ "should_decontaminate": true,
1621
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1622
+ "metadata": {
1623
+ "version": 1.0
1624
+ }
1625
+ },
1626
+ "blimp_regular_plural_subject_verb_agreement_1": {
1627
+ "task": "blimp_regular_plural_subject_verb_agreement_1",
1628
+ "group": "blimp",
1629
+ "dataset_path": "blimp",
1630
+ "dataset_name": "regular_plural_subject_verb_agreement_1",
1631
+ "validation_split": "train",
1632
+ "doc_to_text": "",
1633
+ "doc_to_target": 0,
1634
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1635
+ "description": "",
1636
+ "target_delimiter": " ",
1637
+ "fewshot_delimiter": "\n\n",
1638
+ "num_fewshot": 0,
1639
+ "metric_list": [
1640
+ {
1641
+ "metric": "acc"
1642
+ }
1643
+ ],
1644
+ "output_type": "multiple_choice",
1645
+ "repeats": 1,
1646
+ "should_decontaminate": true,
1647
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1648
+ "metadata": {
1649
+ "version": 1.0
1650
+ }
1651
+ },
1652
+ "blimp_regular_plural_subject_verb_agreement_2": {
1653
+ "task": "blimp_regular_plural_subject_verb_agreement_2",
1654
+ "group": "blimp",
1655
+ "dataset_path": "blimp",
1656
+ "dataset_name": "regular_plural_subject_verb_agreement_2",
1657
+ "validation_split": "train",
1658
+ "doc_to_text": "",
1659
+ "doc_to_target": 0,
1660
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1661
+ "description": "",
1662
+ "target_delimiter": " ",
1663
+ "fewshot_delimiter": "\n\n",
1664
+ "num_fewshot": 0,
1665
+ "metric_list": [
1666
+ {
1667
+ "metric": "acc"
1668
+ }
1669
+ ],
1670
+ "output_type": "multiple_choice",
1671
+ "repeats": 1,
1672
+ "should_decontaminate": true,
1673
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1674
+ "metadata": {
1675
+ "version": 1.0
1676
+ }
1677
+ },
1678
+ "blimp_sentential_negation_npi_licensor_present": {
1679
+ "task": "blimp_sentential_negation_npi_licensor_present",
1680
+ "group": "blimp",
1681
+ "dataset_path": "blimp",
1682
+ "dataset_name": "sentential_negation_npi_licensor_present",
1683
+ "validation_split": "train",
1684
+ "doc_to_text": "",
1685
+ "doc_to_target": 0,
1686
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1687
+ "description": "",
1688
+ "target_delimiter": " ",
1689
+ "fewshot_delimiter": "\n\n",
1690
+ "num_fewshot": 0,
1691
+ "metric_list": [
1692
+ {
1693
+ "metric": "acc"
1694
+ }
1695
+ ],
1696
+ "output_type": "multiple_choice",
1697
+ "repeats": 1,
1698
+ "should_decontaminate": true,
1699
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1700
+ "metadata": {
1701
+ "version": 1.0
1702
+ }
1703
+ },
1704
+ "blimp_sentential_negation_npi_scope": {
1705
+ "task": "blimp_sentential_negation_npi_scope",
1706
+ "group": "blimp",
1707
+ "dataset_path": "blimp",
1708
+ "dataset_name": "sentential_negation_npi_scope",
1709
+ "validation_split": "train",
1710
+ "doc_to_text": "",
1711
+ "doc_to_target": 0,
1712
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1713
+ "description": "",
1714
+ "target_delimiter": " ",
1715
+ "fewshot_delimiter": "\n\n",
1716
+ "num_fewshot": 0,
1717
+ "metric_list": [
1718
+ {
1719
+ "metric": "acc"
1720
+ }
1721
+ ],
1722
+ "output_type": "multiple_choice",
1723
+ "repeats": 1,
1724
+ "should_decontaminate": true,
1725
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1726
+ "metadata": {
1727
+ "version": 1.0
1728
+ }
1729
+ },
1730
+ "blimp_sentential_subject_island": {
1731
+ "task": "blimp_sentential_subject_island",
1732
+ "group": "blimp",
1733
+ "dataset_path": "blimp",
1734
+ "dataset_name": "sentential_subject_island",
1735
+ "validation_split": "train",
1736
+ "doc_to_text": "",
1737
+ "doc_to_target": 0,
1738
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1739
+ "description": "",
1740
+ "target_delimiter": " ",
1741
+ "fewshot_delimiter": "\n\n",
1742
+ "num_fewshot": 0,
1743
+ "metric_list": [
1744
+ {
1745
+ "metric": "acc"
1746
+ }
1747
+ ],
1748
+ "output_type": "multiple_choice",
1749
+ "repeats": 1,
1750
+ "should_decontaminate": true,
1751
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1752
+ "metadata": {
1753
+ "version": 1.0
1754
+ }
1755
+ },
1756
+ "blimp_superlative_quantifiers_1": {
1757
+ "task": "blimp_superlative_quantifiers_1",
1758
+ "group": "blimp",
1759
+ "dataset_path": "blimp",
1760
+ "dataset_name": "superlative_quantifiers_1",
1761
+ "validation_split": "train",
1762
+ "doc_to_text": "",
1763
+ "doc_to_target": 0,
1764
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1765
+ "description": "",
1766
+ "target_delimiter": " ",
1767
+ "fewshot_delimiter": "\n\n",
1768
+ "num_fewshot": 0,
1769
+ "metric_list": [
1770
+ {
1771
+ "metric": "acc"
1772
+ }
1773
+ ],
1774
+ "output_type": "multiple_choice",
1775
+ "repeats": 1,
1776
+ "should_decontaminate": true,
1777
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1778
+ "metadata": {
1779
+ "version": 1.0
1780
+ }
1781
+ },
1782
+ "blimp_superlative_quantifiers_2": {
1783
+ "task": "blimp_superlative_quantifiers_2",
1784
+ "group": "blimp",
1785
+ "dataset_path": "blimp",
1786
+ "dataset_name": "superlative_quantifiers_2",
1787
+ "validation_split": "train",
1788
+ "doc_to_text": "",
1789
+ "doc_to_target": 0,
1790
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1791
+ "description": "",
1792
+ "target_delimiter": " ",
1793
+ "fewshot_delimiter": "\n\n",
1794
+ "num_fewshot": 0,
1795
+ "metric_list": [
1796
+ {
1797
+ "metric": "acc"
1798
+ }
1799
+ ],
1800
+ "output_type": "multiple_choice",
1801
+ "repeats": 1,
1802
+ "should_decontaminate": true,
1803
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1804
+ "metadata": {
1805
+ "version": 1.0
1806
+ }
1807
+ },
1808
+ "blimp_tough_vs_raising_1": {
1809
+ "task": "blimp_tough_vs_raising_1",
1810
+ "group": "blimp",
1811
+ "dataset_path": "blimp",
1812
+ "dataset_name": "tough_vs_raising_1",
1813
+ "validation_split": "train",
1814
+ "doc_to_text": "",
1815
+ "doc_to_target": 0,
1816
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1817
+ "description": "",
1818
+ "target_delimiter": " ",
1819
+ "fewshot_delimiter": "\n\n",
1820
+ "num_fewshot": 0,
1821
+ "metric_list": [
1822
+ {
1823
+ "metric": "acc"
1824
+ }
1825
+ ],
1826
+ "output_type": "multiple_choice",
1827
+ "repeats": 1,
1828
+ "should_decontaminate": true,
1829
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1830
+ "metadata": {
1831
+ "version": 1.0
1832
+ }
1833
+ },
1834
+ "blimp_tough_vs_raising_2": {
1835
+ "task": "blimp_tough_vs_raising_2",
1836
+ "group": "blimp",
1837
+ "dataset_path": "blimp",
1838
+ "dataset_name": "tough_vs_raising_2",
1839
+ "validation_split": "train",
1840
+ "doc_to_text": "",
1841
+ "doc_to_target": 0,
1842
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1843
+ "description": "",
1844
+ "target_delimiter": " ",
1845
+ "fewshot_delimiter": "\n\n",
1846
+ "num_fewshot": 0,
1847
+ "metric_list": [
1848
+ {
1849
+ "metric": "acc"
1850
+ }
1851
+ ],
1852
+ "output_type": "multiple_choice",
1853
+ "repeats": 1,
1854
+ "should_decontaminate": true,
1855
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1856
+ "metadata": {
1857
+ "version": 1.0
1858
+ }
1859
+ },
1860
+ "blimp_transitive": {
1861
+ "task": "blimp_transitive",
1862
+ "group": "blimp",
1863
+ "dataset_path": "blimp",
1864
+ "dataset_name": "transitive",
1865
+ "validation_split": "train",
1866
+ "doc_to_text": "",
1867
+ "doc_to_target": 0,
1868
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1869
+ "description": "",
1870
+ "target_delimiter": " ",
1871
+ "fewshot_delimiter": "\n\n",
1872
+ "num_fewshot": 0,
1873
+ "metric_list": [
1874
+ {
1875
+ "metric": "acc"
1876
+ }
1877
+ ],
1878
+ "output_type": "multiple_choice",
1879
+ "repeats": 1,
1880
+ "should_decontaminate": true,
1881
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1882
+ "metadata": {
1883
+ "version": 1.0
1884
+ }
1885
+ },
1886
+ "blimp_wh_island": {
1887
+ "task": "blimp_wh_island",
1888
+ "group": "blimp",
1889
+ "dataset_path": "blimp",
1890
+ "dataset_name": "wh_island",
1891
+ "validation_split": "train",
1892
+ "doc_to_text": "",
1893
+ "doc_to_target": 0,
1894
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1895
+ "description": "",
1896
+ "target_delimiter": " ",
1897
+ "fewshot_delimiter": "\n\n",
1898
+ "num_fewshot": 0,
1899
+ "metric_list": [
1900
+ {
1901
+ "metric": "acc"
1902
+ }
1903
+ ],
1904
+ "output_type": "multiple_choice",
1905
+ "repeats": 1,
1906
+ "should_decontaminate": true,
1907
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1908
+ "metadata": {
1909
+ "version": 1.0
1910
+ }
1911
+ },
1912
+ "blimp_wh_questions_object_gap": {
1913
+ "task": "blimp_wh_questions_object_gap",
1914
+ "group": "blimp",
1915
+ "dataset_path": "blimp",
1916
+ "dataset_name": "wh_questions_object_gap",
1917
+ "validation_split": "train",
1918
+ "doc_to_text": "",
1919
+ "doc_to_target": 0,
1920
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1921
+ "description": "",
1922
+ "target_delimiter": " ",
1923
+ "fewshot_delimiter": "\n\n",
1924
+ "num_fewshot": 0,
1925
+ "metric_list": [
1926
+ {
1927
+ "metric": "acc"
1928
+ }
1929
+ ],
1930
+ "output_type": "multiple_choice",
1931
+ "repeats": 1,
1932
+ "should_decontaminate": true,
1933
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1934
+ "metadata": {
1935
+ "version": 1.0
1936
+ }
1937
+ },
1938
+ "blimp_wh_questions_subject_gap": {
1939
+ "task": "blimp_wh_questions_subject_gap",
1940
+ "group": "blimp",
1941
+ "dataset_path": "blimp",
1942
+ "dataset_name": "wh_questions_subject_gap",
1943
+ "validation_split": "train",
1944
+ "doc_to_text": "",
1945
+ "doc_to_target": 0,
1946
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1947
+ "description": "",
1948
+ "target_delimiter": " ",
1949
+ "fewshot_delimiter": "\n\n",
1950
+ "num_fewshot": 0,
1951
+ "metric_list": [
1952
+ {
1953
+ "metric": "acc"
1954
+ }
1955
+ ],
1956
+ "output_type": "multiple_choice",
1957
+ "repeats": 1,
1958
+ "should_decontaminate": true,
1959
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1960
+ "metadata": {
1961
+ "version": 1.0
1962
+ }
1963
+ },
1964
+ "blimp_wh_questions_subject_gap_long_distance": {
1965
+ "task": "blimp_wh_questions_subject_gap_long_distance",
1966
+ "group": "blimp",
1967
+ "dataset_path": "blimp",
1968
+ "dataset_name": "wh_questions_subject_gap_long_distance",
1969
+ "validation_split": "train",
1970
+ "doc_to_text": "",
1971
+ "doc_to_target": 0,
1972
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1973
+ "description": "",
1974
+ "target_delimiter": " ",
1975
+ "fewshot_delimiter": "\n\n",
1976
+ "num_fewshot": 0,
1977
+ "metric_list": [
1978
+ {
1979
+ "metric": "acc"
1980
+ }
1981
+ ],
1982
+ "output_type": "multiple_choice",
1983
+ "repeats": 1,
1984
+ "should_decontaminate": true,
1985
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1986
+ "metadata": {
1987
+ "version": 1.0
1988
+ }
1989
+ },
1990
+ "blimp_wh_vs_that_no_gap": {
1991
+ "task": "blimp_wh_vs_that_no_gap",
1992
+ "group": "blimp",
1993
+ "dataset_path": "blimp",
1994
+ "dataset_name": "wh_vs_that_no_gap",
1995
+ "validation_split": "train",
1996
+ "doc_to_text": "",
1997
+ "doc_to_target": 0,
1998
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1999
+ "description": "",
2000
+ "target_delimiter": " ",
2001
+ "fewshot_delimiter": "\n\n",
2002
+ "num_fewshot": 0,
2003
+ "metric_list": [
2004
+ {
2005
+ "metric": "acc"
2006
+ }
2007
+ ],
2008
+ "output_type": "multiple_choice",
2009
+ "repeats": 1,
2010
+ "should_decontaminate": true,
2011
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
2012
+ "metadata": {
2013
+ "version": 1.0
2014
+ }
2015
+ },
2016
+ "blimp_wh_vs_that_no_gap_long_distance": {
2017
+ "task": "blimp_wh_vs_that_no_gap_long_distance",
2018
+ "group": "blimp",
2019
+ "dataset_path": "blimp",
2020
+ "dataset_name": "wh_vs_that_no_gap_long_distance",
2021
+ "validation_split": "train",
2022
+ "doc_to_text": "",
2023
+ "doc_to_target": 0,
2024
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
2025
+ "description": "",
2026
+ "target_delimiter": " ",
2027
+ "fewshot_delimiter": "\n\n",
2028
+ "num_fewshot": 0,
2029
+ "metric_list": [
2030
+ {
2031
+ "metric": "acc"
2032
+ }
2033
+ ],
2034
+ "output_type": "multiple_choice",
2035
+ "repeats": 1,
2036
+ "should_decontaminate": true,
2037
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
2038
+ "metadata": {
2039
+ "version": 1.0
2040
+ }
2041
+ },
2042
+ "blimp_wh_vs_that_with_gap": {
2043
+ "task": "blimp_wh_vs_that_with_gap",
2044
+ "group": "blimp",
2045
+ "dataset_path": "blimp",
2046
+ "dataset_name": "wh_vs_that_with_gap",
2047
+ "validation_split": "train",
2048
+ "doc_to_text": "",
2049
+ "doc_to_target": 0,
2050
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
2051
+ "description": "",
2052
+ "target_delimiter": " ",
2053
+ "fewshot_delimiter": "\n\n",
2054
+ "num_fewshot": 0,
2055
+ "metric_list": [
2056
+ {
2057
+ "metric": "acc"
2058
+ }
2059
+ ],
2060
+ "output_type": "multiple_choice",
2061
+ "repeats": 1,
2062
+ "should_decontaminate": true,
2063
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
2064
+ "metadata": {
2065
+ "version": 1.0
2066
+ }
2067
+ },
2068
+ "blimp_wh_vs_that_with_gap_long_distance": {
2069
+ "task": "blimp_wh_vs_that_with_gap_long_distance",
2070
+ "group": "blimp",
2071
+ "dataset_path": "blimp",
2072
+ "dataset_name": "wh_vs_that_with_gap_long_distance",
2073
+ "validation_split": "train",
2074
+ "doc_to_text": "",
2075
+ "doc_to_target": 0,
2076
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
2077
+ "description": "",
2078
+ "target_delimiter": " ",
2079
+ "fewshot_delimiter": "\n\n",
2080
+ "num_fewshot": 0,
2081
+ "metric_list": [
2082
+ {
2083
+ "metric": "acc"
2084
+ }
2085
+ ],
2086
+ "output_type": "multiple_choice",
2087
+ "repeats": 1,
2088
+ "should_decontaminate": true,
2089
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
2090
+ "metadata": {
2091
+ "version": 1.0
2092
+ }
2093
+ }
2094
+ },
2095
+ "versions": {
2096
+ "blimp": "N/A",
2097
+ "blimp_adjunct_island": 1.0,
2098
+ "blimp_anaphor_gender_agreement": 1.0,
2099
+ "blimp_anaphor_number_agreement": 1.0,
2100
+ "blimp_animate_subject_passive": 1.0,
2101
+ "blimp_animate_subject_trans": 1.0,
2102
+ "blimp_causative": 1.0,
2103
+ "blimp_complex_NP_island": 1.0,
2104
+ "blimp_coordinate_structure_constraint_complex_left_branch": 1.0,
2105
+ "blimp_coordinate_structure_constraint_object_extraction": 1.0,
2106
+ "blimp_determiner_noun_agreement_1": 1.0,
2107
+ "blimp_determiner_noun_agreement_2": 1.0,
2108
+ "blimp_determiner_noun_agreement_irregular_1": 1.0,
2109
+ "blimp_determiner_noun_agreement_irregular_2": 1.0,
2110
+ "blimp_determiner_noun_agreement_with_adj_2": 1.0,
2111
+ "blimp_determiner_noun_agreement_with_adj_irregular_1": 1.0,
2112
+ "blimp_determiner_noun_agreement_with_adj_irregular_2": 1.0,
2113
+ "blimp_determiner_noun_agreement_with_adjective_1": 1.0,
2114
+ "blimp_distractor_agreement_relational_noun": 1.0,
2115
+ "blimp_distractor_agreement_relative_clause": 1.0,
2116
+ "blimp_drop_argument": 1.0,
2117
+ "blimp_ellipsis_n_bar_1": 1.0,
2118
+ "blimp_ellipsis_n_bar_2": 1.0,
2119
+ "blimp_existential_there_object_raising": 1.0,
2120
+ "blimp_existential_there_quantifiers_1": 1.0,
2121
+ "blimp_existential_there_quantifiers_2": 1.0,
2122
+ "blimp_existential_there_subject_raising": 1.0,
2123
+ "blimp_expletive_it_object_raising": 1.0,
2124
+ "blimp_inchoative": 1.0,
2125
+ "blimp_intransitive": 1.0,
2126
+ "blimp_irregular_past_participle_adjectives": 1.0,
2127
+ "blimp_irregular_past_participle_verbs": 1.0,
2128
+ "blimp_irregular_plural_subject_verb_agreement_1": 1.0,
2129
+ "blimp_irregular_plural_subject_verb_agreement_2": 1.0,
2130
+ "blimp_left_branch_island_echo_question": 1.0,
2131
+ "blimp_left_branch_island_simple_question": 1.0,
2132
+ "blimp_matrix_question_npi_licensor_present": 1.0,
2133
+ "blimp_npi_present_1": 1.0,
2134
+ "blimp_npi_present_2": 1.0,
2135
+ "blimp_only_npi_licensor_present": 1.0,
2136
+ "blimp_only_npi_scope": 1.0,
2137
+ "blimp_passive_1": 1.0,
2138
+ "blimp_passive_2": 1.0,
2139
+ "blimp_principle_A_c_command": 1.0,
2140
+ "blimp_principle_A_case_1": 1.0,
2141
+ "blimp_principle_A_case_2": 1.0,
2142
+ "blimp_principle_A_domain_1": 1.0,
2143
+ "blimp_principle_A_domain_2": 1.0,
2144
+ "blimp_principle_A_domain_3": 1.0,
2145
+ "blimp_principle_A_reconstruction": 1.0,
2146
+ "blimp_regular_plural_subject_verb_agreement_1": 1.0,
2147
+ "blimp_regular_plural_subject_verb_agreement_2": 1.0,
2148
+ "blimp_sentential_negation_npi_licensor_present": 1.0,
2149
+ "blimp_sentential_negation_npi_scope": 1.0,
2150
+ "blimp_sentential_subject_island": 1.0,
2151
+ "blimp_superlative_quantifiers_1": 1.0,
2152
+ "blimp_superlative_quantifiers_2": 1.0,
2153
+ "blimp_tough_vs_raising_1": 1.0,
2154
+ "blimp_tough_vs_raising_2": 1.0,
2155
+ "blimp_transitive": 1.0,
2156
+ "blimp_wh_island": 1.0,
2157
+ "blimp_wh_questions_object_gap": 1.0,
2158
+ "blimp_wh_questions_subject_gap": 1.0,
2159
+ "blimp_wh_questions_subject_gap_long_distance": 1.0,
2160
+ "blimp_wh_vs_that_no_gap": 1.0,
2161
+ "blimp_wh_vs_that_no_gap_long_distance": 1.0,
2162
+ "blimp_wh_vs_that_with_gap": 1.0,
2163
+ "blimp_wh_vs_that_with_gap_long_distance": 1.0
2164
+ },
2165
+ "n-shot": {
2166
+ "blimp": 0,
2167
+ "blimp_adjunct_island": 0,
2168
+ "blimp_anaphor_gender_agreement": 0,
2169
+ "blimp_anaphor_number_agreement": 0,
2170
+ "blimp_animate_subject_passive": 0,
2171
+ "blimp_animate_subject_trans": 0,
2172
+ "blimp_causative": 0,
2173
+ "blimp_complex_NP_island": 0,
2174
+ "blimp_coordinate_structure_constraint_complex_left_branch": 0,
2175
+ "blimp_coordinate_structure_constraint_object_extraction": 0,
2176
+ "blimp_determiner_noun_agreement_1": 0,
2177
+ "blimp_determiner_noun_agreement_2": 0,
2178
+ "blimp_determiner_noun_agreement_irregular_1": 0,
2179
+ "blimp_determiner_noun_agreement_irregular_2": 0,
2180
+ "blimp_determiner_noun_agreement_with_adj_2": 0,
2181
+ "blimp_determiner_noun_agreement_with_adj_irregular_1": 0,
2182
+ "blimp_determiner_noun_agreement_with_adj_irregular_2": 0,
2183
+ "blimp_determiner_noun_agreement_with_adjective_1": 0,
2184
+ "blimp_distractor_agreement_relational_noun": 0,
2185
+ "blimp_distractor_agreement_relative_clause": 0,
2186
+ "blimp_drop_argument": 0,
2187
+ "blimp_ellipsis_n_bar_1": 0,
2188
+ "blimp_ellipsis_n_bar_2": 0,
2189
+ "blimp_existential_there_object_raising": 0,
2190
+ "blimp_existential_there_quantifiers_1": 0,
2191
+ "blimp_existential_there_quantifiers_2": 0,
2192
+ "blimp_existential_there_subject_raising": 0,
2193
+ "blimp_expletive_it_object_raising": 0,
2194
+ "blimp_inchoative": 0,
2195
+ "blimp_intransitive": 0,
2196
+ "blimp_irregular_past_participle_adjectives": 0,
2197
+ "blimp_irregular_past_participle_verbs": 0,
2198
+ "blimp_irregular_plural_subject_verb_agreement_1": 0,
2199
+ "blimp_irregular_plural_subject_verb_agreement_2": 0,
2200
+ "blimp_left_branch_island_echo_question": 0,
2201
+ "blimp_left_branch_island_simple_question": 0,
2202
+ "blimp_matrix_question_npi_licensor_present": 0,
2203
+ "blimp_npi_present_1": 0,
2204
+ "blimp_npi_present_2": 0,
2205
+ "blimp_only_npi_licensor_present": 0,
2206
+ "blimp_only_npi_scope": 0,
2207
+ "blimp_passive_1": 0,
2208
+ "blimp_passive_2": 0,
2209
+ "blimp_principle_A_c_command": 0,
2210
+ "blimp_principle_A_case_1": 0,
2211
+ "blimp_principle_A_case_2": 0,
2212
+ "blimp_principle_A_domain_1": 0,
2213
+ "blimp_principle_A_domain_2": 0,
2214
+ "blimp_principle_A_domain_3": 0,
2215
+ "blimp_principle_A_reconstruction": 0,
2216
+ "blimp_regular_plural_subject_verb_agreement_1": 0,
2217
+ "blimp_regular_plural_subject_verb_agreement_2": 0,
2218
+ "blimp_sentential_negation_npi_licensor_present": 0,
2219
+ "blimp_sentential_negation_npi_scope": 0,
2220
+ "blimp_sentential_subject_island": 0,
2221
+ "blimp_superlative_quantifiers_1": 0,
2222
+ "blimp_superlative_quantifiers_2": 0,
2223
+ "blimp_tough_vs_raising_1": 0,
2224
+ "blimp_tough_vs_raising_2": 0,
2225
+ "blimp_transitive": 0,
2226
+ "blimp_wh_island": 0,
2227
+ "blimp_wh_questions_object_gap": 0,
2228
+ "blimp_wh_questions_subject_gap": 0,
2229
+ "blimp_wh_questions_subject_gap_long_distance": 0,
2230
+ "blimp_wh_vs_that_no_gap": 0,
2231
+ "blimp_wh_vs_that_no_gap_long_distance": 0,
2232
+ "blimp_wh_vs_that_with_gap": 0,
2233
+ "blimp_wh_vs_that_with_gap_long_distance": 0
2234
+ },
2235
+ "config": {
2236
+ "model": "hf",
2237
+ "model_args": "pretrained=./rwkv-x-dev/R4-7B-15t-No-Mask_pth,dtype=bfloat16,trust_remote_code=True",
2238
+ "batch_size": "auto",
2239
+ "batch_sizes": [
2240
+ 64
2241
+ ],
2242
+ "device": null,
2243
+ "use_cache": null,
2244
+ "limit": null,
2245
+ "bootstrap_iters": 100000,
2246
+ "gen_kwargs": null
2247
+ },
2248
+ "git_hash": "8281e96"
2249
+ }
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/blimp/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:124f31d4c9170308c2fae50d85a1d593c8fb9353a0b082d246ec739c0724a4ba
+ size 324266
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/cmmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
The diff for this file is too large to render. See raw diff
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/cmmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3b899a3bcdd9a3798013caa514c6802576ce96938ba0007edab9e3632047091
+ size 116540
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/hellaswag/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,67 @@
1
+ {
2
+ "results": {
3
+ "hellaswag": {
4
+ "acc,none": 0.5258912567217686,
5
+ "acc_stderr,none": 0.004983087049281747,
6
+ "acc_norm,none": 0.7099183429595698,
7
+ "acc_norm_stderr,none": 0.00452872395187824,
8
+ "alias": "hellaswag"
9
+ }
10
+ },
11
+ "configs": {
12
+ "hellaswag": {
13
+ "task": "hellaswag",
14
+ "group": [
15
+ "multiple_choice"
16
+ ],
17
+ "dataset_path": "hellaswag",
18
+ "training_split": "train",
19
+ "validation_split": "validation",
20
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
21
+ "doc_to_text": "{{query}}",
22
+ "doc_to_target": "{{label}}",
23
+ "doc_to_choice": "choices",
24
+ "description": "",
25
+ "target_delimiter": " ",
26
+ "fewshot_delimiter": "\n\n",
27
+ "metric_list": [
28
+ {
29
+ "metric": "acc",
30
+ "aggregation": "mean",
31
+ "higher_is_better": true
32
+ },
33
+ {
34
+ "metric": "acc_norm",
35
+ "aggregation": "mean",
36
+ "higher_is_better": true
37
+ }
38
+ ],
39
+ "output_type": "multiple_choice",
40
+ "repeats": 1,
41
+ "should_decontaminate": false,
42
+ "metadata": {
43
+ "version": 1.0
44
+ }
45
+ }
46
+ },
47
+ "versions": {
48
+ "hellaswag": 1.0
49
+ },
50
+ "n-shot": {
51
+ "hellaswag": 0
52
+ },
53
+ "config": {
54
+ "model": "hf",
55
+ "model_args": "pretrained=./rwkv-x-dev/R4-7B-15t-No-Mask_pth,dtype=bfloat16,trust_remote_code=True",
56
+ "batch_size": "auto",
57
+ "batch_sizes": [
58
+ 64
59
+ ],
60
+ "device": null,
61
+ "use_cache": null,
62
+ "limit": null,
63
+ "bootstrap_iters": 100000,
64
+ "gen_kwargs": null
65
+ },
66
+ "git_hash": "8281e96"
67
+ }
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/hellaswag/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c50c3af8140d29ff67a5f74f04a141a7603e730136ef24c83a6dfad9e8e4a6b
+ size 50753
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/lambada/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,126 @@
1
+ {
2
+ "results": {
3
+ "lambada": {
4
+ "perplexity,none": 3.8784881294998965,
5
+ "perplexity_stderr,none": 0.23551879245757332,
6
+ "acc,none": 0.7087133708519309,
7
+ "acc_stderr,none": 0.016850692633428886,
8
+ "alias": "lambada"
9
+ },
10
+ "lambada_openai": {
11
+ "perplexity,none": 3.435839806000842,
12
+ "perplexity_stderr,none": 0.06696344523500707,
13
+ "acc,none": 0.7399573064234427,
14
+ "acc_stderr,none": 0.00611135809828806,
15
+ "alias": " - lambada_openai"
16
+ },
17
+ "lambada_standard": {
18
+ "perplexity,none": 4.321136452998951,
19
+ "perplexity_stderr,none": 0.09207106981169363,
20
+ "acc,none": 0.6774694352804191,
21
+ "acc_stderr,none": 0.0065124194470117,
22
+ "alias": " - lambada_standard"
23
+ }
24
+ },
25
+ "groups": {
26
+ "lambada": {
27
+ "perplexity,none": 3.8784881294998965,
28
+ "perplexity_stderr,none": 0.23551879245757332,
29
+ "acc,none": 0.7087133708519309,
30
+ "acc_stderr,none": 0.016850692633428886,
31
+ "alias": "lambada"
32
+ }
33
+ },
34
+ "configs": {
35
+ "lambada_openai": {
36
+ "task": "lambada_openai",
37
+ "group": [
38
+ "lambada"
39
+ ],
40
+ "dataset_path": "EleutherAI/lambada_openai",
41
+ "dataset_name": "default",
42
+ "test_split": "test",
43
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
44
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
45
+ "description": "",
46
+ "target_delimiter": " ",
47
+ "fewshot_delimiter": "\n\n",
48
+ "metric_list": [
49
+ {
50
+ "metric": "perplexity",
51
+ "aggregation": "perplexity",
52
+ "higher_is_better": false
53
+ },
54
+ {
55
+ "metric": "acc",
56
+ "aggregation": "mean",
57
+ "higher_is_better": true
58
+ }
59
+ ],
60
+ "output_type": "loglikelihood",
61
+ "repeats": 1,
62
+ "should_decontaminate": true,
63
+ "doc_to_decontamination_query": "{{text}}",
64
+ "metadata": {
65
+ "version": 1.0
66
+ }
67
+ },
68
+ "lambada_standard": {
69
+ "task": "lambada_standard",
70
+ "group": [
71
+ "lambada"
72
+ ],
73
+ "dataset_path": "lambada",
74
+ "validation_split": "validation",
75
+ "test_split": "test",
76
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
77
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
78
+ "description": "",
79
+ "target_delimiter": " ",
80
+ "fewshot_delimiter": "\n\n",
81
+ "metric_list": [
82
+ {
83
+ "metric": "perplexity",
84
+ "aggregation": "perplexity",
85
+ "higher_is_better": false
86
+ },
87
+ {
88
+ "metric": "acc",
89
+ "aggregation": "mean",
90
+ "higher_is_better": true
91
+ }
92
+ ],
93
+ "output_type": "loglikelihood",
94
+ "repeats": 1,
95
+ "should_decontaminate": true,
96
+ "doc_to_decontamination_query": "{{text}}",
97
+ "metadata": {
98
+ "version": 1.0
99
+ }
100
+ }
101
+ },
102
+ "versions": {
103
+ "lambada": "N/A",
104
+ "lambada_openai": 1.0,
105
+ "lambada_standard": 1.0
106
+ },
107
+ "n-shot": {
108
+ "lambada": 0,
109
+ "lambada_openai": 0,
110
+ "lambada_standard": 0
111
+ },
112
+ "config": {
113
+ "model": "hf",
114
+ "model_args": "pretrained=./rwkv-x-dev/R4-7B-15t-No-Mask_pth,dtype=bfloat16,trust_remote_code=True",
115
+ "batch_size": "auto",
116
+ "batch_sizes": [
117
+ 64
118
+ ],
119
+ "device": null,
120
+ "use_cache": null,
121
+ "limit": null,
122
+ "bootstrap_iters": 100000,
123
+ "gen_kwargs": null
124
+ },
125
+ "git_hash": "8281e96"
126
+ }
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/lambada/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b5469c918dc9db04f1bc7ca239527c4dbd8b3cac25b1e9886ec6e9374b9765a1
+ size 47978
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/lambada_multilingual/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,252 @@
1
+ {
2
+ "results": {
3
+ "lambada_multilingual": {
4
+ "perplexity,none": 20.70048891120678,
5
+ "perplexity_stderr,none": 8.059465734784938,
6
+ "acc,none": 0.5369687560644285,
7
+ "acc_stderr,none": 0.08430882353217248,
8
+ "alias": "lambada_multilingual"
9
+ },
10
+ "lambada_openai_mt_de": {
11
+ "perplexity,none": 34.04622392997292,
12
+ "perplexity_stderr,none": 1.8391447472790967,
13
+ "acc,none": 0.42286046962934215,
14
+ "acc_stderr,none": 0.0068825761876977875,
15
+ "alias": " - lambada_openai_mt_de"
16
+ },
17
+ "lambada_openai_mt_en": {
18
+ "perplexity,none": 3.4357348930487683,
19
+ "perplexity_stderr,none": 0.06697837838975183,
20
+ "acc,none": 0.7403454298466913,
21
+ "acc_stderr,none": 0.006108397042730499,
22
+ "alias": " - lambada_openai_mt_en"
23
+ },
24
+ "lambada_openai_mt_es": {
25
+ "perplexity,none": 28.25835836046741,
26
+ "perplexity_stderr,none": 1.3682644328702065,
27
+ "acc,none": 0.45526877547059963,
28
+ "acc_stderr,none": 0.006938045450999903,
29
+ "alias": " - lambada_openai_mt_es"
30
+ },
31
+ "lambada_openai_mt_fr": {
32
+ "perplexity,none": 16.19056770863486,
33
+ "perplexity_stderr,none": 0.7674221220205246,
34
+ "acc,none": 0.5513293227246264,
35
+ "acc_stderr,none": 0.006929173919665485,
36
+ "alias": " - lambada_openai_mt_fr"
37
+ },
38
+ "lambada_openai_mt_it": {
39
+ "perplexity,none": 21.571559663909937,
40
+ "perplexity_stderr,none": 1.1208902713252802,
41
+ "acc,none": 0.515039782650883,
42
+ "acc_stderr,none": 0.006962825604553246,
43
+ "alias": " - lambada_openai_mt_it"
44
+ }
45
+ },
46
+ "groups": {
47
+ "lambada_multilingual": {
48
+ "perplexity,none": 20.70048891120678,
49
+ "perplexity_stderr,none": 8.059465734784938,
50
+ "acc,none": 0.5369687560644285,
51
+ "acc_stderr,none": 0.08430882353217248,
52
+ "alias": "lambada_multilingual"
53
+ }
54
+ },
55
+ "configs": {
56
+ "lambada_openai_mt_de": {
57
+ "task": "lambada_openai_mt_de",
58
+ "group": [
59
+ "lambada_multilingual"
60
+ ],
61
+ "dataset_path": "EleutherAI/lambada_openai",
62
+ "dataset_name": "de",
63
+ "test_split": "test",
64
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
65
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
66
+ "description": "",
67
+ "target_delimiter": " ",
68
+ "fewshot_delimiter": "\n\n",
69
+ "metric_list": [
70
+ {
71
+ "metric": "perplexity",
72
+ "aggregation": "perplexity",
73
+ "higher_is_better": false
74
+ },
75
+ {
76
+ "metric": "acc",
77
+ "aggregation": "mean",
78
+ "higher_is_better": true
79
+ }
80
+ ],
81
+ "output_type": "loglikelihood",
82
+ "repeats": 1,
83
+ "should_decontaminate": true,
84
+ "doc_to_decontamination_query": "{{text}}",
85
+ "metadata": {
86
+ "version": 1.0
87
+ }
88
+ },
89
+ "lambada_openai_mt_en": {
90
+ "task": "lambada_openai_mt_en",
91
+ "group": [
92
+ "lambada_multilingual"
93
+ ],
94
+ "dataset_path": "EleutherAI/lambada_openai",
95
+ "dataset_name": "en",
96
+ "test_split": "test",
97
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
98
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
99
+ "description": "",
100
+ "target_delimiter": " ",
101
+ "fewshot_delimiter": "\n\n",
102
+ "metric_list": [
103
+ {
104
+ "metric": "perplexity",
105
+ "aggregation": "perplexity",
106
+ "higher_is_better": false
107
+ },
108
+ {
109
+ "metric": "acc",
110
+ "aggregation": "mean",
111
+ "higher_is_better": true
112
+ }
113
+ ],
114
+ "output_type": "loglikelihood",
115
+ "repeats": 1,
116
+ "should_decontaminate": true,
117
+ "doc_to_decontamination_query": "{{text}}",
118
+ "metadata": {
119
+ "version": 1.0
120
+ }
121
+ },
122
+ "lambada_openai_mt_es": {
123
+ "task": "lambada_openai_mt_es",
124
+ "group": [
125
+ "lambada_multilingual"
126
+ ],
127
+ "dataset_path": "EleutherAI/lambada_openai",
128
+ "dataset_name": "es",
129
+ "test_split": "test",
130
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
131
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
132
+ "description": "",
133
+ "target_delimiter": " ",
134
+ "fewshot_delimiter": "\n\n",
135
+ "metric_list": [
136
+ {
137
+ "metric": "perplexity",
138
+ "aggregation": "perplexity",
139
+ "higher_is_better": false
140
+ },
141
+ {
142
+ "metric": "acc",
143
+ "aggregation": "mean",
144
+ "higher_is_better": true
145
+ }
146
+ ],
147
+ "output_type": "loglikelihood",
148
+ "repeats": 1,
149
+ "should_decontaminate": true,
150
+ "doc_to_decontamination_query": "{{text}}",
151
+ "metadata": {
152
+ "version": 1.0
153
+ }
154
+ },
155
+ "lambada_openai_mt_fr": {
156
+ "task": "lambada_openai_mt_fr",
157
+ "group": [
158
+ "lambada_multilingual"
159
+ ],
160
+ "dataset_path": "EleutherAI/lambada_openai",
161
+ "dataset_name": "fr",
162
+ "test_split": "test",
163
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
164
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
165
+ "description": "",
166
+ "target_delimiter": " ",
167
+ "fewshot_delimiter": "\n\n",
168
+ "metric_list": [
169
+ {
170
+ "metric": "perplexity",
171
+ "aggregation": "perplexity",
172
+ "higher_is_better": false
173
+ },
174
+ {
175
+ "metric": "acc",
176
+ "aggregation": "mean",
177
+ "higher_is_better": true
178
+ }
179
+ ],
180
+ "output_type": "loglikelihood",
181
+ "repeats": 1,
182
+ "should_decontaminate": true,
183
+ "doc_to_decontamination_query": "{{text}}",
184
+ "metadata": {
185
+ "version": 1.0
186
+ }
187
+ },
188
+ "lambada_openai_mt_it": {
189
+ "task": "lambada_openai_mt_it",
190
+ "group": [
191
+ "lambada_multilingual"
192
+ ],
193
+ "dataset_path": "EleutherAI/lambada_openai",
194
+ "dataset_name": "it",
195
+ "test_split": "test",
196
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
197
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
198
+ "description": "",
199
+ "target_delimiter": " ",
200
+ "fewshot_delimiter": "\n\n",
201
+ "metric_list": [
202
+ {
203
+ "metric": "perplexity",
204
+ "aggregation": "perplexity",
205
+ "higher_is_better": false
206
+ },
207
+ {
208
+ "metric": "acc",
209
+ "aggregation": "mean",
210
+ "higher_is_better": true
211
+ }
212
+ ],
213
+ "output_type": "loglikelihood",
214
+ "repeats": 1,
215
+ "should_decontaminate": true,
216
+ "doc_to_decontamination_query": "{{text}}",
217
+ "metadata": {
218
+ "version": 1.0
219
+ }
220
+ }
221
+ },
222
+ "versions": {
223
+ "lambada_multilingual": "N/A",
224
+ "lambada_openai_mt_de": 1.0,
225
+ "lambada_openai_mt_en": 1.0,
226
+ "lambada_openai_mt_es": 1.0,
227
+ "lambada_openai_mt_fr": 1.0,
228
+ "lambada_openai_mt_it": 1.0
229
+ },
230
+ "n-shot": {
231
+ "lambada_multilingual": 0,
232
+ "lambada_openai_mt_de": 0,
233
+ "lambada_openai_mt_en": 0,
234
+ "lambada_openai_mt_es": 0,
235
+ "lambada_openai_mt_fr": 0,
236
+ "lambada_openai_mt_it": 0
237
+ },
238
+ "config": {
239
+ "model": "hf",
240
+ "model_args": "pretrained=./rwkv-x-dev/R4-7B-15t-No-Mask_pth,dtype=bfloat16,trust_remote_code=True",
241
+ "batch_size": "auto",
242
+ "batch_sizes": [
243
+ 64
244
+ ],
245
+ "device": null,
246
+ "use_cache": null,
247
+ "limit": null,
248
+ "bootstrap_iters": 100000,
249
+ "gen_kwargs": null
250
+ },
251
+ "git_hash": "8281e96"
252
+ }
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/lambada_multilingual/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f912cb82f9caef9babb3bce391dc6f94d190bd2e02b50e0fdf4fb56fcf840bca
+ size 65618
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/logiqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,66 @@
1
+ {
2
+ "results": {
3
+ "logiqa": {
4
+ "acc,none": 0.23195084485407066,
5
+ "acc_stderr,none": 0.016555252497925898,
6
+ "acc_norm,none": 0.2887864823348694,
7
+ "acc_norm_stderr,none": 0.017775906336539235,
8
+ "alias": "logiqa"
9
+ }
10
+ },
11
+ "configs": {
12
+ "logiqa": {
13
+ "task": "logiqa",
14
+ "dataset_path": "EleutherAI/logiqa",
15
+ "dataset_name": "logiqa",
16
+ "training_split": "train",
17
+ "validation_split": "validation",
18
+ "test_split": "test",
19
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Passage: <passage>\n Question: <question>\n Choices:\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n Answer:\n \"\"\"\n choices = [\"a\", \"b\", \"c\", \"d\"]\n prompt = \"Passage: \" + doc[\"context\"] + \"\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\nChoices:\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"Answer:\"\n return prompt\n",
20
+ "doc_to_target": "def doc_to_target(doc) -> int:\n choices = [\"a\", \"b\", \"c\", \"d\"]\n return choices.index(doc[\"label\"].strip())\n",
21
+ "doc_to_choice": "{{options}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "metric_list": [
26
+ {
27
+ "metric": "acc",
28
+ "aggregation": "mean",
29
+ "higher_is_better": true
30
+ },
31
+ {
32
+ "metric": "acc_norm",
33
+ "aggregation": "mean",
34
+ "higher_is_better": true
35
+ }
36
+ ],
37
+ "output_type": "multiple_choice",
38
+ "repeats": 1,
39
+ "should_decontaminate": true,
40
+ "doc_to_decontamination_query": "{{context}}",
41
+ "metadata": {
42
+ "version": 1.0
43
+ }
44
+ }
45
+ },
46
+ "versions": {
47
+ "logiqa": 1.0
48
+ },
49
+ "n-shot": {
50
+ "logiqa": 0
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=./rwkv-x-dev/R4-7B-15t-No-Mask_pth,dtype=bfloat16,trust_remote_code=True",
55
+ "batch_size": "auto",
56
+ "batch_sizes": [
57
+ 32
58
+ ],
59
+ "device": null,
60
+ "use_cache": null,
61
+ "limit": null,
62
+ "bootstrap_iters": 100000,
63
+ "gen_kwargs": null
64
+ },
65
+ "git_hash": "8281e96"
66
+ }
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/logiqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67299b15bf7137fdc150e91007bcbf9159bcaa7ba0b9ae070f8d308cca86df29
+ size 46560
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/mmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,2594 @@
+ {
+ "results": {
+ "mmlu": {
+ "acc,none": 0.2829369035749893,
+ "acc_stderr,none": 0.04449010491367579,
+ "alias": "mmlu"
+ },
+ "mmlu_humanities": {
+ "alias": " - humanities",
+ "acc,none": 0.27290116896918176,
+ "acc_stderr,none": 0.034599615055619884
+ },
+ "mmlu_formal_logic": {
+ "alias": " - formal_logic",
+ "acc,none": 0.31746031746031744,
+ "acc_stderr,none": 0.04163453031302859
+ },
+ "mmlu_high_school_european_history": {
+ "alias": " - high_school_european_history",
+ "acc,none": 0.3212121212121212,
+ "acc_stderr,none": 0.03646204963253812
+ },
+ "mmlu_high_school_us_history": {
+ "alias": " - high_school_us_history",
+ "acc,none": 0.29411764705882354,
+ "acc_stderr,none": 0.03198001660115071
+ },
+ "mmlu_high_school_world_history": {
+ "alias": " - high_school_world_history",
+ "acc,none": 0.28270042194092826,
+ "acc_stderr,none": 0.029312814153955934
+ },
+ "mmlu_international_law": {
+ "alias": " - international_law",
+ "acc,none": 0.3140495867768595,
+ "acc_stderr,none": 0.042369647530410184
+ },
+ "mmlu_jurisprudence": {
+ "alias": " - jurisprudence",
+ "acc,none": 0.3148148148148148,
+ "acc_stderr,none": 0.04489931073591312
+ },
+ "mmlu_logical_fallacies": {
+ "alias": " - logical_fallacies",
+ "acc,none": 0.26993865030674846,
+ "acc_stderr,none": 0.034878251684978906
+ },
+ "mmlu_moral_disputes": {
+ "alias": " - moral_disputes",
+ "acc,none": 0.27167630057803466,
+ "acc_stderr,none": 0.023948512905468365
+ },
+ "mmlu_moral_scenarios": {
+ "alias": " - moral_scenarios",
+ "acc,none": 0.23910614525139665,
+ "acc_stderr,none": 0.014265554192331154
+ },
+ "mmlu_philosophy": {
+ "alias": " - philosophy",
+ "acc,none": 0.33762057877813506,
+ "acc_stderr,none": 0.02685882587948854
+ },
+ "mmlu_prehistory": {
+ "alias": " - prehistory",
+ "acc,none": 0.2839506172839506,
+ "acc_stderr,none": 0.025089478523765134
+ },
+ "mmlu_professional_law": {
+ "alias": " - professional_law",
+ "acc,none": 0.2516297262059974,
+ "acc_stderr,none": 0.011083276280441909
+ },
+ "mmlu_world_religions": {
+ "alias": " - world_religions",
+ "acc,none": 0.3333333333333333,
+ "acc_stderr,none": 0.036155076303109365
+ },
+ "mmlu_other": {
+ "alias": " - other",
+ "acc,none": 0.31412938525909234,
+ "acc_stderr,none": 0.04595175653871959
+ },
+ "mmlu_business_ethics": {
+ "alias": " - business_ethics",
+ "acc,none": 0.29,
+ "acc_stderr,none": 0.045604802157206845
+ },
+ "mmlu_clinical_knowledge": {
+ "alias": " - clinical_knowledge",
+ "acc,none": 0.3320754716981132,
+ "acc_stderr,none": 0.028985455652334388
+ },
+ "mmlu_college_medicine": {
+ "alias": " - college_medicine",
+ "acc,none": 0.21965317919075145,
+ "acc_stderr,none": 0.031568093627031744
+ },
+ "mmlu_global_facts": {
+ "alias": " - global_facts",
+ "acc,none": 0.33,
+ "acc_stderr,none": 0.04725815626252605
+ },
+ "mmlu_human_aging": {
+ "alias": " - human_aging",
+ "acc,none": 0.3811659192825112,
+ "acc_stderr,none": 0.03259625118416828
+ },
+ "mmlu_management": {
+ "alias": " - management",
+ "acc,none": 0.2912621359223301,
+ "acc_stderr,none": 0.044986763205729224
+ },
+ "mmlu_marketing": {
+ "alias": " - marketing",
+ "acc,none": 0.3247863247863248,
+ "acc_stderr,none": 0.03067902276549883
+ },
+ "mmlu_medical_genetics": {
+ "alias": " - medical_genetics",
+ "acc,none": 0.28,
+ "acc_stderr,none": 0.04512608598542127
+ },
+ "mmlu_miscellaneous": {
+ "alias": " - miscellaneous",
+ "acc,none": 0.3652618135376756,
+ "acc_stderr,none": 0.01721853002883865
+ },
+ "mmlu_nutrition": {
+ "alias": " - nutrition",
+ "acc,none": 0.29411764705882354,
+ "acc_stderr,none": 0.026090162504279046
+ },
+ "mmlu_professional_accounting": {
+ "alias": " - professional_accounting",
+ "acc,none": 0.25886524822695034,
+ "acc_stderr,none": 0.026129572527180848
+ },
+ "mmlu_professional_medicine": {
+ "alias": " - professional_medicine",
+ "acc,none": 0.2426470588235294,
+ "acc_stderr,none": 0.02604066247420126
+ },
+ "mmlu_virology": {
+ "alias": " - virology",
+ "acc,none": 0.3253012048192771,
+ "acc_stderr,none": 0.03647168523683227
+ },
+ "mmlu_social_sciences": {
+ "alias": " - social_sciences",
+ "acc,none": 0.27331816704582385,
+ "acc_stderr,none": 0.04044627009055694
+ },
+ "mmlu_econometrics": {
+ "alias": " - econometrics",
+ "acc,none": 0.23684210526315788,
+ "acc_stderr,none": 0.03999423879281338
+ },
+ "mmlu_high_school_geography": {
+ "alias": " - high_school_geography",
+ "acc,none": 0.29292929292929293,
+ "acc_stderr,none": 0.032424979581788166
+ },
+ "mmlu_high_school_government_and_politics": {
+ "alias": " - high_school_government_and_politics",
+ "acc,none": 0.34196891191709844,
+ "acc_stderr,none": 0.03423465100104282
+ },
+ "mmlu_high_school_macroeconomics": {
+ "alias": " - high_school_macroeconomics",
+ "acc,none": 0.23846153846153847,
+ "acc_stderr,none": 0.02160629449464773
+ },
+ "mmlu_high_school_microeconomics": {
+ "alias": " - high_school_microeconomics",
+ "acc,none": 0.2689075630252101,
+ "acc_stderr,none": 0.028801392193631273
+ },
+ "mmlu_high_school_psychology": {
+ "alias": " - high_school_psychology",
+ "acc,none": 0.27706422018348625,
+ "acc_stderr,none": 0.019188482590169538
+ },
+ "mmlu_human_sexuality": {
+ "alias": " - human_sexuality",
+ "acc,none": 0.29770992366412213,
+ "acc_stderr,none": 0.040103589424622034
+ },
+ "mmlu_professional_psychology": {
+ "alias": " - professional_psychology",
+ "acc,none": 0.27941176470588236,
+ "acc_stderr,none": 0.018152871051538802
+ },
+ "mmlu_public_relations": {
+ "alias": " - public_relations",
+ "acc,none": 0.36363636363636365,
+ "acc_stderr,none": 0.04607582090719976
+ },
+ "mmlu_security_studies": {
+ "alias": " - security_studies",
+ "acc,none": 0.2,
+ "acc_stderr,none": 0.025607375986579164
+ },
+ "mmlu_sociology": {
+ "alias": " - sociology",
+ "acc,none": 0.2935323383084577,
+ "acc_stderr,none": 0.03220024104534205
+ },
+ "mmlu_us_foreign_policy": {
+ "alias": " - us_foreign_policy",
+ "acc,none": 0.24,
+ "acc_stderr,none": 0.04292346959909284
+ },
+ "mmlu_stem": {
+ "alias": " - stem",
+ "acc,none": 0.2765620044402157,
+ "acc_stderr,none": 0.052108661789192635
+ },
+ "mmlu_abstract_algebra": {
+ "alias": " - abstract_algebra",
+ "acc,none": 0.25,
+ "acc_stderr,none": 0.04351941398892446
+ },
+ "mmlu_anatomy": {
+ "alias": " - anatomy",
+ "acc,none": 0.34074074074074073,
+ "acc_stderr,none": 0.040943762699967926
+ },
+ "mmlu_astronomy": {
+ "alias": " - astronomy",
+ "acc,none": 0.29605263157894735,
+ "acc_stderr,none": 0.03715062154998905
+ },
+ "mmlu_college_biology": {
+ "alias": " - college_biology",
+ "acc,none": 0.2916666666666667,
+ "acc_stderr,none": 0.038009680605548594
+ },
+ "mmlu_college_chemistry": {
+ "alias": " - college_chemistry",
+ "acc,none": 0.24,
+ "acc_stderr,none": 0.04292346959909282
+ },
+ "mmlu_college_computer_science": {
+ "alias": " - college_computer_science",
+ "acc,none": 0.2,
+ "acc_stderr,none": 0.04020151261036846
+ },
+ "mmlu_college_mathematics": {
+ "alias": " - college_mathematics",
+ "acc,none": 0.22,
+ "acc_stderr,none": 0.0416333199893227
+ },
+ "mmlu_college_physics": {
+ "alias": " - college_physics",
+ "acc,none": 0.18627450980392157,
+ "acc_stderr,none": 0.03873958714149352
+ },
+ "mmlu_computer_security": {
+ "alias": " - computer_security",
+ "acc,none": 0.3,
+ "acc_stderr,none": 0.046056618647183814
+ },
+ "mmlu_conceptual_physics": {
+ "alias": " - conceptual_physics",
+ "acc,none": 0.33191489361702126,
+ "acc_stderr,none": 0.030783736757745643
+ },
+ "mmlu_electrical_engineering": {
+ "alias": " - electrical_engineering",
+ "acc,none": 0.2689655172413793,
+ "acc_stderr,none": 0.03695183311650232
+ },
+ "mmlu_elementary_mathematics": {
+ "alias": " - elementary_mathematics",
+ "acc,none": 0.25925925925925924,
+ "acc_stderr,none": 0.022569897074918428
+ },
+ "mmlu_high_school_biology": {
+ "alias": " - high_school_biology",
+ "acc,none": 0.34838709677419355,
+ "acc_stderr,none": 0.02710482632810094
+ },
+ "mmlu_high_school_chemistry": {
+ "alias": " - high_school_chemistry",
+ "acc,none": 0.2561576354679803,
+ "acc_stderr,none": 0.030712730070982592
+ },
+ "mmlu_high_school_computer_science": {
+ "alias": " - high_school_computer_science",
+ "acc,none": 0.33,
+ "acc_stderr,none": 0.047258156262526045
+ },
+ "mmlu_high_school_mathematics": {
+ "alias": " - high_school_mathematics",
+ "acc,none": 0.27037037037037037,
+ "acc_stderr,none": 0.02708037281514566
+ },
+ "mmlu_high_school_physics": {
+ "alias": " - high_school_physics",
+ "acc,none": 0.271523178807947,
+ "acc_stderr,none": 0.03631329803969653
+ },
+ "mmlu_high_school_statistics": {
+ "alias": " - high_school_statistics",
+ "acc,none": 0.18518518518518517,
+ "acc_stderr,none": 0.026491914727355168
+ },
+ "mmlu_machine_learning": {
+ "alias": " - machine_learning",
+ "acc,none": 0.33035714285714285,
+ "acc_stderr,none": 0.04464285714285713
+ }
+ },
+ "groups": {
+ "mmlu": {
+ "acc,none": 0.2829369035749893,
+ "acc_stderr,none": 0.04449010491367579,
+ "alias": "mmlu"
+ },
+ "mmlu_humanities": {
+ "alias": " - humanities",
+ "acc,none": 0.27290116896918176,
+ "acc_stderr,none": 0.034599615055619884
+ },
+ "mmlu_other": {
+ "alias": " - other",
+ "acc,none": 0.31412938525909234,
+ "acc_stderr,none": 0.04595175653871959
+ },
+ "mmlu_social_sciences": {
+ "alias": " - social_sciences",
+ "acc,none": 0.27331816704582385,
+ "acc_stderr,none": 0.04044627009055694
+ },
+ "mmlu_stem": {
+ "alias": " - stem",
+ "acc,none": 0.2765620044402157,
+ "acc_stderr,none": 0.052108661789192635
+ }
+ },
341
+ "configs": {
342
+ "mmlu_abstract_algebra": {
343
+ "task": "mmlu_abstract_algebra",
344
+ "task_alias": "abstract_algebra",
345
+ "group": "mmlu_stem",
346
+ "group_alias": "stem",
347
+ "dataset_path": "hails/mmlu_no_train",
348
+ "dataset_name": "abstract_algebra",
349
+ "test_split": "test",
350
+ "fewshot_split": "dev",
351
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
352
+ "doc_to_target": "answer",
353
+ "doc_to_choice": [
354
+ "A",
355
+ "B",
356
+ "C",
357
+ "D"
358
+ ],
359
+ "description": "The following are multiple choice questions (with answers) about abstract algebra.\n\n",
360
+ "target_delimiter": " ",
361
+ "fewshot_delimiter": "\n\n",
362
+ "fewshot_config": {
363
+ "sampler": "first_n"
364
+ },
365
+ "metric_list": [
366
+ {
367
+ "metric": "acc",
368
+ "aggregation": "mean",
369
+ "higher_is_better": true
370
+ }
371
+ ],
372
+ "output_type": "multiple_choice",
373
+ "repeats": 1,
374
+ "should_decontaminate": false,
375
+ "metadata": {
376
+ "version": 0.0
377
+ }
378
+ },
379
+ "mmlu_anatomy": {
380
+ "task": "mmlu_anatomy",
381
+ "task_alias": "anatomy",
382
+ "group": "mmlu_stem",
383
+ "group_alias": "stem",
384
+ "dataset_path": "hails/mmlu_no_train",
385
+ "dataset_name": "anatomy",
386
+ "test_split": "test",
387
+ "fewshot_split": "dev",
388
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
389
+ "doc_to_target": "answer",
390
+ "doc_to_choice": [
391
+ "A",
392
+ "B",
393
+ "C",
394
+ "D"
395
+ ],
396
+ "description": "The following are multiple choice questions (with answers) about anatomy.\n\n",
397
+ "target_delimiter": " ",
398
+ "fewshot_delimiter": "\n\n",
399
+ "fewshot_config": {
400
+ "sampler": "first_n"
401
+ },
402
+ "metric_list": [
403
+ {
404
+ "metric": "acc",
405
+ "aggregation": "mean",
406
+ "higher_is_better": true
407
+ }
408
+ ],
409
+ "output_type": "multiple_choice",
410
+ "repeats": 1,
411
+ "should_decontaminate": false,
412
+ "metadata": {
413
+ "version": 0.0
414
+ }
415
+ },
416
+ "mmlu_astronomy": {
417
+ "task": "mmlu_astronomy",
418
+ "task_alias": "astronomy",
419
+ "group": "mmlu_stem",
420
+ "group_alias": "stem",
421
+ "dataset_path": "hails/mmlu_no_train",
422
+ "dataset_name": "astronomy",
423
+ "test_split": "test",
424
+ "fewshot_split": "dev",
425
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
426
+ "doc_to_target": "answer",
427
+ "doc_to_choice": [
428
+ "A",
429
+ "B",
430
+ "C",
431
+ "D"
432
+ ],
433
+ "description": "The following are multiple choice questions (with answers) about astronomy.\n\n",
434
+ "target_delimiter": " ",
435
+ "fewshot_delimiter": "\n\n",
436
+ "fewshot_config": {
437
+ "sampler": "first_n"
438
+ },
439
+ "metric_list": [
440
+ {
441
+ "metric": "acc",
442
+ "aggregation": "mean",
443
+ "higher_is_better": true
444
+ }
445
+ ],
446
+ "output_type": "multiple_choice",
447
+ "repeats": 1,
448
+ "should_decontaminate": false,
449
+ "metadata": {
450
+ "version": 0.0
451
+ }
452
+ },
453
+ "mmlu_business_ethics": {
454
+ "task": "mmlu_business_ethics",
455
+ "task_alias": "business_ethics",
456
+ "group": "mmlu_other",
457
+ "group_alias": "other",
458
+ "dataset_path": "hails/mmlu_no_train",
459
+ "dataset_name": "business_ethics",
460
+ "test_split": "test",
461
+ "fewshot_split": "dev",
462
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
463
+ "doc_to_target": "answer",
464
+ "doc_to_choice": [
465
+ "A",
466
+ "B",
467
+ "C",
468
+ "D"
469
+ ],
470
+ "description": "The following are multiple choice questions (with answers) about business ethics.\n\n",
471
+ "target_delimiter": " ",
472
+ "fewshot_delimiter": "\n\n",
473
+ "fewshot_config": {
474
+ "sampler": "first_n"
475
+ },
476
+ "metric_list": [
477
+ {
478
+ "metric": "acc",
479
+ "aggregation": "mean",
480
+ "higher_is_better": true
481
+ }
482
+ ],
483
+ "output_type": "multiple_choice",
484
+ "repeats": 1,
485
+ "should_decontaminate": false,
486
+ "metadata": {
487
+ "version": 0.0
488
+ }
489
+ },
490
+ "mmlu_clinical_knowledge": {
491
+ "task": "mmlu_clinical_knowledge",
492
+ "task_alias": "clinical_knowledge",
493
+ "group": "mmlu_other",
494
+ "group_alias": "other",
495
+ "dataset_path": "hails/mmlu_no_train",
496
+ "dataset_name": "clinical_knowledge",
497
+ "test_split": "test",
498
+ "fewshot_split": "dev",
499
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
500
+ "doc_to_target": "answer",
501
+ "doc_to_choice": [
502
+ "A",
503
+ "B",
504
+ "C",
505
+ "D"
506
+ ],
507
+ "description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n",
508
+ "target_delimiter": " ",
509
+ "fewshot_delimiter": "\n\n",
510
+ "fewshot_config": {
511
+ "sampler": "first_n"
512
+ },
513
+ "metric_list": [
514
+ {
515
+ "metric": "acc",
516
+ "aggregation": "mean",
517
+ "higher_is_better": true
518
+ }
519
+ ],
520
+ "output_type": "multiple_choice",
521
+ "repeats": 1,
522
+ "should_decontaminate": false,
523
+ "metadata": {
524
+ "version": 0.0
525
+ }
526
+ },
527
+ "mmlu_college_biology": {
528
+ "task": "mmlu_college_biology",
529
+ "task_alias": "college_biology",
530
+ "group": "mmlu_stem",
531
+ "group_alias": "stem",
532
+ "dataset_path": "hails/mmlu_no_train",
533
+ "dataset_name": "college_biology",
534
+ "test_split": "test",
535
+ "fewshot_split": "dev",
536
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
537
+ "doc_to_target": "answer",
538
+ "doc_to_choice": [
539
+ "A",
540
+ "B",
541
+ "C",
542
+ "D"
543
+ ],
544
+ "description": "The following are multiple choice questions (with answers) about college biology.\n\n",
545
+ "target_delimiter": " ",
546
+ "fewshot_delimiter": "\n\n",
547
+ "fewshot_config": {
548
+ "sampler": "first_n"
549
+ },
550
+ "metric_list": [
551
+ {
552
+ "metric": "acc",
553
+ "aggregation": "mean",
554
+ "higher_is_better": true
555
+ }
556
+ ],
557
+ "output_type": "multiple_choice",
558
+ "repeats": 1,
559
+ "should_decontaminate": false,
560
+ "metadata": {
561
+ "version": 0.0
562
+ }
563
+ },
564
+ "mmlu_college_chemistry": {
565
+ "task": "mmlu_college_chemistry",
566
+ "task_alias": "college_chemistry",
567
+ "group": "mmlu_stem",
568
+ "group_alias": "stem",
569
+ "dataset_path": "hails/mmlu_no_train",
570
+ "dataset_name": "college_chemistry",
571
+ "test_split": "test",
572
+ "fewshot_split": "dev",
573
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
574
+ "doc_to_target": "answer",
575
+ "doc_to_choice": [
576
+ "A",
577
+ "B",
578
+ "C",
579
+ "D"
580
+ ],
581
+ "description": "The following are multiple choice questions (with answers) about college chemistry.\n\n",
582
+ "target_delimiter": " ",
583
+ "fewshot_delimiter": "\n\n",
584
+ "fewshot_config": {
585
+ "sampler": "first_n"
586
+ },
587
+ "metric_list": [
588
+ {
589
+ "metric": "acc",
590
+ "aggregation": "mean",
591
+ "higher_is_better": true
592
+ }
593
+ ],
594
+ "output_type": "multiple_choice",
595
+ "repeats": 1,
596
+ "should_decontaminate": false,
597
+ "metadata": {
598
+ "version": 0.0
599
+ }
600
+ },
601
+ "mmlu_college_computer_science": {
602
+ "task": "mmlu_college_computer_science",
603
+ "task_alias": "college_computer_science",
604
+ "group": "mmlu_stem",
605
+ "group_alias": "stem",
606
+ "dataset_path": "hails/mmlu_no_train",
607
+ "dataset_name": "college_computer_science",
608
+ "test_split": "test",
609
+ "fewshot_split": "dev",
610
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
611
+ "doc_to_target": "answer",
612
+ "doc_to_choice": [
613
+ "A",
614
+ "B",
615
+ "C",
616
+ "D"
617
+ ],
618
+ "description": "The following are multiple choice questions (with answers) about college computer science.\n\n",
619
+ "target_delimiter": " ",
620
+ "fewshot_delimiter": "\n\n",
621
+ "fewshot_config": {
622
+ "sampler": "first_n"
623
+ },
624
+ "metric_list": [
625
+ {
626
+ "metric": "acc",
627
+ "aggregation": "mean",
628
+ "higher_is_better": true
629
+ }
630
+ ],
631
+ "output_type": "multiple_choice",
632
+ "repeats": 1,
633
+ "should_decontaminate": false,
634
+ "metadata": {
635
+ "version": 0.0
636
+ }
637
+ },
638
+ "mmlu_college_mathematics": {
639
+ "task": "mmlu_college_mathematics",
640
+ "task_alias": "college_mathematics",
641
+ "group": "mmlu_stem",
642
+ "group_alias": "stem",
643
+ "dataset_path": "hails/mmlu_no_train",
644
+ "dataset_name": "college_mathematics",
645
+ "test_split": "test",
646
+ "fewshot_split": "dev",
647
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
648
+ "doc_to_target": "answer",
649
+ "doc_to_choice": [
650
+ "A",
651
+ "B",
652
+ "C",
653
+ "D"
654
+ ],
655
+ "description": "The following are multiple choice questions (with answers) about college mathematics.\n\n",
656
+ "target_delimiter": " ",
657
+ "fewshot_delimiter": "\n\n",
658
+ "fewshot_config": {
659
+ "sampler": "first_n"
660
+ },
661
+ "metric_list": [
662
+ {
663
+ "metric": "acc",
664
+ "aggregation": "mean",
665
+ "higher_is_better": true
666
+ }
667
+ ],
668
+ "output_type": "multiple_choice",
669
+ "repeats": 1,
670
+ "should_decontaminate": false,
671
+ "metadata": {
672
+ "version": 0.0
673
+ }
674
+ },
675
+ "mmlu_college_medicine": {
676
+ "task": "mmlu_college_medicine",
677
+ "task_alias": "college_medicine",
678
+ "group": "mmlu_other",
679
+ "group_alias": "other",
680
+ "dataset_path": "hails/mmlu_no_train",
681
+ "dataset_name": "college_medicine",
682
+ "test_split": "test",
683
+ "fewshot_split": "dev",
684
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
685
+ "doc_to_target": "answer",
686
+ "doc_to_choice": [
687
+ "A",
688
+ "B",
689
+ "C",
690
+ "D"
691
+ ],
692
+ "description": "The following are multiple choice questions (with answers) about college medicine.\n\n",
693
+ "target_delimiter": " ",
694
+ "fewshot_delimiter": "\n\n",
695
+ "fewshot_config": {
696
+ "sampler": "first_n"
697
+ },
698
+ "metric_list": [
699
+ {
700
+ "metric": "acc",
701
+ "aggregation": "mean",
702
+ "higher_is_better": true
703
+ }
704
+ ],
705
+ "output_type": "multiple_choice",
706
+ "repeats": 1,
707
+ "should_decontaminate": false,
708
+ "metadata": {
709
+ "version": 0.0
710
+ }
711
+ },
712
+ "mmlu_college_physics": {
713
+ "task": "mmlu_college_physics",
714
+ "task_alias": "college_physics",
715
+ "group": "mmlu_stem",
716
+ "group_alias": "stem",
717
+ "dataset_path": "hails/mmlu_no_train",
718
+ "dataset_name": "college_physics",
719
+ "test_split": "test",
720
+ "fewshot_split": "dev",
721
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
722
+ "doc_to_target": "answer",
723
+ "doc_to_choice": [
724
+ "A",
725
+ "B",
726
+ "C",
727
+ "D"
728
+ ],
729
+ "description": "The following are multiple choice questions (with answers) about college physics.\n\n",
730
+ "target_delimiter": " ",
731
+ "fewshot_delimiter": "\n\n",
732
+ "fewshot_config": {
733
+ "sampler": "first_n"
734
+ },
735
+ "metric_list": [
736
+ {
737
+ "metric": "acc",
738
+ "aggregation": "mean",
739
+ "higher_is_better": true
740
+ }
741
+ ],
742
+ "output_type": "multiple_choice",
743
+ "repeats": 1,
744
+ "should_decontaminate": false,
745
+ "metadata": {
746
+ "version": 0.0
747
+ }
748
+ },
749
+ "mmlu_computer_security": {
750
+ "task": "mmlu_computer_security",
751
+ "task_alias": "computer_security",
752
+ "group": "mmlu_stem",
753
+ "group_alias": "stem",
754
+ "dataset_path": "hails/mmlu_no_train",
755
+ "dataset_name": "computer_security",
756
+ "test_split": "test",
757
+ "fewshot_split": "dev",
758
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
759
+ "doc_to_target": "answer",
760
+ "doc_to_choice": [
761
+ "A",
762
+ "B",
763
+ "C",
764
+ "D"
765
+ ],
766
+ "description": "The following are multiple choice questions (with answers) about computer security.\n\n",
767
+ "target_delimiter": " ",
768
+ "fewshot_delimiter": "\n\n",
769
+ "fewshot_config": {
770
+ "sampler": "first_n"
771
+ },
772
+ "metric_list": [
773
+ {
774
+ "metric": "acc",
775
+ "aggregation": "mean",
776
+ "higher_is_better": true
777
+ }
778
+ ],
779
+ "output_type": "multiple_choice",
780
+ "repeats": 1,
781
+ "should_decontaminate": false,
782
+ "metadata": {
783
+ "version": 0.0
784
+ }
785
+ },
786
+ "mmlu_conceptual_physics": {
787
+ "task": "mmlu_conceptual_physics",
788
+ "task_alias": "conceptual_physics",
789
+ "group": "mmlu_stem",
790
+ "group_alias": "stem",
791
+ "dataset_path": "hails/mmlu_no_train",
792
+ "dataset_name": "conceptual_physics",
793
+ "test_split": "test",
794
+ "fewshot_split": "dev",
795
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
796
+ "doc_to_target": "answer",
797
+ "doc_to_choice": [
798
+ "A",
799
+ "B",
800
+ "C",
801
+ "D"
802
+ ],
803
+ "description": "The following are multiple choice questions (with answers) about conceptual physics.\n\n",
804
+ "target_delimiter": " ",
805
+ "fewshot_delimiter": "\n\n",
806
+ "fewshot_config": {
807
+ "sampler": "first_n"
808
+ },
809
+ "metric_list": [
810
+ {
811
+ "metric": "acc",
812
+ "aggregation": "mean",
813
+ "higher_is_better": true
814
+ }
815
+ ],
816
+ "output_type": "multiple_choice",
817
+ "repeats": 1,
818
+ "should_decontaminate": false,
819
+ "metadata": {
820
+ "version": 0.0
821
+ }
822
+ },
823
+ "mmlu_econometrics": {
824
+ "task": "mmlu_econometrics",
825
+ "task_alias": "econometrics",
826
+ "group": "mmlu_social_sciences",
827
+ "group_alias": "social_sciences",
828
+ "dataset_path": "hails/mmlu_no_train",
829
+ "dataset_name": "econometrics",
830
+ "test_split": "test",
831
+ "fewshot_split": "dev",
832
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
833
+ "doc_to_target": "answer",
834
+ "doc_to_choice": [
835
+ "A",
836
+ "B",
837
+ "C",
838
+ "D"
839
+ ],
840
+ "description": "The following are multiple choice questions (with answers) about econometrics.\n\n",
841
+ "target_delimiter": " ",
842
+ "fewshot_delimiter": "\n\n",
843
+ "fewshot_config": {
844
+ "sampler": "first_n"
845
+ },
846
+ "metric_list": [
847
+ {
848
+ "metric": "acc",
849
+ "aggregation": "mean",
850
+ "higher_is_better": true
851
+ }
852
+ ],
853
+ "output_type": "multiple_choice",
854
+ "repeats": 1,
855
+ "should_decontaminate": false,
856
+ "metadata": {
857
+ "version": 0.0
858
+ }
859
+ },
860
+ "mmlu_electrical_engineering": {
861
+ "task": "mmlu_electrical_engineering",
862
+ "task_alias": "electrical_engineering",
863
+ "group": "mmlu_stem",
864
+ "group_alias": "stem",
865
+ "dataset_path": "hails/mmlu_no_train",
866
+ "dataset_name": "electrical_engineering",
867
+ "test_split": "test",
868
+ "fewshot_split": "dev",
869
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
870
+ "doc_to_target": "answer",
871
+ "doc_to_choice": [
872
+ "A",
873
+ "B",
874
+ "C",
875
+ "D"
876
+ ],
877
+ "description": "The following are multiple choice questions (with answers) about electrical engineering.\n\n",
878
+ "target_delimiter": " ",
879
+ "fewshot_delimiter": "\n\n",
880
+ "fewshot_config": {
881
+ "sampler": "first_n"
882
+ },
883
+ "metric_list": [
884
+ {
885
+ "metric": "acc",
886
+ "aggregation": "mean",
887
+ "higher_is_better": true
888
+ }
889
+ ],
890
+ "output_type": "multiple_choice",
891
+ "repeats": 1,
892
+ "should_decontaminate": false,
893
+ "metadata": {
894
+ "version": 0.0
895
+ }
896
+ },
897
+ "mmlu_elementary_mathematics": {
898
+ "task": "mmlu_elementary_mathematics",
899
+ "task_alias": "elementary_mathematics",
900
+ "group": "mmlu_stem",
901
+ "group_alias": "stem",
902
+ "dataset_path": "hails/mmlu_no_train",
903
+ "dataset_name": "elementary_mathematics",
904
+ "test_split": "test",
905
+ "fewshot_split": "dev",
906
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
907
+ "doc_to_target": "answer",
908
+ "doc_to_choice": [
909
+ "A",
910
+ "B",
911
+ "C",
912
+ "D"
913
+ ],
914
+ "description": "The following are multiple choice questions (with answers) about elementary mathematics.\n\n",
915
+ "target_delimiter": " ",
916
+ "fewshot_delimiter": "\n\n",
917
+ "fewshot_config": {
918
+ "sampler": "first_n"
919
+ },
920
+ "metric_list": [
921
+ {
922
+ "metric": "acc",
923
+ "aggregation": "mean",
924
+ "higher_is_better": true
925
+ }
926
+ ],
927
+ "output_type": "multiple_choice",
928
+ "repeats": 1,
929
+ "should_decontaminate": false,
930
+ "metadata": {
931
+ "version": 0.0
932
+ }
933
+ },
934
+ "mmlu_formal_logic": {
935
+ "task": "mmlu_formal_logic",
936
+ "task_alias": "formal_logic",
937
+ "group": "mmlu_humanities",
938
+ "group_alias": "humanities",
939
+ "dataset_path": "hails/mmlu_no_train",
940
+ "dataset_name": "formal_logic",
941
+ "test_split": "test",
942
+ "fewshot_split": "dev",
943
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
944
+ "doc_to_target": "answer",
945
+ "doc_to_choice": [
946
+ "A",
947
+ "B",
948
+ "C",
949
+ "D"
950
+ ],
951
+ "description": "The following are multiple choice questions (with answers) about formal logic.\n\n",
952
+ "target_delimiter": " ",
953
+ "fewshot_delimiter": "\n\n",
954
+ "fewshot_config": {
955
+ "sampler": "first_n"
956
+ },
957
+ "metric_list": [
958
+ {
959
+ "metric": "acc",
960
+ "aggregation": "mean",
961
+ "higher_is_better": true
962
+ }
963
+ ],
964
+ "output_type": "multiple_choice",
965
+ "repeats": 1,
966
+ "should_decontaminate": false,
967
+ "metadata": {
968
+ "version": 0.0
969
+ }
970
+ },
971
+ "mmlu_global_facts": {
972
+ "task": "mmlu_global_facts",
973
+ "task_alias": "global_facts",
974
+ "group": "mmlu_other",
975
+ "group_alias": "other",
976
+ "dataset_path": "hails/mmlu_no_train",
977
+ "dataset_name": "global_facts",
978
+ "test_split": "test",
979
+ "fewshot_split": "dev",
980
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
981
+ "doc_to_target": "answer",
982
+ "doc_to_choice": [
983
+ "A",
984
+ "B",
985
+ "C",
986
+ "D"
987
+ ],
988
+ "description": "The following are multiple choice questions (with answers) about global facts.\n\n",
989
+ "target_delimiter": " ",
990
+ "fewshot_delimiter": "\n\n",
991
+ "fewshot_config": {
992
+ "sampler": "first_n"
993
+ },
994
+ "metric_list": [
995
+ {
996
+ "metric": "acc",
997
+ "aggregation": "mean",
998
+ "higher_is_better": true
999
+ }
1000
+ ],
1001
+ "output_type": "multiple_choice",
1002
+ "repeats": 1,
1003
+ "should_decontaminate": false,
1004
+ "metadata": {
1005
+ "version": 0.0
1006
+ }
1007
+ },
1008
+ "mmlu_high_school_biology": {
1009
+ "task": "mmlu_high_school_biology",
1010
+ "task_alias": "high_school_biology",
1011
+ "group": "mmlu_stem",
1012
+ "group_alias": "stem",
1013
+ "dataset_path": "hails/mmlu_no_train",
1014
+ "dataset_name": "high_school_biology",
1015
+ "test_split": "test",
1016
+ "fewshot_split": "dev",
1017
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1018
+ "doc_to_target": "answer",
1019
+ "doc_to_choice": [
1020
+ "A",
1021
+ "B",
1022
+ "C",
1023
+ "D"
1024
+ ],
1025
+ "description": "The following are multiple choice questions (with answers) about high school biology.\n\n",
1026
+ "target_delimiter": " ",
1027
+ "fewshot_delimiter": "\n\n",
1028
+ "fewshot_config": {
1029
+ "sampler": "first_n"
1030
+ },
1031
+ "metric_list": [
1032
+ {
1033
+ "metric": "acc",
1034
+ "aggregation": "mean",
1035
+ "higher_is_better": true
1036
+ }
1037
+ ],
1038
+ "output_type": "multiple_choice",
1039
+ "repeats": 1,
1040
+ "should_decontaminate": false,
1041
+ "metadata": {
1042
+ "version": 0.0
1043
+ }
1044
+ },
1045
+ "mmlu_high_school_chemistry": {
1046
+ "task": "mmlu_high_school_chemistry",
1047
+ "task_alias": "high_school_chemistry",
1048
+ "group": "mmlu_stem",
1049
+ "group_alias": "stem",
1050
+ "dataset_path": "hails/mmlu_no_train",
1051
+ "dataset_name": "high_school_chemistry",
1052
+ "test_split": "test",
1053
+ "fewshot_split": "dev",
1054
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1055
+ "doc_to_target": "answer",
1056
+ "doc_to_choice": [
1057
+ "A",
1058
+ "B",
1059
+ "C",
1060
+ "D"
1061
+ ],
1062
+ "description": "The following are multiple choice questions (with answers) about high school chemistry.\n\n",
1063
+ "target_delimiter": " ",
1064
+ "fewshot_delimiter": "\n\n",
1065
+ "fewshot_config": {
1066
+ "sampler": "first_n"
1067
+ },
1068
+ "metric_list": [
1069
+ {
1070
+ "metric": "acc",
1071
+ "aggregation": "mean",
1072
+ "higher_is_better": true
1073
+ }
1074
+ ],
1075
+ "output_type": "multiple_choice",
1076
+ "repeats": 1,
1077
+ "should_decontaminate": false,
1078
+ "metadata": {
1079
+ "version": 0.0
1080
+ }
1081
+ },
1082
+ "mmlu_high_school_computer_science": {
1083
+ "task": "mmlu_high_school_computer_science",
1084
+ "task_alias": "high_school_computer_science",
1085
+ "group": "mmlu_stem",
1086
+ "group_alias": "stem",
1087
+ "dataset_path": "hails/mmlu_no_train",
1088
+ "dataset_name": "high_school_computer_science",
1089
+ "test_split": "test",
1090
+ "fewshot_split": "dev",
1091
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1092
+ "doc_to_target": "answer",
1093
+ "doc_to_choice": [
1094
+ "A",
1095
+ "B",
1096
+ "C",
1097
+ "D"
1098
+ ],
1099
+ "description": "The following are multiple choice questions (with answers) about high school computer science.\n\n",
1100
+ "target_delimiter": " ",
1101
+ "fewshot_delimiter": "\n\n",
1102
+ "fewshot_config": {
1103
+ "sampler": "first_n"
1104
+ },
1105
+ "metric_list": [
1106
+ {
1107
+ "metric": "acc",
1108
+ "aggregation": "mean",
1109
+ "higher_is_better": true
1110
+ }
1111
+ ],
1112
+ "output_type": "multiple_choice",
1113
+ "repeats": 1,
1114
+ "should_decontaminate": false,
1115
+ "metadata": {
1116
+ "version": 0.0
1117
+ }
1118
+ },
1119
+ "mmlu_high_school_european_history": {
1120
+ "task": "mmlu_high_school_european_history",
1121
+ "task_alias": "high_school_european_history",
1122
+ "group": "mmlu_humanities",
1123
+ "group_alias": "humanities",
1124
+ "dataset_path": "hails/mmlu_no_train",
1125
+ "dataset_name": "high_school_european_history",
1126
+ "test_split": "test",
1127
+ "fewshot_split": "dev",
1128
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1129
+ "doc_to_target": "answer",
1130
+ "doc_to_choice": [
1131
+ "A",
1132
+ "B",
1133
+ "C",
1134
+ "D"
1135
+ ],
1136
+ "description": "The following are multiple choice questions (with answers) about high school european history.\n\n",
1137
+ "target_delimiter": " ",
1138
+ "fewshot_delimiter": "\n\n",
1139
+ "fewshot_config": {
1140
+ "sampler": "first_n"
1141
+ },
1142
+ "metric_list": [
1143
+ {
1144
+ "metric": "acc",
1145
+ "aggregation": "mean",
1146
+ "higher_is_better": true
1147
+ }
1148
+ ],
1149
+ "output_type": "multiple_choice",
1150
+ "repeats": 1,
1151
+ "should_decontaminate": false,
1152
+ "metadata": {
1153
+ "version": 0.0
1154
+ }
1155
+ },
1156
+ "mmlu_high_school_geography": {
1157
+ "task": "mmlu_high_school_geography",
1158
+ "task_alias": "high_school_geography",
1159
+ "group": "mmlu_social_sciences",
1160
+ "group_alias": "social_sciences",
1161
+ "dataset_path": "hails/mmlu_no_train",
1162
+ "dataset_name": "high_school_geography",
1163
+ "test_split": "test",
1164
+ "fewshot_split": "dev",
1165
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1166
+ "doc_to_target": "answer",
1167
+ "doc_to_choice": [
1168
+ "A",
1169
+ "B",
1170
+ "C",
1171
+ "D"
1172
+ ],
1173
+ "description": "The following are multiple choice questions (with answers) about high school geography.\n\n",
1174
+ "target_delimiter": " ",
1175
+ "fewshot_delimiter": "\n\n",
1176
+ "fewshot_config": {
1177
+ "sampler": "first_n"
1178
+ },
1179
+ "metric_list": [
1180
+ {
1181
+ "metric": "acc",
1182
+ "aggregation": "mean",
1183
+ "higher_is_better": true
1184
+ }
1185
+ ],
1186
+ "output_type": "multiple_choice",
1187
+ "repeats": 1,
1188
+ "should_decontaminate": false,
1189
+ "metadata": {
1190
+ "version": 0.0
1191
+ }
1192
+ },
1193
+ "mmlu_high_school_government_and_politics": {
1194
+ "task": "mmlu_high_school_government_and_politics",
1195
+ "task_alias": "high_school_government_and_politics",
1196
+ "group": "mmlu_social_sciences",
1197
+ "group_alias": "social_sciences",
1198
+ "dataset_path": "hails/mmlu_no_train",
1199
+ "dataset_name": "high_school_government_and_politics",
1200
+ "test_split": "test",
1201
+ "fewshot_split": "dev",
1202
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1203
+ "doc_to_target": "answer",
1204
+ "doc_to_choice": [
1205
+ "A",
1206
+ "B",
1207
+ "C",
1208
+ "D"
1209
+ ],
1210
+ "description": "The following are multiple choice questions (with answers) about high school government and politics.\n\n",
1211
+ "target_delimiter": " ",
1212
+ "fewshot_delimiter": "\n\n",
1213
+ "fewshot_config": {
1214
+ "sampler": "first_n"
1215
+ },
1216
+ "metric_list": [
1217
+ {
1218
+ "metric": "acc",
1219
+ "aggregation": "mean",
1220
+ "higher_is_better": true
1221
+ }
1222
+ ],
1223
+ "output_type": "multiple_choice",
1224
+ "repeats": 1,
1225
+ "should_decontaminate": false,
1226
+ "metadata": {
1227
+ "version": 0.0
1228
+ }
1229
+ },
1230
+ "mmlu_high_school_macroeconomics": {
1231
+ "task": "mmlu_high_school_macroeconomics",
1232
+ "task_alias": "high_school_macroeconomics",
1233
+ "group": "mmlu_social_sciences",
1234
+ "group_alias": "social_sciences",
1235
+ "dataset_path": "hails/mmlu_no_train",
1236
+ "dataset_name": "high_school_macroeconomics",
1237
+ "test_split": "test",
1238
+ "fewshot_split": "dev",
1239
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1240
+ "doc_to_target": "answer",
1241
+ "doc_to_choice": [
1242
+ "A",
1243
+ "B",
1244
+ "C",
1245
+ "D"
1246
+ ],
1247
+ "description": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\n",
1248
+ "target_delimiter": " ",
1249
+ "fewshot_delimiter": "\n\n",
1250
+ "fewshot_config": {
1251
+ "sampler": "first_n"
1252
+ },
1253
+ "metric_list": [
1254
+ {
1255
+ "metric": "acc",
1256
+ "aggregation": "mean",
1257
+ "higher_is_better": true
1258
+ }
1259
+ ],
1260
+ "output_type": "multiple_choice",
1261
+ "repeats": 1,
1262
+ "should_decontaminate": false,
1263
+ "metadata": {
1264
+ "version": 0.0
1265
+ }
1266
+ },
1267
+ "mmlu_high_school_mathematics": {
1268
+ "task": "mmlu_high_school_mathematics",
1269
+ "task_alias": "high_school_mathematics",
1270
+ "group": "mmlu_stem",
1271
+ "group_alias": "stem",
1272
+ "dataset_path": "hails/mmlu_no_train",
1273
+ "dataset_name": "high_school_mathematics",
1274
+ "test_split": "test",
1275
+ "fewshot_split": "dev",
1276
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1277
+ "doc_to_target": "answer",
1278
+ "doc_to_choice": [
1279
+ "A",
1280
+ "B",
1281
+ "C",
1282
+ "D"
1283
+ ],
1284
+ "description": "The following are multiple choice questions (with answers) about high school mathematics.\n\n",
1285
+ "target_delimiter": " ",
1286
+ "fewshot_delimiter": "\n\n",
1287
+ "fewshot_config": {
1288
+ "sampler": "first_n"
1289
+ },
1290
+ "metric_list": [
1291
+ {
1292
+ "metric": "acc",
1293
+ "aggregation": "mean",
1294
+ "higher_is_better": true
1295
+ }
1296
+ ],
1297
+ "output_type": "multiple_choice",
1298
+ "repeats": 1,
1299
+ "should_decontaminate": false,
1300
+ "metadata": {
1301
+ "version": 0.0
1302
+ }
1303
+ },
1304
+ "mmlu_high_school_microeconomics": {
1305
+ "task": "mmlu_high_school_microeconomics",
1306
+ "task_alias": "high_school_microeconomics",
1307
+ "group": "mmlu_social_sciences",
1308
+ "group_alias": "social_sciences",
1309
+ "dataset_path": "hails/mmlu_no_train",
1310
+ "dataset_name": "high_school_microeconomics",
1311
+ "test_split": "test",
1312
+ "fewshot_split": "dev",
1313
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1314
+ "doc_to_target": "answer",
1315
+ "doc_to_choice": [
1316
+ "A",
1317
+ "B",
1318
+ "C",
1319
+ "D"
1320
+ ],
1321
+ "description": "The following are multiple choice questions (with answers) about high school microeconomics.\n\n",
1322
+ "target_delimiter": " ",
1323
+ "fewshot_delimiter": "\n\n",
1324
+ "fewshot_config": {
1325
+ "sampler": "first_n"
1326
+ },
1327
+ "metric_list": [
1328
+ {
1329
+ "metric": "acc",
1330
+ "aggregation": "mean",
1331
+ "higher_is_better": true
1332
+ }
1333
+ ],
1334
+ "output_type": "multiple_choice",
1335
+ "repeats": 1,
1336
+ "should_decontaminate": false,
1337
+ "metadata": {
1338
+ "version": 0.0
1339
+ }
1340
+ },
1341
+ "mmlu_high_school_physics": {
1342
+ "task": "mmlu_high_school_physics",
1343
+ "task_alias": "high_school_physics",
1344
+ "group": "mmlu_stem",
1345
+ "group_alias": "stem",
1346
+ "dataset_path": "hails/mmlu_no_train",
1347
+ "dataset_name": "high_school_physics",
1348
+ "test_split": "test",
1349
+ "fewshot_split": "dev",
1350
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1351
+ "doc_to_target": "answer",
1352
+ "doc_to_choice": [
1353
+ "A",
1354
+ "B",
1355
+ "C",
1356
+ "D"
1357
+ ],
1358
+ "description": "The following are multiple choice questions (with answers) about high school physics.\n\n",
1359
+ "target_delimiter": " ",
1360
+ "fewshot_delimiter": "\n\n",
1361
+ "fewshot_config": {
1362
+ "sampler": "first_n"
1363
+ },
1364
+ "metric_list": [
1365
+ {
1366
+ "metric": "acc",
1367
+ "aggregation": "mean",
1368
+ "higher_is_better": true
1369
+ }
1370
+ ],
1371
+ "output_type": "multiple_choice",
1372
+ "repeats": 1,
1373
+ "should_decontaminate": false,
1374
+ "metadata": {
1375
+ "version": 0.0
1376
+ }
1377
+ },
1378
+ "mmlu_high_school_psychology": {
1379
+ "task": "mmlu_high_school_psychology",
1380
+ "task_alias": "high_school_psychology",
1381
+ "group": "mmlu_social_sciences",
1382
+ "group_alias": "social_sciences",
1383
+ "dataset_path": "hails/mmlu_no_train",
1384
+ "dataset_name": "high_school_psychology",
1385
+ "test_split": "test",
1386
+ "fewshot_split": "dev",
1387
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1388
+ "doc_to_target": "answer",
1389
+ "doc_to_choice": [
1390
+ "A",
1391
+ "B",
1392
+ "C",
1393
+ "D"
1394
+ ],
1395
+ "description": "The following are multiple choice questions (with answers) about high school psychology.\n\n",
1396
+ "target_delimiter": " ",
1397
+ "fewshot_delimiter": "\n\n",
1398
+ "fewshot_config": {
1399
+ "sampler": "first_n"
1400
+ },
1401
+ "metric_list": [
1402
+ {
1403
+ "metric": "acc",
1404
+ "aggregation": "mean",
1405
+ "higher_is_better": true
1406
+ }
1407
+ ],
1408
+ "output_type": "multiple_choice",
1409
+ "repeats": 1,
1410
+ "should_decontaminate": false,
1411
+ "metadata": {
1412
+ "version": 0.0
1413
+ }
1414
+ },
1415
+ "mmlu_high_school_statistics": {
1416
+ "task": "mmlu_high_school_statistics",
1417
+ "task_alias": "high_school_statistics",
1418
+ "group": "mmlu_stem",
1419
+ "group_alias": "stem",
1420
+ "dataset_path": "hails/mmlu_no_train",
1421
+ "dataset_name": "high_school_statistics",
1422
+ "test_split": "test",
1423
+ "fewshot_split": "dev",
1424
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1425
+ "doc_to_target": "answer",
1426
+ "doc_to_choice": [
1427
+ "A",
1428
+ "B",
1429
+ "C",
1430
+ "D"
1431
+ ],
1432
+ "description": "The following are multiple choice questions (with answers) about high school statistics.\n\n",
1433
+ "target_delimiter": " ",
1434
+ "fewshot_delimiter": "\n\n",
1435
+ "fewshot_config": {
1436
+ "sampler": "first_n"
1437
+ },
1438
+ "metric_list": [
1439
+ {
1440
+ "metric": "acc",
1441
+ "aggregation": "mean",
1442
+ "higher_is_better": true
1443
+ }
1444
+ ],
1445
+ "output_type": "multiple_choice",
1446
+ "repeats": 1,
1447
+ "should_decontaminate": false,
1448
+ "metadata": {
1449
+ "version": 0.0
1450
+ }
1451
+ },
1452
+ "mmlu_high_school_us_history": {
1453
+ "task": "mmlu_high_school_us_history",
1454
+ "task_alias": "high_school_us_history",
1455
+ "group": "mmlu_humanities",
1456
+ "group_alias": "humanities",
1457
+ "dataset_path": "hails/mmlu_no_train",
1458
+ "dataset_name": "high_school_us_history",
1459
+ "test_split": "test",
1460
+ "fewshot_split": "dev",
1461
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1462
+ "doc_to_target": "answer",
1463
+ "doc_to_choice": [
1464
+ "A",
1465
+ "B",
1466
+ "C",
1467
+ "D"
1468
+ ],
1469
+ "description": "The following are multiple choice questions (with answers) about high school us history.\n\n",
1470
+ "target_delimiter": " ",
1471
+ "fewshot_delimiter": "\n\n",
1472
+ "fewshot_config": {
1473
+ "sampler": "first_n"
1474
+ },
1475
+ "metric_list": [
1476
+ {
1477
+ "metric": "acc",
1478
+ "aggregation": "mean",
1479
+ "higher_is_better": true
1480
+ }
1481
+ ],
1482
+ "output_type": "multiple_choice",
1483
+ "repeats": 1,
1484
+ "should_decontaminate": false,
1485
+ "metadata": {
1486
+ "version": 0.0
1487
+ }
1488
+ },
1489
+ "mmlu_high_school_world_history": {
1490
+ "task": "mmlu_high_school_world_history",
1491
+ "task_alias": "high_school_world_history",
1492
+ "group": "mmlu_humanities",
1493
+ "group_alias": "humanities",
1494
+ "dataset_path": "hails/mmlu_no_train",
1495
+ "dataset_name": "high_school_world_history",
1496
+ "test_split": "test",
1497
+ "fewshot_split": "dev",
1498
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1499
+ "doc_to_target": "answer",
1500
+ "doc_to_choice": [
1501
+ "A",
1502
+ "B",
1503
+ "C",
1504
+ "D"
1505
+ ],
1506
+ "description": "The following are multiple choice questions (with answers) about high school world history.\n\n",
1507
+ "target_delimiter": " ",
1508
+ "fewshot_delimiter": "\n\n",
1509
+ "fewshot_config": {
1510
+ "sampler": "first_n"
1511
+ },
1512
+ "metric_list": [
1513
+ {
1514
+ "metric": "acc",
1515
+ "aggregation": "mean",
1516
+ "higher_is_better": true
1517
+ }
1518
+ ],
1519
+ "output_type": "multiple_choice",
1520
+ "repeats": 1,
1521
+ "should_decontaminate": false,
1522
+ "metadata": {
1523
+ "version": 0.0
1524
+ }
1525
+ },
1526
+ "mmlu_human_aging": {
1527
+ "task": "mmlu_human_aging",
1528
+ "task_alias": "human_aging",
1529
+ "group": "mmlu_other",
1530
+ "group_alias": "other",
1531
+ "dataset_path": "hails/mmlu_no_train",
1532
+ "dataset_name": "human_aging",
1533
+ "test_split": "test",
1534
+ "fewshot_split": "dev",
1535
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1536
+ "doc_to_target": "answer",
1537
+ "doc_to_choice": [
1538
+ "A",
1539
+ "B",
1540
+ "C",
1541
+ "D"
1542
+ ],
1543
+ "description": "The following are multiple choice questions (with answers) about human aging.\n\n",
1544
+ "target_delimiter": " ",
1545
+ "fewshot_delimiter": "\n\n",
1546
+ "fewshot_config": {
1547
+ "sampler": "first_n"
1548
+ },
1549
+ "metric_list": [
1550
+ {
1551
+ "metric": "acc",
1552
+ "aggregation": "mean",
1553
+ "higher_is_better": true
1554
+ }
1555
+ ],
1556
+ "output_type": "multiple_choice",
1557
+ "repeats": 1,
1558
+ "should_decontaminate": false,
1559
+ "metadata": {
1560
+ "version": 0.0
1561
+ }
1562
+ },
1563
+ "mmlu_human_sexuality": {
1564
+ "task": "mmlu_human_sexuality",
1565
+ "task_alias": "human_sexuality",
1566
+ "group": "mmlu_social_sciences",
1567
+ "group_alias": "social_sciences",
1568
+ "dataset_path": "hails/mmlu_no_train",
1569
+ "dataset_name": "human_sexuality",
1570
+ "test_split": "test",
1571
+ "fewshot_split": "dev",
1572
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1573
+ "doc_to_target": "answer",
1574
+ "doc_to_choice": [
1575
+ "A",
1576
+ "B",
1577
+ "C",
1578
+ "D"
1579
+ ],
1580
+ "description": "The following are multiple choice questions (with answers) about human sexuality.\n\n",
1581
+ "target_delimiter": " ",
1582
+ "fewshot_delimiter": "\n\n",
1583
+ "fewshot_config": {
1584
+ "sampler": "first_n"
1585
+ },
1586
+ "metric_list": [
1587
+ {
1588
+ "metric": "acc",
1589
+ "aggregation": "mean",
1590
+ "higher_is_better": true
1591
+ }
1592
+ ],
1593
+ "output_type": "multiple_choice",
1594
+ "repeats": 1,
1595
+ "should_decontaminate": false,
1596
+ "metadata": {
1597
+ "version": 0.0
1598
+ }
1599
+ },
1600
+ "mmlu_international_law": {
1601
+ "task": "mmlu_international_law",
1602
+ "task_alias": "international_law",
1603
+ "group": "mmlu_humanities",
1604
+ "group_alias": "humanities",
1605
+ "dataset_path": "hails/mmlu_no_train",
1606
+ "dataset_name": "international_law",
1607
+ "test_split": "test",
1608
+ "fewshot_split": "dev",
1609
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1610
+ "doc_to_target": "answer",
1611
+ "doc_to_choice": [
1612
+ "A",
1613
+ "B",
1614
+ "C",
1615
+ "D"
1616
+ ],
1617
+ "description": "The following are multiple choice questions (with answers) about international law.\n\n",
1618
+ "target_delimiter": " ",
1619
+ "fewshot_delimiter": "\n\n",
1620
+ "fewshot_config": {
1621
+ "sampler": "first_n"
1622
+ },
1623
+ "metric_list": [
1624
+ {
1625
+ "metric": "acc",
1626
+ "aggregation": "mean",
1627
+ "higher_is_better": true
1628
+ }
1629
+ ],
1630
+ "output_type": "multiple_choice",
1631
+ "repeats": 1,
1632
+ "should_decontaminate": false,
1633
+ "metadata": {
1634
+ "version": 0.0
1635
+ }
1636
+ },
1637
+ "mmlu_jurisprudence": {
1638
+ "task": "mmlu_jurisprudence",
1639
+ "task_alias": "jurisprudence",
1640
+ "group": "mmlu_humanities",
1641
+ "group_alias": "humanities",
1642
+ "dataset_path": "hails/mmlu_no_train",
1643
+ "dataset_name": "jurisprudence",
1644
+ "test_split": "test",
1645
+ "fewshot_split": "dev",
1646
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1647
+ "doc_to_target": "answer",
1648
+ "doc_to_choice": [
1649
+ "A",
1650
+ "B",
1651
+ "C",
1652
+ "D"
1653
+ ],
1654
+ "description": "The following are multiple choice questions (with answers) about jurisprudence.\n\n",
1655
+ "target_delimiter": " ",
1656
+ "fewshot_delimiter": "\n\n",
1657
+ "fewshot_config": {
1658
+ "sampler": "first_n"
1659
+ },
1660
+ "metric_list": [
1661
+ {
1662
+ "metric": "acc",
1663
+ "aggregation": "mean",
1664
+ "higher_is_better": true
1665
+ }
1666
+ ],
1667
+ "output_type": "multiple_choice",
1668
+ "repeats": 1,
1669
+ "should_decontaminate": false,
1670
+ "metadata": {
1671
+ "version": 0.0
1672
+ }
1673
+ },
1674
+ "mmlu_logical_fallacies": {
1675
+ "task": "mmlu_logical_fallacies",
1676
+ "task_alias": "logical_fallacies",
1677
+ "group": "mmlu_humanities",
1678
+ "group_alias": "humanities",
1679
+ "dataset_path": "hails/mmlu_no_train",
1680
+ "dataset_name": "logical_fallacies",
1681
+ "test_split": "test",
1682
+ "fewshot_split": "dev",
1683
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1684
+ "doc_to_target": "answer",
1685
+ "doc_to_choice": [
1686
+ "A",
1687
+ "B",
1688
+ "C",
1689
+ "D"
1690
+ ],
1691
+ "description": "The following are multiple choice questions (with answers) about logical fallacies.\n\n",
1692
+ "target_delimiter": " ",
1693
+ "fewshot_delimiter": "\n\n",
1694
+ "fewshot_config": {
1695
+ "sampler": "first_n"
1696
+ },
1697
+ "metric_list": [
1698
+ {
1699
+ "metric": "acc",
1700
+ "aggregation": "mean",
1701
+ "higher_is_better": true
1702
+ }
1703
+ ],
1704
+ "output_type": "multiple_choice",
1705
+ "repeats": 1,
1706
+ "should_decontaminate": false,
1707
+ "metadata": {
1708
+ "version": 0.0
1709
+ }
1710
+ },
1711
+ "mmlu_machine_learning": {
1712
+ "task": "mmlu_machine_learning",
1713
+ "task_alias": "machine_learning",
1714
+ "group": "mmlu_stem",
1715
+ "group_alias": "stem",
1716
+ "dataset_path": "hails/mmlu_no_train",
1717
+ "dataset_name": "machine_learning",
1718
+ "test_split": "test",
1719
+ "fewshot_split": "dev",
1720
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1721
+ "doc_to_target": "answer",
1722
+ "doc_to_choice": [
1723
+ "A",
1724
+ "B",
1725
+ "C",
1726
+ "D"
1727
+ ],
1728
+ "description": "The following are multiple choice questions (with answers) about machine learning.\n\n",
1729
+ "target_delimiter": " ",
1730
+ "fewshot_delimiter": "\n\n",
1731
+ "fewshot_config": {
1732
+ "sampler": "first_n"
1733
+ },
1734
+ "metric_list": [
1735
+ {
1736
+ "metric": "acc",
1737
+ "aggregation": "mean",
1738
+ "higher_is_better": true
1739
+ }
1740
+ ],
1741
+ "output_type": "multiple_choice",
1742
+ "repeats": 1,
1743
+ "should_decontaminate": false,
1744
+ "metadata": {
1745
+ "version": 0.0
1746
+ }
1747
+ },
1748
+ "mmlu_management": {
1749
+ "task": "mmlu_management",
1750
+ "task_alias": "management",
1751
+ "group": "mmlu_other",
1752
+ "group_alias": "other",
1753
+ "dataset_path": "hails/mmlu_no_train",
1754
+ "dataset_name": "management",
1755
+ "test_split": "test",
1756
+ "fewshot_split": "dev",
1757
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1758
+ "doc_to_target": "answer",
1759
+ "doc_to_choice": [
1760
+ "A",
1761
+ "B",
1762
+ "C",
1763
+ "D"
1764
+ ],
1765
+ "description": "The following are multiple choice questions (with answers) about management.\n\n",
1766
+ "target_delimiter": " ",
1767
+ "fewshot_delimiter": "\n\n",
1768
+ "fewshot_config": {
1769
+ "sampler": "first_n"
1770
+ },
1771
+ "metric_list": [
1772
+ {
1773
+ "metric": "acc",
1774
+ "aggregation": "mean",
1775
+ "higher_is_better": true
1776
+ }
1777
+ ],
1778
+ "output_type": "multiple_choice",
1779
+ "repeats": 1,
1780
+ "should_decontaminate": false,
1781
+ "metadata": {
1782
+ "version": 0.0
1783
+ }
1784
+ },
1785
+ "mmlu_marketing": {
1786
+ "task": "mmlu_marketing",
1787
+ "task_alias": "marketing",
1788
+ "group": "mmlu_other",
1789
+ "group_alias": "other",
1790
+ "dataset_path": "hails/mmlu_no_train",
1791
+ "dataset_name": "marketing",
1792
+ "test_split": "test",
1793
+ "fewshot_split": "dev",
1794
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1795
+ "doc_to_target": "answer",
1796
+ "doc_to_choice": [
1797
+ "A",
1798
+ "B",
1799
+ "C",
1800
+ "D"
1801
+ ],
1802
+ "description": "The following are multiple choice questions (with answers) about marketing.\n\n",
1803
+ "target_delimiter": " ",
1804
+ "fewshot_delimiter": "\n\n",
1805
+ "fewshot_config": {
1806
+ "sampler": "first_n"
1807
+ },
1808
+ "metric_list": [
1809
+ {
1810
+ "metric": "acc",
1811
+ "aggregation": "mean",
1812
+ "higher_is_better": true
1813
+ }
1814
+ ],
1815
+ "output_type": "multiple_choice",
1816
+ "repeats": 1,
1817
+ "should_decontaminate": false,
1818
+ "metadata": {
1819
+ "version": 0.0
1820
+ }
1821
+ },
1822
+ "mmlu_medical_genetics": {
1823
+ "task": "mmlu_medical_genetics",
1824
+ "task_alias": "medical_genetics",
1825
+ "group": "mmlu_other",
1826
+ "group_alias": "other",
1827
+ "dataset_path": "hails/mmlu_no_train",
1828
+ "dataset_name": "medical_genetics",
1829
+ "test_split": "test",
1830
+ "fewshot_split": "dev",
1831
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1832
+ "doc_to_target": "answer",
1833
+ "doc_to_choice": [
1834
+ "A",
1835
+ "B",
1836
+ "C",
1837
+ "D"
1838
+ ],
1839
+ "description": "The following are multiple choice questions (with answers) about medical genetics.\n\n",
1840
+ "target_delimiter": " ",
1841
+ "fewshot_delimiter": "\n\n",
1842
+ "fewshot_config": {
1843
+ "sampler": "first_n"
1844
+ },
1845
+ "metric_list": [
1846
+ {
1847
+ "metric": "acc",
1848
+ "aggregation": "mean",
1849
+ "higher_is_better": true
1850
+ }
1851
+ ],
1852
+ "output_type": "multiple_choice",
1853
+ "repeats": 1,
1854
+ "should_decontaminate": false,
1855
+ "metadata": {
1856
+ "version": 0.0
1857
+ }
1858
+ },
1859
+ "mmlu_miscellaneous": {
1860
+ "task": "mmlu_miscellaneous",
1861
+ "task_alias": "miscellaneous",
1862
+ "group": "mmlu_other",
1863
+ "group_alias": "other",
1864
+ "dataset_path": "hails/mmlu_no_train",
1865
+ "dataset_name": "miscellaneous",
1866
+ "test_split": "test",
1867
+ "fewshot_split": "dev",
1868
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1869
+ "doc_to_target": "answer",
1870
+ "doc_to_choice": [
1871
+ "A",
1872
+ "B",
1873
+ "C",
1874
+ "D"
1875
+ ],
1876
+ "description": "The following are multiple choice questions (with answers) about miscellaneous.\n\n",
1877
+ "target_delimiter": " ",
1878
+ "fewshot_delimiter": "\n\n",
1879
+ "fewshot_config": {
1880
+ "sampler": "first_n"
1881
+ },
1882
+ "metric_list": [
1883
+ {
1884
+ "metric": "acc",
1885
+ "aggregation": "mean",
1886
+ "higher_is_better": true
1887
+ }
1888
+ ],
1889
+ "output_type": "multiple_choice",
1890
+ "repeats": 1,
1891
+ "should_decontaminate": false,
1892
+ "metadata": {
1893
+ "version": 0.0
1894
+ }
1895
+ },
1896
+ "mmlu_moral_disputes": {
1897
+ "task": "mmlu_moral_disputes",
1898
+ "task_alias": "moral_disputes",
1899
+ "group": "mmlu_humanities",
1900
+ "group_alias": "humanities",
1901
+ "dataset_path": "hails/mmlu_no_train",
1902
+ "dataset_name": "moral_disputes",
1903
+ "test_split": "test",
1904
+ "fewshot_split": "dev",
1905
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1906
+ "doc_to_target": "answer",
1907
+ "doc_to_choice": [
1908
+ "A",
1909
+ "B",
1910
+ "C",
1911
+ "D"
1912
+ ],
1913
+ "description": "The following are multiple choice questions (with answers) about moral disputes.\n\n",
1914
+ "target_delimiter": " ",
1915
+ "fewshot_delimiter": "\n\n",
1916
+ "fewshot_config": {
1917
+ "sampler": "first_n"
1918
+ },
1919
+ "metric_list": [
1920
+ {
1921
+ "metric": "acc",
1922
+ "aggregation": "mean",
1923
+ "higher_is_better": true
1924
+ }
1925
+ ],
1926
+ "output_type": "multiple_choice",
1927
+ "repeats": 1,
1928
+ "should_decontaminate": false,
1929
+ "metadata": {
1930
+ "version": 0.0
1931
+ }
1932
+ },
1933
+ "mmlu_moral_scenarios": {
1934
+ "task": "mmlu_moral_scenarios",
1935
+ "task_alias": "moral_scenarios",
1936
+ "group": "mmlu_humanities",
1937
+ "group_alias": "humanities",
1938
+ "dataset_path": "hails/mmlu_no_train",
1939
+ "dataset_name": "moral_scenarios",
1940
+ "test_split": "test",
1941
+ "fewshot_split": "dev",
1942
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1943
+ "doc_to_target": "answer",
1944
+ "doc_to_choice": [
1945
+ "A",
1946
+ "B",
1947
+ "C",
1948
+ "D"
1949
+ ],
1950
+ "description": "The following are multiple choice questions (with answers) about moral scenarios.\n\n",
1951
+ "target_delimiter": " ",
1952
+ "fewshot_delimiter": "\n\n",
1953
+ "fewshot_config": {
1954
+ "sampler": "first_n"
1955
+ },
1956
+ "metric_list": [
1957
+ {
1958
+ "metric": "acc",
1959
+ "aggregation": "mean",
1960
+ "higher_is_better": true
1961
+ }
1962
+ ],
1963
+ "output_type": "multiple_choice",
1964
+ "repeats": 1,
1965
+ "should_decontaminate": false,
1966
+ "metadata": {
1967
+ "version": 0.0
1968
+ }
1969
+ },
1970
+ "mmlu_nutrition": {
1971
+ "task": "mmlu_nutrition",
1972
+ "task_alias": "nutrition",
1973
+ "group": "mmlu_other",
1974
+ "group_alias": "other",
1975
+ "dataset_path": "hails/mmlu_no_train",
1976
+ "dataset_name": "nutrition",
1977
+ "test_split": "test",
1978
+ "fewshot_split": "dev",
1979
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1980
+ "doc_to_target": "answer",
1981
+ "doc_to_choice": [
1982
+ "A",
1983
+ "B",
1984
+ "C",
1985
+ "D"
1986
+ ],
1987
+ "description": "The following are multiple choice questions (with answers) about nutrition.\n\n",
1988
+ "target_delimiter": " ",
1989
+ "fewshot_delimiter": "\n\n",
1990
+ "fewshot_config": {
1991
+ "sampler": "first_n"
1992
+ },
1993
+ "metric_list": [
1994
+ {
1995
+ "metric": "acc",
1996
+ "aggregation": "mean",
1997
+ "higher_is_better": true
1998
+ }
1999
+ ],
2000
+ "output_type": "multiple_choice",
2001
+ "repeats": 1,
2002
+ "should_decontaminate": false,
2003
+ "metadata": {
2004
+ "version": 0.0
2005
+ }
2006
+ },
2007
+ "mmlu_philosophy": {
2008
+ "task": "mmlu_philosophy",
2009
+ "task_alias": "philosophy",
2010
+ "group": "mmlu_humanities",
2011
+ "group_alias": "humanities",
2012
+ "dataset_path": "hails/mmlu_no_train",
2013
+ "dataset_name": "philosophy",
2014
+ "test_split": "test",
2015
+ "fewshot_split": "dev",
2016
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2017
+ "doc_to_target": "answer",
2018
+ "doc_to_choice": [
2019
+ "A",
2020
+ "B",
2021
+ "C",
2022
+ "D"
2023
+ ],
2024
+ "description": "The following are multiple choice questions (with answers) about philosophy.\n\n",
2025
+ "target_delimiter": " ",
2026
+ "fewshot_delimiter": "\n\n",
2027
+ "fewshot_config": {
2028
+ "sampler": "first_n"
2029
+ },
2030
+ "metric_list": [
2031
+ {
2032
+ "metric": "acc",
2033
+ "aggregation": "mean",
2034
+ "higher_is_better": true
2035
+ }
2036
+ ],
2037
+ "output_type": "multiple_choice",
2038
+ "repeats": 1,
2039
+ "should_decontaminate": false,
2040
+ "metadata": {
2041
+ "version": 0.0
2042
+ }
2043
+ },
2044
+ "mmlu_prehistory": {
2045
+ "task": "mmlu_prehistory",
2046
+ "task_alias": "prehistory",
2047
+ "group": "mmlu_humanities",
2048
+ "group_alias": "humanities",
2049
+ "dataset_path": "hails/mmlu_no_train",
2050
+ "dataset_name": "prehistory",
2051
+ "test_split": "test",
2052
+ "fewshot_split": "dev",
2053
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2054
+ "doc_to_target": "answer",
2055
+ "doc_to_choice": [
2056
+ "A",
2057
+ "B",
2058
+ "C",
2059
+ "D"
2060
+ ],
2061
+ "description": "The following are multiple choice questions (with answers) about prehistory.\n\n",
2062
+ "target_delimiter": " ",
2063
+ "fewshot_delimiter": "\n\n",
2064
+ "fewshot_config": {
2065
+ "sampler": "first_n"
2066
+ },
2067
+ "metric_list": [
2068
+ {
2069
+ "metric": "acc",
2070
+ "aggregation": "mean",
2071
+ "higher_is_better": true
2072
+ }
2073
+ ],
2074
+ "output_type": "multiple_choice",
2075
+ "repeats": 1,
2076
+ "should_decontaminate": false,
2077
+ "metadata": {
2078
+ "version": 0.0
2079
+ }
2080
+ },
2081
+ "mmlu_professional_accounting": {
2082
+ "task": "mmlu_professional_accounting",
2083
+ "task_alias": "professional_accounting",
2084
+ "group": "mmlu_other",
2085
+ "group_alias": "other",
2086
+ "dataset_path": "hails/mmlu_no_train",
2087
+ "dataset_name": "professional_accounting",
2088
+ "test_split": "test",
2089
+ "fewshot_split": "dev",
2090
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2091
+ "doc_to_target": "answer",
2092
+ "doc_to_choice": [
2093
+ "A",
2094
+ "B",
2095
+ "C",
2096
+ "D"
2097
+ ],
2098
+ "description": "The following are multiple choice questions (with answers) about professional accounting.\n\n",
2099
+ "target_delimiter": " ",
2100
+ "fewshot_delimiter": "\n\n",
2101
+ "fewshot_config": {
2102
+ "sampler": "first_n"
2103
+ },
2104
+ "metric_list": [
2105
+ {
2106
+ "metric": "acc",
2107
+ "aggregation": "mean",
2108
+ "higher_is_better": true
2109
+ }
2110
+ ],
2111
+ "output_type": "multiple_choice",
2112
+ "repeats": 1,
2113
+ "should_decontaminate": false,
2114
+ "metadata": {
2115
+ "version": 0.0
2116
+ }
2117
+ },
2118
+ "mmlu_professional_law": {
2119
+ "task": "mmlu_professional_law",
2120
+ "task_alias": "professional_law",
2121
+ "group": "mmlu_humanities",
2122
+ "group_alias": "humanities",
2123
+ "dataset_path": "hails/mmlu_no_train",
2124
+ "dataset_name": "professional_law",
2125
+ "test_split": "test",
2126
+ "fewshot_split": "dev",
2127
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2128
+ "doc_to_target": "answer",
2129
+ "doc_to_choice": [
2130
+ "A",
2131
+ "B",
2132
+ "C",
2133
+ "D"
2134
+ ],
2135
+ "description": "The following are multiple choice questions (with answers) about professional law.\n\n",
2136
+ "target_delimiter": " ",
2137
+ "fewshot_delimiter": "\n\n",
2138
+ "fewshot_config": {
2139
+ "sampler": "first_n"
2140
+ },
2141
+ "metric_list": [
2142
+ {
2143
+ "metric": "acc",
2144
+ "aggregation": "mean",
2145
+ "higher_is_better": true
2146
+ }
2147
+ ],
2148
+ "output_type": "multiple_choice",
2149
+ "repeats": 1,
2150
+ "should_decontaminate": false,
2151
+ "metadata": {
2152
+ "version": 0.0
2153
+ }
2154
+ },
2155
+ "mmlu_professional_medicine": {
2156
+ "task": "mmlu_professional_medicine",
2157
+ "task_alias": "professional_medicine",
2158
+ "group": "mmlu_other",
2159
+ "group_alias": "other",
2160
+ "dataset_path": "hails/mmlu_no_train",
2161
+ "dataset_name": "professional_medicine",
2162
+ "test_split": "test",
2163
+ "fewshot_split": "dev",
2164
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2165
+ "doc_to_target": "answer",
2166
+ "doc_to_choice": [
2167
+ "A",
2168
+ "B",
2169
+ "C",
2170
+ "D"
2171
+ ],
2172
+ "description": "The following are multiple choice questions (with answers) about professional medicine.\n\n",
2173
+ "target_delimiter": " ",
2174
+ "fewshot_delimiter": "\n\n",
2175
+ "fewshot_config": {
2176
+ "sampler": "first_n"
2177
+ },
2178
+ "metric_list": [
2179
+ {
2180
+ "metric": "acc",
2181
+ "aggregation": "mean",
2182
+ "higher_is_better": true
2183
+ }
2184
+ ],
2185
+ "output_type": "multiple_choice",
2186
+ "repeats": 1,
2187
+ "should_decontaminate": false,
2188
+ "metadata": {
2189
+ "version": 0.0
2190
+ }
2191
+ },
2192
+ "mmlu_professional_psychology": {
2193
+ "task": "mmlu_professional_psychology",
2194
+ "task_alias": "professional_psychology",
2195
+ "group": "mmlu_social_sciences",
2196
+ "group_alias": "social_sciences",
2197
+ "dataset_path": "hails/mmlu_no_train",
2198
+ "dataset_name": "professional_psychology",
2199
+ "test_split": "test",
2200
+ "fewshot_split": "dev",
2201
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2202
+ "doc_to_target": "answer",
2203
+ "doc_to_choice": [
2204
+ "A",
2205
+ "B",
2206
+ "C",
2207
+ "D"
2208
+ ],
2209
+ "description": "The following are multiple choice questions (with answers) about professional psychology.\n\n",
2210
+ "target_delimiter": " ",
2211
+ "fewshot_delimiter": "\n\n",
2212
+ "fewshot_config": {
2213
+ "sampler": "first_n"
2214
+ },
2215
+ "metric_list": [
2216
+ {
2217
+ "metric": "acc",
2218
+ "aggregation": "mean",
2219
+ "higher_is_better": true
2220
+ }
2221
+ ],
2222
+ "output_type": "multiple_choice",
2223
+ "repeats": 1,
2224
+ "should_decontaminate": false,
2225
+ "metadata": {
2226
+ "version": 0.0
2227
+ }
2228
+ },
2229
+ "mmlu_public_relations": {
2230
+ "task": "mmlu_public_relations",
2231
+ "task_alias": "public_relations",
2232
+ "group": "mmlu_social_sciences",
2233
+ "group_alias": "social_sciences",
2234
+ "dataset_path": "hails/mmlu_no_train",
2235
+ "dataset_name": "public_relations",
2236
+ "test_split": "test",
2237
+ "fewshot_split": "dev",
2238
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2239
+ "doc_to_target": "answer",
2240
+ "doc_to_choice": [
2241
+ "A",
2242
+ "B",
2243
+ "C",
2244
+ "D"
2245
+ ],
2246
+ "description": "The following are multiple choice questions (with answers) about public relations.\n\n",
2247
+ "target_delimiter": " ",
2248
+ "fewshot_delimiter": "\n\n",
2249
+ "fewshot_config": {
2250
+ "sampler": "first_n"
2251
+ },
2252
+ "metric_list": [
2253
+ {
2254
+ "metric": "acc",
2255
+ "aggregation": "mean",
2256
+ "higher_is_better": true
2257
+ }
2258
+ ],
2259
+ "output_type": "multiple_choice",
2260
+ "repeats": 1,
2261
+ "should_decontaminate": false,
2262
+ "metadata": {
2263
+ "version": 0.0
2264
+ }
2265
+ },
2266
+ "mmlu_security_studies": {
2267
+ "task": "mmlu_security_studies",
2268
+ "task_alias": "security_studies",
2269
+ "group": "mmlu_social_sciences",
2270
+ "group_alias": "social_sciences",
2271
+ "dataset_path": "hails/mmlu_no_train",
2272
+ "dataset_name": "security_studies",
2273
+ "test_split": "test",
2274
+ "fewshot_split": "dev",
2275
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2276
+ "doc_to_target": "answer",
2277
+ "doc_to_choice": [
2278
+ "A",
2279
+ "B",
2280
+ "C",
2281
+ "D"
2282
+ ],
2283
+ "description": "The following are multiple choice questions (with answers) about security studies.\n\n",
2284
+ "target_delimiter": " ",
2285
+ "fewshot_delimiter": "\n\n",
2286
+ "fewshot_config": {
2287
+ "sampler": "first_n"
2288
+ },
2289
+ "metric_list": [
2290
+ {
2291
+ "metric": "acc",
2292
+ "aggregation": "mean",
2293
+ "higher_is_better": true
2294
+ }
2295
+ ],
2296
+ "output_type": "multiple_choice",
2297
+ "repeats": 1,
2298
+ "should_decontaminate": false,
2299
+ "metadata": {
2300
+ "version": 0.0
2301
+ }
2302
+ },
2303
+ "mmlu_sociology": {
2304
+ "task": "mmlu_sociology",
2305
+ "task_alias": "sociology",
2306
+ "group": "mmlu_social_sciences",
2307
+ "group_alias": "social_sciences",
2308
+ "dataset_path": "hails/mmlu_no_train",
2309
+ "dataset_name": "sociology",
2310
+ "test_split": "test",
2311
+ "fewshot_split": "dev",
2312
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2313
+ "doc_to_target": "answer",
2314
+ "doc_to_choice": [
2315
+ "A",
2316
+ "B",
2317
+ "C",
2318
+ "D"
2319
+ ],
2320
+ "description": "The following are multiple choice questions (with answers) about sociology.\n\n",
2321
+ "target_delimiter": " ",
2322
+ "fewshot_delimiter": "\n\n",
2323
+ "fewshot_config": {
2324
+ "sampler": "first_n"
2325
+ },
2326
+ "metric_list": [
2327
+ {
2328
+ "metric": "acc",
2329
+ "aggregation": "mean",
2330
+ "higher_is_better": true
2331
+ }
2332
+ ],
2333
+ "output_type": "multiple_choice",
2334
+ "repeats": 1,
2335
+ "should_decontaminate": false,
2336
+ "metadata": {
2337
+ "version": 0.0
2338
+ }
2339
+ },
2340
+ "mmlu_us_foreign_policy": {
2341
+ "task": "mmlu_us_foreign_policy",
2342
+ "task_alias": "us_foreign_policy",
2343
+ "group": "mmlu_social_sciences",
2344
+ "group_alias": "social_sciences",
2345
+ "dataset_path": "hails/mmlu_no_train",
2346
+ "dataset_name": "us_foreign_policy",
2347
+ "test_split": "test",
2348
+ "fewshot_split": "dev",
2349
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2350
+ "doc_to_target": "answer",
2351
+ "doc_to_choice": [
2352
+ "A",
2353
+ "B",
2354
+ "C",
2355
+ "D"
2356
+ ],
2357
+ "description": "The following are multiple choice questions (with answers) about us foreign policy.\n\n",
2358
+ "target_delimiter": " ",
2359
+ "fewshot_delimiter": "\n\n",
2360
+ "fewshot_config": {
2361
+ "sampler": "first_n"
2362
+ },
2363
+ "metric_list": [
2364
+ {
2365
+ "metric": "acc",
2366
+ "aggregation": "mean",
2367
+ "higher_is_better": true
2368
+ }
2369
+ ],
2370
+ "output_type": "multiple_choice",
2371
+ "repeats": 1,
2372
+ "should_decontaminate": false,
2373
+ "metadata": {
2374
+ "version": 0.0
2375
+ }
2376
+ },
2377
+ "mmlu_virology": {
2378
+ "task": "mmlu_virology",
2379
+ "task_alias": "virology",
2380
+ "group": "mmlu_other",
2381
+ "group_alias": "other",
2382
+ "dataset_path": "hails/mmlu_no_train",
2383
+ "dataset_name": "virology",
2384
+ "test_split": "test",
2385
+ "fewshot_split": "dev",
2386
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2387
+ "doc_to_target": "answer",
2388
+ "doc_to_choice": [
2389
+ "A",
2390
+ "B",
2391
+ "C",
2392
+ "D"
2393
+ ],
2394
+ "description": "The following are multiple choice questions (with answers) about virology.\n\n",
2395
+ "target_delimiter": " ",
2396
+ "fewshot_delimiter": "\n\n",
2397
+ "fewshot_config": {
2398
+ "sampler": "first_n"
2399
+ },
2400
+ "metric_list": [
2401
+ {
2402
+ "metric": "acc",
2403
+ "aggregation": "mean",
2404
+ "higher_is_better": true
2405
+ }
2406
+ ],
2407
+ "output_type": "multiple_choice",
2408
+ "repeats": 1,
2409
+ "should_decontaminate": false,
2410
+ "metadata": {
2411
+ "version": 0.0
2412
+ }
2413
+ },
2414
+ "mmlu_world_religions": {
2415
+ "task": "mmlu_world_religions",
2416
+ "task_alias": "world_religions",
2417
+ "group": "mmlu_humanities",
2418
+ "group_alias": "humanities",
2419
+ "dataset_path": "hails/mmlu_no_train",
2420
+ "dataset_name": "world_religions",
2421
+ "test_split": "test",
2422
+ "fewshot_split": "dev",
2423
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2424
+ "doc_to_target": "answer",
2425
+ "doc_to_choice": [
2426
+ "A",
2427
+ "B",
2428
+ "C",
2429
+ "D"
2430
+ ],
2431
+ "description": "The following are multiple choice questions (with answers) about world religions.\n\n",
2432
+ "target_delimiter": " ",
2433
+ "fewshot_delimiter": "\n\n",
2434
+ "fewshot_config": {
2435
+ "sampler": "first_n"
2436
+ },
2437
+ "metric_list": [
2438
+ {
2439
+ "metric": "acc",
2440
+ "aggregation": "mean",
2441
+ "higher_is_better": true
2442
+ }
2443
+ ],
2444
+ "output_type": "multiple_choice",
2445
+ "repeats": 1,
2446
+ "should_decontaminate": false,
2447
+ "metadata": {
2448
+ "version": 0.0
2449
+ }
2450
+ }
2451
+ },
2452
+ "versions": {
2453
+ "mmlu": "N/A",
2454
+ "mmlu_abstract_algebra": 0.0,
2455
+ "mmlu_anatomy": 0.0,
2456
+ "mmlu_astronomy": 0.0,
2457
+ "mmlu_business_ethics": 0.0,
2458
+ "mmlu_clinical_knowledge": 0.0,
2459
+ "mmlu_college_biology": 0.0,
2460
+ "mmlu_college_chemistry": 0.0,
2461
+ "mmlu_college_computer_science": 0.0,
2462
+ "mmlu_college_mathematics": 0.0,
2463
+ "mmlu_college_medicine": 0.0,
2464
+ "mmlu_college_physics": 0.0,
2465
+ "mmlu_computer_security": 0.0,
2466
+ "mmlu_conceptual_physics": 0.0,
2467
+ "mmlu_econometrics": 0.0,
2468
+ "mmlu_electrical_engineering": 0.0,
2469
+ "mmlu_elementary_mathematics": 0.0,
2470
+ "mmlu_formal_logic": 0.0,
2471
+ "mmlu_global_facts": 0.0,
2472
+ "mmlu_high_school_biology": 0.0,
2473
+ "mmlu_high_school_chemistry": 0.0,
2474
+ "mmlu_high_school_computer_science": 0.0,
2475
+ "mmlu_high_school_european_history": 0.0,
2476
+ "mmlu_high_school_geography": 0.0,
2477
+ "mmlu_high_school_government_and_politics": 0.0,
2478
+ "mmlu_high_school_macroeconomics": 0.0,
2479
+ "mmlu_high_school_mathematics": 0.0,
2480
+ "mmlu_high_school_microeconomics": 0.0,
2481
+ "mmlu_high_school_physics": 0.0,
2482
+ "mmlu_high_school_psychology": 0.0,
2483
+ "mmlu_high_school_statistics": 0.0,
2484
+ "mmlu_high_school_us_history": 0.0,
2485
+ "mmlu_high_school_world_history": 0.0,
2486
+ "mmlu_human_aging": 0.0,
2487
+ "mmlu_human_sexuality": 0.0,
2488
+ "mmlu_humanities": "N/A",
2489
+ "mmlu_international_law": 0.0,
2490
+ "mmlu_jurisprudence": 0.0,
2491
+ "mmlu_logical_fallacies": 0.0,
2492
+ "mmlu_machine_learning": 0.0,
2493
+ "mmlu_management": 0.0,
2494
+ "mmlu_marketing": 0.0,
2495
+ "mmlu_medical_genetics": 0.0,
2496
+ "mmlu_miscellaneous": 0.0,
2497
+ "mmlu_moral_disputes": 0.0,
2498
+ "mmlu_moral_scenarios": 0.0,
2499
+ "mmlu_nutrition": 0.0,
2500
+ "mmlu_other": "N/A",
2501
+ "mmlu_philosophy": 0.0,
2502
+ "mmlu_prehistory": 0.0,
2503
+ "mmlu_professional_accounting": 0.0,
2504
+ "mmlu_professional_law": 0.0,
2505
+ "mmlu_professional_medicine": 0.0,
2506
+ "mmlu_professional_psychology": 0.0,
2507
+ "mmlu_public_relations": 0.0,
2508
+ "mmlu_security_studies": 0.0,
2509
+ "mmlu_social_sciences": "N/A",
2510
+ "mmlu_sociology": 0.0,
2511
+ "mmlu_stem": "N/A",
2512
+ "mmlu_us_foreign_policy": 0.0,
2513
+ "mmlu_virology": 0.0,
2514
+ "mmlu_world_religions": 0.0
2515
+ },
2516
+ "n-shot": {
2517
+ "mmlu": 0,
2518
+ "mmlu_abstract_algebra": 0,
2519
+ "mmlu_anatomy": 0,
2520
+ "mmlu_astronomy": 0,
2521
+ "mmlu_business_ethics": 0,
2522
+ "mmlu_clinical_knowledge": 0,
2523
+ "mmlu_college_biology": 0,
2524
+ "mmlu_college_chemistry": 0,
2525
+ "mmlu_college_computer_science": 0,
2526
+ "mmlu_college_mathematics": 0,
2527
+ "mmlu_college_medicine": 0,
2528
+ "mmlu_college_physics": 0,
2529
+ "mmlu_computer_security": 0,
2530
+ "mmlu_conceptual_physics": 0,
2531
+ "mmlu_econometrics": 0,
2532
+ "mmlu_electrical_engineering": 0,
2533
+ "mmlu_elementary_mathematics": 0,
2534
+ "mmlu_formal_logic": 0,
2535
+ "mmlu_global_facts": 0,
2536
+ "mmlu_high_school_biology": 0,
2537
+ "mmlu_high_school_chemistry": 0,
2538
+ "mmlu_high_school_computer_science": 0,
2539
+ "mmlu_high_school_european_history": 0,
2540
+ "mmlu_high_school_geography": 0,
2541
+ "mmlu_high_school_government_and_politics": 0,
2542
+ "mmlu_high_school_macroeconomics": 0,
2543
+ "mmlu_high_school_mathematics": 0,
2544
+ "mmlu_high_school_microeconomics": 0,
2545
+ "mmlu_high_school_physics": 0,
2546
+ "mmlu_high_school_psychology": 0,
2547
+ "mmlu_high_school_statistics": 0,
2548
+ "mmlu_high_school_us_history": 0,
2549
+ "mmlu_high_school_world_history": 0,
2550
+ "mmlu_human_aging": 0,
2551
+ "mmlu_human_sexuality": 0,
2552
+ "mmlu_humanities": 0,
2553
+ "mmlu_international_law": 0,
2554
+ "mmlu_jurisprudence": 0,
2555
+ "mmlu_logical_fallacies": 0,
2556
+ "mmlu_machine_learning": 0,
2557
+ "mmlu_management": 0,
2558
+ "mmlu_marketing": 0,
2559
+ "mmlu_medical_genetics": 0,
2560
+ "mmlu_miscellaneous": 0,
2561
+ "mmlu_moral_disputes": 0,
2562
+ "mmlu_moral_scenarios": 0,
2563
+ "mmlu_nutrition": 0,
2564
+ "mmlu_other": 0,
2565
+ "mmlu_philosophy": 0,
2566
+ "mmlu_prehistory": 0,
2567
+ "mmlu_professional_accounting": 0,
2568
+ "mmlu_professional_law": 0,
2569
+ "mmlu_professional_medicine": 0,
2570
+ "mmlu_professional_psychology": 0,
2571
+ "mmlu_public_relations": 0,
2572
+ "mmlu_security_studies": 0,
2573
+ "mmlu_social_sciences": 0,
2574
+ "mmlu_sociology": 0,
2575
+ "mmlu_stem": 0,
2576
+ "mmlu_us_foreign_policy": 0,
2577
+ "mmlu_virology": 0,
2578
+ "mmlu_world_religions": 0
2579
+ },
2580
+ "config": {
2581
+ "model": "hf",
2582
+ "model_args": "pretrained=./rwkv-x-dev/R4-7B-15t-No-Mask_pth,dtype=bfloat16,trust_remote_code=True",
2583
+ "batch_size": "auto",
2584
+ "batch_sizes": [
2585
+ 16
2586
+ ],
2587
+ "device": null,
2588
+ "use_cache": null,
2589
+ "limit": null,
2590
+ "bootstrap_iters": 100000,
2591
+ "gen_kwargs": null
2592
+ },
2593
+ "git_hash": "8281e96"
2594
+ }
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/mmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1aeacbad31b10462e38468f9a41add94a358c97bb183815bad4bcbb2404c77f2
3
+ size 118654
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/openbookqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,66 @@
1
+ {
2
+ "results": {
3
+ "openbookqa": {
4
+ "acc,none": 0.304,
5
+ "acc_stderr,none": 0.02059164957122493,
6
+ "acc_norm,none": 0.414,
7
+ "acc_norm_stderr,none": 0.022049497969827865,
8
+ "alias": "openbookqa"
9
+ }
10
+ },
11
+ "configs": {
12
+ "openbookqa": {
13
+ "task": "openbookqa",
14
+ "dataset_path": "openbookqa",
15
+ "dataset_name": "main",
16
+ "training_split": "train",
17
+ "validation_split": "validation",
18
+ "test_split": "test",
19
+ "doc_to_text": "question_stem",
20
+ "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}",
21
+ "doc_to_choice": "{{choices.text}}",
22
+ "description": "",
23
+ "target_delimiter": " ",
24
+ "fewshot_delimiter": "\n\n",
25
+ "metric_list": [
26
+ {
27
+ "metric": "acc",
28
+ "aggregation": "mean",
29
+ "higher_is_better": true
30
+ },
31
+ {
32
+ "metric": "acc_norm",
33
+ "aggregation": "mean",
34
+ "higher_is_better": true
35
+ }
36
+ ],
37
+ "output_type": "multiple_choice",
38
+ "repeats": 1,
39
+ "should_decontaminate": true,
40
+ "doc_to_decontamination_query": "question_stem",
41
+ "metadata": {
42
+ "version": 1.0
43
+ }
44
+ }
45
+ },
46
+ "versions": {
47
+ "openbookqa": 1.0
48
+ },
49
+ "n-shot": {
50
+ "openbookqa": 0
51
+ },
52
+ "config": {
53
+ "model": "hf",
54
+ "model_args": "pretrained=./rwkv-x-dev/R4-7B-15t-No-Mask_pth,dtype=bfloat16,trust_remote_code=True",
55
+ "batch_size": "auto",
56
+ "batch_sizes": [
57
+ 64
58
+ ],
59
+ "device": null,
60
+ "use_cache": null,
61
+ "limit": null,
62
+ "bootstrap_iters": 100000,
63
+ "gen_kwargs": null
64
+ },
65
+ "git_hash": "8281e96"
66
+ }
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/openbookqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4835323f43b6730c5a32cc797bcbc0fff8d451550e77a3b5179f8b1659cd8e32
3
+ size 43388
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/pawsx/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,283 @@
1
+ {
2
+ "results": {
3
+ "pawsx": {
4
+ "acc,none": 0.4712142857142857,
5
+ "acc_stderr,none": 0.060879199130928825,
6
+ "alias": "pawsx"
7
+ },
8
+ "paws_de": {
9
+ "acc,none": 0.403,
10
+ "acc_stderr,none": 0.010970673536247517,
11
+ "alias": " - paws_de"
12
+ },
13
+ "paws_en": {
14
+ "acc,none": 0.37,
15
+ "acc_stderr,none": 0.01079853249993165,
16
+ "alias": " - paws_en"
17
+ },
18
+ "paws_es": {
19
+ "acc,none": 0.392,
20
+ "acc_stderr,none": 0.01091913979244253,
21
+ "alias": " - paws_es"
22
+ },
23
+ "paws_fr": {
24
+ "acc,none": 0.548,
25
+ "acc_stderr,none": 0.011131484850525779,
26
+ "alias": " - paws_fr"
27
+ },
28
+ "paws_ja": {
29
+ "acc,none": 0.543,
30
+ "acc_stderr,none": 0.011141704034140802,
31
+ "alias": " - paws_ja"
32
+ },
33
+ "paws_ko": {
34
+ "acc,none": 0.524,
35
+ "acc_stderr,none": 0.011170245619215438,
36
+ "alias": " - paws_ko"
37
+ },
38
+ "paws_zh": {
39
+ "acc,none": 0.5185,
40
+ "acc_stderr,none": 0.011175478542788577,
41
+ "alias": " - paws_zh"
42
+ }
43
+ },
44
+ "groups": {
45
+ "pawsx": {
46
+ "acc,none": 0.4712142857142857,
47
+ "acc_stderr,none": 0.060879199130928825,
48
+ "alias": "pawsx"
49
+ }
50
+ },
51
+ "configs": {
52
+ "paws_de": {
53
+ "task": "paws_de",
54
+ "group": "pawsx",
55
+ "dataset_path": "paws-x",
56
+ "dataset_name": "de",
57
+ "training_split": "train",
58
+ "validation_split": "validation",
59
+ "test_split": "test",
60
+ "doc_to_text": "",
61
+ "doc_to_target": "label",
62
+ "doc_to_choice": "{{[sentence1+\", richtig? Ja, \"+sentence2, sentence1+\", richtig? Nein, \"+sentence2]}}",
63
+ "description": "",
64
+ "target_delimiter": " ",
65
+ "fewshot_delimiter": "\n\n",
66
+ "metric_list": [
67
+ {
68
+ "metric": "acc",
69
+ "aggregation": "mean",
70
+ "higher_is_better": true
71
+ }
72
+ ],
73
+ "output_type": "multiple_choice",
74
+ "repeats": 1,
75
+ "should_decontaminate": false,
76
+ "metadata": {
77
+ "version": 0.0
78
+ }
79
+ },
80
+ "paws_en": {
81
+ "task": "paws_en",
82
+ "group": "pawsx",
83
+ "dataset_path": "paws-x",
84
+ "dataset_name": "en",
85
+ "training_split": "train",
86
+ "validation_split": "validation",
87
+ "test_split": "test",
88
+ "doc_to_text": "",
89
+ "doc_to_target": "label",
90
+ "doc_to_choice": "{{[sentence1+\", right? Yes, \"+sentence2, sentence1+\", right? No, \"+sentence2]}}",
91
+ "description": "",
92
+ "target_delimiter": " ",
93
+ "fewshot_delimiter": "\n\n",
94
+ "metric_list": [
95
+ {
96
+ "metric": "acc",
97
+ "aggregation": "mean",
98
+ "higher_is_better": true
99
+ }
100
+ ],
101
+ "output_type": "multiple_choice",
102
+ "repeats": 1,
103
+ "should_decontaminate": false,
104
+ "metadata": {
105
+ "version": 0.0
106
+ }
107
+ },
108
+ "paws_es": {
109
+ "task": "paws_es",
110
+ "group": "pawsx",
111
+ "dataset_path": "paws-x",
112
+ "dataset_name": "es",
113
+ "training_split": "train",
114
+ "validation_split": "validation",
115
+ "test_split": "test",
116
+ "doc_to_text": "",
117
+ "doc_to_target": "label",
118
+ "doc_to_choice": "{{[sentence1+\", verdad? Sí, \"+sentence2, sentence1+\", verdad? No, \"+sentence2]}}",
119
+ "description": "",
120
+ "target_delimiter": " ",
121
+ "fewshot_delimiter": "\n\n",
122
+ "metric_list": [
123
+ {
124
+ "metric": "acc",
125
+ "aggregation": "mean",
126
+ "higher_is_better": true
127
+ }
128
+ ],
129
+ "output_type": "multiple_choice",
130
+ "repeats": 1,
131
+ "should_decontaminate": false,
132
+ "metadata": {
133
+ "version": 0.0
134
+ }
135
+ },
136
+ "paws_fr": {
137
+ "task": "paws_fr",
138
+ "group": "pawsx",
139
+ "dataset_path": "paws-x",
140
+ "dataset_name": "fr",
141
+ "training_split": "train",
142
+ "validation_split": "validation",
143
+ "test_split": "test",
144
+ "doc_to_text": "",
145
+ "doc_to_target": "label",
146
+ "doc_to_choice": "{{[sentence1+\", n'est-ce pas? Oui, \"+sentence2, sentence1+\", n'est-ce pas? No, \"+sentence2]}}",
147
+ "description": "",
148
+ "target_delimiter": " ",
149
+ "fewshot_delimiter": "\n\n",
150
+ "metric_list": [
151
+ {
152
+ "metric": "acc",
153
+ "aggregation": "mean",
154
+ "higher_is_better": true
155
+ }
156
+ ],
157
+ "output_type": "multiple_choice",
158
+ "repeats": 1,
159
+ "should_decontaminate": false,
160
+ "metadata": {
161
+ "version": 0.0
162
+ }
163
+ },
164
+ "paws_ja": {
165
+ "task": "paws_ja",
166
+ "group": "pawsx",
167
+ "dataset_path": "paws-x",
168
+ "dataset_name": "ja",
169
+ "training_split": "train",
170
+ "validation_split": "validation",
171
+ "test_split": "test",
172
+ "doc_to_text": "",
173
+ "doc_to_target": "label",
174
+ "doc_to_choice": "{{[sentence1+\", ですね? はい, \"+sentence2, sentence1+\", ですね? いいえ, \"+sentence2]}}",
175
+ "description": "",
176
+ "target_delimiter": " ",
177
+ "fewshot_delimiter": "\n\n",
178
+ "metric_list": [
179
+ {
180
+ "metric": "acc",
181
+ "aggregation": "mean",
182
+ "higher_is_better": true
183
+ }
184
+ ],
185
+ "output_type": "multiple_choice",
186
+ "repeats": 1,
187
+ "should_decontaminate": false,
188
+ "metadata": {
189
+ "version": 0.0
190
+ }
191
+ },
192
+ "paws_ko": {
193
+ "task": "paws_ko",
194
+ "group": "pawsx",
195
+ "dataset_path": "paws-x",
196
+ "dataset_name": "ko",
197
+ "training_split": "train",
198
+ "validation_split": "validation",
199
+ "test_split": "test",
200
+ "doc_to_text": "",
201
+ "doc_to_target": "label",
202
+ "doc_to_choice": "{{[sentence1+\", 맞죠? 예, \"+sentence2, sentence1+\", 맞죠? 아니요, \"+sentence2]}}",
203
+ "description": "",
204
+ "target_delimiter": " ",
205
+ "fewshot_delimiter": "\n\n",
206
+ "metric_list": [
207
+ {
208
+ "metric": "acc",
209
+ "aggregation": "mean",
210
+ "higher_is_better": true
211
+ }
212
+ ],
213
+ "output_type": "multiple_choice",
214
+ "repeats": 1,
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 0.0
218
+ }
219
+ },
220
+ "paws_zh": {
221
+ "task": "paws_zh",
222
+ "group": "pawsx",
223
+ "dataset_path": "paws-x",
224
+ "dataset_name": "zh",
225
+ "training_split": "train",
226
+ "validation_split": "validation",
227
+ "test_split": "test",
228
+ "doc_to_text": "",
229
+ "doc_to_target": "label",
230
+ "doc_to_choice": "{{[sentence1+\", 对吧? 是, \"+sentence2, sentence1+\", 对吧? 不是, \"+sentence2]}}",
231
+ "description": "",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "metric_list": [
235
+ {
236
+ "metric": "acc",
237
+ "aggregation": "mean",
238
+ "higher_is_better": true
239
+ }
240
+ ],
241
+ "output_type": "multiple_choice",
242
+ "repeats": 1,
243
+ "should_decontaminate": false,
244
+ "metadata": {
245
+ "version": 0.0
246
+ }
247
+ }
248
+ },
249
+ "versions": {
250
+ "paws_de": 0.0,
251
+ "paws_en": 0.0,
252
+ "paws_es": 0.0,
253
+ "paws_fr": 0.0,
254
+ "paws_ja": 0.0,
255
+ "paws_ko": 0.0,
256
+ "paws_zh": 0.0,
257
+ "pawsx": "N/A"
258
+ },
259
+ "n-shot": {
260
+ "paws_de": 0,
261
+ "paws_en": 0,
262
+ "paws_es": 0,
263
+ "paws_fr": 0,
264
+ "paws_ja": 0,
265
+ "paws_ko": 0,
266
+ "paws_zh": 0,
267
+ "pawsx": 0
268
+ },
269
+ "config": {
270
+ "model": "hf",
271
+ "model_args": "pretrained=./rwkv-x-dev/R4-7B-15t-No-Mask_pth,dtype=bfloat16,trust_remote_code=True",
272
+ "batch_size": "auto",
273
+ "batch_sizes": [
274
+ 64
275
+ ],
276
+ "device": null,
277
+ "use_cache": null,
278
+ "limit": null,
279
+ "bootstrap_iters": 100000,
280
+ "gen_kwargs": null
281
+ },
282
+ "git_hash": "8281e96"
283
+ }
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/pawsx/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7ac1bbc0d95f9acf8b2a00c09e83cc57c516631bb35e21852925ede775f8ea4
3
+ size 50070
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/piqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,64 @@
1
+ {
2
+ "results": {
3
+ "piqa": {
4
+ "acc,none": 0.764961915125136,
5
+ "acc_stderr,none": 0.009893146688805324,
6
+ "acc_norm,none": 0.7747551686615887,
7
+ "acc_norm_stderr,none": 0.009746643471032148,
8
+ "alias": "piqa"
9
+ }
10
+ },
11
+ "configs": {
12
+ "piqa": {
13
+ "task": "piqa",
14
+ "dataset_path": "piqa",
15
+ "training_split": "train",
16
+ "validation_split": "validation",
17
+ "doc_to_text": "Question: {{goal}}\nAnswer:",
18
+ "doc_to_target": "label",
19
+ "doc_to_choice": "{{[sol1, sol2]}}",
20
+ "description": "",
21
+ "target_delimiter": " ",
22
+ "fewshot_delimiter": "\n\n",
23
+ "metric_list": [
24
+ {
25
+ "metric": "acc",
26
+ "aggregation": "mean",
27
+ "higher_is_better": true
28
+ },
29
+ {
30
+ "metric": "acc_norm",
31
+ "aggregation": "mean",
32
+ "higher_is_better": true
33
+ }
34
+ ],
35
+ "output_type": "multiple_choice",
36
+ "repeats": 1,
37
+ "should_decontaminate": true,
38
+ "doc_to_decontamination_query": "goal",
39
+ "metadata": {
40
+ "version": 1.0
41
+ }
42
+ }
43
+ },
44
+ "versions": {
45
+ "piqa": 1.0
46
+ },
47
+ "n-shot": {
48
+ "piqa": 0
49
+ },
50
+ "config": {
51
+ "model": "hf",
52
+ "model_args": "pretrained=./rwkv-x-dev/R4-7B-15t-No-Mask_pth,dtype=bfloat16,trust_remote_code=True",
53
+ "batch_size": "auto",
54
+ "batch_sizes": [
55
+ 64
56
+ ],
57
+ "device": null,
58
+ "use_cache": null,
59
+ "limit": null,
60
+ "bootstrap_iters": 100000,
61
+ "gen_kwargs": null
62
+ },
63
+ "git_hash": "8281e96"
64
+ }
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/piqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7b3444a90fd722a1c7a63a6c27b749192774c34e3e761be19f563a7fcb2078d
3
+ size 42150
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/sciq/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,65 @@
1
+ {
2
+ "results": {
3
+ "sciq": {
4
+ "acc,none": 0.952,
5
+ "acc_stderr,none": 0.006763264133666682,
6
+ "acc_norm,none": 0.919,
7
+ "acc_norm_stderr,none": 0.008632121032139946,
8
+ "alias": "sciq"
9
+ }
10
+ },
11
+ "configs": {
12
+ "sciq": {
13
+ "task": "sciq",
14
+ "dataset_path": "sciq",
15
+ "training_split": "train",
16
+ "validation_split": "validation",
17
+ "test_split": "test",
18
+ "doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:",
19
+ "doc_to_target": 3,
20
+ "doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}",
21
+ "description": "",
22
+ "target_delimiter": " ",
23
+ "fewshot_delimiter": "\n\n",
24
+ "metric_list": [
25
+ {
26
+ "metric": "acc",
27
+ "aggregation": "mean",
28
+ "higher_is_better": true
29
+ },
30
+ {
31
+ "metric": "acc_norm",
32
+ "aggregation": "mean",
33
+ "higher_is_better": true
34
+ }
35
+ ],
36
+ "output_type": "multiple_choice",
37
+ "repeats": 1,
38
+ "should_decontaminate": true,
39
+ "doc_to_decontamination_query": "{{support}} {{question}}",
40
+ "metadata": {
41
+ "version": 1.0
42
+ }
43
+ }
44
+ },
45
+ "versions": {
46
+ "sciq": 1.0
47
+ },
48
+ "n-shot": {
49
+ "sciq": 0
50
+ },
51
+ "config": {
52
+ "model": "hf",
53
+ "model_args": "pretrained=./rwkv-x-dev/R4-7B-15t-No-Mask_pth,dtype=bfloat16,trust_remote_code=True",
54
+ "batch_size": "auto",
55
+ "batch_sizes": [
56
+ 16
57
+ ],
58
+ "device": null,
59
+ "use_cache": null,
60
+ "limit": null,
61
+ "bootstrap_iters": 100000,
62
+ "gen_kwargs": null
63
+ },
64
+ "git_hash": "8281e96"
65
+ }
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/sciq/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8895bc6bc5b9c3db76a109263be8745d171e1b2c023a02d9b25b7ab23e4b3ea8
3
+ size 43599
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/winogrande/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,58 @@
1
+ {
2
+ "results": {
3
+ "winogrande": {
4
+ "acc,none": 0.6748224151539068,
5
+ "acc_stderr,none": 0.013165525471764358,
6
+ "alias": "winogrande"
7
+ }
8
+ },
9
+ "configs": {
10
+ "winogrande": {
11
+ "task": "winogrande",
12
+ "dataset_path": "winogrande",
13
+ "dataset_name": "winogrande_xl",
14
+ "training_split": "train",
15
+ "validation_split": "validation",
16
+ "doc_to_text": "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n return answer_to_num[doc[\"answer\"]]\n",
17
+ "doc_to_target": "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"_\") + 1\n return doc[\"sentence\"][idx:].strip()\n",
18
+ "doc_to_choice": "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"_\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"sentence\"][:idx] + opt for opt in options]\n",
19
+ "description": "",
20
+ "target_delimiter": " ",
21
+ "fewshot_delimiter": "\n\n",
22
+ "metric_list": [
23
+ {
24
+ "metric": "acc",
25
+ "aggregation": "mean",
26
+ "higher_is_better": true
27
+ }
28
+ ],
29
+ "output_type": "multiple_choice",
30
+ "repeats": 1,
31
+ "should_decontaminate": true,
32
+ "doc_to_decontamination_query": "sentence",
33
+ "metadata": {
34
+ "version": 1.0
35
+ }
36
+ }
37
+ },
38
+ "versions": {
39
+ "winogrande": 1.0
40
+ },
41
+ "n-shot": {
42
+ "winogrande": 0
43
+ },
44
+ "config": {
45
+ "model": "hf",
46
+ "model_args": "pretrained=./rwkv-x-dev/R4-7B-15t-No-Mask_pth,dtype=bfloat16,trust_remote_code=True",
47
+ "batch_size": "auto",
48
+ "batch_sizes": [
49
+ 64
50
+ ],
51
+ "device": null,
52
+ "use_cache": null,
53
+ "limit": null,
54
+ "bootstrap_iters": 100000,
55
+ "gen_kwargs": null
56
+ },
57
+ "git_hash": "8281e96"
58
+ }
lm-eval-output/rwkv-x-dev/R4-7B-15t-No-Mask/winogrande/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3c14d782580d2dd1b7ccea691743c0a8a42c6381d2346ce3f0811b4e19af39b
3
+ size 42933
lm-eval-output/rwkv-x-dev/R4-no-shuffle-rwkv-53/record/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,67 @@
1
+ {
2
+ "results": {
3
+ "record": {
4
+ "f1,none": 0.27631047641932965,
5
+ "f1_stderr,none": 0.004434293873068706,
6
+ "em,none": 0.267,
7
+ "em_stderr,none": 0.004424144810664585,
8
+ "alias": "record"
9
+ }
10
+ },
11
+ "configs": {
12
+ "record": {
13
+ "task": "record",
14
+ "group": [
15
+ "super-glue-lm-eval-v1"
16
+ ],
17
+ "dataset_path": "super_glue",
18
+ "dataset_name": "record",
19
+ "training_split": "train",
20
+ "validation_split": "validation",
21
+ "doc_to_text": "def doc_to_text(doc):\n initial_text, *highlights = doc[\"passage\"].strip().split(\"\\n@highlight\\n\")\n text = initial_text + \"\\n\\n\"\n for highlight in highlights:\n text += f\" - {highlight}.\\n\"\n return text\n",
22
+ "doc_to_target": "{{answers}}",
23
+ "doc_to_choice": "{{entities}}",
24
+ "process_results": "def process_results(doc, results):\n # ReCoRD's evaluation is actually deceptively simple:\n # - Pick the maximum likelihood prediction entity\n # - Evaluate the accuracy and token F1 PER EXAMPLE\n # - Average over all examples\n max_idx = np.argmax(np.array([result[0] for result in results]))\n\n prediction = doc[\"entities\"][max_idx]\n gold_label_set = doc[\"answers\"]\n f1 = metric_max_over_ground_truths(\n squad_metrics.compute_f1, prediction, gold_label_set\n )\n em = metric_max_over_ground_truths(\n squad_metrics.compute_exact, prediction, gold_label_set\n )\n\n return {\n \"f1\": f1,\n \"em\": em,\n }\n",
25
+ "description": "",
26
+ "target_delimiter": " ",
27
+ "fewshot_delimiter": "\n\n",
28
+ "metric_list": [
29
+ {
30
+ "metric": "f1",
31
+ "aggregation": "mean"
32
+ },
33
+ {
34
+ "metric": "em",
35
+ "higher_is_better": true,
36
+ "aggregation": "mean"
37
+ }
38
+ ],
39
+ "output_type": "multiple_choice",
40
+ "repeats": 1,
41
+ "should_decontaminate": false,
42
+ "metadata": {
43
+ "version": 1.0
44
+ }
45
+ }
46
+ },
47
+ "versions": {
48
+ "record": 1.0
49
+ },
50
+ "n-shot": {
51
+ "record": 0
52
+ },
53
+ "config": {
54
+ "model": "hf",
55
+ "model_args": "pretrained=./rwkv-x-dev/R4-no-shuffle-rwkv-53_pth,dtype=bfloat16,trust_remote_code=True",
56
+ "batch_size": "auto",
57
+ "batch_sizes": [
58
+ 16
59
+ ],
60
+ "device": null,
61
+ "use_cache": null,
62
+ "limit": null,
63
+ "bootstrap_iters": 100000,
64
+ "gen_kwargs": null
65
+ },
66
+ "git_hash": "8281e96"
67
+ }
lm-eval-output/rwkv-x-dev/R4-no-shuffle-rwkv-53/record/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8885f811aa8866f500f16e1c8e6a2ece868c11c4164d2c4300c39c5fc1bfc632
3
+ size 99307