ggbetz committed on
Commit 878305f
1 Parent(s): 5fe97aa

Upload results for model google/gemma-2b (#39)


- Upload results for model google/gemma-2b (ce1fbfeb9821da7193cb6402a3fc1a108741ce62)

data/google/gemma-2b/base/24-03-17-01:14:20.json ADDED
@@ -0,0 +1,212 @@
+ {
+ "results": {
+ "eligendi-nam-4797_logiqa2_base": {
+ "acc,none": 0.2582697201017812,
+ "acc_stderr,none": 0.01104260805837803,
+ "alias": "eligendi-nam-4797_logiqa2_base"
+ },
+ "eligendi-nam-4797_logiqa_base": {
+ "acc,none": 0.26677316293929715,
+ "acc_stderr,none": 0.017690912581307235,
+ "alias": "eligendi-nam-4797_logiqa_base"
+ },
+ "eligendi-nam-4797_lsat-ar_base": {
+ "acc,none": 0.2391304347826087,
+ "acc_stderr,none": 0.028187385293933945,
+ "alias": "eligendi-nam-4797_lsat-ar_base"
+ },
+ "eligendi-nam-4797_lsat-lr_base": {
+ "acc,none": 0.20392156862745098,
+ "acc_stderr,none": 0.017858731965579532,
+ "alias": "eligendi-nam-4797_lsat-lr_base"
+ },
+ "eligendi-nam-4797_lsat-rc_base": {
+ "acc,none": 0.17472118959107807,
+ "acc_stderr,none": 0.02319560685205096,
+ "alias": "eligendi-nam-4797_lsat-rc_base"
+ }
+ },
+ "configs": {
+ "eligendi-nam-4797_logiqa2_base": {
+ "task": "eligendi-nam-4797_logiqa2_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "eligendi-nam-4797-logiqa2/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "eligendi-nam-4797_logiqa_base": {
+ "task": "eligendi-nam-4797_logiqa_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "eligendi-nam-4797-logiqa/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "eligendi-nam-4797_lsat-ar_base": {
+ "task": "eligendi-nam-4797_lsat-ar_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "eligendi-nam-4797-lsat-ar/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "eligendi-nam-4797_lsat-lr_base": {
+ "task": "eligendi-nam-4797_lsat-lr_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "eligendi-nam-4797-lsat-lr/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "eligendi-nam-4797_lsat-rc_base": {
+ "task": "eligendi-nam-4797_lsat-rc_base",
+ "group": "logikon-bench",
+ "dataset_path": "cot-leaderboard/cot-eval-traces",
+ "dataset_kwargs": {
+ "data_files": {
+ "test": "eligendi-nam-4797-lsat-rc/test-00000-of-00001.parquet"
+ }
+ },
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "{{answer}}",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ }
+ },
+ "versions": {
+ "eligendi-nam-4797_logiqa2_base": 0.0,
+ "eligendi-nam-4797_logiqa_base": 0.0,
+ "eligendi-nam-4797_lsat-ar_base": 0.0,
+ "eligendi-nam-4797_lsat-lr_base": 0.0,
+ "eligendi-nam-4797_lsat-rc_base": 0.0
+ },
+ "n-shot": {
+ "eligendi-nam-4797_logiqa2_base": 0,
+ "eligendi-nam-4797_logiqa_base": 0,
+ "eligendi-nam-4797_lsat-ar_base": 0,
+ "eligendi-nam-4797_lsat-lr_base": 0,
+ "eligendi-nam-4797_lsat-rc_base": 0
+ },
+ "config": {
+ "model": "vllm",
+ "model_args": "pretrained=google/gemma-2b,revision=main,dtype=bfloat16,tensor_parallel_size=1,gpu_memory_utilization=0.5,trust_remote_code=true,max_length=2048",
+ "batch_size": "auto",
+ "batch_sizes": [],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null
+ },
+ "git_hash": "f4fd67a"
+ }
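For reference, a minimal sketch of how the per-task scores in this file can be read back locally. The path is the file added in this commit, and the "acc,none" / "acc_stderr,none" keys are the ones shown in the "results" block above; nothing beyond that structure is assumed.

import json

# Load the results file added in this commit.
with open("data/google/gemma-2b/base/24-03-17-01:14:20.json") as f:
    results = json.load(f)["results"]

# Print accuracy and stderr per task,
# e.g. "eligendi-nam-4797_logiqa2_base: acc=0.2583 (stderr 0.0110)".
for task, scores in results.items():
    print(f"{task}: acc={scores['acc,none']:.4f} (stderr {scores['acc_stderr,none']:.4f})")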