picocreator committed
Commit d5d3533
1 Parent(s): 29c75a9

more evals

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. compile-results.ipynb +79 -501
  2. lm-eval-output/RWKV/v6-Finch-7B-HF/xnli/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +30 -30
  3. lm-eval-output/RWKV/v6-Finch-7B-HF/xnli/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +2 -2
  4. lm-eval-output/m8than/Finch-14B-Continued-10/ai2_arc/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  5. lm-eval-output/m8than/Finch-14B-Continued-10/ai2_arc/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +132 -0
  6. lm-eval-output/m8than/Finch-14B-Continued-10/ai2_arc/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  7. lm-eval-output/m8than/Finch-14B-Continued-10/anli/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  8. lm-eval-output/m8than/Finch-14B-Continued-10/anli/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +161 -0
  9. lm-eval-output/m8than/Finch-14B-Continued-10/anli/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  10. lm-eval-output/m8than/Finch-14B-Continued-10/blimp/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  11. lm-eval-output/m8than/Finch-14B-Continued-10/blimp/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +2249 -0
  12. lm-eval-output/m8than/Finch-14B-Continued-10/blimp/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  13. lm-eval-output/m8than/Finch-14B-Continued-10/cmmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  14. lm-eval-output/m8than/Finch-14B-Continued-10/cmmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +0 -0
  15. lm-eval-output/m8than/Finch-14B-Continued-10/cmmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  16. lm-eval-output/m8than/Finch-14B-Continued-10/copa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  17. lm-eval-output/m8than/Finch-14B-Continued-10/copa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +58 -0
  18. lm-eval-output/m8than/Finch-14B-Continued-10/copa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  19. lm-eval-output/m8than/Finch-14B-Continued-10/glue/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  20. lm-eval-output/m8than/Finch-14B-Continued-10/glue/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +374 -0
  21. lm-eval-output/m8than/Finch-14B-Continued-10/glue/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  22. lm-eval-output/m8than/Finch-14B-Continued-10/hellaswag/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  23. lm-eval-output/m8than/Finch-14B-Continued-10/hellaswag/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +67 -0
  24. lm-eval-output/m8than/Finch-14B-Continued-10/hellaswag/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  25. lm-eval-output/m8than/Finch-14B-Continued-10/lambada/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  26. lm-eval-output/m8than/Finch-14B-Continued-10/lambada/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +126 -0
  27. lm-eval-output/m8than/Finch-14B-Continued-10/lambada/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  28. lm-eval-output/m8than/Finch-14B-Continued-10/lambada_multilingual/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  29. lm-eval-output/m8than/Finch-14B-Continued-10/lambada_multilingual/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +252 -0
  30. lm-eval-output/m8than/Finch-14B-Continued-10/lambada_multilingual/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  31. lm-eval-output/m8than/Finch-14B-Continued-10/logiqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  32. lm-eval-output/m8than/Finch-14B-Continued-10/logiqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +66 -0
  33. lm-eval-output/m8than/Finch-14B-Continued-10/logiqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  34. lm-eval-output/m8than/Finch-14B-Continued-10/mmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  35. lm-eval-output/m8than/Finch-14B-Continued-10/mmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +2594 -0
  36. lm-eval-output/m8than/Finch-14B-Continued-10/mmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  37. lm-eval-output/m8than/Finch-14B-Continued-10/openbookqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  38. lm-eval-output/m8than/Finch-14B-Continued-10/openbookqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +66 -0
  39. lm-eval-output/m8than/Finch-14B-Continued-10/openbookqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  40. lm-eval-output/m8than/Finch-14B-Continued-10/pawsx/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  41. lm-eval-output/m8than/Finch-14B-Continued-10/pawsx/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +283 -0
  42. lm-eval-output/m8than/Finch-14B-Continued-10/pawsx/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  43. lm-eval-output/m8than/Finch-14B-Continued-10/piqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  44. lm-eval-output/m8than/Finch-14B-Continued-10/piqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +64 -0
  45. lm-eval-output/m8than/Finch-14B-Continued-10/piqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  46. lm-eval-output/m8than/Finch-14B-Continued-10/pythia/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  47. lm-eval-output/m8than/Finch-14B-Continued-10/pythia/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +0 -0
  48. lm-eval-output/m8than/Finch-14B-Continued-10/pythia/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log +3 -0
  49. lm-eval-output/m8than/Finch-14B-Continued-10/record/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz +3 -0
  50. lm-eval-output/m8than/Finch-14B-Continued-10/record/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json +67 -0
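
Every artifact in this commit lands under one directory convention: lm-eval-output/<owner>/<model>/<task>/<run-config>/<file>, where the run-config segment spells out the eval settings (dtype=bfloat16, trust_remote_code=True, num_fewshot=-1, nvidia-gpu). A minimal sketch of splitting such a path back into fields; parse_run_path is a hypothetical helper, not something in this repo:

from pathlib import Path

def parse_run_path(p: str) -> dict:
    # Assumed layout: lm-eval-output/<owner>/<model>/<task>/<run-config>/<file>
    owner, model, task, run_config, artifact = Path(p).parts[1:6]
    return {"model": f"{owner}/{model}", "task": task,
            "run_config": run_config, "artifact": artifact}

print(parse_run_path(
    "lm-eval-output/m8than/Finch-14B-Continued-10/ai2_arc/"
    "dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json"
))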
compile-results.ipynb CHANGED
@@ -2,7 +2,7 @@
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
- "execution_count": 32,
6
  "metadata": {},
7
  "outputs": [
8
  {
@@ -11,12 +11,12 @@
11
  "text": [
12
  "Defaulting to user installation because normal site-packages is not writeable\n",
13
  "Requirement already satisfied: pandas in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (2.2.0)\n",
14
- "Requirement already satisfied: numpy<2,>=1.22.4 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (1.26.1)\n",
15
- "Requirement already satisfied: python-dateutil>=2.8.2 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (2.8.2)\n",
16
  "Requirement already satisfied: pytz>=2020.1 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (2024.1)\n",
 
17
  "Requirement already satisfied: tzdata>=2022.7 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (2024.1)\n",
 
18
  "Requirement already satisfied: six>=1.5 in /Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/site-packages (from python-dateutil>=2.8.2->pandas) (1.15.0)\n",
19
- "\u001b[33mWARNING: You are using pip version 21.2.4; however, version 24.0 is available.\n",
20
  "You should consider upgrading via the '/Library/Developer/CommandLineTools/usr/bin/python3 -m pip install --upgrade pip' command.\u001b[0m\n"
21
  ]
22
  }
@@ -36,14 +36,14 @@
36
  },
37
  {
38
  "cell_type": "code",
39
- "execution_count": 33,
40
  "metadata": {},
41
  "outputs": [
42
  {
43
  "name": "stdout",
44
  "output_type": "stream",
45
  "text": [
46
- "Found 5821 results.json files\n"
47
  ]
48
  }
49
  ],
@@ -71,7 +71,7 @@
71
  },
72
  {
73
  "cell_type": "code",
74
- "execution_count": 34,
75
  "metadata": {},
76
  "outputs": [
77
  {
@@ -156,16 +156,16 @@
156
  },
157
  {
158
  "cell_type": "code",
159
- "execution_count": 35,
160
  "metadata": {},
161
  "outputs": [
162
  {
163
  "name": "stdout",
164
  "output_type": "stream",
165
  "text": [
166
- "Found 123 models\n",
167
  "Models: \n",
168
- "['mistralai/Mistral-7B-Instruct-v0.2', 'mistralai/Mistral-7B-v0.1', 'mosaicml/mpt-7b-instruct', 'mosaicml/mpt-7b', 'mosaicml/mpt-7b-chat', 'bigscience/bloom-7b1', 'bigscience/bloomz-7b1-mt', 'bigscience/bloomz-7b1', 'EleutherAI/pythia-2.8b', 'EleutherAI/pythia-1.4b', 'EleutherAI/gpt-j-6b', 'EleutherAI/pythia-6.9b', 'google/flan-t5-base', 'google/gemma-2b', 'google/gemma-2b-it', 'google/gemma-7b', 'google/gemma-7b-it', 'google/flan-t5-large', 'microsoft/phi-1_5', 'microsoft/phi-2', 'microsoft/phi-1', 'allenai/OLMo-7B', 'TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T', 'TinyLlama/TinyLlama-1.1B-Chat-v1.0', 'RWKV/rwkv-5-world-1b5', 'RWKV/rwkv-5-world-3b', 'RWKV/rwkv-4-world-3b', 'RWKV/rwkv-6-world-1b6', 'RWKV/rwkv-4-world-1b5', 'RWKV/v5-Eagle-7B-HF', 'RWKV/rwkv-4-world-7b', 'RWKV/rwkv-raven-7b', 'RWKV/rwkv-6-world-3b', 'aisingapore/sealion7b', 'aisingapore/sealion3b', './rwkv-x-dev/1_3-C5-rwkv-270_pth', './rwkv-x-dev/225-EagleX-PreFT-C', './rwkv-x-dev/225-EagleX-PreFT-D', './rwkv-x-dev/1_0_pth', './rwkv-x-dev/chunk4-0_85_pth', './rwkv-x-dev/1_3-C1-rwkv-340_pth', './rwkv-x-dev/chunk1-0_8_pth', './rwkv-x-dev/chunk0-0_8_pth', './rwkv-x-dev/225-EagleX-PreFT-E', './rwkv-x-dev/225-EagleX-PreFT-B', './rwkv-x-dev/blink4-final_pth', './rwkv-x-dev/chunk2-0_8_pth', './rwkv-x-dev/chunk3-0_8_pth', './rwkv-x-dev/r3-4k-test2-fix3-blink-final_pth', './rwkv-x-dev/R4-7B-15t-With-Mask_pth', './rwkv-x-dev/r3-testchunk-1-8_pth', './rwkv-x-dev/R4-with-shuffle-rwkv-53_pth', './rwkv-x-dev/chunk7-2-0_85_pth', './rwkv-x-dev/EagleX-1_7T_pth', './rwkv-x-dev/r3-testchunk2-blink-fixed_pth', './rwkv-x-dev/r3-testchunk2-blink_pth', './rwkv-x-dev/rwkv-230_pth', './rwkv-x-dev/1_3-C0-rwkv-60_pth', './rwkv-x-dev/chunk5-0_85_pth', './rwkv-x-dev/R4-7B-Base-No-Mask_pth', './rwkv-x-dev/RWKV-5-World-1B5-v2-20231025-ctx4096', './rwkv-x-dev/R4-1B5-No-Mask_pth', './rwkv-x-dev/RWKV-32K-5B-RW_pth', './rwkv-x-dev/R4-7B-15t-32k-No-Mask_pth', './rwkv-x-dev/1_3-C0-PRERUN-rwkv-60_pth', './rwkv-x-dev/EagleX_1-7T_Chat_pth', './rwkv-x-dev/1_3-C1-rwkv-390_pth', './rwkv-x-dev/1_3-C1-rwkv-20_pth', './rwkv-x-dev/chunk8-1-0_85_pth', './rwkv-x-dev/R4-7B-Base-32k-No-Mask_pth', './rwkv-x-dev/R4-no-shuffle-rwkv-53_pth', './rwkv-x-dev/1_3-C2-rwkv-648_pth', './rwkv-x-dev/1_3-C2-rwkv-250_pth', './rwkv-x-dev/r3-testchunk-1-8-no-cuda-with-warmup_pth', './rwkv-x-dev/1_3-C0-rwkv-140_pth', './rwkv-x-dev/Eagle-225-1FT', './rwkv-x-dev/225-EagleX-PreFT-A', './rwkv-x-dev/225-EagleX-PreFT-F', './rwkv-x-dev/r3-c1-8_pth', './rwkv-x-dev/1_3-C0-PRERUN-rwkv-450_pth', './rwkv-x-dev/RWKV-5-World-3B-v2-20231118-ctx16k', './rwkv-x-dev/1_3-C0-PREPRERUN-rwkv-40_pth', './rwkv-x-dev/RWKV-5-World-7B-v2-20240128-ctx4096', './rwkv-x-dev/R4-7B-15t-No-Mask_pth', './rwkv-x-dev/1_0-c1-290_pth', './rwkv-x-dev/R4-1B5-With-Mask_pth', './rwkv-x-dev/Quetzal-N8-1', './rwkv-x-dev/1_3-C0-PREPRERUN-rwkv-30_pth', './rwkv-x-dev/1_3-C0-rwkv-70_pth', './rwkv-x-dev/chunk6-0_85_pth', './rwkv-x-dev/R4-7B-Base-With-Mask_pth', 'rwkv-x-dev/v5-Eagle-7B-1_0T-HF', './rwkv-x-dev/1_3-C0-PRERUN-rwkv-30_pth', './rwkv-x-dev/chunk7-1-0_85_pth', './rwkv-x-dev/1_3-C1-rwkv-190_pth', './rwkv-x-dev/R4-7B-15t-extd-e3_pth', './rwkv-x-dev/r3-testchunk2_pth', './rwkv-x-dev/Hermes-RWKV-v5-7B_pth', './rwkv-x-dev/1_3-C0-rwkv-153_pth', './rwkv-x-dev/R4-7B-15t-extd-e2_pth', './rwkv-x-dev/r3-testchunk-blink_pth', 'SmerkyG/rwkv-5-world-1b5', 'SmerkyG/rwkv6-world-1b6', 'SmerkyG/rwkv6-world-3b', 'SmerkyG/rwkv-5-world-3b', 'SmerkyG/rwkv-5-world-7b', 'SmerkyG/rwkv5-world-7b', 'togethercomputer/RedPajama-INCITE-7B-Base', 
'togethercomputer/RedPajama-INCITE-7B-Instruct', 'togethercomputer/RedPajama-INCITE-7B-Chat', 'facebook/opt-2.7b', 'facebook/opt-6.7b', 'facebook/opt-1.3b', 'tiiuae/falcon-7b-instruct', 'tiiuae/falcon-rw-1b', 'tiiuae/falcon-rw-7b', 'tiiuae/falcon-7b', 'TimeMobius/Mobius-RWKV-Chat-12B-128k-v4-HF', 'huggyllama/llama-7b', 'meta-llama/Llama-2-7b-chat-hf', 'meta-llama/Llama-2-7b-hf', 'state-spaces/mamba-2.8b-hf', 'state-spaces/mamba-1.4b-hf']\n",
169
  "Saved to compiled-lm-eval-results.json\n"
170
  ]
171
  }
@@ -199,7 +199,7 @@
199
  },
200
  {
201
  "cell_type": "code",
202
- "execution_count": 36,
203
  "metadata": {},
204
  "outputs": [
205
  {
@@ -272,359 +272,15 @@
272
  " <td>0.047059</td>\n",
273
  " </tr>\n",
274
  " <tr>\n",
275
- " <th>5</th>\n",
276
- " <td>bigscience/bloom-7b1</td>\n",
277
- " <td>0.570909</td>\n",
278
- " <td>0.061359</td>\n",
279
- " <td>0.570909</td>\n",
280
- " <td>0.061359</td>\n",
281
- " </tr>\n",
282
- " <tr>\n",
283
- " <th>6</th>\n",
284
- " <td>bigscience/bloomz-7b1-mt</td>\n",
285
- " <td>0.546000</td>\n",
286
- " <td>0.038321</td>\n",
287
- " <td>0.546000</td>\n",
288
- " <td>0.038321</td>\n",
289
- " </tr>\n",
290
- " <tr>\n",
291
- " <th>7</th>\n",
292
- " <td>bigscience/bloomz-7b1</td>\n",
293
- " <td>0.547818</td>\n",
294
- " <td>0.038920</td>\n",
295
- " <td>0.547818</td>\n",
296
- " <td>0.038920</td>\n",
297
- " </tr>\n",
298
- " <tr>\n",
299
- " <th>8</th>\n",
300
- " <td>EleutherAI/pythia-2.8b</td>\n",
301
- " <td>0.537455</td>\n",
302
- " <td>0.026941</td>\n",
303
- " <td>0.537455</td>\n",
304
- " <td>0.026941</td>\n",
305
- " </tr>\n",
306
- " <tr>\n",
307
- " <th>9</th>\n",
308
- " <td>EleutherAI/pythia-1.4b</td>\n",
309
- " <td>0.526545</td>\n",
310
- " <td>0.027441</td>\n",
311
- " <td>0.526545</td>\n",
312
- " <td>0.027441</td>\n",
313
- " </tr>\n",
314
- " <tr>\n",
315
- " <th>10</th>\n",
316
- " <td>EleutherAI/gpt-j-6b</td>\n",
317
- " <td>0.544182</td>\n",
318
- " <td>0.034404</td>\n",
319
- " <td>0.544182</td>\n",
320
- " <td>0.034404</td>\n",
321
- " </tr>\n",
322
- " <tr>\n",
323
- " <th>11</th>\n",
324
- " <td>EleutherAI/pythia-6.9b</td>\n",
325
- " <td>0.540545</td>\n",
326
- " <td>0.029689</td>\n",
327
- " <td>0.540545</td>\n",
328
- " <td>0.029689</td>\n",
329
- " </tr>\n",
330
- " <tr>\n",
331
- " <th>12</th>\n",
332
- " <td>google/flan-t5-base</td>\n",
333
- " <td>0.510909</td>\n",
334
- " <td>0.006743</td>\n",
335
- " <td>0.510909</td>\n",
336
- " <td>0.006743</td>\n",
337
- " </tr>\n",
338
- " <tr>\n",
339
- " <th>13</th>\n",
340
- " <td>google/gemma-2b</td>\n",
341
- " <td>0.000000</td>\n",
342
- " <td>0.000000</td>\n",
343
- " <td>NaN</td>\n",
344
- " <td>NaN</td>\n",
345
  " </tr>\n",
346
  " <tr>\n",
347
- " <th>14</th>\n",
348
- " <td>google/gemma-2b-it</td>\n",
349
- " <td>0.000000</td>\n",
350
- " <td>0.000000</td>\n",
351
- " <td>NaN</td>\n",
352
- " <td>NaN</td>\n",
353
- " </tr>\n",
354
- " <tr>\n",
355
- " <th>15</th>\n",
356
- " <td>google/gemma-7b</td>\n",
357
- " <td>0.517636</td>\n",
358
- " <td>0.006740</td>\n",
359
- " <td>0.517636</td>\n",
360
- " <td>0.006740</td>\n",
361
- " </tr>\n",
362
- " <tr>\n",
363
- " <th>16</th>\n",
364
- " <td>google/gemma-7b-it</td>\n",
365
- " <td>0.517455</td>\n",
366
- " <td>0.006742</td>\n",
367
- " <td>0.517455</td>\n",
368
- " <td>0.006742</td>\n",
369
- " </tr>\n",
370
- " <tr>\n",
371
- " <th>17</th>\n",
372
- " <td>google/flan-t5-large</td>\n",
373
- " <td>0.510545</td>\n",
374
- " <td>0.006743</td>\n",
375
- " <td>0.510545</td>\n",
376
- " <td>0.006743</td>\n",
377
- " </tr>\n",
378
- " <tr>\n",
379
- " <th>18</th>\n",
380
- " <td>microsoft/phi-1_5</td>\n",
381
- " <td>0.521636</td>\n",
382
- " <td>0.026198</td>\n",
383
- " <td>0.521636</td>\n",
384
- " <td>0.026198</td>\n",
385
- " </tr>\n",
386
- " <tr>\n",
387
- " <th>19</th>\n",
388
- " <td>microsoft/phi-2</td>\n",
389
- " <td>0.512182</td>\n",
390
- " <td>0.029742</td>\n",
391
- " <td>0.512182</td>\n",
392
- " <td>0.029742</td>\n",
393
- " </tr>\n",
394
- " <tr>\n",
395
- " <th>20</th>\n",
396
- " <td>microsoft/phi-1</td>\n",
397
- " <td>0.517636</td>\n",
398
- " <td>0.029612</td>\n",
399
- " <td>0.517636</td>\n",
400
- " <td>0.029612</td>\n",
401
- " </tr>\n",
402
- " <tr>\n",
403
- " <th>21</th>\n",
404
- " <td>allenai/OLMo-7B</td>\n",
405
- " <td>0.537818</td>\n",
406
- " <td>0.034147</td>\n",
407
- " <td>0.537818</td>\n",
408
- " <td>0.034147</td>\n",
409
- " </tr>\n",
410
- " <tr>\n",
411
- " <th>22</th>\n",
412
- " <td>TinyLlama/TinyLlama-1.1B-intermediate-step-143...</td>\n",
413
- " <td>0.529273</td>\n",
414
- " <td>0.029316</td>\n",
415
- " <td>0.529273</td>\n",
416
- " <td>0.029316</td>\n",
417
- " </tr>\n",
418
- " <tr>\n",
419
- " <th>23</th>\n",
420
- " <td>TinyLlama/TinyLlama-1.1B-Chat-v1.0</td>\n",
421
- " <td>0.528909</td>\n",
422
- " <td>0.031702</td>\n",
423
- " <td>0.528909</td>\n",
424
- " <td>0.031702</td>\n",
425
- " </tr>\n",
426
- " <tr>\n",
427
- " <th>24</th>\n",
428
- " <td>RWKV/rwkv-5-world-1b5</td>\n",
429
- " <td>0.578909</td>\n",
430
- " <td>0.044635</td>\n",
431
- " <td>0.578909</td>\n",
432
- " <td>0.044635</td>\n",
433
- " </tr>\n",
434
- " <tr>\n",
435
- " <th>25</th>\n",
436
- " <td>RWKV/rwkv-5-world-3b</td>\n",
437
- " <td>0.590000</td>\n",
438
- " <td>0.057252</td>\n",
439
- " <td>0.590000</td>\n",
440
- " <td>0.057252</td>\n",
441
- " </tr>\n",
442
- " <tr>\n",
443
- " <th>26</th>\n",
444
- " <td>RWKV/rwkv-4-world-3b</td>\n",
445
- " <td>0.575455</td>\n",
446
- " <td>0.040977</td>\n",
447
- " <td>0.575455</td>\n",
448
- " <td>0.040977</td>\n",
449
- " </tr>\n",
450
- " <tr>\n",
451
- " <th>27</th>\n",
452
- " <td>RWKV/rwkv-4-world-1b5</td>\n",
453
- " <td>0.554000</td>\n",
454
- " <td>0.039406</td>\n",
455
- " <td>0.554000</td>\n",
456
- " <td>0.039406</td>\n",
457
- " </tr>\n",
458
- " <tr>\n",
459
- " <th>28</th>\n",
460
- " <td>RWKV/v5-Eagle-7B-HF</td>\n",
461
- " <td>0.622364</td>\n",
462
- " <td>0.070563</td>\n",
463
- " <td>0.622364</td>\n",
464
- " <td>0.070563</td>\n",
465
- " </tr>\n",
466
- " <tr>\n",
467
- " <th>29</th>\n",
468
- " <td>RWKV/rwkv-4-world-7b</td>\n",
469
- " <td>0.601455</td>\n",
470
- " <td>0.053116</td>\n",
471
- " <td>0.601455</td>\n",
472
- " <td>0.053116</td>\n",
473
- " </tr>\n",
474
- " <tr>\n",
475
- " <th>30</th>\n",
476
- " <td>aisingapore/sealion7b</td>\n",
477
- " <td>0.559818</td>\n",
478
- " <td>0.060680</td>\n",
479
- " <td>0.559818</td>\n",
480
- " <td>0.060680</td>\n",
481
- " </tr>\n",
482
- " <tr>\n",
483
- " <th>31</th>\n",
484
- " <td>aisingapore/sealion3b</td>\n",
485
- " <td>0.559273</td>\n",
486
- " <td>0.054490</td>\n",
487
- " <td>0.559273</td>\n",
488
- " <td>0.054490</td>\n",
489
- " </tr>\n",
490
- " <tr>\n",
491
- " <th>32</th>\n",
492
- " <td>rwkv-x-dev/v5-Eagle-7B-1_0T-HF</td>\n",
493
- " <td>0.622364</td>\n",
494
- " <td>0.072168</td>\n",
495
- " <td>0.622364</td>\n",
496
- " <td>0.072168</td>\n",
497
- " </tr>\n",
498
- " <tr>\n",
499
- " <th>33</th>\n",
500
- " <td>SmerkyG/rwkv-5-world-1b5</td>\n",
501
- " <td>0.578727</td>\n",
502
- " <td>0.044247</td>\n",
503
- " <td>0.578727</td>\n",
504
- " <td>0.044247</td>\n",
505
- " </tr>\n",
506
- " <tr>\n",
507
- " <th>34</th>\n",
508
- " <td>SmerkyG/rwkv6-world-1b6</td>\n",
509
- " <td>0.579636</td>\n",
510
- " <td>0.052056</td>\n",
511
- " <td>0.579636</td>\n",
512
- " <td>0.052056</td>\n",
513
- " </tr>\n",
514
- " <tr>\n",
515
- " <th>35</th>\n",
516
- " <td>SmerkyG/rwkv6-world-3b</td>\n",
517
- " <td>0.595273</td>\n",
518
- " <td>0.061039</td>\n",
519
- " <td>0.595273</td>\n",
520
- " <td>0.061039</td>\n",
521
- " </tr>\n",
522
- " <tr>\n",
523
- " <th>36</th>\n",
524
- " <td>SmerkyG/rwkv-5-world-3b</td>\n",
525
- " <td>0.590182</td>\n",
526
- " <td>0.059748</td>\n",
527
- " <td>0.590182</td>\n",
528
- " <td>0.059748</td>\n",
529
- " </tr>\n",
530
- " <tr>\n",
531
- " <th>37</th>\n",
532
- " <td>SmerkyG/rwkv-5-world-7b</td>\n",
533
- " <td>0.621818</td>\n",
534
- " <td>0.071125</td>\n",
535
- " <td>0.621818</td>\n",
536
- " <td>0.071125</td>\n",
537
- " </tr>\n",
538
- " <tr>\n",
539
- " <th>38</th>\n",
540
- " <td>SmerkyG/rwkv5-world-7b</td>\n",
541
- " <td>0.000000</td>\n",
542
- " <td>0.000000</td>\n",
543
- " <td>NaN</td>\n",
544
- " <td>NaN</td>\n",
545
- " </tr>\n",
546
- " <tr>\n",
547
- " <th>39</th>\n",
548
- " <td>togethercomputer/RedPajama-INCITE-7B-Base</td>\n",
549
- " <td>0.525455</td>\n",
550
- " <td>0.036407</td>\n",
551
- " <td>0.525455</td>\n",
552
- " <td>0.036407</td>\n",
553
- " </tr>\n",
554
- " <tr>\n",
555
- " <th>40</th>\n",
556
- " <td>togethercomputer/RedPajama-INCITE-7B-Instruct</td>\n",
557
- " <td>0.528545</td>\n",
558
- " <td>0.036470</td>\n",
559
- " <td>0.528545</td>\n",
560
- " <td>0.036470</td>\n",
561
- " </tr>\n",
562
- " <tr>\n",
563
- " <th>41</th>\n",
564
- " <td>togethercomputer/RedPajama-INCITE-7B-Chat</td>\n",
565
- " <td>0.535455</td>\n",
566
- " <td>0.038723</td>\n",
567
- " <td>0.535455</td>\n",
568
- " <td>0.038723</td>\n",
569
- " </tr>\n",
570
- " <tr>\n",
571
- " <th>42</th>\n",
572
- " <td>facebook/opt-2.7b</td>\n",
573
- " <td>0.521818</td>\n",
574
- " <td>0.029821</td>\n",
575
- " <td>0.521818</td>\n",
576
- " <td>0.029821</td>\n",
577
- " </tr>\n",
578
- " <tr>\n",
579
- " <th>43</th>\n",
580
- " <td>facebook/opt-6.7b</td>\n",
581
- " <td>0.522909</td>\n",
582
- " <td>0.027216</td>\n",
583
- " <td>0.522909</td>\n",
584
- " <td>0.027216</td>\n",
585
- " </tr>\n",
586
- " <tr>\n",
587
- " <th>44</th>\n",
588
- " <td>facebook/opt-1.3b</td>\n",
589
- " <td>0.521818</td>\n",
590
- " <td>0.029112</td>\n",
591
- " <td>0.521818</td>\n",
592
- " <td>0.029112</td>\n",
593
- " </tr>\n",
594
- " <tr>\n",
595
- " <th>45</th>\n",
596
- " <td>tiiuae/falcon-7b-instruct</td>\n",
597
- " <td>0.536727</td>\n",
598
- " <td>0.053430</td>\n",
599
- " <td>0.536727</td>\n",
600
- " <td>0.053430</td>\n",
601
- " </tr>\n",
602
- " <tr>\n",
603
- " <th>46</th>\n",
604
- " <td>tiiuae/falcon-rw-1b</td>\n",
605
- " <td>0.522545</td>\n",
606
- " <td>0.029446</td>\n",
607
- " <td>0.522545</td>\n",
608
- " <td>0.029446</td>\n",
609
- " </tr>\n",
610
- " <tr>\n",
611
- " <th>47</th>\n",
612
- " <td>tiiuae/falcon-rw-7b</td>\n",
613
- " <td>0.535818</td>\n",
614
- " <td>0.033185</td>\n",
615
- " <td>0.535818</td>\n",
616
- " <td>0.033185</td>\n",
617
- " </tr>\n",
618
- " <tr>\n",
619
- " <th>48</th>\n",
620
- " <td>tiiuae/falcon-7b</td>\n",
621
- " <td>0.559636</td>\n",
622
- " <td>0.071650</td>\n",
623
- " <td>0.559636</td>\n",
624
- " <td>0.071650</td>\n",
625
- " </tr>\n",
626
- " <tr>\n",
627
- " <th>49</th>\n",
628
  " <td>huggyllama/llama-7b</td>\n",
629
  " <td>0.541818</td>\n",
630
  " <td>0.040718</td>\n",
@@ -632,7 +288,7 @@
632
  " <td>0.040718</td>\n",
633
  " </tr>\n",
634
  " <tr>\n",
635
- " <th>50</th>\n",
636
  " <td>meta-llama/Llama-2-7b-chat-hf</td>\n",
637
  " <td>0.559818</td>\n",
638
  " <td>0.054954</td>\n",
@@ -640,7 +296,7 @@
640
  " <td>0.054954</td>\n",
641
  " </tr>\n",
642
  " <tr>\n",
643
- " <th>51</th>\n",
644
  " <td>meta-llama/Llama-2-7b-hf</td>\n",
645
  " <td>0.566727</td>\n",
646
  " <td>0.052515</td>\n",
@@ -648,7 +304,7 @@
648
  " <td>0.052515</td>\n",
649
  " </tr>\n",
650
  " <tr>\n",
651
- " <th>52</th>\n",
652
  " <td>state-spaces/mamba-2.8b-hf</td>\n",
653
  " <td>0.552909</td>\n",
654
  " <td>0.035570</td>\n",
@@ -656,7 +312,7 @@
656
  " <td>0.035570</td>\n",
657
  " </tr>\n",
658
  " <tr>\n",
659
- " <th>53</th>\n",
660
  " <td>state-spaces/mamba-1.4b-hf</td>\n",
661
  " <td>0.544182</td>\n",
662
  " <td>0.031390</td>\n",
@@ -665,123 +321,40 @@
665
  " </tr>\n",
666
  " </tbody>\n",
667
  "</table>\n",
 
668
  "</div>"
669
  ],
670
  "text/plain": [
671
- " model avg_acc \\\n",
672
- "0 mistralai/Mistral-7B-Instruct-v0.2 0.000000 \n",
673
- "1 mistralai/Mistral-7B-v0.1 0.559455 \n",
674
- "2 mosaicml/mpt-7b-instruct 0.537091 \n",
675
- "3 mosaicml/mpt-7b 0.536000 \n",
676
- "4 mosaicml/mpt-7b-chat 0.538000 \n",
677
- "5 bigscience/bloom-7b1 0.570909 \n",
678
- "6 bigscience/bloomz-7b1-mt 0.546000 \n",
679
- "7 bigscience/bloomz-7b1 0.547818 \n",
680
- "8 EleutherAI/pythia-2.8b 0.537455 \n",
681
- "9 EleutherAI/pythia-1.4b 0.526545 \n",
682
- "10 EleutherAI/gpt-j-6b 0.544182 \n",
683
- "11 EleutherAI/pythia-6.9b 0.540545 \n",
684
- "12 google/flan-t5-base 0.510909 \n",
685
- "13 google/gemma-2b 0.000000 \n",
686
- "14 google/gemma-2b-it 0.000000 \n",
687
- "15 google/gemma-7b 0.517636 \n",
688
- "16 google/gemma-7b-it 0.517455 \n",
689
- "17 google/flan-t5-large 0.510545 \n",
690
- "18 microsoft/phi-1_5 0.521636 \n",
691
- "19 microsoft/phi-2 0.512182 \n",
692
- "20 microsoft/phi-1 0.517636 \n",
693
- "21 allenai/OLMo-7B 0.537818 \n",
694
- "22 TinyLlama/TinyLlama-1.1B-intermediate-step-143... 0.529273 \n",
695
- "23 TinyLlama/TinyLlama-1.1B-Chat-v1.0 0.528909 \n",
696
- "24 RWKV/rwkv-5-world-1b5 0.578909 \n",
697
- "25 RWKV/rwkv-5-world-3b 0.590000 \n",
698
- "26 RWKV/rwkv-4-world-3b 0.575455 \n",
699
- "27 RWKV/rwkv-4-world-1b5 0.554000 \n",
700
- "28 RWKV/v5-Eagle-7B-HF 0.622364 \n",
701
- "29 RWKV/rwkv-4-world-7b 0.601455 \n",
702
- "30 aisingapore/sealion7b 0.559818 \n",
703
- "31 aisingapore/sealion3b 0.559273 \n",
704
- "32 rwkv-x-dev/v5-Eagle-7B-1_0T-HF 0.622364 \n",
705
- "33 SmerkyG/rwkv-5-world-1b5 0.578727 \n",
706
- "34 SmerkyG/rwkv6-world-1b6 0.579636 \n",
707
- "35 SmerkyG/rwkv6-world-3b 0.595273 \n",
708
- "36 SmerkyG/rwkv-5-world-3b 0.590182 \n",
709
- "37 SmerkyG/rwkv-5-world-7b 0.621818 \n",
710
- "38 SmerkyG/rwkv5-world-7b 0.000000 \n",
711
- "39 togethercomputer/RedPajama-INCITE-7B-Base 0.525455 \n",
712
- "40 togethercomputer/RedPajama-INCITE-7B-Instruct 0.528545 \n",
713
- "41 togethercomputer/RedPajama-INCITE-7B-Chat 0.535455 \n",
714
- "42 facebook/opt-2.7b 0.521818 \n",
715
- "43 facebook/opt-6.7b 0.522909 \n",
716
- "44 facebook/opt-1.3b 0.521818 \n",
717
- "45 tiiuae/falcon-7b-instruct 0.536727 \n",
718
- "46 tiiuae/falcon-rw-1b 0.522545 \n",
719
- "47 tiiuae/falcon-rw-7b 0.535818 \n",
720
- "48 tiiuae/falcon-7b 0.559636 \n",
721
- "49 huggyllama/llama-7b 0.541818 \n",
722
- "50 meta-llama/Llama-2-7b-chat-hf 0.559818 \n",
723
- "51 meta-llama/Llama-2-7b-hf 0.566727 \n",
724
- "52 state-spaces/mamba-2.8b-hf 0.552909 \n",
725
- "53 state-spaces/mamba-1.4b-hf 0.544182 \n",
726
  "\n",
727
- " avg_acc_stderr xcopa (acc) xcopa (acc_stderr) \n",
728
- "0 0.000000 NaN NaN \n",
729
- "1 0.053879 0.559455 0.053879 \n",
730
- "2 0.041919 0.537091 0.041919 \n",
731
- "3 0.042339 0.536000 0.042339 \n",
732
- "4 0.047059 0.538000 0.047059 \n",
733
- "5 0.061359 0.570909 0.061359 \n",
734
- "6 0.038321 0.546000 0.038321 \n",
735
- "7 0.038920 0.547818 0.038920 \n",
736
- "8 0.026941 0.537455 0.026941 \n",
737
- "9 0.027441 0.526545 0.027441 \n",
738
- "10 0.034404 0.544182 0.034404 \n",
739
- "11 0.029689 0.540545 0.029689 \n",
740
- "12 0.006743 0.510909 0.006743 \n",
741
- "13 0.000000 NaN NaN \n",
742
- "14 0.000000 NaN NaN \n",
743
- "15 0.006740 0.517636 0.006740 \n",
744
- "16 0.006742 0.517455 0.006742 \n",
745
- "17 0.006743 0.510545 0.006743 \n",
746
- "18 0.026198 0.521636 0.026198 \n",
747
- "19 0.029742 0.512182 0.029742 \n",
748
- "20 0.029612 0.517636 0.029612 \n",
749
- "21 0.034147 0.537818 0.034147 \n",
750
- "22 0.029316 0.529273 0.029316 \n",
751
- "23 0.031702 0.528909 0.031702 \n",
752
- "24 0.044635 0.578909 0.044635 \n",
753
- "25 0.057252 0.590000 0.057252 \n",
754
- "26 0.040977 0.575455 0.040977 \n",
755
- "27 0.039406 0.554000 0.039406 \n",
756
- "28 0.070563 0.622364 0.070563 \n",
757
- "29 0.053116 0.601455 0.053116 \n",
758
- "30 0.060680 0.559818 0.060680 \n",
759
- "31 0.054490 0.559273 0.054490 \n",
760
- "32 0.072168 0.622364 0.072168 \n",
761
- "33 0.044247 0.578727 0.044247 \n",
762
- "34 0.052056 0.579636 0.052056 \n",
763
- "35 0.061039 0.595273 0.061039 \n",
764
- "36 0.059748 0.590182 0.059748 \n",
765
- "37 0.071125 0.621818 0.071125 \n",
766
- "38 0.000000 NaN NaN \n",
767
- "39 0.036407 0.525455 0.036407 \n",
768
- "40 0.036470 0.528545 0.036470 \n",
769
- "41 0.038723 0.535455 0.038723 \n",
770
- "42 0.029821 0.521818 0.029821 \n",
771
- "43 0.027216 0.522909 0.027216 \n",
772
- "44 0.029112 0.521818 0.029112 \n",
773
- "45 0.053430 0.536727 0.053430 \n",
774
- "46 0.029446 0.522545 0.029446 \n",
775
- "47 0.033185 0.535818 0.033185 \n",
776
- "48 0.071650 0.559636 0.071650 \n",
777
- "49 0.040718 0.541818 0.040718 \n",
778
- "50 0.054954 0.559818 0.054954 \n",
779
- "51 0.052515 0.566727 0.052515 \n",
780
- "52 0.035570 0.552909 0.035570 \n",
781
- "53 0.031390 0.544182 0.031390 "
782
  ]
783
  },
784
- "execution_count": 36,
785
  "metadata": {},
786
  "output_type": "execute_result"
787
  }
@@ -982,32 +555,32 @@
982
  },
983
  {
984
  "cell_type": "code",
985
- "execution_count": 37,
986
  "metadata": {},
987
  "outputs": [
988
  {
989
  "name": "stdout",
990
  "output_type": "stream",
991
  "text": [
992
- "total 36976\n",
993
- "-rw-r--r--@ 1 picocreator staff 1.2M Apr 15 17:48 bf16-all-results-and-groups.csv\n",
994
- "-rw-r--r--@ 1 picocreator staff 318K Apr 15 17:48 bf16-all-simplified-results-and-groups.csv\n",
995
- "-rw-r--r--@ 1 picocreator staff 318K Apr 15 17:48 bf16-all-sorted-results-and-groups.csv\n",
996
- "-rw-r--r--@ 1 picocreator staff 80K Apr 15 17:48 bf16-eng-focus.csv\n",
997
- "-rw-r--r--@ 1 picocreator staff 1.1M Apr 15 17:48 bf16-eng-results.csv\n",
998
- "-rw-r--r--@ 1 picocreator staff 95K Apr 15 17:48 bf16-eng-summary.csv\n",
999
- "-rw-r--r--@ 1 picocreator staff 120K Apr 15 17:48 bf16-multilang-results.csv\n",
1000
- "-rw-r--r--@ 1 picocreator staff 17K Apr 15 17:48 bf16-multilang-summary.csv\n",
1001
- "-rw-r--r--@ 1 picocreator staff 80K Apr 15 17:48 bf16-sorted-eng-focus.csv\n",
1002
- "-rw-r--r--@ 1 picocreator staff 1.1M Apr 15 17:48 bf16-sorted-eng-results.csv\n",
1003
- "-rw-r--r--@ 1 picocreator staff 95K Apr 15 17:48 bf16-sorted-eng-summary.csv\n",
1004
- "-rw-r--r--@ 1 picocreator staff 17K Apr 15 17:48 bf16-sorted-multilang-summary.csv\n",
1005
- "-rw-r--r-- 1 picocreator staff 9.7M Apr 15 17:48 compiled-lm-eval-results.json\n",
1006
- "-rw-r--r--@ 1 picocreator staff 168K Apr 2 01:34 rwkv-x-dev-bf16-sorted-eng-180.csv\n",
1007
- "-rw-r--r--@ 1 picocreator staff 30K Apr 2 01:34 rwkv-x-dev-bf16-sorted-eng-21-focus.csv\n",
1008
- "-rw-r--r--@ 1 picocreator staff 389K Apr 15 17:48 rwkv-x-dev-bf16-sorted-eng-all.csv\n",
1009
- "-rw-r--r--@ 1 picocreator staff 28K Apr 15 17:48 rwkv-x-dev-bf16-sorted-eng-focus.csv\n",
1010
- "-rw-r--r--@ 1 picocreator staff 24K Apr 15 17:48 rwkv-x-dev-bf16-sorted-multilang-summary.csv\n"
1011
  ]
1012
  }
1013
  ],
@@ -1018,6 +591,11 @@
1018
  "#\n",
1019
  "##################################################\n",
1020
  "\n",
1021
  "# Overall results\n",
1022
  "all_results = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"] )\n",
1023
  "all_results.to_csv('summary/bf16-all-results-and-groups.csv', index=False)\n",
@@ -1043,7 +621,7 @@
1043
  "multilang_grp_sorted.to_csv('summary/bf16-sorted-multilang-summary.csv', index=False)\n",
1044
  "\n",
1045
  "# RWKV perf tracking\n",
1046
- "rwkv_multilang_grp_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=multiLang_tGrps, inResults=[], exModels=[], inModels=[\"./rwkv-x-dev/*\", \"rwkv-x-dev/*\", \"RWKV/*\", \"meta-llama/Llama-2-7b*\", \"mistralai/Mistral-7B-v0.1\"], sort=True )\n",
1047
  "rwkv_multilang_grp_sorted.to_csv('summary/rwkv-x-dev-bf16-sorted-multilang-summary.csv', index=False)\n",
1048
  "\n",
1049
  "# All other results\n",
@@ -1071,11 +649,11 @@
1071
  "eng_focus_sorted.to_csv('summary/bf16-sorted-eng-focus.csv', index=False)\n",
1072
  "\n",
1073
  "# RWKV perf tracking\n",
1074
- "rwkv_eng_focus_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=eng_focus_tGrps, inResults=eng_focus_tTest, exModels=[], inModels=[\"./rwkv-x-dev/*\", \"rwkv-x-dev/*\", \"RWKV/*\", \"meta-llama/Llama-2-7b*\", \"mistralai/Mistral-7B-v0.1\"], sort=True, simplified=True )\n",
1075
  "rwkv_eng_focus_sorted.to_csv('summary/rwkv-x-dev-bf16-sorted-eng-focus.csv', index=False)\n",
1076
  "\n",
1077
  "# RWKV perf tracking\n",
1078
- "rwkv_eng_all_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"], exModels=[], inModels=[\"./rwkv-x-dev/*\", \"rwkv-x-dev/*\", \"RWKV/*\", \"meta-llama/Llama-2-7b*\", \"mistralai/Mistral-7B-v0.1\"], sort=True, simplified=True )\n",
1079
  "rwkv_eng_all_sorted.to_csv('summary/rwkv-x-dev-bf16-sorted-eng-all.csv', index=False)\n",
1080
  "\n",
1081
  "# # Overall results\n",
@@ -1088,7 +666,7 @@
1088
  },
1089
  {
1090
  "cell_type": "code",
1091
- "execution_count": 38,
1092
  "metadata": {},
1093
  "outputs": [],
1094
  "source": [
 
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
+ "execution_count": 43,
6
  "metadata": {},
7
  "outputs": [
8
  {
 
11
  "text": [
12
  "Defaulting to user installation because normal site-packages is not writeable\n",
13
  "Requirement already satisfied: pandas in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (2.2.0)\n",
 
 
14
  "Requirement already satisfied: pytz>=2020.1 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (2024.1)\n",
15
+ "Requirement already satisfied: numpy<2,>=1.22.4 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (1.26.1)\n",
16
  "Requirement already satisfied: tzdata>=2022.7 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (2024.1)\n",
17
+ "Requirement already satisfied: python-dateutil>=2.8.2 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (2.8.2)\n",
18
  "Requirement already satisfied: six>=1.5 in /Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/site-packages (from python-dateutil>=2.8.2->pandas) (1.15.0)\n",
19
+ "\u001b[33mWARNING: You are using pip version 21.2.4; however, version 24.1.2 is available.\n",
20
  "You should consider upgrading via the '/Library/Developer/CommandLineTools/usr/bin/python3 -m pip install --upgrade pip' command.\u001b[0m\n"
21
  ]
22
  }
 
36
  },
37
  {
38
  "cell_type": "code",
39
+ "execution_count": 44,
40
  "metadata": {},
41
  "outputs": [
42
  {
43
  "name": "stdout",
44
  "output_type": "stream",
45
  "text": [
46
+ "Found 6042 results.json files\n"
47
  ]
48
  }
49
  ],
 
71
  },
72
  {
73
  "cell_type": "code",
74
+ "execution_count": 45,
75
  "metadata": {},
76
  "outputs": [
77
  {
 
156
  },
157
  {
158
  "cell_type": "code",
159
+ "execution_count": 46,
160
  "metadata": {},
161
  "outputs": [
162
  {
163
  "name": "stdout",
164
  "output_type": "stream",
165
  "text": [
166
+ "Found 130 models\n",
167
  "Models: \n",
168
+ "['mistralai/Mistral-7B-Instruct-v0.2', 'mistralai/Mistral-7B-v0.1', 'mosaicml/mpt-7b-instruct', 'mosaicml/mpt-7b', 'mosaicml/mpt-7b-chat', 'bigscience/bloom-7b1', 'bigscience/bloomz-7b1-mt', 'bigscience/bloomz-7b1', 'EleutherAI/pythia-2.8b', 'EleutherAI/pythia-1.4b', 'EleutherAI/gpt-j-6b', 'EleutherAI/pythia-6.9b', 'google/flan-t5-base', 'google/gemma-2b', 'google/gemma-2b-it', 'google/gemma-7b', 'google/gemma-7b-it', 'google/flan-t5-large', 'microsoft/phi-1_5', 'microsoft/phi-2', 'microsoft/phi-1', 'allenai/OLMo-7B', 'TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T', 'TinyLlama/TinyLlama-1.1B-Chat-v1.0', 'RWKV/rwkv-5-world-1b5', 'RWKV/rwkv-5-world-3b', 'RWKV/rwkv-4-world-3b', 'RWKV/v5-EagleX-v2-7B-HF', 'RWKV/rwkv-6-world-1b6', 'RWKV/rwkv-4-world-1b5', 'RWKV/v5-Eagle-7B-HF', 'RWKV/v6-Finch-7B-HF', 'RWKV/rwkv-6-world-3b-v2.1', 'RWKV/rwkv-4-world-7b', 'RWKV/v6-Finch-14B-HF', 'RWKV/rwkv-raven-7b', 'RWKV/rwkv-6-world-3b', 'aisingapore/sealion7b', 'aisingapore/sealion3b', './rwkv-x-dev/1_3-C5-rwkv-270_pth', './rwkv-x-dev/225-EagleX-PreFT-C', './rwkv-x-dev/225-EagleX-PreFT-D', './rwkv-x-dev/1_0_pth', './rwkv-x-dev/chunk4-0_85_pth', './rwkv-x-dev/1_3-C1-rwkv-340_pth', './rwkv-x-dev/chunk1-0_8_pth', './rwkv-x-dev/chunk0-0_8_pth', './rwkv-x-dev/225-EagleX-PreFT-E', './rwkv-x-dev/225-EagleX-PreFT-B', './rwkv-x-dev/blink4-final_pth', './rwkv-x-dev/chunk2-0_8_pth', './rwkv-x-dev/chunk3-0_8_pth', './rwkv-x-dev/r3-4k-test2-fix3-blink-final_pth', './rwkv-x-dev/R4-7B-15t-With-Mask_pth', './rwkv-x-dev/r3-testchunk-1-8_pth', './rwkv-x-dev/R4-with-shuffle-rwkv-53_pth', './rwkv-x-dev/chunk7-2-0_85_pth', './rwkv-x-dev/EagleX-1_7T_pth', './rwkv-x-dev/r3-testchunk2-blink-fixed_pth', './rwkv-x-dev/r3-testchunk2-blink_pth', './rwkv-x-dev/rwkv-230_pth', './rwkv-x-dev/1_3-C0-rwkv-60_pth', './rwkv-x-dev/chunk5-0_85_pth', './rwkv-x-dev/R4-7B-Base-No-Mask_pth', './rwkv-x-dev/RWKV-5-World-1B5-v2-20231025-ctx4096', './rwkv-x-dev/R4-1B5-No-Mask_pth', './rwkv-x-dev/RWKV-32K-5B-RW_pth', './rwkv-x-dev/R4-7B-15t-32k-No-Mask_pth', './rwkv-x-dev/1_3-C0-PRERUN-rwkv-60_pth', './rwkv-x-dev/EagleX_1-7T_Chat_pth', './rwkv-x-dev/1_3-C1-rwkv-390_pth', './rwkv-x-dev/1_3-C1-rwkv-20_pth', './rwkv-x-dev/chunk8-1-0_85_pth', './rwkv-x-dev/R4-7B-Base-32k-No-Mask_pth', './rwkv-x-dev/R4-no-shuffle-rwkv-53_pth', './rwkv-x-dev/1_3-C2-rwkv-648_pth', './rwkv-x-dev/1_3-C2-rwkv-250_pth', './rwkv-x-dev/r3-testchunk-1-8-no-cuda-with-warmup_pth', './rwkv-x-dev/1_3-C0-rwkv-140_pth', './rwkv-x-dev/bruber_9b', './rwkv-x-dev/Eagle-225-1FT', './rwkv-x-dev/225-EagleX-PreFT-A', './rwkv-x-dev/225-EagleX-PreFT-F', './rwkv-x-dev/r3-c1-8_pth', './rwkv-x-dev/1_3-C0-PRERUN-rwkv-450_pth', './rwkv-x-dev/RWKV-5-World-3B-v2-20231118-ctx16k', './rwkv-x-dev/1_3-C0-PREPRERUN-rwkv-40_pth', './rwkv-x-dev/RWKV-5-World-7B-v2-20240128-ctx4096', './rwkv-x-dev/R4-7B-15t-No-Mask_pth', './rwkv-x-dev/1_0-c1-290_pth', './rwkv-x-dev/R4-1B5-With-Mask_pth', './rwkv-x-dev/Quetzal-N8-1', './rwkv-x-dev/1_3-C0-PREPRERUN-rwkv-30_pth', './rwkv-x-dev/1_3-C0-rwkv-70_pth', './rwkv-x-dev/chunk6-0_85_pth', './rwkv-x-dev/R4-7B-Base-With-Mask_pth', 'rwkv-x-dev/v5-Eagle-7B-1_0T-HF', './rwkv-x-dev/1_3-C0-PRERUN-rwkv-30_pth', './rwkv-x-dev/chunk7-1-0_85_pth', './rwkv-x-dev/1_3-C1-rwkv-190_pth', './rwkv-x-dev/R4-7B-15t-extd-e3_pth', './rwkv-x-dev/r3-testchunk2_pth', './rwkv-x-dev/Hermes-RWKV-v5-7B_pth', './rwkv-x-dev/1_3-C0-rwkv-153_pth', './rwkv-x-dev/R4-7B-15t-extd-e2_pth', './rwkv-x-dev/r3-testchunk-blink_pth', 'SmerkyG/rwkv-5-world-1b5', 'SmerkyG/rwkv6-world-1b6', 'SmerkyG/rwkv6-world-3b', 
'SmerkyG/rwkv-5-world-3b', 'SmerkyG/rwkv-5-world-7b', 'SmerkyG/rwkv5-world-7b', 'togethercomputer/RedPajama-INCITE-7B-Base', 'togethercomputer/RedPajama-INCITE-7B-Instruct', 'togethercomputer/RedPajama-INCITE-7B-Chat', 'facebook/opt-2.7b', 'facebook/opt-6.7b', 'facebook/opt-1.3b', 'tiiuae/falcon-7b-instruct', 'tiiuae/falcon-rw-1b', 'tiiuae/falcon-rw-7b', 'tiiuae/falcon-7b', 'm8than/Finch-14B-Continued', 'm8than/FinchX-Med', 'TimeMobius/Mobius-RWKV-Chat-12B-128k-v4-HF', 'huggyllama/llama-7b', 'meta-llama/Llama-2-7b-chat-hf', 'meta-llama/Llama-2-7b-hf', 'state-spaces/mamba-2.8b-hf', 'state-spaces/mamba-1.4b-hf']\n",
169
  "Saved to compiled-lm-eval-results.json\n"
170
  ]
171
  }
 
199
  },
200
  {
201
  "cell_type": "code",
202
+ "execution_count": 47,
203
  "metadata": {},
204
  "outputs": [
205
  {
 
272
  " <td>0.047059</td>\n",
273
  " </tr>\n",
274
  " <tr>\n",
275
+ " <th>...</th>\n",
276
+ " <td>...</td>\n",
277
+ " <td>...</td>\n",
278
+ " <td>...</td>\n",
279
+ " <td>...</td>\n",
280
+ " <td>...</td>\n",
281
  " </tr>\n",
282
  " <tr>\n",
283
+ " <th>56</th>\n",
284
  " <td>huggyllama/llama-7b</td>\n",
285
  " <td>0.541818</td>\n",
286
  " <td>0.040718</td>\n",
 
288
  " <td>0.040718</td>\n",
289
  " </tr>\n",
290
  " <tr>\n",
291
+ " <th>57</th>\n",
292
  " <td>meta-llama/Llama-2-7b-chat-hf</td>\n",
293
  " <td>0.559818</td>\n",
294
  " <td>0.054954</td>\n",
 
296
  " <td>0.054954</td>\n",
297
  " </tr>\n",
298
  " <tr>\n",
299
+ " <th>58</th>\n",
300
  " <td>meta-llama/Llama-2-7b-hf</td>\n",
301
  " <td>0.566727</td>\n",
302
  " <td>0.052515</td>\n",
 
304
  " <td>0.052515</td>\n",
305
  " </tr>\n",
306
  " <tr>\n",
307
+ " <th>59</th>\n",
308
  " <td>state-spaces/mamba-2.8b-hf</td>\n",
309
  " <td>0.552909</td>\n",
310
  " <td>0.035570</td>\n",
 
312
  " <td>0.035570</td>\n",
313
  " </tr>\n",
314
  " <tr>\n",
315
+ " <th>60</th>\n",
316
  " <td>state-spaces/mamba-1.4b-hf</td>\n",
317
  " <td>0.544182</td>\n",
318
  " <td>0.031390</td>\n",
 
321
  " </tr>\n",
322
  " </tbody>\n",
323
  "</table>\n",
324
+ "<p>61 rows × 5 columns</p>\n",
325
  "</div>"
326
  ],
327
  "text/plain": [
328
+ " model avg_acc avg_acc_stderr xcopa (acc) \\\n",
329
+ "0 mistralai/Mistral-7B-Instruct-v0.2 0.000000 0.000000 NaN \n",
330
+ "1 mistralai/Mistral-7B-v0.1 0.559455 0.053879 0.559455 \n",
331
+ "2 mosaicml/mpt-7b-instruct 0.537091 0.041919 0.537091 \n",
332
+ "3 mosaicml/mpt-7b 0.536000 0.042339 0.536000 \n",
333
+ "4 mosaicml/mpt-7b-chat 0.538000 0.047059 0.538000 \n",
334
+ ".. ... ... ... ... \n",
335
+ "56 huggyllama/llama-7b 0.541818 0.040718 0.541818 \n",
336
+ "57 meta-llama/Llama-2-7b-chat-hf 0.559818 0.054954 0.559818 \n",
337
+ "58 meta-llama/Llama-2-7b-hf 0.566727 0.052515 0.566727 \n",
338
+ "59 state-spaces/mamba-2.8b-hf 0.552909 0.035570 0.552909 \n",
339
+ "60 state-spaces/mamba-1.4b-hf 0.544182 0.031390 0.544182 \n",
340
  "\n",
341
+ " xcopa (acc_stderr) \n",
342
+ "0 NaN \n",
343
+ "1 0.053879 \n",
344
+ "2 0.041919 \n",
345
+ "3 0.042339 \n",
346
+ "4 0.047059 \n",
347
+ ".. ... \n",
348
+ "56 0.040718 \n",
349
+ "57 0.054954 \n",
350
+ "58 0.052515 \n",
351
+ "59 0.035570 \n",
352
+ "60 0.031390 \n",
353
+ "\n",
354
+ "[61 rows x 5 columns]"
355
  ]
356
  },
357
+ "execution_count": 47,
358
  "metadata": {},
359
  "output_type": "execute_result"
360
  }
 
555
  },
556
  {
557
  "cell_type": "code",
558
+ "execution_count": 48,
559
  "metadata": {},
560
  "outputs": [
561
  {
562
  "name": "stdout",
563
  "output_type": "stream",
564
  "text": [
565
+ "total 38624\n",
566
+ "-rw-r--r--@ 1 picocreator staff 1.3M Jul 26 09:22 bf16-all-results-and-groups.csv\n",
567
+ "-rw-r--r--@ 1 picocreator staff 350K Jul 26 09:22 bf16-all-simplified-results-and-groups.csv\n",
568
+ "-rw-r--r--@ 1 picocreator staff 350K Jul 26 09:22 bf16-all-sorted-results-and-groups.csv\n",
569
+ "-rw-r--r--@ 1 picocreator staff 91K Jul 26 09:22 bf16-eng-focus.csv\n",
570
+ "-rw-r--r--@ 1 picocreator staff 1.2M Jul 26 09:22 bf16-eng-results.csv\n",
571
+ "-rw-r--r--@ 1 picocreator staff 105K Jul 26 09:22 bf16-eng-summary.csv\n",
572
+ "-rw-r--r--@ 1 picocreator staff 134K Jul 26 09:22 bf16-multilang-results.csv\n",
573
+ "-rw-r--r--@ 1 picocreator staff 19K Jul 26 09:22 bf16-multilang-summary.csv\n",
574
+ "-rw-r--r--@ 1 picocreator staff 91K Jul 26 09:22 bf16-sorted-eng-focus.csv\n",
575
+ "-rw-r--r--@ 1 picocreator staff 1.2M Jul 26 09:22 bf16-sorted-eng-results.csv\n",
576
+ "-rw-r--r--@ 1 picocreator staff 105K Jul 26 09:22 bf16-sorted-eng-summary.csv\n",
577
+ "-rw-r--r--@ 1 picocreator staff 19K Jul 26 09:22 bf16-sorted-multilang-summary.csv\n",
578
+ "-rw-r--r-- 1 picocreator staff 10M Jul 26 09:22 compiled-lm-eval-results.json\n",
579
+ "-rw-r--r--@ 1 picocreator staff 184K Jul 26 09:21 rwkv-x-dev-bf16-sorted-eng-180.csv\n",
580
+ "-rw-r--r--@ 1 picocreator staff 33K Jul 26 09:21 rwkv-x-dev-bf16-sorted-eng-21-focus.csv\n",
581
+ "-rw-r--r--@ 1 picocreator staff 107K Jul 26 09:22 rwkv-x-dev-bf16-sorted-eng-all.csv\n",
582
+ "-rw-r--r--@ 1 picocreator staff 6.7K Jul 26 09:22 rwkv-x-dev-bf16-sorted-eng-focus.csv\n",
583
+ "-rw-r--r--@ 1 picocreator staff 5.7K Jul 26 09:22 rwkv-x-dev-bf16-sorted-multilang-summary.csv\n"
584
  ]
585
  }
586
  ],
 
591
  "#\n",
592
  "##################################################\n",
593
  "\n",
594
+ "FOCUS_MODEL_LIST=[\n",
595
+ " # \"./rwkv-x-dev/*\", \n",
596
+ " \"rwkv-x-dev/*\", \"RWKV/*\", \"meta-llama/Llama-2-7b*\", \"mistralai/Mistral-7B-v0.1\", \"m8than/*\"\n",
597
+ "]\n",
598
+ "\n",
599
  "# Overall results\n",
600
  "all_results = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"] )\n",
601
  "all_results.to_csv('summary/bf16-all-results-and-groups.csv', index=False)\n",
 
621
  "multilang_grp_sorted.to_csv('summary/bf16-sorted-multilang-summary.csv', index=False)\n",
622
  "\n",
623
  "# RWKV perf tracking\n",
624
+ "rwkv_multilang_grp_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=multiLang_tGrps, inResults=[], exModels=[], inModels=FOCUS_MODEL_LIST, sort=True )\n",
625
  "rwkv_multilang_grp_sorted.to_csv('summary/rwkv-x-dev-bf16-sorted-multilang-summary.csv', index=False)\n",
626
  "\n",
627
  "# All other results\n",
 
649
  "eng_focus_sorted.to_csv('summary/bf16-sorted-eng-focus.csv', index=False)\n",
650
  "\n",
651
  "# RWKV perf tracking\n",
652
+ "rwkv_eng_focus_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=eng_focus_tGrps, inResults=eng_focus_tTest, exModels=[], inModels=FOCUS_MODEL_LIST, sort=True, simplified=True )\n",
653
  "rwkv_eng_focus_sorted.to_csv('summary/rwkv-x-dev-bf16-sorted-eng-focus.csv', index=False)\n",
654
  "\n",
655
  "# RWKV perf tracking\n",
656
+ "rwkv_eng_all_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"], exModels=[], inModels=FOCUS_MODEL_LIST, sort=True, simplified=True )\n",
657
  "rwkv_eng_all_sorted.to_csv('summary/rwkv-x-dev-bf16-sorted-eng-all.csv', index=False)\n",
658
  "\n",
659
  "# # Overall results\n",
 
666
  },
667
  {
668
  "cell_type": "code",
669
+ "execution_count": 49,
670
  "metadata": {},
671
  "outputs": [],
672
  "source": [
lm-eval-output/RWKV/v6-Finch-7B-HF/xnli/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
  "results": {
3
  "xnli": {
4
- "acc,none": 0.4420883534136546,
5
- "acc_stderr,none": 0.050900528447616215,
6
  "alias": "xnli"
7
  },
8
  "xnli_ar": {
@@ -11,8 +11,8 @@
11
  "alias": " - xnli_ar"
12
  },
13
  "xnli_bg": {
14
- "acc,none": 0.4718875502008032,
15
- "acc_stderr,none": 0.010006219242553592,
16
  "alias": " - xnli_bg"
17
  },
18
  "xnli_de": {
@@ -21,70 +21,70 @@
21
  "alias": " - xnli_de"
22
  },
23
  "xnli_el": {
24
- "acc,none": 0.4,
25
- "acc_stderr,none": 0.009819585875881302,
26
  "alias": " - xnli_el"
27
  },
28
  "xnli_en": {
29
- "acc,none": 0.5417670682730924,
30
- "acc_stderr,none": 0.009987044882812572,
31
  "alias": " - xnli_en"
32
  },
33
  "xnli_es": {
34
- "acc,none": 0.5076305220883535,
35
- "acc_stderr,none": 0.010020905731542311,
36
  "alias": " - xnli_es"
37
  },
38
  "xnli_fr": {
39
- "acc,none": 0.4979919678714859,
40
- "acc_stderr,none": 0.010021992045038411,
41
  "alias": " - xnli_fr"
42
  },
43
  "xnli_hi": {
44
- "acc,none": 0.43815261044176707,
45
- "acc_stderr,none": 0.009945106474553728,
46
  "alias": " - xnli_hi"
47
  },
48
  "xnli_ru": {
49
- "acc,none": 0.4811244979919679,
50
- "acc_stderr,none": 0.010014928901071302,
51
  "alias": " - xnli_ru"
52
  },
53
  "xnli_sw": {
54
- "acc,none": 0.3899598393574297,
55
- "acc_stderr,none": 0.009776349218193002,
56
  "alias": " - xnli_sw"
57
  },
58
  "xnli_th": {
59
- "acc,none": 0.42449799196787147,
60
- "acc_stderr,none": 0.009907151253284258,
61
  "alias": " - xnli_th"
62
  },
63
  "xnli_tr": {
64
- "acc,none": 0.46184738955823296,
65
- "acc_stderr,none": 0.00999285357974995,
66
  "alias": " - xnli_tr"
67
  },
68
  "xnli_ur": {
69
- "acc,none": 0.41726907630522087,
70
- "acc_stderr,none": 0.009883930537517774,
71
  "alias": " - xnli_ur"
72
  },
73
  "xnli_vi": {
74
- "acc,none": 0.40642570281124496,
75
- "acc_stderr,none": 0.009844999034464208,
76
  "alias": " - xnli_vi"
77
  },
78
  "xnli_zh": {
79
- "acc,none": 0.3634538152610442,
80
- "acc_stderr,none": 0.00964111198725755,
81
  "alias": " - xnli_zh"
82
  }
83
  },
84
  "groups": {
85
  "xnli": {
86
- "acc,none": 0.4420883534136546,
87
- "acc_stderr,none": 0.050900528447616215,
88
  "alias": "xnli"
89
  }
90
  },
 
1
  {
2
  "results": {
3
  "xnli": {
4
+ "acc,none": 0.4419812583668005,
5
+ "acc_stderr,none": 0.05072266385982506,
6
  "alias": "xnli"
7
  },
8
  "xnli_ar": {
 
11
  "alias": " - xnli_ar"
12
  },
13
  "xnli_bg": {
14
+ "acc,none": 0.4714859437751004,
15
+ "acc_stderr,none": 0.010005762674605288,
16
  "alias": " - xnli_bg"
17
  },
18
  "xnli_de": {
 
21
  "alias": " - xnli_de"
22
  },
23
  "xnli_el": {
24
+ "acc,none": 0.39959839357429716,
25
+ "acc_stderr,none": 0.009817939267958266,
26
  "alias": " - xnli_el"
27
  },
28
  "xnli_en": {
29
+ "acc,none": 0.5401606425702812,
30
+ "acc_stderr,none": 0.009989691810169688,
31
  "alias": " - xnli_en"
32
  },
33
  "xnli_es": {
34
+ "acc,none": 0.5072289156626506,
35
+ "acc_stderr,none": 0.010021025361119635,
36
  "alias": " - xnli_es"
37
  },
38
  "xnli_fr": {
39
+ "acc,none": 0.4991967871485944,
40
+ "acc_stderr,none": 0.010022059935722397,
41
  "alias": " - xnli_fr"
42
  },
43
  "xnli_hi": {
44
+ "acc,none": 0.4393574297188755,
45
+ "acc_stderr,none": 0.00994808700111736,
46
  "alias": " - xnli_hi"
47
  },
48
  "xnli_ru": {
49
+ "acc,none": 0.4815261044176707,
50
+ "acc_stderr,none": 0.010015229768356988,
51
  "alias": " - xnli_ru"
52
  },
53
  "xnli_sw": {
54
+ "acc,none": 0.39116465863453814,
55
+ "acc_stderr,none": 0.009781766322010008,
56
  "alias": " - xnli_sw"
57
  },
58
  "xnli_th": {
59
+ "acc,none": 0.42128514056224897,
60
+ "acc_stderr,none": 0.009897099560589198,
61
  "alias": " - xnli_th"
62
  },
63
  "xnli_tr": {
64
+ "acc,none": 0.4606425702811245,
65
+ "acc_stderr,none": 0.009990976095711894,
66
  "alias": " - xnli_tr"
67
  },
68
  "xnli_ur": {
69
+ "acc,none": 0.41847389558232934,
70
+ "acc_stderr,none": 0.009887951897505937,
71
  "alias": " - xnli_ur"
72
  },
73
  "xnli_vi": {
74
+ "acc,none": 0.40602409638554215,
75
+ "acc_stderr,none": 0.00984346200738422,
76
  "alias": " - xnli_vi"
77
  },
78
  "xnli_zh": {
79
+ "acc,none": 0.3642570281124498,
80
+ "acc_stderr,none": 0.009645667910246843,
81
  "alias": " - xnli_zh"
82
  }
83
  },
84
  "groups": {
85
  "xnli": {
86
+ "acc,none": 0.4419812583668005,
87
+ "acc_stderr,none": 0.05072266385982506,
88
  "alias": "xnli"
89
  }
90
  },
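
The xnli change above is a re-run that only moves the scores in the third or fourth decimal place, but it also shows the schema shared by every results.json in this commit: per-subtask "acc,none" / "acc_stderr,none" pairs under "results", plus a "groups" aggregate whose stderr (about 0.051) is far larger than any single language's (about 0.010), since lm-eval folds the between-subtask spread into the group estimate. A short sketch for pulling those fields out, assuming only the structure visible above:

import json

with open("results.json") as f:  # any results.json in this commit
    data = json.load(f)

for task, metrics in sorted(data["results"].items()):
    if "acc,none" in metrics:
        print(f'{task:10s} acc={metrics["acc,none"]:.4f} '
              f'+/- {metrics["acc_stderr,none"]:.4f}')

# Group-level aggregates are duplicated under a separate "groups" key.
print(data["groups"]["xnli"]["acc,none"])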
lm-eval-output/RWKV/v6-Finch-7B-HF/xnli/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:487567429f9e88ad57f89771c926406562f87178736ff495bc3d749f45d07926
- size 70357
+ oid sha256:950386625b020e188469729baf385a8c0e14f0ee1cbcdd15e0ab865ef78f50cd
+ size 35171
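
The taskrun.log files (and the result-jsonl.tar.gz archives added below) are stored via Git LFS, so these diffs are over the three-line pointer file rather than the log itself: a spec version, the sha256 of the actual blob, and its size in bytes. A small parser for that pointer format, shown against the new xnli log pointer:

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "<key> <value>"; see the diff above.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"version": fields["version"],
            "oid": fields["oid"].removeprefix("sha256:"),
            "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:950386625b020e188469729baf385a8c0e14f0ee1cbcdd15e0ab865ef78f50cd
size 35171
"""
print(parse_lfs_pointer(pointer))  # the real log shrank from 70357 to 35171 bytes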
lm-eval-output/m8than/Finch-14B-Continued-10/ai2_arc/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a3bafb4d997aac45abf501d95155726777eb2d1c8a57295fedab9579859d429
+ size 683924
lm-eval-output/m8than/Finch-14B-Continued-10/ai2_arc/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,132 @@
1
+ {
2
+ "results": {
3
+ "ai2_arc": {
4
+ "acc,none": 0.6651634723788049,
5
+ "acc_stderr,none": 0.09757683014091857,
6
+ "acc_norm,none": 0.6660090191657272,
7
+ "acc_norm_stderr,none": 0.08722264440751773,
8
+ "alias": "ai2_arc"
9
+ },
10
+ "arc_challenge": {
11
+ "acc,none": 0.4590443686006826,
12
+ "acc_stderr,none": 0.01456229107360122,
13
+ "acc_norm,none": 0.48208191126279865,
14
+ "acc_norm_stderr,none": 0.014602005585490983,
15
+ "alias": " - arc_challenge"
16
+ },
17
+ "arc_easy": {
18
+ "acc,none": 0.7668350168350169,
19
+ "acc_stderr,none": 0.008676624951179686,
20
+ "acc_norm,none": 0.7567340067340067,
21
+ "acc_norm_stderr,none": 0.008804009846865534,
22
+ "alias": " - arc_easy"
23
+ }
24
+ },
25
+ "groups": {
26
+ "ai2_arc": {
27
+ "acc,none": 0.6651634723788049,
28
+ "acc_stderr,none": 0.09757683014091857,
29
+ "acc_norm,none": 0.6660090191657272,
30
+ "acc_norm_stderr,none": 0.08722264440751773,
31
+ "alias": "ai2_arc"
32
+ }
33
+ },
34
+ "configs": {
35
+ "arc_challenge": {
36
+ "task": "arc_challenge",
37
+ "group": [
38
+ "ai2_arc"
39
+ ],
40
+ "dataset_path": "allenai/ai2_arc",
41
+ "dataset_name": "ARC-Challenge",
42
+ "training_split": "train",
43
+ "validation_split": "validation",
44
+ "test_split": "test",
45
+ "doc_to_text": "Question: {{question}}\nAnswer:",
46
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
47
+ "doc_to_choice": "{{choices.text}}",
48
+ "description": "",
49
+ "target_delimiter": " ",
50
+ "fewshot_delimiter": "\n\n",
51
+ "metric_list": [
52
+ {
53
+ "metric": "acc",
54
+ "aggregation": "mean",
55
+ "higher_is_better": true
56
+ },
57
+ {
58
+ "metric": "acc_norm",
59
+ "aggregation": "mean",
60
+ "higher_is_better": true
61
+ }
62
+ ],
63
+ "output_type": "multiple_choice",
64
+ "repeats": 1,
65
+ "should_decontaminate": true,
66
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:",
67
+ "metadata": {
68
+ "version": 1.0
69
+ }
70
+ },
71
+ "arc_easy": {
72
+ "task": "arc_easy",
73
+ "group": [
74
+ "ai2_arc"
75
+ ],
76
+ "dataset_path": "allenai/ai2_arc",
77
+ "dataset_name": "ARC-Easy",
78
+ "training_split": "train",
79
+ "validation_split": "validation",
80
+ "test_split": "test",
81
+ "doc_to_text": "Question: {{question}}\nAnswer:",
82
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
83
+ "doc_to_choice": "{{choices.text}}",
84
+ "description": "",
85
+ "target_delimiter": " ",
86
+ "fewshot_delimiter": "\n\n",
87
+ "metric_list": [
88
+ {
89
+ "metric": "acc",
90
+ "aggregation": "mean",
91
+ "higher_is_better": true
92
+ },
93
+ {
94
+ "metric": "acc_norm",
95
+ "aggregation": "mean",
96
+ "higher_is_better": true
97
+ }
98
+ ],
99
+ "output_type": "multiple_choice",
100
+ "repeats": 1,
101
+ "should_decontaminate": true,
102
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:",
103
+ "metadata": {
104
+ "version": 1.0
105
+ }
106
+ }
107
+ },
108
+ "versions": {
109
+ "ai2_arc": "N/A",
110
+ "arc_challenge": 1.0,
111
+ "arc_easy": 1.0
112
+ },
113
+ "n-shot": {
114
+ "ai2_arc": 0,
115
+ "arc_challenge": 0,
116
+ "arc_easy": 0
117
+ },
118
+ "config": {
119
+ "model": "hf",
120
+ "model_args": "pretrained=m8than/Finch-14B-Continued-10,dtype=bfloat16,trust_remote_code=True",
121
+ "batch_size": "auto",
122
+ "batch_sizes": [
123
+ 64
124
+ ],
125
+ "device": null,
126
+ "use_cache": null,
127
+ "limit": null,
128
+ "bootstrap_iters": 100000,
129
+ "gen_kwargs": null
130
+ },
131
+ "git_hash": "97a2520"
132
+ }
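
The per-task acc_stderr values in this new file behave like the sample standard error of a mean over 0/1 scores, sqrt(p(1-p)/(n-1)). As a sanity check against the reported numbers, using the usual ARC test-split sizes (1,172 challenge and 2,376 easy questions; the counts are not stated in the file itself):

from math import sqrt

def mean_stderr(p: float, n: int) -> float:
    # Sample standard error of the mean of n Bernoulli(p) scores.
    return sqrt(p * (1 - p) / (n - 1))

print(mean_stderr(0.4590443686006826, 1172))  # ~0.0145623, matches arc_challenge
print(mean_stderr(0.7668350168350169, 2376))  # ~0.0086766, matches arc_easy

The group-level ai2_arc stderr (about 0.098) is much larger for the same reason as xnli's: it pools the spread between the two subtasks.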
lm-eval-output/m8than/Finch-14B-Continued-10/ai2_arc/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8acea2dbceb70318aa8672cd91395169df6d38436d827bf17c6d4dbe7b1f1da
+ size 15844
lm-eval-output/m8than/Finch-14B-Continued-10/anli/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b21dc663dd230a6d0b03b9a015f59a040b5305829cec2563a7f86bb6dac49fd8
+ size 1082861
lm-eval-output/m8than/Finch-14B-Continued-10/anli/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,161 @@
+ {
+ "results": {
+ "anli": {
+ "acc,none": 0.5459375,
+ "acc_stderr,none": 0.046057318730907466,
+ "alias": "anli"
+ },
+ "anli_r1": {
+ "acc,none": 0.639,
+ "acc_stderr,none": 0.015195720118175115,
+ "alias": " - anli_r1"
+ },
+ "anli_r2": {
+ "acc,none": 0.49,
+ "acc_stderr,none": 0.01581613575277321,
+ "alias": " - anli_r2"
+ },
+ "anli_r3": {
+ "acc,none": 0.515,
+ "acc_stderr,none": 0.014433275195211854,
+ "alias": " - anli_r3"
+ }
+ },
+ "groups": {
+ "anli": {
+ "acc,none": 0.5459375,
+ "acc_stderr,none": 0.046057318730907466,
+ "alias": "anli"
+ }
+ },
+ "configs": {
+ "anli_r1": {
+ "task": "anli_r1",
+ "group": [
+ "anli"
+ ],
+ "dataset_path": "anli",
+ "training_split": "train_r1",
+ "validation_split": "dev_r1",
+ "test_split": "test_r1",
+ "doc_to_text": "{{premise}}\nQuestion: {{hypothesis}} True, False, or Neither?\nAnswer:",
+ "doc_to_target": "{{['True', 'Neither', 'False'][label]}}",
+ "doc_to_choice": [
+ "True",
+ "Neither",
+ "False"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "premise",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "anli_r2": {
+ "task": "anli_r2",
+ "group": [
+ "anli"
+ ],
+ "dataset_path": "anli",
+ "training_split": "train_r2",
+ "validation_split": "dev_r2",
+ "test_split": "test_r2",
+ "doc_to_text": "{{premise}}\nQuestion: {{hypothesis}} True, False, or Neither?\nAnswer:",
+ "doc_to_target": "{{['True', 'Neither', 'False'][label]}}",
+ "doc_to_choice": [
+ "True",
+ "Neither",
+ "False"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "premise",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "anli_r3": {
+ "task": "anli_r3",
+ "group": [
+ "anli"
+ ],
+ "dataset_path": "anli",
+ "training_split": "train_r3",
+ "validation_split": "dev_r3",
+ "test_split": "test_r3",
+ "doc_to_text": "{{premise}}\nQuestion: {{hypothesis}} True, False, or Neither?\nAnswer:",
+ "doc_to_target": "{{['True', 'Neither', 'False'][label]}}",
+ "doc_to_choice": [
+ "True",
+ "Neither",
+ "False"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "premise",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "anli": "N/A",
+ "anli_r1": 1.0,
+ "anli_r2": 1.0,
+ "anli_r3": 1.0
+ },
+ "n-shot": {
+ "anli": 0,
+ "anli_r1": 0,
+ "anli_r2": 0,
+ "anli_r3": 0
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=m8than/Finch-14B-Continued-10,dtype=bfloat16,trust_remote_code=True",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null
+ },
+ "git_hash": "97a2520"
+ }
lm-eval-output/m8than/Finch-14B-Continued-10/anli/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88b1775a4b8c8a396f948b580b28cb3f78f8bcb8bdb8d6822c394d7c237a4b9e
+ size 17692
lm-eval-output/m8than/Finch-14B-Continued-10/blimp/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a62f5053b76bd05f8a7247ad11153eef5b360e80ee798c8dc085f6c4dab5d4c5
+ size 4234906
lm-eval-output/m8than/Finch-14B-Continued-10/blimp/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,2249 @@
+ {
+ "results": {
+ "blimp": {
+ "acc,none": 0.844,
+ "acc_stderr,none": 0.13676486091184517,
+ "alias": "blimp"
+ },
+ "blimp_adjunct_island": {
+ "acc,none": 0.912,
+ "acc_stderr,none": 0.008963053962592083,
+ "alias": " - blimp_adjunct_island"
+ },
+ "blimp_anaphor_gender_agreement": {
+ "acc,none": 0.99,
+ "acc_stderr,none": 0.003148000938676768,
+ "alias": " - blimp_anaphor_gender_agreement"
+ },
+ "blimp_anaphor_number_agreement": {
+ "acc,none": 0.993,
+ "acc_stderr,none": 0.0026377941462437586,
+ "alias": " - blimp_anaphor_number_agreement"
+ },
+ "blimp_animate_subject_passive": {
+ "acc,none": 0.83,
+ "acc_stderr,none": 0.011884495834541672,
+ "alias": " - blimp_animate_subject_passive"
+ },
+ "blimp_animate_subject_trans": {
+ "acc,none": 0.902,
+ "acc_stderr,none": 0.009406619184621228,
+ "alias": " - blimp_animate_subject_trans"
+ },
+ "blimp_causative": {
+ "acc,none": 0.789,
+ "acc_stderr,none": 0.012909130321042092,
+ "alias": " - blimp_causative"
+ },
+ "blimp_complex_NP_island": {
+ "acc,none": 0.628,
+ "acc_stderr,none": 0.015292149942040577,
+ "alias": " - blimp_complex_NP_island"
+ },
+ "blimp_coordinate_structure_constraint_complex_left_branch": {
+ "acc,none": 0.779,
+ "acc_stderr,none": 0.01312750285969626,
+ "alias": " - blimp_coordinate_structure_constraint_complex_left_branch"
+ },
+ "blimp_coordinate_structure_constraint_object_extraction": {
+ "acc,none": 0.892,
+ "acc_stderr,none": 0.009820001651345714,
+ "alias": " - blimp_coordinate_structure_constraint_object_extraction"
+ },
+ "blimp_determiner_noun_agreement_1": {
+ "acc,none": 0.994,
+ "acc_stderr,none": 0.0024433521993298198,
+ "alias": " - blimp_determiner_noun_agreement_1"
+ },
+ "blimp_determiner_noun_agreement_2": {
+ "acc,none": 0.989,
+ "acc_stderr,none": 0.003299983316607817,
+ "alias": " - blimp_determiner_noun_agreement_2"
+ },
+ "blimp_determiner_noun_agreement_irregular_1": {
+ "acc,none": 0.965,
+ "acc_stderr,none": 0.005814534272734934,
+ "alias": " - blimp_determiner_noun_agreement_irregular_1"
+ },
+ "blimp_determiner_noun_agreement_irregular_2": {
+ "acc,none": 0.956,
+ "acc_stderr,none": 0.006488921798427418,
+ "alias": " - blimp_determiner_noun_agreement_irregular_2"
+ },
+ "blimp_determiner_noun_agreement_with_adj_2": {
+ "acc,none": 0.97,
+ "acc_stderr,none": 0.0053971408290991955,
+ "alias": " - blimp_determiner_noun_agreement_with_adj_2"
+ },
+ "blimp_determiner_noun_agreement_with_adj_irregular_1": {
+ "acc,none": 0.938,
+ "acc_stderr,none": 0.007629823996280306,
+ "alias": " - blimp_determiner_noun_agreement_with_adj_irregular_1"
+ },
+ "blimp_determiner_noun_agreement_with_adj_irregular_2": {
+ "acc,none": 0.928,
+ "acc_stderr,none": 0.008178195576218681,
+ "alias": " - blimp_determiner_noun_agreement_with_adj_irregular_2"
+ },
+ "blimp_determiner_noun_agreement_with_adjective_1": {
+ "acc,none": 0.986,
+ "acc_stderr,none": 0.0037172325482565743,
+ "alias": " - blimp_determiner_noun_agreement_with_adjective_1"
+ },
+ "blimp_distractor_agreement_relational_noun": {
+ "acc,none": 0.945,
+ "acc_stderr,none": 0.0072129762946392395,
+ "alias": " - blimp_distractor_agreement_relational_noun"
+ },
+ "blimp_distractor_agreement_relative_clause": {
+ "acc,none": 0.871,
+ "acc_stderr,none": 0.010605256784796558,
+ "alias": " - blimp_distractor_agreement_relative_clause"
+ },
+ "blimp_drop_argument": {
+ "acc,none": 0.789,
+ "acc_stderr,none": 0.012909130321042095,
+ "alias": " - blimp_drop_argument"
+ },
+ "blimp_ellipsis_n_bar_1": {
+ "acc,none": 0.802,
+ "acc_stderr,none": 0.01260773393417531,
+ "alias": " - blimp_ellipsis_n_bar_1"
+ },
+ "blimp_ellipsis_n_bar_2": {
+ "acc,none": 0.959,
+ "acc_stderr,none": 0.006273624021118792,
+ "alias": " - blimp_ellipsis_n_bar_2"
+ },
+ "blimp_existential_there_object_raising": {
+ "acc,none": 0.831,
+ "acc_stderr,none": 0.011856625977890117,
+ "alias": " - blimp_existential_there_object_raising"
+ },
+ "blimp_existential_there_quantifiers_1": {
+ "acc,none": 0.998,
+ "acc_stderr,none": 0.001413505570557794,
+ "alias": " - blimp_existential_there_quantifiers_1"
+ },
+ "blimp_existential_there_quantifiers_2": {
+ "acc,none": 0.361,
+ "acc_stderr,none": 0.015195720118175129,
+ "alias": " - blimp_existential_there_quantifiers_2"
+ },
+ "blimp_existential_there_subject_raising": {
+ "acc,none": 0.904,
+ "acc_stderr,none": 0.009320454434783222,
+ "alias": " - blimp_existential_there_subject_raising"
+ },
+ "blimp_expletive_it_object_raising": {
+ "acc,none": 0.797,
+ "acc_stderr,none": 0.012726073744598285,
+ "alias": " - blimp_expletive_it_object_raising"
+ },
+ "blimp_inchoative": {
+ "acc,none": 0.734,
+ "acc_stderr,none": 0.013979965645145143,
+ "alias": " - blimp_inchoative"
+ },
+ "blimp_intransitive": {
+ "acc,none": 0.862,
+ "acc_stderr,none": 0.010912152632504387,
+ "alias": " - blimp_intransitive"
+ },
+ "blimp_irregular_past_participle_adjectives": {
+ "acc,none": 0.876,
+ "acc_stderr,none": 0.010427498872343961,
+ "alias": " - blimp_irregular_past_participle_adjectives"
+ },
+ "blimp_irregular_past_participle_verbs": {
+ "acc,none": 0.908,
+ "acc_stderr,none": 0.009144376393151118,
+ "alias": " - blimp_irregular_past_participle_verbs"
+ },
+ "blimp_irregular_plural_subject_verb_agreement_1": {
+ "acc,none": 0.947,
+ "acc_stderr,none": 0.007088105617246447,
+ "alias": " - blimp_irregular_plural_subject_verb_agreement_1"
+ },
+ "blimp_irregular_plural_subject_verb_agreement_2": {
+ "acc,none": 0.939,
+ "acc_stderr,none": 0.007572076091557422,
+ "alias": " - blimp_irregular_plural_subject_verb_agreement_2"
+ },
+ "blimp_left_branch_island_echo_question": {
+ "acc,none": 0.678,
+ "acc_stderr,none": 0.014782913600996662,
+ "alias": " - blimp_left_branch_island_echo_question"
+ },
+ "blimp_left_branch_island_simple_question": {
+ "acc,none": 0.892,
+ "acc_stderr,none": 0.009820001651345694,
+ "alias": " - blimp_left_branch_island_simple_question"
+ },
+ "blimp_matrix_question_npi_licensor_present": {
+ "acc,none": 0.603,
+ "acc_stderr,none": 0.015480007449307989,
+ "alias": " - blimp_matrix_question_npi_licensor_present"
+ },
+ "blimp_npi_present_1": {
+ "acc,none": 0.653,
+ "acc_stderr,none": 0.015060472031706625,
+ "alias": " - blimp_npi_present_1"
+ },
+ "blimp_npi_present_2": {
+ "acc,none": 0.692,
+ "acc_stderr,none": 0.01460648312734276,
+ "alias": " - blimp_npi_present_2"
+ },
+ "blimp_only_npi_licensor_present": {
+ "acc,none": 0.887,
+ "acc_stderr,none": 0.010016552866696863,
+ "alias": " - blimp_only_npi_licensor_present"
+ },
+ "blimp_only_npi_scope": {
+ "acc,none": 0.763,
+ "acc_stderr,none": 0.01345407046257795,
+ "alias": " - blimp_only_npi_scope"
+ },
+ "blimp_passive_1": {
+ "acc,none": 0.902,
+ "acc_stderr,none": 0.009406619184621214,
+ "alias": " - blimp_passive_1"
+ },
+ "blimp_passive_2": {
+ "acc,none": 0.918,
+ "acc_stderr,none": 0.008680515615523715,
+ "alias": " - blimp_passive_2"
+ },
+ "blimp_principle_A_c_command": {
+ "acc,none": 0.804,
+ "acc_stderr,none": 0.012559527926707373,
+ "alias": " - blimp_principle_A_c_command"
+ },
+ "blimp_principle_A_case_1": {
+ "acc,none": 1.0,
+ "acc_stderr,none": 0.0,
+ "alias": " - blimp_principle_A_case_1"
+ },
+ "blimp_principle_A_case_2": {
+ "acc,none": 0.952,
+ "acc_stderr,none": 0.006763264133666695,
+ "alias": " - blimp_principle_A_case_2"
+ },
+ "blimp_principle_A_domain_1": {
+ "acc,none": 0.973,
+ "acc_stderr,none": 0.00512808904927529,
+ "alias": " - blimp_principle_A_domain_1"
+ },
+ "blimp_principle_A_domain_2": {
+ "acc,none": 0.884,
+ "acc_stderr,none": 0.010131468138756998,
+ "alias": " - blimp_principle_A_domain_2"
+ },
+ "blimp_principle_A_domain_3": {
+ "acc,none": 0.753,
+ "acc_stderr,none": 0.01364467578131413,
+ "alias": " - blimp_principle_A_domain_3"
+ },
+ "blimp_principle_A_reconstruction": {
+ "acc,none": 0.702,
+ "acc_stderr,none": 0.014470846741134715,
+ "alias": " - blimp_principle_A_reconstruction"
+ },
+ "blimp_regular_plural_subject_verb_agreement_1": {
+ "acc,none": 0.969,
+ "acc_stderr,none": 0.005483527064679195,
+ "alias": " - blimp_regular_plural_subject_verb_agreement_1"
+ },
+ "blimp_regular_plural_subject_verb_agreement_2": {
+ "acc,none": 0.925,
+ "acc_stderr,none": 0.008333333333333335,
+ "alias": " - blimp_regular_plural_subject_verb_agreement_2"
+ },
+ "blimp_sentential_negation_npi_licensor_present": {
+ "acc,none": 0.998,
+ "acc_stderr,none": 0.0014135055705578026,
+ "alias": " - blimp_sentential_negation_npi_licensor_present"
+ },
+ "blimp_sentential_negation_npi_scope": {
+ "acc,none": 0.656,
+ "acc_stderr,none": 0.015029633724408945,
+ "alias": " - blimp_sentential_negation_npi_scope"
+ },
+ "blimp_sentential_subject_island": {
+ "acc,none": 0.523,
+ "acc_stderr,none": 0.015802554246726094,
+ "alias": " - blimp_sentential_subject_island"
+ },
+ "blimp_superlative_quantifiers_1": {
+ "acc,none": 0.737,
+ "acc_stderr,none": 0.01392928659425975,
+ "alias": " - blimp_superlative_quantifiers_1"
+ },
+ "blimp_superlative_quantifiers_2": {
+ "acc,none": 0.928,
+ "acc_stderr,none": 0.008178195576218681,
+ "alias": " - blimp_superlative_quantifiers_2"
+ },
+ "blimp_tough_vs_raising_1": {
+ "acc,none": 0.717,
+ "acc_stderr,none": 0.014251810906481744,
+ "alias": " - blimp_tough_vs_raising_1"
+ },
+ "blimp_tough_vs_raising_2": {
+ "acc,none": 0.9,
+ "acc_stderr,none": 0.009491579957525044,
+ "alias": " - blimp_tough_vs_raising_2"
+ },
+ "blimp_transitive": {
+ "acc,none": 0.924,
+ "acc_stderr,none": 0.008384169266796387,
+ "alias": " - blimp_transitive"
+ },
+ "blimp_wh_island": {
+ "acc,none": 0.774,
+ "acc_stderr,none": 0.01323250161908533,
+ "alias": " - blimp_wh_island"
+ },
+ "blimp_wh_questions_object_gap": {
+ "acc,none": 0.868,
+ "acc_stderr,none": 0.010709373963528033,
+ "alias": " - blimp_wh_questions_object_gap"
+ },
+ "blimp_wh_questions_subject_gap": {
+ "acc,none": 0.953,
+ "acc_stderr,none": 0.006695956678163042,
+ "alias": " - blimp_wh_questions_subject_gap"
+ },
+ "blimp_wh_questions_subject_gap_long_distance": {
+ "acc,none": 0.946,
+ "acc_stderr,none": 0.007150883521295437,
+ "alias": " - blimp_wh_questions_subject_gap_long_distance"
+ },
+ "blimp_wh_vs_that_no_gap": {
+ "acc,none": 0.985,
+ "acc_stderr,none": 0.0038457495745030006,
+ "alias": " - blimp_wh_vs_that_no_gap"
+ },
+ "blimp_wh_vs_that_no_gap_long_distance": {
+ "acc,none": 0.979,
+ "acc_stderr,none": 0.0045364721513064974,
+ "alias": " - blimp_wh_vs_that_no_gap_long_distance"
+ },
+ "blimp_wh_vs_that_with_gap": {
+ "acc,none": 0.412,
+ "acc_stderr,none": 0.0155723632920151,
+ "alias": " - blimp_wh_vs_that_with_gap"
+ },
+ "blimp_wh_vs_that_with_gap_long_distance": {
+ "acc,none": 0.334,
+ "acc_stderr,none": 0.014922019523732963,
+ "alias": " - blimp_wh_vs_that_with_gap_long_distance"
+ }
+ },
+ "groups": {
+ "blimp": {
+ "acc,none": 0.844,
+ "acc_stderr,none": 0.13676486091184517,
+ "alias": "blimp"
+ }
+ },
+ "configs": {
+ "blimp_adjunct_island": {
+ "task": "blimp_adjunct_island",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "adjunct_island",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_anaphor_gender_agreement": {
+ "task": "blimp_anaphor_gender_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_gender_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_anaphor_number_agreement": {
+ "task": "blimp_anaphor_number_agreement",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "anaphor_number_agreement",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_animate_subject_passive": {
+ "task": "blimp_animate_subject_passive",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "animate_subject_passive",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_animate_subject_trans": {
+ "task": "blimp_animate_subject_trans",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "animate_subject_trans",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_causative": {
+ "task": "blimp_causative",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "causative",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_complex_NP_island": {
+ "task": "blimp_complex_NP_island",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "complex_NP_island",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_coordinate_structure_constraint_complex_left_branch": {
+ "task": "blimp_coordinate_structure_constraint_complex_left_branch",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "coordinate_structure_constraint_complex_left_branch",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_coordinate_structure_constraint_object_extraction": {
+ "task": "blimp_coordinate_structure_constraint_object_extraction",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "coordinate_structure_constraint_object_extraction",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_determiner_noun_agreement_1": {
+ "task": "blimp_determiner_noun_agreement_1",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "determiner_noun_agreement_1",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_determiner_noun_agreement_2": {
+ "task": "blimp_determiner_noun_agreement_2",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "determiner_noun_agreement_2",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_determiner_noun_agreement_irregular_1": {
+ "task": "blimp_determiner_noun_agreement_irregular_1",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "determiner_noun_agreement_irregular_1",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_determiner_noun_agreement_irregular_2": {
+ "task": "blimp_determiner_noun_agreement_irregular_2",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "determiner_noun_agreement_irregular_2",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_determiner_noun_agreement_with_adj_2": {
+ "task": "blimp_determiner_noun_agreement_with_adj_2",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "determiner_noun_agreement_with_adj_2",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_determiner_noun_agreement_with_adj_irregular_1": {
+ "task": "blimp_determiner_noun_agreement_with_adj_irregular_1",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "determiner_noun_agreement_with_adj_irregular_1",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_determiner_noun_agreement_with_adj_irregular_2": {
+ "task": "blimp_determiner_noun_agreement_with_adj_irregular_2",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "determiner_noun_agreement_with_adj_irregular_2",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_determiner_noun_agreement_with_adjective_1": {
+ "task": "blimp_determiner_noun_agreement_with_adjective_1",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "determiner_noun_agreement_with_adjective_1",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_distractor_agreement_relational_noun": {
+ "task": "blimp_distractor_agreement_relational_noun",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "distractor_agreement_relational_noun",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_distractor_agreement_relative_clause": {
+ "task": "blimp_distractor_agreement_relative_clause",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "distractor_agreement_relative_clause",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_drop_argument": {
+ "task": "blimp_drop_argument",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "drop_argument",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_ellipsis_n_bar_1": {
+ "task": "blimp_ellipsis_n_bar_1",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "ellipsis_n_bar_1",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_ellipsis_n_bar_2": {
+ "task": "blimp_ellipsis_n_bar_2",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "ellipsis_n_bar_2",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_existential_there_object_raising": {
+ "task": "blimp_existential_there_object_raising",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "existential_there_object_raising",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_existential_there_quantifiers_1": {
+ "task": "blimp_existential_there_quantifiers_1",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "existential_there_quantifiers_1",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_existential_there_quantifiers_2": {
+ "task": "blimp_existential_there_quantifiers_2",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "existential_there_quantifiers_2",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_existential_there_subject_raising": {
+ "task": "blimp_existential_there_subject_raising",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "existential_there_subject_raising",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_expletive_it_object_raising": {
+ "task": "blimp_expletive_it_object_raising",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "expletive_it_object_raising",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_inchoative": {
+ "task": "blimp_inchoative",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "inchoative",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_intransitive": {
+ "task": "blimp_intransitive",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "intransitive",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_irregular_past_participle_adjectives": {
+ "task": "blimp_irregular_past_participle_adjectives",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "irregular_past_participle_adjectives",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_irregular_past_participle_verbs": {
+ "task": "blimp_irregular_past_participle_verbs",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "irregular_past_participle_verbs",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_irregular_plural_subject_verb_agreement_1": {
+ "task": "blimp_irregular_plural_subject_verb_agreement_1",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "irregular_plural_subject_verb_agreement_1",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_irregular_plural_subject_verb_agreement_2": {
+ "task": "blimp_irregular_plural_subject_verb_agreement_2",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "irregular_plural_subject_verb_agreement_2",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_left_branch_island_echo_question": {
+ "task": "blimp_left_branch_island_echo_question",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "left_branch_island_echo_question",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_left_branch_island_simple_question": {
+ "task": "blimp_left_branch_island_simple_question",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "left_branch_island_simple_question",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_matrix_question_npi_licensor_present": {
+ "task": "blimp_matrix_question_npi_licensor_present",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "matrix_question_npi_licensor_present",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_npi_present_1": {
+ "task": "blimp_npi_present_1",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "npi_present_1",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_npi_present_2": {
+ "task": "blimp_npi_present_2",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "npi_present_2",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_only_npi_licensor_present": {
+ "task": "blimp_only_npi_licensor_present",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "only_npi_licensor_present",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_only_npi_scope": {
+ "task": "blimp_only_npi_scope",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "only_npi_scope",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_passive_1": {
+ "task": "blimp_passive_1",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "passive_1",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_passive_2": {
+ "task": "blimp_passive_2",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "passive_2",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_principle_A_c_command": {
+ "task": "blimp_principle_A_c_command",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "principle_A_c_command",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_principle_A_case_1": {
+ "task": "blimp_principle_A_case_1",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "principle_A_case_1",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_principle_A_case_2": {
+ "task": "blimp_principle_A_case_2",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "principle_A_case_2",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_principle_A_domain_1": {
+ "task": "blimp_principle_A_domain_1",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "principle_A_domain_1",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_principle_A_domain_2": {
+ "task": "blimp_principle_A_domain_2",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "principle_A_domain_2",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_principle_A_domain_3": {
+ "task": "blimp_principle_A_domain_3",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "principle_A_domain_3",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_principle_A_reconstruction": {
+ "task": "blimp_principle_A_reconstruction",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "principle_A_reconstruction",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "blimp_regular_plural_subject_verb_agreement_1": {
+ "task": "blimp_regular_plural_subject_verb_agreement_1",
+ "group": "blimp",
+ "dataset_path": "blimp",
+ "dataset_name": "regular_plural_subject_verb_agreement_1",
+ "validation_split": "train",
+ "doc_to_text": "",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
1639
+ "metric_list": [
1640
+ {
1641
+ "metric": "acc"
1642
+ }
1643
+ ],
1644
+ "output_type": "multiple_choice",
1645
+ "repeats": 1,
1646
+ "should_decontaminate": true,
1647
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1648
+ "metadata": {
1649
+ "version": 1.0
1650
+ }
1651
+ },
1652
+ "blimp_regular_plural_subject_verb_agreement_2": {
1653
+ "task": "blimp_regular_plural_subject_verb_agreement_2",
1654
+ "group": "blimp",
1655
+ "dataset_path": "blimp",
1656
+ "dataset_name": "regular_plural_subject_verb_agreement_2",
1657
+ "validation_split": "train",
1658
+ "doc_to_text": "",
1659
+ "doc_to_target": 0,
1660
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1661
+ "description": "",
1662
+ "target_delimiter": " ",
1663
+ "fewshot_delimiter": "\n\n",
1664
+ "num_fewshot": 0,
1665
+ "metric_list": [
1666
+ {
1667
+ "metric": "acc"
1668
+ }
1669
+ ],
1670
+ "output_type": "multiple_choice",
1671
+ "repeats": 1,
1672
+ "should_decontaminate": true,
1673
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1674
+ "metadata": {
1675
+ "version": 1.0
1676
+ }
1677
+ },
1678
+ "blimp_sentential_negation_npi_licensor_present": {
1679
+ "task": "blimp_sentential_negation_npi_licensor_present",
1680
+ "group": "blimp",
1681
+ "dataset_path": "blimp",
1682
+ "dataset_name": "sentential_negation_npi_licensor_present",
1683
+ "validation_split": "train",
1684
+ "doc_to_text": "",
1685
+ "doc_to_target": 0,
1686
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1687
+ "description": "",
1688
+ "target_delimiter": " ",
1689
+ "fewshot_delimiter": "\n\n",
1690
+ "num_fewshot": 0,
1691
+ "metric_list": [
1692
+ {
1693
+ "metric": "acc"
1694
+ }
1695
+ ],
1696
+ "output_type": "multiple_choice",
1697
+ "repeats": 1,
1698
+ "should_decontaminate": true,
1699
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1700
+ "metadata": {
1701
+ "version": 1.0
1702
+ }
1703
+ },
1704
+ "blimp_sentential_negation_npi_scope": {
1705
+ "task": "blimp_sentential_negation_npi_scope",
1706
+ "group": "blimp",
1707
+ "dataset_path": "blimp",
1708
+ "dataset_name": "sentential_negation_npi_scope",
1709
+ "validation_split": "train",
1710
+ "doc_to_text": "",
1711
+ "doc_to_target": 0,
1712
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1713
+ "description": "",
1714
+ "target_delimiter": " ",
1715
+ "fewshot_delimiter": "\n\n",
1716
+ "num_fewshot": 0,
1717
+ "metric_list": [
1718
+ {
1719
+ "metric": "acc"
1720
+ }
1721
+ ],
1722
+ "output_type": "multiple_choice",
1723
+ "repeats": 1,
1724
+ "should_decontaminate": true,
1725
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1726
+ "metadata": {
1727
+ "version": 1.0
1728
+ }
1729
+ },
1730
+ "blimp_sentential_subject_island": {
1731
+ "task": "blimp_sentential_subject_island",
1732
+ "group": "blimp",
1733
+ "dataset_path": "blimp",
1734
+ "dataset_name": "sentential_subject_island",
1735
+ "validation_split": "train",
1736
+ "doc_to_text": "",
1737
+ "doc_to_target": 0,
1738
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1739
+ "description": "",
1740
+ "target_delimiter": " ",
1741
+ "fewshot_delimiter": "\n\n",
1742
+ "num_fewshot": 0,
1743
+ "metric_list": [
1744
+ {
1745
+ "metric": "acc"
1746
+ }
1747
+ ],
1748
+ "output_type": "multiple_choice",
1749
+ "repeats": 1,
1750
+ "should_decontaminate": true,
1751
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1752
+ "metadata": {
1753
+ "version": 1.0
1754
+ }
1755
+ },
1756
+ "blimp_superlative_quantifiers_1": {
1757
+ "task": "blimp_superlative_quantifiers_1",
1758
+ "group": "blimp",
1759
+ "dataset_path": "blimp",
1760
+ "dataset_name": "superlative_quantifiers_1",
1761
+ "validation_split": "train",
1762
+ "doc_to_text": "",
1763
+ "doc_to_target": 0,
1764
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1765
+ "description": "",
1766
+ "target_delimiter": " ",
1767
+ "fewshot_delimiter": "\n\n",
1768
+ "num_fewshot": 0,
1769
+ "metric_list": [
1770
+ {
1771
+ "metric": "acc"
1772
+ }
1773
+ ],
1774
+ "output_type": "multiple_choice",
1775
+ "repeats": 1,
1776
+ "should_decontaminate": true,
1777
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1778
+ "metadata": {
1779
+ "version": 1.0
1780
+ }
1781
+ },
1782
+ "blimp_superlative_quantifiers_2": {
1783
+ "task": "blimp_superlative_quantifiers_2",
1784
+ "group": "blimp",
1785
+ "dataset_path": "blimp",
1786
+ "dataset_name": "superlative_quantifiers_2",
1787
+ "validation_split": "train",
1788
+ "doc_to_text": "",
1789
+ "doc_to_target": 0,
1790
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1791
+ "description": "",
1792
+ "target_delimiter": " ",
1793
+ "fewshot_delimiter": "\n\n",
1794
+ "num_fewshot": 0,
1795
+ "metric_list": [
1796
+ {
1797
+ "metric": "acc"
1798
+ }
1799
+ ],
1800
+ "output_type": "multiple_choice",
1801
+ "repeats": 1,
1802
+ "should_decontaminate": true,
1803
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1804
+ "metadata": {
1805
+ "version": 1.0
1806
+ }
1807
+ },
1808
+ "blimp_tough_vs_raising_1": {
1809
+ "task": "blimp_tough_vs_raising_1",
1810
+ "group": "blimp",
1811
+ "dataset_path": "blimp",
1812
+ "dataset_name": "tough_vs_raising_1",
1813
+ "validation_split": "train",
1814
+ "doc_to_text": "",
1815
+ "doc_to_target": 0,
1816
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1817
+ "description": "",
1818
+ "target_delimiter": " ",
1819
+ "fewshot_delimiter": "\n\n",
1820
+ "num_fewshot": 0,
1821
+ "metric_list": [
1822
+ {
1823
+ "metric": "acc"
1824
+ }
1825
+ ],
1826
+ "output_type": "multiple_choice",
1827
+ "repeats": 1,
1828
+ "should_decontaminate": true,
1829
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1830
+ "metadata": {
1831
+ "version": 1.0
1832
+ }
1833
+ },
1834
+ "blimp_tough_vs_raising_2": {
1835
+ "task": "blimp_tough_vs_raising_2",
1836
+ "group": "blimp",
1837
+ "dataset_path": "blimp",
1838
+ "dataset_name": "tough_vs_raising_2",
1839
+ "validation_split": "train",
1840
+ "doc_to_text": "",
1841
+ "doc_to_target": 0,
1842
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1843
+ "description": "",
1844
+ "target_delimiter": " ",
1845
+ "fewshot_delimiter": "\n\n",
1846
+ "num_fewshot": 0,
1847
+ "metric_list": [
1848
+ {
1849
+ "metric": "acc"
1850
+ }
1851
+ ],
1852
+ "output_type": "multiple_choice",
1853
+ "repeats": 1,
1854
+ "should_decontaminate": true,
1855
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1856
+ "metadata": {
1857
+ "version": 1.0
1858
+ }
1859
+ },
1860
+ "blimp_transitive": {
1861
+ "task": "blimp_transitive",
1862
+ "group": "blimp",
1863
+ "dataset_path": "blimp",
1864
+ "dataset_name": "transitive",
1865
+ "validation_split": "train",
1866
+ "doc_to_text": "",
1867
+ "doc_to_target": 0,
1868
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1869
+ "description": "",
1870
+ "target_delimiter": " ",
1871
+ "fewshot_delimiter": "\n\n",
1872
+ "num_fewshot": 0,
1873
+ "metric_list": [
1874
+ {
1875
+ "metric": "acc"
1876
+ }
1877
+ ],
1878
+ "output_type": "multiple_choice",
1879
+ "repeats": 1,
1880
+ "should_decontaminate": true,
1881
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1882
+ "metadata": {
1883
+ "version": 1.0
1884
+ }
1885
+ },
1886
+ "blimp_wh_island": {
1887
+ "task": "blimp_wh_island",
1888
+ "group": "blimp",
1889
+ "dataset_path": "blimp",
1890
+ "dataset_name": "wh_island",
1891
+ "validation_split": "train",
1892
+ "doc_to_text": "",
1893
+ "doc_to_target": 0,
1894
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1895
+ "description": "",
1896
+ "target_delimiter": " ",
1897
+ "fewshot_delimiter": "\n\n",
1898
+ "num_fewshot": 0,
1899
+ "metric_list": [
1900
+ {
1901
+ "metric": "acc"
1902
+ }
1903
+ ],
1904
+ "output_type": "multiple_choice",
1905
+ "repeats": 1,
1906
+ "should_decontaminate": true,
1907
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1908
+ "metadata": {
1909
+ "version": 1.0
1910
+ }
1911
+ },
1912
+ "blimp_wh_questions_object_gap": {
1913
+ "task": "blimp_wh_questions_object_gap",
1914
+ "group": "blimp",
1915
+ "dataset_path": "blimp",
1916
+ "dataset_name": "wh_questions_object_gap",
1917
+ "validation_split": "train",
1918
+ "doc_to_text": "",
1919
+ "doc_to_target": 0,
1920
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1921
+ "description": "",
1922
+ "target_delimiter": " ",
1923
+ "fewshot_delimiter": "\n\n",
1924
+ "num_fewshot": 0,
1925
+ "metric_list": [
1926
+ {
1927
+ "metric": "acc"
1928
+ }
1929
+ ],
1930
+ "output_type": "multiple_choice",
1931
+ "repeats": 1,
1932
+ "should_decontaminate": true,
1933
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1934
+ "metadata": {
1935
+ "version": 1.0
1936
+ }
1937
+ },
1938
+ "blimp_wh_questions_subject_gap": {
1939
+ "task": "blimp_wh_questions_subject_gap",
1940
+ "group": "blimp",
1941
+ "dataset_path": "blimp",
1942
+ "dataset_name": "wh_questions_subject_gap",
1943
+ "validation_split": "train",
1944
+ "doc_to_text": "",
1945
+ "doc_to_target": 0,
1946
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1947
+ "description": "",
1948
+ "target_delimiter": " ",
1949
+ "fewshot_delimiter": "\n\n",
1950
+ "num_fewshot": 0,
1951
+ "metric_list": [
1952
+ {
1953
+ "metric": "acc"
1954
+ }
1955
+ ],
1956
+ "output_type": "multiple_choice",
1957
+ "repeats": 1,
1958
+ "should_decontaminate": true,
1959
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1960
+ "metadata": {
1961
+ "version": 1.0
1962
+ }
1963
+ },
1964
+ "blimp_wh_questions_subject_gap_long_distance": {
1965
+ "task": "blimp_wh_questions_subject_gap_long_distance",
1966
+ "group": "blimp",
1967
+ "dataset_path": "blimp",
1968
+ "dataset_name": "wh_questions_subject_gap_long_distance",
1969
+ "validation_split": "train",
1970
+ "doc_to_text": "",
1971
+ "doc_to_target": 0,
1972
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1973
+ "description": "",
1974
+ "target_delimiter": " ",
1975
+ "fewshot_delimiter": "\n\n",
1976
+ "num_fewshot": 0,
1977
+ "metric_list": [
1978
+ {
1979
+ "metric": "acc"
1980
+ }
1981
+ ],
1982
+ "output_type": "multiple_choice",
1983
+ "repeats": 1,
1984
+ "should_decontaminate": true,
1985
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
1986
+ "metadata": {
1987
+ "version": 1.0
1988
+ }
1989
+ },
1990
+ "blimp_wh_vs_that_no_gap": {
1991
+ "task": "blimp_wh_vs_that_no_gap",
1992
+ "group": "blimp",
1993
+ "dataset_path": "blimp",
1994
+ "dataset_name": "wh_vs_that_no_gap",
1995
+ "validation_split": "train",
1996
+ "doc_to_text": "",
1997
+ "doc_to_target": 0,
1998
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
1999
+ "description": "",
2000
+ "target_delimiter": " ",
2001
+ "fewshot_delimiter": "\n\n",
2002
+ "num_fewshot": 0,
2003
+ "metric_list": [
2004
+ {
2005
+ "metric": "acc"
2006
+ }
2007
+ ],
2008
+ "output_type": "multiple_choice",
2009
+ "repeats": 1,
2010
+ "should_decontaminate": true,
2011
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
2012
+ "metadata": {
2013
+ "version": 1.0
2014
+ }
2015
+ },
2016
+ "blimp_wh_vs_that_no_gap_long_distance": {
2017
+ "task": "blimp_wh_vs_that_no_gap_long_distance",
2018
+ "group": "blimp",
2019
+ "dataset_path": "blimp",
2020
+ "dataset_name": "wh_vs_that_no_gap_long_distance",
2021
+ "validation_split": "train",
2022
+ "doc_to_text": "",
2023
+ "doc_to_target": 0,
2024
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
2025
+ "description": "",
2026
+ "target_delimiter": " ",
2027
+ "fewshot_delimiter": "\n\n",
2028
+ "num_fewshot": 0,
2029
+ "metric_list": [
2030
+ {
2031
+ "metric": "acc"
2032
+ }
2033
+ ],
2034
+ "output_type": "multiple_choice",
2035
+ "repeats": 1,
2036
+ "should_decontaminate": true,
2037
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
2038
+ "metadata": {
2039
+ "version": 1.0
2040
+ }
2041
+ },
2042
+ "blimp_wh_vs_that_with_gap": {
2043
+ "task": "blimp_wh_vs_that_with_gap",
2044
+ "group": "blimp",
2045
+ "dataset_path": "blimp",
2046
+ "dataset_name": "wh_vs_that_with_gap",
2047
+ "validation_split": "train",
2048
+ "doc_to_text": "",
2049
+ "doc_to_target": 0,
2050
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
2051
+ "description": "",
2052
+ "target_delimiter": " ",
2053
+ "fewshot_delimiter": "\n\n",
2054
+ "num_fewshot": 0,
2055
+ "metric_list": [
2056
+ {
2057
+ "metric": "acc"
2058
+ }
2059
+ ],
2060
+ "output_type": "multiple_choice",
2061
+ "repeats": 1,
2062
+ "should_decontaminate": true,
2063
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
2064
+ "metadata": {
2065
+ "version": 1.0
2066
+ }
2067
+ },
2068
+ "blimp_wh_vs_that_with_gap_long_distance": {
2069
+ "task": "blimp_wh_vs_that_with_gap_long_distance",
2070
+ "group": "blimp",
2071
+ "dataset_path": "blimp",
2072
+ "dataset_name": "wh_vs_that_with_gap_long_distance",
2073
+ "validation_split": "train",
2074
+ "doc_to_text": "",
2075
+ "doc_to_target": 0,
2076
+ "doc_to_choice": "{{[sentence_good, sentence_bad]}}",
2077
+ "description": "",
2078
+ "target_delimiter": " ",
2079
+ "fewshot_delimiter": "\n\n",
2080
+ "num_fewshot": 0,
2081
+ "metric_list": [
2082
+ {
2083
+ "metric": "acc"
2084
+ }
2085
+ ],
2086
+ "output_type": "multiple_choice",
2087
+ "repeats": 1,
2088
+ "should_decontaminate": true,
2089
+ "doc_to_decontamination_query": "{{sentence_good}} {{sentence_bad}}",
2090
+ "metadata": {
2091
+ "version": 1.0
2092
+ }
2093
+ }
2094
+ },
2095
+ "versions": {
2096
+ "blimp": "N/A",
2097
+ "blimp_adjunct_island": 1.0,
2098
+ "blimp_anaphor_gender_agreement": 1.0,
2099
+ "blimp_anaphor_number_agreement": 1.0,
2100
+ "blimp_animate_subject_passive": 1.0,
2101
+ "blimp_animate_subject_trans": 1.0,
2102
+ "blimp_causative": 1.0,
2103
+ "blimp_complex_NP_island": 1.0,
2104
+ "blimp_coordinate_structure_constraint_complex_left_branch": 1.0,
2105
+ "blimp_coordinate_structure_constraint_object_extraction": 1.0,
2106
+ "blimp_determiner_noun_agreement_1": 1.0,
2107
+ "blimp_determiner_noun_agreement_2": 1.0,
2108
+ "blimp_determiner_noun_agreement_irregular_1": 1.0,
2109
+ "blimp_determiner_noun_agreement_irregular_2": 1.0,
2110
+ "blimp_determiner_noun_agreement_with_adj_2": 1.0,
2111
+ "blimp_determiner_noun_agreement_with_adj_irregular_1": 1.0,
2112
+ "blimp_determiner_noun_agreement_with_adj_irregular_2": 1.0,
2113
+ "blimp_determiner_noun_agreement_with_adjective_1": 1.0,
2114
+ "blimp_distractor_agreement_relational_noun": 1.0,
2115
+ "blimp_distractor_agreement_relative_clause": 1.0,
2116
+ "blimp_drop_argument": 1.0,
2117
+ "blimp_ellipsis_n_bar_1": 1.0,
2118
+ "blimp_ellipsis_n_bar_2": 1.0,
2119
+ "blimp_existential_there_object_raising": 1.0,
2120
+ "blimp_existential_there_quantifiers_1": 1.0,
2121
+ "blimp_existential_there_quantifiers_2": 1.0,
2122
+ "blimp_existential_there_subject_raising": 1.0,
2123
+ "blimp_expletive_it_object_raising": 1.0,
2124
+ "blimp_inchoative": 1.0,
2125
+ "blimp_intransitive": 1.0,
2126
+ "blimp_irregular_past_participle_adjectives": 1.0,
2127
+ "blimp_irregular_past_participle_verbs": 1.0,
2128
+ "blimp_irregular_plural_subject_verb_agreement_1": 1.0,
2129
+ "blimp_irregular_plural_subject_verb_agreement_2": 1.0,
2130
+ "blimp_left_branch_island_echo_question": 1.0,
2131
+ "blimp_left_branch_island_simple_question": 1.0,
2132
+ "blimp_matrix_question_npi_licensor_present": 1.0,
2133
+ "blimp_npi_present_1": 1.0,
2134
+ "blimp_npi_present_2": 1.0,
2135
+ "blimp_only_npi_licensor_present": 1.0,
2136
+ "blimp_only_npi_scope": 1.0,
2137
+ "blimp_passive_1": 1.0,
2138
+ "blimp_passive_2": 1.0,
2139
+ "blimp_principle_A_c_command": 1.0,
2140
+ "blimp_principle_A_case_1": 1.0,
2141
+ "blimp_principle_A_case_2": 1.0,
2142
+ "blimp_principle_A_domain_1": 1.0,
2143
+ "blimp_principle_A_domain_2": 1.0,
2144
+ "blimp_principle_A_domain_3": 1.0,
2145
+ "blimp_principle_A_reconstruction": 1.0,
2146
+ "blimp_regular_plural_subject_verb_agreement_1": 1.0,
2147
+ "blimp_regular_plural_subject_verb_agreement_2": 1.0,
2148
+ "blimp_sentential_negation_npi_licensor_present": 1.0,
2149
+ "blimp_sentential_negation_npi_scope": 1.0,
2150
+ "blimp_sentential_subject_island": 1.0,
2151
+ "blimp_superlative_quantifiers_1": 1.0,
2152
+ "blimp_superlative_quantifiers_2": 1.0,
2153
+ "blimp_tough_vs_raising_1": 1.0,
2154
+ "blimp_tough_vs_raising_2": 1.0,
2155
+ "blimp_transitive": 1.0,
2156
+ "blimp_wh_island": 1.0,
2157
+ "blimp_wh_questions_object_gap": 1.0,
2158
+ "blimp_wh_questions_subject_gap": 1.0,
2159
+ "blimp_wh_questions_subject_gap_long_distance": 1.0,
2160
+ "blimp_wh_vs_that_no_gap": 1.0,
2161
+ "blimp_wh_vs_that_no_gap_long_distance": 1.0,
2162
+ "blimp_wh_vs_that_with_gap": 1.0,
2163
+ "blimp_wh_vs_that_with_gap_long_distance": 1.0
2164
+ },
2165
+ "n-shot": {
2166
+ "blimp": 0,
2167
+ "blimp_adjunct_island": 0,
2168
+ "blimp_anaphor_gender_agreement": 0,
2169
+ "blimp_anaphor_number_agreement": 0,
2170
+ "blimp_animate_subject_passive": 0,
2171
+ "blimp_animate_subject_trans": 0,
2172
+ "blimp_causative": 0,
2173
+ "blimp_complex_NP_island": 0,
2174
+ "blimp_coordinate_structure_constraint_complex_left_branch": 0,
2175
+ "blimp_coordinate_structure_constraint_object_extraction": 0,
2176
+ "blimp_determiner_noun_agreement_1": 0,
2177
+ "blimp_determiner_noun_agreement_2": 0,
2178
+ "blimp_determiner_noun_agreement_irregular_1": 0,
2179
+ "blimp_determiner_noun_agreement_irregular_2": 0,
2180
+ "blimp_determiner_noun_agreement_with_adj_2": 0,
2181
+ "blimp_determiner_noun_agreement_with_adj_irregular_1": 0,
2182
+ "blimp_determiner_noun_agreement_with_adj_irregular_2": 0,
2183
+ "blimp_determiner_noun_agreement_with_adjective_1": 0,
2184
+ "blimp_distractor_agreement_relational_noun": 0,
2185
+ "blimp_distractor_agreement_relative_clause": 0,
2186
+ "blimp_drop_argument": 0,
2187
+ "blimp_ellipsis_n_bar_1": 0,
2188
+ "blimp_ellipsis_n_bar_2": 0,
2189
+ "blimp_existential_there_object_raising": 0,
2190
+ "blimp_existential_there_quantifiers_1": 0,
2191
+ "blimp_existential_there_quantifiers_2": 0,
2192
+ "blimp_existential_there_subject_raising": 0,
2193
+ "blimp_expletive_it_object_raising": 0,
2194
+ "blimp_inchoative": 0,
2195
+ "blimp_intransitive": 0,
2196
+ "blimp_irregular_past_participle_adjectives": 0,
2197
+ "blimp_irregular_past_participle_verbs": 0,
2198
+ "blimp_irregular_plural_subject_verb_agreement_1": 0,
2199
+ "blimp_irregular_plural_subject_verb_agreement_2": 0,
2200
+ "blimp_left_branch_island_echo_question": 0,
2201
+ "blimp_left_branch_island_simple_question": 0,
2202
+ "blimp_matrix_question_npi_licensor_present": 0,
2203
+ "blimp_npi_present_1": 0,
2204
+ "blimp_npi_present_2": 0,
2205
+ "blimp_only_npi_licensor_present": 0,
2206
+ "blimp_only_npi_scope": 0,
2207
+ "blimp_passive_1": 0,
2208
+ "blimp_passive_2": 0,
2209
+ "blimp_principle_A_c_command": 0,
2210
+ "blimp_principle_A_case_1": 0,
2211
+ "blimp_principle_A_case_2": 0,
2212
+ "blimp_principle_A_domain_1": 0,
2213
+ "blimp_principle_A_domain_2": 0,
2214
+ "blimp_principle_A_domain_3": 0,
2215
+ "blimp_principle_A_reconstruction": 0,
2216
+ "blimp_regular_plural_subject_verb_agreement_1": 0,
2217
+ "blimp_regular_plural_subject_verb_agreement_2": 0,
2218
+ "blimp_sentential_negation_npi_licensor_present": 0,
2219
+ "blimp_sentential_negation_npi_scope": 0,
2220
+ "blimp_sentential_subject_island": 0,
2221
+ "blimp_superlative_quantifiers_1": 0,
2222
+ "blimp_superlative_quantifiers_2": 0,
2223
+ "blimp_tough_vs_raising_1": 0,
2224
+ "blimp_tough_vs_raising_2": 0,
2225
+ "blimp_transitive": 0,
2226
+ "blimp_wh_island": 0,
2227
+ "blimp_wh_questions_object_gap": 0,
2228
+ "blimp_wh_questions_subject_gap": 0,
2229
+ "blimp_wh_questions_subject_gap_long_distance": 0,
2230
+ "blimp_wh_vs_that_no_gap": 0,
2231
+ "blimp_wh_vs_that_no_gap_long_distance": 0,
2232
+ "blimp_wh_vs_that_with_gap": 0,
2233
+ "blimp_wh_vs_that_with_gap_long_distance": 0
2234
+ },
2235
+ "config": {
2236
+ "model": "hf",
2237
+ "model_args": "pretrained=m8than/Finch-14B-Continued-10,dtype=bfloat16,trust_remote_code=True",
2238
+ "batch_size": "auto",
2239
+ "batch_sizes": [
2240
+ 64
2241
+ ],
2242
+ "device": null,
2243
+ "use_cache": null,
2244
+ "limit": null,
2245
+ "bootstrap_iters": 100000,
2246
+ "gen_kwargs": null
2247
+ },
2248
+ "git_hash": "97a2520"
2249
+ }
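Every results.json added in this commit follows the same lm-eval-harness layout: a "results" map from task name to metrics, plus "configs", "versions", "n-shot", and the run "config". A minimal sketch for tabulating per-task accuracy from one of these files; the path is taken from this diff, and only the Python standard library is assumed:

import json
from pathlib import Path

# Example path from this commit; any of the added results.json files
# has the same shape and works the same way.
results_path = Path(
    "lm-eval-output/m8than/Finch-14B-Continued-10/blimp/"
    "dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json"
)
data = json.loads(results_path.read_text())

# Metric keys pair a metric with a filter name, e.g. "acc,none".
for task, metrics in sorted(data["results"].items()):
    acc = metrics.get("acc,none")
    if acc is not None:
        print(f"{task:<60} acc={acc:.4f}")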
lm-eval-output/m8than/Finch-14B-Continued-10/blimp/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e0bc0923c0c60ebe28df88a4d78a8e14c02430d99f038f8eec969e4b95de7b6
+ size 264320
lm-eval-output/m8than/Finch-14B-Continued-10/cmmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c72c24031ba5ae9bbc98a82954626d68b0fcc9fb0eb194ab006e579f1aedb048
+ size 2346172
lm-eval-output/m8than/Finch-14B-Continued-10/cmmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
The diff for this file is too large to render. See raw diff
lm-eval-output/m8than/Finch-14B-Continued-10/cmmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0bf6969c750a384b8791352c5c38000daecd05a5e6b6447eef8a855f7ffe713
+ size 131088
lm-eval-output/m8than/Finch-14B-Continued-10/copa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2fda5c4fa79fdafa6cb9ebf26e3842687fc6bbc56f21a57dae359d2d3a0bc0a
+ size 10176
lm-eval-output/m8than/Finch-14B-Continued-10/copa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "results": {
+ "copa": {
+ "acc,none": 0.87,
+ "acc_stderr,none": 0.033799766898963086,
+ "alias": "copa"
+ }
+ },
+ "configs": {
+ "copa": {
+ "task": "copa",
+ "group": [
+ "super-glue-lm-eval-v1"
+ ],
+ "dataset_path": "super_glue",
+ "dataset_name": "copa",
+ "training_split": "train",
+ "validation_split": "validation",
+ "doc_to_text": "def doc_to_text(doc):\n # Drop the period\n connector = {\n \"cause\": \"because\",\n \"effect\": \"therefore\",\n }[doc[\"question\"]]\n return doc[\"premise\"].strip()[:-1] + f\" {connector}\"\n",
+ "doc_to_target": "def doc_to_target(doc):\n correct_choice = doc[\"choice1\"] if doc[\"label\"] == 0 else doc[\"choice2\"]\n # Connect the sentences\n return \" \" + convert_choice(correct_choice)\n",
+ "doc_to_choice": "def doc_to_choice(doc):\n return [\" \" + convert_choice(doc[\"choice1\"]), \" \" + convert_choice(doc[\"choice2\"])]\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "copa": 1.0
+ },
+ "n-shot": {
+ "copa": 0
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=m8than/Finch-14B-Continued-10,dtype=bfloat16,trust_remote_code=True",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null
+ },
+ "git_hash": "97a2520"
+ }
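The copa config above stores its prompt logic as Python source strings. Unrolled, the formatting works like the sketch below; note that convert_choice is defined elsewhere in the harness, so the first-character-lowercasing version here is an assumption for illustration only.

def convert_choice(choice: str) -> str:
    # Assumed stand-in for the harness helper: lowercase the first character
    # so the continuation reads as a clause rather than a new sentence.
    return choice[0].lower() + choice[1:]

def doc_to_text(doc: dict) -> str:
    # Drop the premise's trailing period and append the causal connector.
    connector = {"cause": "because", "effect": "therefore"}[doc["question"]]
    return doc["premise"].strip()[:-1] + f" {connector}"

doc = {
    "premise": "The man turned on the faucet.",
    "question": "effect",
    "choice1": "The toilet filled with water.",
    "choice2": "Water flowed from the spout.",
}
print(doc_to_text(doc))                      # The man turned on the faucet therefore
print(" " + convert_choice(doc["choice2"]))  # one candidate continuation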
lm-eval-output/m8than/Finch-14B-Continued-10/copa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:655879fd66cf21e8862d5710cac4e5a3a33da6a6f609cb189829a45fb4a2ca04
+ size 17426
lm-eval-output/m8than/Finch-14B-Continued-10/glue/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f4cc588b8f519018e7354d410901927585484261d812063a11058db0afa832e
+ size 8325739
lm-eval-output/m8than/Finch-14B-Continued-10/glue/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,374 @@
+ {
+ "results": {
+ "glue": {
+ "acc,none": 0.6522451167222487,
+ "acc_stderr,none": 0.006846274775420319,
+ "f1,none": 0.6456216077148048,
+ "f1_stderr,none": 0.0002505570191561242,
+ "mcc,none": 0.0,
+ "mcc_stderr,none": 0.0,
+ "alias": "glue"
+ },
+ "cola": {
+ "mcc,none": 0.0,
+ "mcc_stderr,none": 0.0,
+ "alias": " - cola"
+ },
+ "mnli": {
+ "acc,none": 0.801426388181355,
+ "acc_stderr,none": 0.004026888084487691,
+ "alias": " - mnli"
+ },
+ "mnli_mismatch": {
+ "acc,none": 0.7915988608624899,
+ "acc_stderr,none": 0.004096413384733941,
+ "alias": " - mnli_mismatch"
+ },
+ "mrpc": {
+ "acc,none": 0.6887254901960784,
+ "acc_stderr,none": 0.022950790715623736,
+ "f1,none": 0.8140556368960469,
+ "f1_stderr,none": 0.01619265753417425,
+ "alias": " - mrpc"
+ },
+ "qnli": {
+ "acc,none": 0.4946000366099213,
+ "acc_stderr,none": 0.00676501598687746,
+ "alias": " - qnli"
+ },
+ "qqp": {
+ "acc,none": 0.6018550581251546,
+ "acc_stderr,none": 0.0024345576278988323,
+ "f1,none": 0.6441629639454429,
+ "f1_stderr,none": 0.0026231073767726413,
+ "alias": " - qqp"
+ },
+ "rte": {
+ "acc,none": 0.7545126353790613,
+ "acc_stderr,none": 0.025905578160457157,
+ "alias": " - rte"
+ },
+ "sst2": {
+ "acc,none": 0.6869266055045872,
+ "acc_stderr,none": 0.015713364044401386,
+ "alias": " - sst2"
+ },
+ "wnli": {
+ "acc,none": 0.5211267605633803,
+ "acc_stderr,none": 0.05970805879899504,
+ "alias": " - wnli"
+ }
+ },
+ "groups": {
+ "glue": {
+ "acc,none": 0.6522451167222487,
+ "acc_stderr,none": 0.006846274775420319,
+ "f1,none": 0.6456216077148048,
+ "f1_stderr,none": 0.0002505570191561242,
+ "mcc,none": 0.0,
+ "mcc_stderr,none": 0.0,
+ "alias": "glue"
+ }
+ },
+ "configs": {
+ "cola": {
+ "task": "cola",
+ "group": "glue",
+ "dataset_path": "glue",
+ "dataset_name": "cola",
+ "training_split": "train",
+ "validation_split": "validation",
+ "doc_to_text": "{{sentence}}\nQuestion: Does this sentence make sense?\nAnswer:",
+ "doc_to_target": "label",
+ "doc_to_choice": [
+ "no",
+ "yes"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "mcc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "sentence",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "mnli": {
+ "task": "mnli",
+ "group": "glue",
+ "dataset_path": "glue",
+ "dataset_name": "mnli",
+ "training_split": "train",
+ "validation_split": "validation_matched",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n return \"{}\\nQuestion: {} True, False or Neither?\\nAnswer:\".format(\n doc[\"premise\"],\n doc[\"hypothesis\"].strip()\n + (\"\" if doc[\"hypothesis\"].strip().endswith(\".\") else \".\"),\n )\n",
+ "doc_to_target": "label",
+ "doc_to_choice": [
+ "True",
+ "Neither",
+ "False"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "mnli_mismatch": {
+ "task": "mnli_mismatch",
+ "group": "glue",
+ "dataset_path": "glue",
+ "dataset_name": "mnli",
+ "training_split": "train",
+ "validation_split": "validation_mismatched",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n return \"{}\\nQuestion: {} True, False or Neither?\\nAnswer:\".format(\n doc[\"premise\"],\n doc[\"hypothesis\"].strip()\n + (\"\" if doc[\"hypothesis\"].strip().endswith(\".\") else \".\"),\n )\n",
+ "doc_to_target": "label",
+ "doc_to_choice": [
+ "True",
+ "Neither",
+ "False"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "mrpc": {
+ "task": "mrpc",
+ "group": "glue",
+ "dataset_path": "glue",
+ "dataset_name": "mrpc",
+ "training_split": "train",
+ "validation_split": "validation",
+ "doc_to_text": "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do both sentences mean the same thing?\nAnswer:",
+ "doc_to_target": "label",
+ "doc_to_choice": [
+ "no",
+ "yes"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc"
+ },
+ {
+ "metric": "f1"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "qnli": {
+ "task": "qnli",
+ "group": "glue",
+ "dataset_path": "glue",
+ "dataset_name": "qnli",
+ "training_split": "train",
+ "validation_split": "validation",
+ "doc_to_text": "{{question}}\n{{sentence}}\nQuestion: Does this response answer the question?\nAnswer:",
+ "doc_to_target": "label",
+ "doc_to_choice": [
+ "yes",
+ "no"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "qqp": {
+ "task": "qqp",
+ "group": "glue",
+ "dataset_path": "glue",
+ "dataset_name": "qqp",
+ "training_split": "train",
+ "validation_split": "validation",
+ "doc_to_text": "\nSentence 1: {{question1}}\nSentence 2: {{question2}}\nAnswer:",
+ "doc_to_target": "label",
+ "doc_to_choice": [
+ "no",
+ "yes"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc"
+ },
+ {
+ "metric": "f1"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "rte": {
+ "task": "rte",
+ "group": "glue",
+ "dataset_path": "glue",
+ "dataset_name": "rte",
+ "training_split": "train",
+ "validation_split": "validation",
+ "doc_to_text": "{{sentence1}}\nQuestion: {{sentence2}} True or False?\nAnswer:",
+ "doc_to_target": "label",
+ "doc_to_choice": [
+ "True",
+ "False"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "sst2": {
+ "task": "sst2",
+ "group": "glue",
+ "dataset_path": "glue",
+ "dataset_name": "sst2",
+ "training_split": "train",
+ "validation_split": "validation",
+ "doc_to_text": "{{sentence}}\nQuestion: Is this sentence positive or negative?\nAnswer:",
+ "doc_to_target": "label",
+ "doc_to_choice": [
+ "negative",
+ "positive"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "wnli": {
+ "task": "wnli",
+ "group": "glue",
+ "dataset_path": "glue",
+ "dataset_name": "wnli",
+ "training_split": "train",
+ "validation_split": "validation",
+ "doc_to_text": "{{sentence1}}\nQuestion: {{sentence2}} True or False?\nAnswer:",
+ "doc_to_target": "label",
+ "doc_to_choice": [
+ "False",
+ "True"
+ ],
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.0
+ }
+ }
+ },
+ "versions": {
+ "cola": 1.0,
+ "glue": "N/A",
+ "mnli": 1.0,
+ "mnli_mismatch": 1.0,
+ "mrpc": 1.0,
+ "qnli": 1.0,
+ "qqp": 1.0,
+ "rte": 1.0,
+ "sst2": 1.0,
+ "wnli": 2.0
+ },
+ "n-shot": {
+ "cola": 0,
+ "glue": 0,
+ "mnli": 0,
+ "mnli_mismatch": 0,
+ "mrpc": 0,
+ "qnli": 0,
+ "qqp": 0,
+ "rte": 0,
+ "sst2": 0,
+ "wnli": 0
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=m8than/Finch-14B-Continued-10,dtype=bfloat16,trust_remote_code=True",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null
+ },
+ "git_hash": "97a2520"
+ }
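With nine GLUE subtasks landing in one file, the "results" block is easier to scan as a table, in the spirit of the compile-results notebook at the top of this commit. A sketch, assuming pandas is installed and the glue results.json above has been downloaded locally as results.json:

import json
import pandas as pd

with open("results.json") as f:  # the glue results.json from this diff
    data = json.load(f)

# Keep only "<metric>,<filter>" keys (dropping the alias strings) and
# strip the ",none" filter suffix so columns read acc, f1, mcc, etc.
rows = [
    {"task": task, **{k.split(",")[0]: v for k, v in m.items() if "," in k}}
    for task, m in data["results"].items()
]
print(pd.DataFrame(rows).set_index("task").round(4))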
lm-eval-output/m8than/Finch-14B-Continued-10/glue/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:320cf6b2c66c59982aa6b5b1d1d4945c463b48236498f4bb0880245480ff1fb2
+ size 78593
lm-eval-output/m8than/Finch-14B-Continued-10/hellaswag/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61d4282aa1d6ee9ee7c5786cdbefb7724311f470d5d1842653c50980f93341fd
+ size 4886702
lm-eval-output/m8than/Finch-14B-Continued-10/hellaswag/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,67 @@
+ {
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.5891256721768572,
+ "acc_stderr,none": 0.004909870006388839,
+ "acc_norm,none": 0.7842063333997211,
+ "acc_norm_stderr,none": 0.004105310748596489,
+ "alias": "hellaswag"
+ }
+ },
+ "configs": {
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "hellaswag": 0
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=m8than/Finch-14B-Continued-10,dtype=bfloat16,trust_remote_code=True",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null
+ },
+ "git_hash": "97a2520"
+ }
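The process_docs string embedded above is the whole preprocessing story for hellaswag: each context is stitched from ctx_a and ctx_b and prefixed with the activity label. As standalone Python it reads as below; the real preprocess() lives elsewhere in the harness, so the whitespace-stripping stand-in here is an assumption for illustration.

def preprocess(text: str) -> str:
    # Assumed stand-in; the harness version also scrubs bracketed
    # WikiHow-style artifacts such as "[title]".
    return text.strip()

def _process_doc(doc: dict) -> dict:
    # Build the query/choices/gold fields the doc_to_* templates consume.
    ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
    return {
        "query": preprocess(doc["activity_label"] + ": " + ctx),
        "choices": [preprocess(ending) for ending in doc["endings"]],
        "gold": int(doc["label"]),
    }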
lm-eval-output/m8than/Finch-14B-Continued-10/hellaswag/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:204cf1d800824d486813106ffeaadc561d97d47ddc57b74a1a2bff61a1d2e338
+ size 60171
lm-eval-output/m8than/Finch-14B-Continued-10/lambada/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7b03775aa52bb8652e2f0f17c729cc6ae036972584cbc5b12524fb5dd65f9eb
+ size 1970918
lm-eval-output/m8than/Finch-14B-Continued-10/lambada/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,126 @@
+ {
+ "results": {
+ "lambada": {
+ "perplexity,none": 3.277432397804061,
+ "perplexity_stderr,none": 0.14540231578208046,
+ "acc,none": 0.7308364059771008,
+ "acc_stderr,none": 0.017065519206547915,
+ "alias": "lambada"
+ },
+ "lambada_openai": {
+ "perplexity,none": 3.014627189664524,
+ "perplexity_stderr,none": 0.054847634258423886,
+ "acc,none": 0.7626625266834853,
+ "acc_stderr,none": 0.005927361760928846,
+ "alias": " - lambada_openai"
+ },
+ "lambada_standard": {
+ "perplexity,none": 3.5402376059435974,
+ "perplexity_stderr,none": 0.06884414208960295,
+ "acc,none": 0.6990102852707161,
+ "acc_stderr,none": 0.006390424136449911,
+ "alias": " - lambada_standard"
+ }
+ },
+ "groups": {
+ "lambada": {
+ "perplexity,none": 3.277432397804061,
+ "perplexity_stderr,none": 0.14540231578208046,
+ "acc,none": 0.7308364059771008,
+ "acc_stderr,none": 0.017065519206547915,
+ "alias": "lambada"
+ }
+ },
+ "configs": {
+ "lambada_openai": {
+ "task": "lambada_openai",
+ "group": [
+ "lambada"
+ ],
+ "dataset_path": "EleutherAI/lambada_openai",
+ "dataset_name": "default",
+ "test_split": "test",
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "perplexity",
+ "aggregation": "perplexity",
+ "higher_is_better": false
+ },
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "loglikelihood",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{text}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "lambada_standard": {
+ "task": "lambada_standard",
+ "group": [
+ "lambada"
+ ],
+ "dataset_path": "lambada",
+ "validation_split": "validation",
+ "test_split": "test",
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "perplexity",
+ "aggregation": "perplexity",
+ "higher_is_better": false
+ },
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "loglikelihood",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{text}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "lambada": "N/A",
+ "lambada_openai": 1.0,
+ "lambada_standard": 1.0
+ },
+ "n-shot": {
+ "lambada": 0,
+ "lambada_openai": 0,
+ "lambada_standard": 0
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=m8than/Finch-14B-Continued-10,dtype=bfloat16,trust_remote_code=True",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null
+ },
+ "git_hash": "97a2520"
+ }
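Both lambada variants above share one Jinja convention: the prompt is everything before the last space-delimited token, and the target is that final token with its leading space, so prompt plus target reconstructs the original text exactly. The Python equivalent of those two templates, using a made-up example sentence:

# Python equivalent of the doc_to_text / doc_to_target templates above.
text = "he handed her the cup of coffee"
context = " ".join(text.split(" ")[:-1])  # doc_to_text
target = " " + text.split(" ")[-1]        # doc_to_target
assert context + target == text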
lm-eval-output/m8than/Finch-14B-Continued-10/lambada/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f4911c26fc9a0775aa726bc365d292cd7b23681f7a5adf2a9353bc0a930991ea
+ size 22119
lm-eval-output/m8than/Finch-14B-Continued-10/lambada_multilingual/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48249a726591a47ba58f04ed4e9d0641c5a750ac1a6f4319b0a930c97a5c3a78
+ size 5221769
lm-eval-output/m8than/Finch-14B-Continued-10/lambada_multilingual/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,252 @@
+ {
+ "results": {
+ "lambada_multilingual": {
+ "perplexity,none": 16.57427443313553,
+ "perplexity_stderr,none": 6.396109588907219,
+ "acc,none": 0.570230933436833,
+ "acc_stderr,none": 0.08023321842466458,
+ "alias": "lambada_multilingual"
+ },
+ "lambada_openai_mt_de": {
+ "perplexity,none": 27.31172906921195,
+ "perplexity_stderr,none": 1.4878292833817073,
+ "acc,none": 0.46031437997283137,
+ "acc_stderr,none": 0.0069440008789686735,
+ "alias": " - lambada_openai_mt_de"
+ },
+ "lambada_openai_mt_en": {
+ "perplexity,none": 3.0157965175769377,
+ "perplexity_stderr,none": 0.05489109740466202,
+ "acc,none": 0.7622744032602368,
+ "acc_stderr,none": 0.0059306966971974595,
+ "alias": " - lambada_openai_mt_en"
+ },
+ "lambada_openai_mt_es": {
+ "perplexity,none": 22.615944887100966,
+ "perplexity_stderr,none": 1.0817049125217812,
+ "acc,none": 0.49039394527459734,
+ "acc_stderr,none": 0.006964691949428186,
+ "alias": " - lambada_openai_mt_es"
+ },
+ "lambada_openai_mt_fr": {
+ "perplexity,none": 13.102482530597442,
+ "perplexity_stderr,none": 0.6224812834214482,
+ "acc,none": 0.5862604308169999,
+ "acc_stderr,none": 0.006861528841487097,
+ "alias": " - lambada_openai_mt_fr"
+ },
+ "lambada_openai_mt_it": {
+ "perplexity,none": 16.825419161190336,
+ "perplexity_stderr,none": 0.8769978333971412,
+ "acc,none": 0.5519115078594993,
+ "acc_stderr,none": 0.00692833203679387,
+ "alias": " - lambada_openai_mt_it"
+ }
+ },
+ "groups": {
+ "lambada_multilingual": {
+ "perplexity,none": 16.57427443313553,
+ "perplexity_stderr,none": 6.396109588907219,
+ "acc,none": 0.570230933436833,
+ "acc_stderr,none": 0.08023321842466458,
+ "alias": "lambada_multilingual"
+ }
+ },
+ "configs": {
+ "lambada_openai_mt_de": {
+ "task": "lambada_openai_mt_de",
+ "group": [
+ "lambada_multilingual"
+ ],
+ "dataset_path": "EleutherAI/lambada_openai",
+ "dataset_name": "de",
+ "test_split": "test",
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "perplexity",
+ "aggregation": "perplexity",
+ "higher_is_better": false
+ },
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "loglikelihood",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{text}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "lambada_openai_mt_en": {
+ "task": "lambada_openai_mt_en",
+ "group": [
+ "lambada_multilingual"
+ ],
+ "dataset_path": "EleutherAI/lambada_openai",
+ "dataset_name": "en",
+ "test_split": "test",
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "perplexity",
+ "aggregation": "perplexity",
+ "higher_is_better": false
+ },
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "loglikelihood",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{text}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "lambada_openai_mt_es": {
+ "task": "lambada_openai_mt_es",
+ "group": [
+ "lambada_multilingual"
+ ],
+ "dataset_path": "EleutherAI/lambada_openai",
+ "dataset_name": "es",
+ "test_split": "test",
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "perplexity",
+ "aggregation": "perplexity",
+ "higher_is_better": false
+ },
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "loglikelihood",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{text}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "lambada_openai_mt_fr": {
+ "task": "lambada_openai_mt_fr",
+ "group": [
+ "lambada_multilingual"
+ ],
+ "dataset_path": "EleutherAI/lambada_openai",
+ "dataset_name": "fr",
+ "test_split": "test",
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "perplexity",
+ "aggregation": "perplexity",
+ "higher_is_better": false
+ },
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "loglikelihood",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{text}}",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "lambada_openai_mt_it": {
+ "task": "lambada_openai_mt_it",
+ "group": [
+ "lambada_multilingual"
+ ],
+ "dataset_path": "EleutherAI/lambada_openai",
+ "dataset_name": "it",
+ "test_split": "test",
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "perplexity",
+ "aggregation": "perplexity",
+ "higher_is_better": false
+ },
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "loglikelihood",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{text}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "lambada_multilingual": "N/A",
+ "lambada_openai_mt_de": 1.0,
+ "lambada_openai_mt_en": 1.0,
+ "lambada_openai_mt_es": 1.0,
+ "lambada_openai_mt_fr": 1.0,
+ "lambada_openai_mt_it": 1.0
+ },
+ "n-shot": {
+ "lambada_multilingual": 0,
+ "lambada_openai_mt_de": 0,
+ "lambada_openai_mt_en": 0,
+ "lambada_openai_mt_es": 0,
+ "lambada_openai_mt_fr": 0,
+ "lambada_openai_mt_it": 0
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=m8than/Finch-14B-Continued-10,dtype=bfloat16,trust_remote_code=True",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null
+ },
+ "git_hash": "97a2520"
+ }
lm-eval-output/m8than/Finch-14B-Continued-10/lambada_multilingual/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a3aae1cd22b66971d481723d757e110b01923c2c94c685398cfc1e1524673ca
+ size 36778
lm-eval-output/m8than/Finch-14B-Continued-10/logiqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a833d5fe4b937fe1a7d41f269e397e4ea6f89514e17b5b29d806505acc264dcf
+ size 309574
lm-eval-output/m8than/Finch-14B-Continued-10/logiqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,66 @@
+ {
+ "results": {
+ "logiqa": {
+ "acc,none": 0.23963133640552994,
+ "acc_stderr,none": 0.016742766935101436,
+ "acc_norm,none": 0.2980030721966206,
+ "acc_norm_stderr,none": 0.0179399528838245,
+ "alias": "logiqa"
+ }
+ },
+ "configs": {
+ "logiqa": {
+ "task": "logiqa",
+ "dataset_path": "EleutherAI/logiqa",
+ "dataset_name": "logiqa",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Passage: <passage>\n Question: <question>\n Choices:\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n Answer:\n \"\"\"\n choices = [\"a\", \"b\", \"c\", \"d\"]\n prompt = \"Passage: \" + doc[\"context\"] + \"\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\nChoices:\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+ "doc_to_target": "def doc_to_target(doc) -> int:\n choices = [\"a\", \"b\", \"c\", \"d\"]\n return choices.index(doc[\"label\"].strip())\n",
+ "doc_to_choice": "{{options}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "{{context}}",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "logiqa": 1.0
+ },
+ "n-shot": {
+ "logiqa": 0
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=m8than/Finch-14B-Continued-10,dtype=bfloat16,trust_remote_code=True",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null
+ },
+ "git_hash": "97a2520"
+ }
lm-eval-output/m8than/Finch-14B-Continued-10/logiqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b604b72aab371fba76802b55be66c88b15cc6cea0d633320ddc2baa1597c79c9
+ size 14633
lm-eval-output/m8than/Finch-14B-Continued-10/mmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79504d9215e173fc924c86a15c2f72f1e14a9e3edc1b34c0bc3ed91ccbd58df6
+ size 4072031
lm-eval-output/m8than/Finch-14B-Continued-10/mmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,2594 @@
+ {
+ "results": {
+ "mmlu": {
+ "acc,none": 0.5616721264777097,
+ "acc_stderr,none": 0.12922245420838252,
+ "alias": "mmlu"
+ },
+ "mmlu_humanities": {
+ "alias": " - humanities",
+ "acc,none": 0.5094580233793836,
+ "acc_stderr,none": 0.1438564975883652
+ },
+ "mmlu_formal_logic": {
+ "alias": " - formal_logic",
+ "acc,none": 0.36507936507936506,
+ "acc_stderr,none": 0.04306241259127154
+ },
+ "mmlu_high_school_european_history": {
+ "alias": " - high_school_european_history",
+ "acc,none": 0.7212121212121212,
+ "acc_stderr,none": 0.0350143870629678
+ },
+ "mmlu_high_school_us_history": {
+ "alias": " - high_school_us_history",
+ "acc,none": 0.7401960784313726,
+ "acc_stderr,none": 0.03077855467869326
+ },
+ "mmlu_high_school_world_history": {
+ "alias": " - high_school_world_history",
+ "acc,none": 0.7468354430379747,
+ "acc_stderr,none": 0.028304657943035303
+ },
+ "mmlu_international_law": {
+ "alias": " - international_law",
+ "acc,none": 0.6942148760330579,
+ "acc_stderr,none": 0.04205953933884122
+ },
+ "mmlu_jurisprudence": {
+ "alias": " - jurisprudence",
+ "acc,none": 0.6851851851851852,
+ "acc_stderr,none": 0.04489931073591312
+ },
+ "mmlu_logical_fallacies": {
+ "alias": " - logical_fallacies",
+ "acc,none": 0.6625766871165644,
+ "acc_stderr,none": 0.037149084099355745
+ },
+ "mmlu_moral_disputes": {
+ "alias": " - moral_disputes",
+ "acc,none": 0.6329479768786127,
+ "acc_stderr,none": 0.025950054337654085
+ },
+ "mmlu_moral_scenarios": {
+ "alias": " - moral_scenarios",
+ "acc,none": 0.24022346368715083,
+ "acc_stderr,none": 0.014288343803925302
+ },
+ "mmlu_philosophy": {
+ "alias": " - philosophy",
+ "acc,none": 0.639871382636656,
+ "acc_stderr,none": 0.027264297599804015
+ },
+ "mmlu_prehistory": {
+ "alias": " - prehistory",
+ "acc,none": 0.6234567901234568,
+ "acc_stderr,none": 0.026959344518747787
+ },
+ "mmlu_professional_law": {
+ "alias": " - professional_law",
+ "acc,none": 0.43415906127770537,
+ "acc_stderr,none": 0.01265903323706725
+ },
+ "mmlu_world_religions": {
+ "alias": " - world_religions",
+ "acc,none": 0.8011695906432749,
+ "acc_stderr,none": 0.030611116557432528
+ },
+ "mmlu_other": {
+ "alias": " - other",
+ "acc,none": 0.6298680399098808,
+ "acc_stderr,none": 0.10072231796338442
+ },
+ "mmlu_business_ethics": {
+ "alias": " - business_ethics",
+ "acc,none": 0.57,
+ "acc_stderr,none": 0.04975698519562427
+ },
+ "mmlu_clinical_knowledge": {
+ "alias": " - clinical_knowledge",
+ "acc,none": 0.6113207547169811,
+ "acc_stderr,none": 0.030000485448675986
+ },
+ "mmlu_college_medicine": {
+ "alias": " - college_medicine",
+ "acc,none": 0.5780346820809249,
+ "acc_stderr,none": 0.03765746693865151
+ },
+ "mmlu_global_facts": {
+ "alias": " - global_facts",
+ "acc,none": 0.3,
+ "acc_stderr,none": 0.046056618647183814
+ },
+ "mmlu_human_aging": {
+ "alias": " - human_aging",
+ "acc,none": 0.6502242152466368,
+ "acc_stderr,none": 0.03200736719484503
+ },
+ "mmlu_management": {
+ "alias": " - management",
+ "acc,none": 0.6990291262135923,
+ "acc_stderr,none": 0.04541609446503948
+ },
+ "mmlu_marketing": {
+ "alias": " - marketing",
+ "acc,none": 0.8076923076923077,
+ "acc_stderr,none": 0.02581923325648375
+ },
+ "mmlu_medical_genetics": {
+ "alias": " - medical_genetics",
+ "acc,none": 0.72,
+ "acc_stderr,none": 0.045126085985421296
+ },
+ "mmlu_miscellaneous": {
+ "alias": " - miscellaneous",
+ "acc,none": 0.7484035759897829,
+ "acc_stderr,none": 0.015517322365529622
+ },
+ "mmlu_nutrition": {
+ "alias": " - nutrition",
+ "acc,none": 0.6339869281045751,
+ "acc_stderr,none": 0.02758281141515962
+ },
+ "mmlu_professional_accounting": {
+ "alias": " - professional_accounting",
+ "acc,none": 0.40425531914893614,
+ "acc_stderr,none": 0.029275532159704725
+ },
+ "mmlu_professional_medicine": {
+ "alias": " - professional_medicine",
+ "acc,none": 0.5845588235294118,
+ "acc_stderr,none": 0.02993534270787776
+ },
+ "mmlu_virology": {
+ "alias": " - virology",
+ "acc,none": 0.463855421686747,
+ "acc_stderr,none": 0.03882310850890594
+ },
+ "mmlu_social_sciences": {
+ "alias": " - social_sciences",
+ "acc,none": 0.6603834904127397,
+ "acc_stderr,none": 0.09514680794625115
+ },
+ "mmlu_econometrics": {
+ "alias": " - econometrics",
+ "acc,none": 0.3508771929824561,
+ "acc_stderr,none": 0.04489539350270698
+ },
+ "mmlu_high_school_geography": {
+ "alias": " - high_school_geography",
+ "acc,none": 0.7323232323232324,
+ "acc_stderr,none": 0.03154449888270286
+ },
+ "mmlu_high_school_government_and_politics": {
+ "alias": " - high_school_government_and_politics",
+ "acc,none": 0.7772020725388601,
+ "acc_stderr,none": 0.030031147977641545
+ },
+ "mmlu_high_school_macroeconomics": {
+ "alias": " - high_school_macroeconomics",
+ "acc,none": 0.5743589743589743,
+ "acc_stderr,none": 0.025069094387296535
+ },
+ "mmlu_high_school_microeconomics": {
+ "alias": " - high_school_microeconomics",
+ "acc,none": 0.5756302521008403,
+ "acc_stderr,none": 0.032104790510157764
+ },
+ "mmlu_high_school_psychology": {
+ "alias": " - high_school_psychology",
+ "acc,none": 0.7798165137614679,
+ "acc_stderr,none": 0.017765978652327576
+ },
+ "mmlu_human_sexuality": {
+ "alias": " - human_sexuality",
+ "acc,none": 0.6717557251908397,
+ "acc_stderr,none": 0.04118438565806298
+ },
+ "mmlu_professional_psychology": {
+ "alias": " - professional_psychology",
+ "acc,none": 0.5702614379084967,
+ "acc_stderr,none": 0.020027122784928547
+ },
+ "mmlu_public_relations": {
+ "alias": " - public_relations",
+ "acc,none": 0.6454545454545455,
+ "acc_stderr,none": 0.04582004841505415
+ },
+ "mmlu_security_studies": {
+ "alias": " - security_studies",
+ "acc,none": 0.6285714285714286,
+ "acc_stderr,none": 0.030932858792789855
+ },
+ "mmlu_sociology": {
+ "alias": " - sociology",
+ "acc,none": 0.8258706467661692,
+ "acc_stderr,none": 0.026814951200421606
+ },
+ "mmlu_us_foreign_policy": {
+ "alias": " - us_foreign_policy",
+ "acc,none": 0.83,
+ "acc_stderr,none": 0.03775251680686371
+ },
+ "mmlu_stem": {
+ "alias": " - stem",
+ "acc,none": 0.47605455122105933,
+ "acc_stderr,none": 0.11287864111088165
+ },
+ "mmlu_abstract_algebra": {
+ "alias": " - abstract_algebra",
+ "acc,none": 0.35,
+ "acc_stderr,none": 0.047937248544110196
+ },
+ "mmlu_anatomy": {
+ "alias": " - anatomy",
+ "acc,none": 0.5925925925925926,
+ "acc_stderr,none": 0.04244633238353228
+ },
+ "mmlu_astronomy": {
+ "alias": " - astronomy",
+ "acc,none": 0.5592105263157895,
+ "acc_stderr,none": 0.04040311062490436
+ },
+ "mmlu_college_biology": {
+ "alias": " - college_biology",
+ "acc,none": 0.625,
+ "acc_stderr,none": 0.04048439222695598
+ },
+ "mmlu_college_chemistry": {
+ "alias": " - college_chemistry",
+ "acc,none": 0.37,
+ "acc_stderr,none": 0.04852365870939099
+ },
+ "mmlu_college_computer_science": {
+ "alias": " - college_computer_science",
+ "acc,none": 0.47,
+ "acc_stderr,none": 0.05016135580465919
+ },
+ "mmlu_college_mathematics": {
+ "alias": " - college_mathematics",
+ "acc,none": 0.37,
+ "acc_stderr,none": 0.048523658709391
+ },
+ "mmlu_college_physics": {
+ "alias": " - college_physics",
+ "acc,none": 0.38235294117647056,
+ "acc_stderr,none": 0.04835503696107223
+ },
+ "mmlu_computer_security": {
+ "alias": " - computer_security",
+ "acc,none": 0.7,
+ "acc_stderr,none": 0.046056618647183814
+ },
+ "mmlu_conceptual_physics": {
+ "alias": " - conceptual_physics",
+ "acc,none": 0.43829787234042555,
+ "acc_stderr,none": 0.03243618636108101
+ },
+ "mmlu_electrical_engineering": {
+ "alias": " - electrical_engineering",
+ "acc,none": 0.5517241379310345,
+ "acc_stderr,none": 0.041443118108781526
+ },
+ "mmlu_elementary_mathematics": {
+ "alias": " - elementary_mathematics",
+ "acc,none": 0.36507936507936506,
+ "acc_stderr,none": 0.024796060602699958
+ },
+ "mmlu_high_school_biology": {
+ "alias": " - high_school_biology",
+ "acc,none": 0.7129032258064516,
+ "acc_stderr,none": 0.025736542745594528
+ },
+ "mmlu_high_school_chemistry": {
+ "alias": " - high_school_chemistry",
+ "acc,none": 0.4433497536945813,
+ "acc_stderr,none": 0.03495334582162933
+ },
+ "mmlu_high_school_computer_science": {
+ "alias": " - high_school_computer_science",
+ "acc,none": 0.57,
+ "acc_stderr,none": 0.04975698519562428
+ },
+ "mmlu_high_school_mathematics": {
+ "alias": " - high_school_mathematics",
+ "acc,none": 0.2962962962962963,
+ "acc_stderr,none": 0.027840811495871937
+ },
+ "mmlu_high_school_physics": {
+ "alias": " - high_school_physics",
+ "acc,none": 0.3509933774834437,
+ "acc_stderr,none": 0.03896981964257375
+ },
+ "mmlu_high_school_statistics": {
+ "alias": " - high_school_statistics",
+ "acc,none": 0.49537037037037035,
+ "acc_stderr,none": 0.03409825519163572
+ },
+ "mmlu_machine_learning": {
+ "alias": " - machine_learning",
+ "acc,none": 0.4642857142857143,
+ "acc_stderr,none": 0.04733667890053756
+ }
+ },
+ "groups": {
+ "mmlu": {
+ "acc,none": 0.5616721264777097,
+ "acc_stderr,none": 0.12922245420838252,
+ "alias": "mmlu"
+ },
+ "mmlu_humanities": {
+ "alias": " - humanities",
+ "acc,none": 0.5094580233793836,
+ "acc_stderr,none": 0.1438564975883652
+ },
+ "mmlu_other": {
+ "alias": " - other",
+ "acc,none": 0.6298680399098808,
+ "acc_stderr,none": 0.10072231796338442
+ },
+ "mmlu_social_sciences": {
+ "alias": " - social_sciences",
+ "acc,none": 0.6603834904127397,
+ "acc_stderr,none": 0.09514680794625115
+ },
+ "mmlu_stem": {
+ "alias": " - stem",
+ "acc,none": 0.47605455122105933,
+ "acc_stderr,none": 0.11287864111088165
+ }
+ },
+ "configs": {
+ "mmlu_abstract_algebra": {
+ "task": "mmlu_abstract_algebra",
+ "task_alias": "abstract_algebra",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "abstract_algebra",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about abstract algebra.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_anatomy": {
+ "task": "mmlu_anatomy",
+ "task_alias": "anatomy",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "anatomy",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about anatomy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_astronomy": {
+ "task": "mmlu_astronomy",
+ "task_alias": "astronomy",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "astronomy",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about astronomy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_business_ethics": {
+ "task": "mmlu_business_ethics",
+ "task_alias": "business_ethics",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "business_ethics",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about business ethics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_clinical_knowledge": {
+ "task": "mmlu_clinical_knowledge",
+ "task_alias": "clinical_knowledge",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "clinical_knowledge",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_biology": {
+ "task": "mmlu_college_biology",
+ "task_alias": "college_biology",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_biology",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college biology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_chemistry": {
+ "task": "mmlu_college_chemistry",
+ "task_alias": "college_chemistry",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_chemistry",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college chemistry.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_computer_science": {
+ "task": "mmlu_college_computer_science",
+ "task_alias": "college_computer_science",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_computer_science",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college computer science.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_mathematics": {
+ "task": "mmlu_college_mathematics",
+ "task_alias": "college_mathematics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_mathematics",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college mathematics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_medicine": {
+ "task": "mmlu_college_medicine",
+ "task_alias": "college_medicine",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_medicine",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college medicine.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_physics": {
+ "task": "mmlu_college_physics",
+ "task_alias": "college_physics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_physics",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college physics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_computer_security": {
+ "task": "mmlu_computer_security",
+ "task_alias": "computer_security",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "computer_security",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about computer security.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_conceptual_physics": {
+ "task": "mmlu_conceptual_physics",
+ "task_alias": "conceptual_physics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "conceptual_physics",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about conceptual physics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_econometrics": {
+ "task": "mmlu_econometrics",
+ "task_alias": "econometrics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "econometrics",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about econometrics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_electrical_engineering": {
+ "task": "mmlu_electrical_engineering",
+ "task_alias": "electrical_engineering",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "electrical_engineering",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about electrical engineering.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_elementary_mathematics": {
+ "task": "mmlu_elementary_mathematics",
+ "task_alias": "elementary_mathematics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "elementary_mathematics",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about elementary mathematics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_formal_logic": {
+ "task": "mmlu_formal_logic",
+ "task_alias": "formal_logic",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "formal_logic",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about formal logic.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_global_facts": {
+ "task": "mmlu_global_facts",
+ "task_alias": "global_facts",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "global_facts",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about global facts.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_biology": {
+ "task": "mmlu_high_school_biology",
+ "task_alias": "high_school_biology",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_biology",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school biology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_chemistry": {
+ "task": "mmlu_high_school_chemistry",
+ "task_alias": "high_school_chemistry",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_chemistry",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school chemistry.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_computer_science": {
+ "task": "mmlu_high_school_computer_science",
+ "task_alias": "high_school_computer_science",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_computer_science",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school computer science.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_european_history": {
+ "task": "mmlu_high_school_european_history",
+ "task_alias": "high_school_european_history",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_european_history",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school european history.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_geography": {
+ "task": "mmlu_high_school_geography",
+ "task_alias": "high_school_geography",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_geography",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school geography.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_government_and_politics": {
+ "task": "mmlu_high_school_government_and_politics",
+ "task_alias": "high_school_government_and_politics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_government_and_politics",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school government and politics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_macroeconomics": {
+ "task": "mmlu_high_school_macroeconomics",
+ "task_alias": "high_school_macroeconomics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_macroeconomics",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_mathematics": {
+ "task": "mmlu_high_school_mathematics",
+ "task_alias": "high_school_mathematics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_mathematics",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school mathematics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_microeconomics": {
+ "task": "mmlu_high_school_microeconomics",
+ "task_alias": "high_school_microeconomics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_microeconomics",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school microeconomics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_physics": {
+ "task": "mmlu_high_school_physics",
+ "task_alias": "high_school_physics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_physics",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school physics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_psychology": {
+ "task": "mmlu_high_school_psychology",
+ "task_alias": "high_school_psychology",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_psychology",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school psychology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_statistics": {
+ "task": "mmlu_high_school_statistics",
+ "task_alias": "high_school_statistics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_statistics",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school statistics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_us_history": {
+ "task": "mmlu_high_school_us_history",
+ "task_alias": "high_school_us_history",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_us_history",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school us history.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_world_history": {
+ "task": "mmlu_high_school_world_history",
+ "task_alias": "high_school_world_history",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_world_history",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school world history.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_human_aging": {
+ "task": "mmlu_human_aging",
+ "task_alias": "human_aging",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "human_aging",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about human aging.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_human_sexuality": {
+ "task": "mmlu_human_sexuality",
+ "task_alias": "human_sexuality",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "human_sexuality",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about human sexuality.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_international_law": {
+ "task": "mmlu_international_law",
+ "task_alias": "international_law",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "international_law",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about international law.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_jurisprudence": {
+ "task": "mmlu_jurisprudence",
+ "task_alias": "jurisprudence",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "jurisprudence",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about jurisprudence.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_logical_fallacies": {
+ "task": "mmlu_logical_fallacies",
+ "task_alias": "logical_fallacies",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "logical_fallacies",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about logical fallacies.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_machine_learning": {
+ "task": "mmlu_machine_learning",
+ "task_alias": "machine_learning",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "machine_learning",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about machine learning.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_management": {
+ "task": "mmlu_management",
+ "task_alias": "management",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "management",
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about management.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_marketing": {
+ "task": "mmlu_marketing",
+ "task_alias": "marketing",
+ "group": "mmlu_other",
1789
+ "group_alias": "other",
1790
+ "dataset_path": "hails/mmlu_no_train",
1791
+ "dataset_name": "marketing",
1792
+ "test_split": "test",
1793
+ "fewshot_split": "dev",
1794
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1795
+ "doc_to_target": "answer",
1796
+ "doc_to_choice": [
1797
+ "A",
1798
+ "B",
1799
+ "C",
1800
+ "D"
1801
+ ],
1802
+ "description": "The following are multiple choice questions (with answers) about marketing.\n\n",
1803
+ "target_delimiter": " ",
1804
+ "fewshot_delimiter": "\n\n",
1805
+ "fewshot_config": {
1806
+ "sampler": "first_n"
1807
+ },
1808
+ "metric_list": [
1809
+ {
1810
+ "metric": "acc",
1811
+ "aggregation": "mean",
1812
+ "higher_is_better": true
1813
+ }
1814
+ ],
1815
+ "output_type": "multiple_choice",
1816
+ "repeats": 1,
1817
+ "should_decontaminate": false,
1818
+ "metadata": {
1819
+ "version": 0.0
1820
+ }
1821
+ },
1822
+ "mmlu_medical_genetics": {
1823
+ "task": "mmlu_medical_genetics",
1824
+ "task_alias": "medical_genetics",
1825
+ "group": "mmlu_other",
1826
+ "group_alias": "other",
1827
+ "dataset_path": "hails/mmlu_no_train",
1828
+ "dataset_name": "medical_genetics",
1829
+ "test_split": "test",
1830
+ "fewshot_split": "dev",
1831
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1832
+ "doc_to_target": "answer",
1833
+ "doc_to_choice": [
1834
+ "A",
1835
+ "B",
1836
+ "C",
1837
+ "D"
1838
+ ],
1839
+ "description": "The following are multiple choice questions (with answers) about medical genetics.\n\n",
1840
+ "target_delimiter": " ",
1841
+ "fewshot_delimiter": "\n\n",
1842
+ "fewshot_config": {
1843
+ "sampler": "first_n"
1844
+ },
1845
+ "metric_list": [
1846
+ {
1847
+ "metric": "acc",
1848
+ "aggregation": "mean",
1849
+ "higher_is_better": true
1850
+ }
1851
+ ],
1852
+ "output_type": "multiple_choice",
1853
+ "repeats": 1,
1854
+ "should_decontaminate": false,
1855
+ "metadata": {
1856
+ "version": 0.0
1857
+ }
1858
+ },
1859
+ "mmlu_miscellaneous": {
1860
+ "task": "mmlu_miscellaneous",
1861
+ "task_alias": "miscellaneous",
1862
+ "group": "mmlu_other",
1863
+ "group_alias": "other",
1864
+ "dataset_path": "hails/mmlu_no_train",
1865
+ "dataset_name": "miscellaneous",
1866
+ "test_split": "test",
1867
+ "fewshot_split": "dev",
1868
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1869
+ "doc_to_target": "answer",
1870
+ "doc_to_choice": [
1871
+ "A",
1872
+ "B",
1873
+ "C",
1874
+ "D"
1875
+ ],
1876
+ "description": "The following are multiple choice questions (with answers) about miscellaneous.\n\n",
1877
+ "target_delimiter": " ",
1878
+ "fewshot_delimiter": "\n\n",
1879
+ "fewshot_config": {
1880
+ "sampler": "first_n"
1881
+ },
1882
+ "metric_list": [
1883
+ {
1884
+ "metric": "acc",
1885
+ "aggregation": "mean",
1886
+ "higher_is_better": true
1887
+ }
1888
+ ],
1889
+ "output_type": "multiple_choice",
1890
+ "repeats": 1,
1891
+ "should_decontaminate": false,
1892
+ "metadata": {
1893
+ "version": 0.0
1894
+ }
1895
+ },
1896
+ "mmlu_moral_disputes": {
1897
+ "task": "mmlu_moral_disputes",
1898
+ "task_alias": "moral_disputes",
1899
+ "group": "mmlu_humanities",
1900
+ "group_alias": "humanities",
1901
+ "dataset_path": "hails/mmlu_no_train",
1902
+ "dataset_name": "moral_disputes",
1903
+ "test_split": "test",
1904
+ "fewshot_split": "dev",
1905
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1906
+ "doc_to_target": "answer",
1907
+ "doc_to_choice": [
1908
+ "A",
1909
+ "B",
1910
+ "C",
1911
+ "D"
1912
+ ],
1913
+ "description": "The following are multiple choice questions (with answers) about moral disputes.\n\n",
1914
+ "target_delimiter": " ",
1915
+ "fewshot_delimiter": "\n\n",
1916
+ "fewshot_config": {
1917
+ "sampler": "first_n"
1918
+ },
1919
+ "metric_list": [
1920
+ {
1921
+ "metric": "acc",
1922
+ "aggregation": "mean",
1923
+ "higher_is_better": true
1924
+ }
1925
+ ],
1926
+ "output_type": "multiple_choice",
1927
+ "repeats": 1,
1928
+ "should_decontaminate": false,
1929
+ "metadata": {
1930
+ "version": 0.0
1931
+ }
1932
+ },
1933
+ "mmlu_moral_scenarios": {
1934
+ "task": "mmlu_moral_scenarios",
1935
+ "task_alias": "moral_scenarios",
1936
+ "group": "mmlu_humanities",
1937
+ "group_alias": "humanities",
1938
+ "dataset_path": "hails/mmlu_no_train",
1939
+ "dataset_name": "moral_scenarios",
1940
+ "test_split": "test",
1941
+ "fewshot_split": "dev",
1942
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1943
+ "doc_to_target": "answer",
1944
+ "doc_to_choice": [
1945
+ "A",
1946
+ "B",
1947
+ "C",
1948
+ "D"
1949
+ ],
1950
+ "description": "The following are multiple choice questions (with answers) about moral scenarios.\n\n",
1951
+ "target_delimiter": " ",
1952
+ "fewshot_delimiter": "\n\n",
1953
+ "fewshot_config": {
1954
+ "sampler": "first_n"
1955
+ },
1956
+ "metric_list": [
1957
+ {
1958
+ "metric": "acc",
1959
+ "aggregation": "mean",
1960
+ "higher_is_better": true
1961
+ }
1962
+ ],
1963
+ "output_type": "multiple_choice",
1964
+ "repeats": 1,
1965
+ "should_decontaminate": false,
1966
+ "metadata": {
1967
+ "version": 0.0
1968
+ }
1969
+ },
1970
+ "mmlu_nutrition": {
1971
+ "task": "mmlu_nutrition",
1972
+ "task_alias": "nutrition",
1973
+ "group": "mmlu_other",
1974
+ "group_alias": "other",
1975
+ "dataset_path": "hails/mmlu_no_train",
1976
+ "dataset_name": "nutrition",
1977
+ "test_split": "test",
1978
+ "fewshot_split": "dev",
1979
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
1980
+ "doc_to_target": "answer",
1981
+ "doc_to_choice": [
1982
+ "A",
1983
+ "B",
1984
+ "C",
1985
+ "D"
1986
+ ],
1987
+ "description": "The following are multiple choice questions (with answers) about nutrition.\n\n",
1988
+ "target_delimiter": " ",
1989
+ "fewshot_delimiter": "\n\n",
1990
+ "fewshot_config": {
1991
+ "sampler": "first_n"
1992
+ },
1993
+ "metric_list": [
1994
+ {
1995
+ "metric": "acc",
1996
+ "aggregation": "mean",
1997
+ "higher_is_better": true
1998
+ }
1999
+ ],
2000
+ "output_type": "multiple_choice",
2001
+ "repeats": 1,
2002
+ "should_decontaminate": false,
2003
+ "metadata": {
2004
+ "version": 0.0
2005
+ }
2006
+ },
2007
+ "mmlu_philosophy": {
2008
+ "task": "mmlu_philosophy",
2009
+ "task_alias": "philosophy",
2010
+ "group": "mmlu_humanities",
2011
+ "group_alias": "humanities",
2012
+ "dataset_path": "hails/mmlu_no_train",
2013
+ "dataset_name": "philosophy",
2014
+ "test_split": "test",
2015
+ "fewshot_split": "dev",
2016
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2017
+ "doc_to_target": "answer",
2018
+ "doc_to_choice": [
2019
+ "A",
2020
+ "B",
2021
+ "C",
2022
+ "D"
2023
+ ],
2024
+ "description": "The following are multiple choice questions (with answers) about philosophy.\n\n",
2025
+ "target_delimiter": " ",
2026
+ "fewshot_delimiter": "\n\n",
2027
+ "fewshot_config": {
2028
+ "sampler": "first_n"
2029
+ },
2030
+ "metric_list": [
2031
+ {
2032
+ "metric": "acc",
2033
+ "aggregation": "mean",
2034
+ "higher_is_better": true
2035
+ }
2036
+ ],
2037
+ "output_type": "multiple_choice",
2038
+ "repeats": 1,
2039
+ "should_decontaminate": false,
2040
+ "metadata": {
2041
+ "version": 0.0
2042
+ }
2043
+ },
2044
+ "mmlu_prehistory": {
2045
+ "task": "mmlu_prehistory",
2046
+ "task_alias": "prehistory",
2047
+ "group": "mmlu_humanities",
2048
+ "group_alias": "humanities",
2049
+ "dataset_path": "hails/mmlu_no_train",
2050
+ "dataset_name": "prehistory",
2051
+ "test_split": "test",
2052
+ "fewshot_split": "dev",
2053
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2054
+ "doc_to_target": "answer",
2055
+ "doc_to_choice": [
2056
+ "A",
2057
+ "B",
2058
+ "C",
2059
+ "D"
2060
+ ],
2061
+ "description": "The following are multiple choice questions (with answers) about prehistory.\n\n",
2062
+ "target_delimiter": " ",
2063
+ "fewshot_delimiter": "\n\n",
2064
+ "fewshot_config": {
2065
+ "sampler": "first_n"
2066
+ },
2067
+ "metric_list": [
2068
+ {
2069
+ "metric": "acc",
2070
+ "aggregation": "mean",
2071
+ "higher_is_better": true
2072
+ }
2073
+ ],
2074
+ "output_type": "multiple_choice",
2075
+ "repeats": 1,
2076
+ "should_decontaminate": false,
2077
+ "metadata": {
2078
+ "version": 0.0
2079
+ }
2080
+ },
2081
+ "mmlu_professional_accounting": {
2082
+ "task": "mmlu_professional_accounting",
2083
+ "task_alias": "professional_accounting",
2084
+ "group": "mmlu_other",
2085
+ "group_alias": "other",
2086
+ "dataset_path": "hails/mmlu_no_train",
2087
+ "dataset_name": "professional_accounting",
2088
+ "test_split": "test",
2089
+ "fewshot_split": "dev",
2090
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2091
+ "doc_to_target": "answer",
2092
+ "doc_to_choice": [
2093
+ "A",
2094
+ "B",
2095
+ "C",
2096
+ "D"
2097
+ ],
2098
+ "description": "The following are multiple choice questions (with answers) about professional accounting.\n\n",
2099
+ "target_delimiter": " ",
2100
+ "fewshot_delimiter": "\n\n",
2101
+ "fewshot_config": {
2102
+ "sampler": "first_n"
2103
+ },
2104
+ "metric_list": [
2105
+ {
2106
+ "metric": "acc",
2107
+ "aggregation": "mean",
2108
+ "higher_is_better": true
2109
+ }
2110
+ ],
2111
+ "output_type": "multiple_choice",
2112
+ "repeats": 1,
2113
+ "should_decontaminate": false,
2114
+ "metadata": {
2115
+ "version": 0.0
2116
+ }
2117
+ },
2118
+ "mmlu_professional_law": {
2119
+ "task": "mmlu_professional_law",
2120
+ "task_alias": "professional_law",
2121
+ "group": "mmlu_humanities",
2122
+ "group_alias": "humanities",
2123
+ "dataset_path": "hails/mmlu_no_train",
2124
+ "dataset_name": "professional_law",
2125
+ "test_split": "test",
2126
+ "fewshot_split": "dev",
2127
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2128
+ "doc_to_target": "answer",
2129
+ "doc_to_choice": [
2130
+ "A",
2131
+ "B",
2132
+ "C",
2133
+ "D"
2134
+ ],
2135
+ "description": "The following are multiple choice questions (with answers) about professional law.\n\n",
2136
+ "target_delimiter": " ",
2137
+ "fewshot_delimiter": "\n\n",
2138
+ "fewshot_config": {
2139
+ "sampler": "first_n"
2140
+ },
2141
+ "metric_list": [
2142
+ {
2143
+ "metric": "acc",
2144
+ "aggregation": "mean",
2145
+ "higher_is_better": true
2146
+ }
2147
+ ],
2148
+ "output_type": "multiple_choice",
2149
+ "repeats": 1,
2150
+ "should_decontaminate": false,
2151
+ "metadata": {
2152
+ "version": 0.0
2153
+ }
2154
+ },
2155
+ "mmlu_professional_medicine": {
2156
+ "task": "mmlu_professional_medicine",
2157
+ "task_alias": "professional_medicine",
2158
+ "group": "mmlu_other",
2159
+ "group_alias": "other",
2160
+ "dataset_path": "hails/mmlu_no_train",
2161
+ "dataset_name": "professional_medicine",
2162
+ "test_split": "test",
2163
+ "fewshot_split": "dev",
2164
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2165
+ "doc_to_target": "answer",
2166
+ "doc_to_choice": [
2167
+ "A",
2168
+ "B",
2169
+ "C",
2170
+ "D"
2171
+ ],
2172
+ "description": "The following are multiple choice questions (with answers) about professional medicine.\n\n",
2173
+ "target_delimiter": " ",
2174
+ "fewshot_delimiter": "\n\n",
2175
+ "fewshot_config": {
2176
+ "sampler": "first_n"
2177
+ },
2178
+ "metric_list": [
2179
+ {
2180
+ "metric": "acc",
2181
+ "aggregation": "mean",
2182
+ "higher_is_better": true
2183
+ }
2184
+ ],
2185
+ "output_type": "multiple_choice",
2186
+ "repeats": 1,
2187
+ "should_decontaminate": false,
2188
+ "metadata": {
2189
+ "version": 0.0
2190
+ }
2191
+ },
2192
+ "mmlu_professional_psychology": {
2193
+ "task": "mmlu_professional_psychology",
2194
+ "task_alias": "professional_psychology",
2195
+ "group": "mmlu_social_sciences",
2196
+ "group_alias": "social_sciences",
2197
+ "dataset_path": "hails/mmlu_no_train",
2198
+ "dataset_name": "professional_psychology",
2199
+ "test_split": "test",
2200
+ "fewshot_split": "dev",
2201
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2202
+ "doc_to_target": "answer",
2203
+ "doc_to_choice": [
2204
+ "A",
2205
+ "B",
2206
+ "C",
2207
+ "D"
2208
+ ],
2209
+ "description": "The following are multiple choice questions (with answers) about professional psychology.\n\n",
2210
+ "target_delimiter": " ",
2211
+ "fewshot_delimiter": "\n\n",
2212
+ "fewshot_config": {
2213
+ "sampler": "first_n"
2214
+ },
2215
+ "metric_list": [
2216
+ {
2217
+ "metric": "acc",
2218
+ "aggregation": "mean",
2219
+ "higher_is_better": true
2220
+ }
2221
+ ],
2222
+ "output_type": "multiple_choice",
2223
+ "repeats": 1,
2224
+ "should_decontaminate": false,
2225
+ "metadata": {
2226
+ "version": 0.0
2227
+ }
2228
+ },
2229
+ "mmlu_public_relations": {
2230
+ "task": "mmlu_public_relations",
2231
+ "task_alias": "public_relations",
2232
+ "group": "mmlu_social_sciences",
2233
+ "group_alias": "social_sciences",
2234
+ "dataset_path": "hails/mmlu_no_train",
2235
+ "dataset_name": "public_relations",
2236
+ "test_split": "test",
2237
+ "fewshot_split": "dev",
2238
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2239
+ "doc_to_target": "answer",
2240
+ "doc_to_choice": [
2241
+ "A",
2242
+ "B",
2243
+ "C",
2244
+ "D"
2245
+ ],
2246
+ "description": "The following are multiple choice questions (with answers) about public relations.\n\n",
2247
+ "target_delimiter": " ",
2248
+ "fewshot_delimiter": "\n\n",
2249
+ "fewshot_config": {
2250
+ "sampler": "first_n"
2251
+ },
2252
+ "metric_list": [
2253
+ {
2254
+ "metric": "acc",
2255
+ "aggregation": "mean",
2256
+ "higher_is_better": true
2257
+ }
2258
+ ],
2259
+ "output_type": "multiple_choice",
2260
+ "repeats": 1,
2261
+ "should_decontaminate": false,
2262
+ "metadata": {
2263
+ "version": 0.0
2264
+ }
2265
+ },
2266
+ "mmlu_security_studies": {
2267
+ "task": "mmlu_security_studies",
2268
+ "task_alias": "security_studies",
2269
+ "group": "mmlu_social_sciences",
2270
+ "group_alias": "social_sciences",
2271
+ "dataset_path": "hails/mmlu_no_train",
2272
+ "dataset_name": "security_studies",
2273
+ "test_split": "test",
2274
+ "fewshot_split": "dev",
2275
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2276
+ "doc_to_target": "answer",
2277
+ "doc_to_choice": [
2278
+ "A",
2279
+ "B",
2280
+ "C",
2281
+ "D"
2282
+ ],
2283
+ "description": "The following are multiple choice questions (with answers) about security studies.\n\n",
2284
+ "target_delimiter": " ",
2285
+ "fewshot_delimiter": "\n\n",
2286
+ "fewshot_config": {
2287
+ "sampler": "first_n"
2288
+ },
2289
+ "metric_list": [
2290
+ {
2291
+ "metric": "acc",
2292
+ "aggregation": "mean",
2293
+ "higher_is_better": true
2294
+ }
2295
+ ],
2296
+ "output_type": "multiple_choice",
2297
+ "repeats": 1,
2298
+ "should_decontaminate": false,
2299
+ "metadata": {
2300
+ "version": 0.0
2301
+ }
2302
+ },
2303
+ "mmlu_sociology": {
2304
+ "task": "mmlu_sociology",
2305
+ "task_alias": "sociology",
2306
+ "group": "mmlu_social_sciences",
2307
+ "group_alias": "social_sciences",
2308
+ "dataset_path": "hails/mmlu_no_train",
2309
+ "dataset_name": "sociology",
2310
+ "test_split": "test",
2311
+ "fewshot_split": "dev",
2312
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2313
+ "doc_to_target": "answer",
2314
+ "doc_to_choice": [
2315
+ "A",
2316
+ "B",
2317
+ "C",
2318
+ "D"
2319
+ ],
2320
+ "description": "The following are multiple choice questions (with answers) about sociology.\n\n",
2321
+ "target_delimiter": " ",
2322
+ "fewshot_delimiter": "\n\n",
2323
+ "fewshot_config": {
2324
+ "sampler": "first_n"
2325
+ },
2326
+ "metric_list": [
2327
+ {
2328
+ "metric": "acc",
2329
+ "aggregation": "mean",
2330
+ "higher_is_better": true
2331
+ }
2332
+ ],
2333
+ "output_type": "multiple_choice",
2334
+ "repeats": 1,
2335
+ "should_decontaminate": false,
2336
+ "metadata": {
2337
+ "version": 0.0
2338
+ }
2339
+ },
2340
+ "mmlu_us_foreign_policy": {
2341
+ "task": "mmlu_us_foreign_policy",
2342
+ "task_alias": "us_foreign_policy",
2343
+ "group": "mmlu_social_sciences",
2344
+ "group_alias": "social_sciences",
2345
+ "dataset_path": "hails/mmlu_no_train",
2346
+ "dataset_name": "us_foreign_policy",
2347
+ "test_split": "test",
2348
+ "fewshot_split": "dev",
2349
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2350
+ "doc_to_target": "answer",
2351
+ "doc_to_choice": [
2352
+ "A",
2353
+ "B",
2354
+ "C",
2355
+ "D"
2356
+ ],
2357
+ "description": "The following are multiple choice questions (with answers) about us foreign policy.\n\n",
2358
+ "target_delimiter": " ",
2359
+ "fewshot_delimiter": "\n\n",
2360
+ "fewshot_config": {
2361
+ "sampler": "first_n"
2362
+ },
2363
+ "metric_list": [
2364
+ {
2365
+ "metric": "acc",
2366
+ "aggregation": "mean",
2367
+ "higher_is_better": true
2368
+ }
2369
+ ],
2370
+ "output_type": "multiple_choice",
2371
+ "repeats": 1,
2372
+ "should_decontaminate": false,
2373
+ "metadata": {
2374
+ "version": 0.0
2375
+ }
2376
+ },
2377
+ "mmlu_virology": {
2378
+ "task": "mmlu_virology",
2379
+ "task_alias": "virology",
2380
+ "group": "mmlu_other",
2381
+ "group_alias": "other",
2382
+ "dataset_path": "hails/mmlu_no_train",
2383
+ "dataset_name": "virology",
2384
+ "test_split": "test",
2385
+ "fewshot_split": "dev",
2386
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2387
+ "doc_to_target": "answer",
2388
+ "doc_to_choice": [
2389
+ "A",
2390
+ "B",
2391
+ "C",
2392
+ "D"
2393
+ ],
2394
+ "description": "The following are multiple choice questions (with answers) about virology.\n\n",
2395
+ "target_delimiter": " ",
2396
+ "fewshot_delimiter": "\n\n",
2397
+ "fewshot_config": {
2398
+ "sampler": "first_n"
2399
+ },
2400
+ "metric_list": [
2401
+ {
2402
+ "metric": "acc",
2403
+ "aggregation": "mean",
2404
+ "higher_is_better": true
2405
+ }
2406
+ ],
2407
+ "output_type": "multiple_choice",
2408
+ "repeats": 1,
2409
+ "should_decontaminate": false,
2410
+ "metadata": {
2411
+ "version": 0.0
2412
+ }
2413
+ },
2414
+ "mmlu_world_religions": {
2415
+ "task": "mmlu_world_religions",
2416
+ "task_alias": "world_religions",
2417
+ "group": "mmlu_humanities",
2418
+ "group_alias": "humanities",
2419
+ "dataset_path": "hails/mmlu_no_train",
2420
+ "dataset_name": "world_religions",
2421
+ "test_split": "test",
2422
+ "fewshot_split": "dev",
2423
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
2424
+ "doc_to_target": "answer",
2425
+ "doc_to_choice": [
2426
+ "A",
2427
+ "B",
2428
+ "C",
2429
+ "D"
2430
+ ],
2431
+ "description": "The following are multiple choice questions (with answers) about world religions.\n\n",
2432
+ "target_delimiter": " ",
2433
+ "fewshot_delimiter": "\n\n",
2434
+ "fewshot_config": {
2435
+ "sampler": "first_n"
2436
+ },
2437
+ "metric_list": [
2438
+ {
2439
+ "metric": "acc",
2440
+ "aggregation": "mean",
2441
+ "higher_is_better": true
2442
+ }
2443
+ ],
2444
+ "output_type": "multiple_choice",
2445
+ "repeats": 1,
2446
+ "should_decontaminate": false,
2447
+ "metadata": {
2448
+ "version": 0.0
2449
+ }
2450
+ }
2451
+ },
2452
+ "versions": {
2453
+ "mmlu": "N/A",
2454
+ "mmlu_abstract_algebra": 0.0,
2455
+ "mmlu_anatomy": 0.0,
2456
+ "mmlu_astronomy": 0.0,
2457
+ "mmlu_business_ethics": 0.0,
2458
+ "mmlu_clinical_knowledge": 0.0,
2459
+ "mmlu_college_biology": 0.0,
2460
+ "mmlu_college_chemistry": 0.0,
2461
+ "mmlu_college_computer_science": 0.0,
2462
+ "mmlu_college_mathematics": 0.0,
2463
+ "mmlu_college_medicine": 0.0,
2464
+ "mmlu_college_physics": 0.0,
2465
+ "mmlu_computer_security": 0.0,
2466
+ "mmlu_conceptual_physics": 0.0,
2467
+ "mmlu_econometrics": 0.0,
2468
+ "mmlu_electrical_engineering": 0.0,
2469
+ "mmlu_elementary_mathematics": 0.0,
2470
+ "mmlu_formal_logic": 0.0,
2471
+ "mmlu_global_facts": 0.0,
2472
+ "mmlu_high_school_biology": 0.0,
2473
+ "mmlu_high_school_chemistry": 0.0,
2474
+ "mmlu_high_school_computer_science": 0.0,
2475
+ "mmlu_high_school_european_history": 0.0,
2476
+ "mmlu_high_school_geography": 0.0,
2477
+ "mmlu_high_school_government_and_politics": 0.0,
2478
+ "mmlu_high_school_macroeconomics": 0.0,
2479
+ "mmlu_high_school_mathematics": 0.0,
2480
+ "mmlu_high_school_microeconomics": 0.0,
2481
+ "mmlu_high_school_physics": 0.0,
2482
+ "mmlu_high_school_psychology": 0.0,
2483
+ "mmlu_high_school_statistics": 0.0,
2484
+ "mmlu_high_school_us_history": 0.0,
2485
+ "mmlu_high_school_world_history": 0.0,
2486
+ "mmlu_human_aging": 0.0,
2487
+ "mmlu_human_sexuality": 0.0,
2488
+ "mmlu_humanities": "N/A",
2489
+ "mmlu_international_law": 0.0,
2490
+ "mmlu_jurisprudence": 0.0,
2491
+ "mmlu_logical_fallacies": 0.0,
2492
+ "mmlu_machine_learning": 0.0,
2493
+ "mmlu_management": 0.0,
2494
+ "mmlu_marketing": 0.0,
2495
+ "mmlu_medical_genetics": 0.0,
2496
+ "mmlu_miscellaneous": 0.0,
2497
+ "mmlu_moral_disputes": 0.0,
2498
+ "mmlu_moral_scenarios": 0.0,
2499
+ "mmlu_nutrition": 0.0,
2500
+ "mmlu_other": "N/A",
2501
+ "mmlu_philosophy": 0.0,
2502
+ "mmlu_prehistory": 0.0,
2503
+ "mmlu_professional_accounting": 0.0,
2504
+ "mmlu_professional_law": 0.0,
2505
+ "mmlu_professional_medicine": 0.0,
2506
+ "mmlu_professional_psychology": 0.0,
2507
+ "mmlu_public_relations": 0.0,
2508
+ "mmlu_security_studies": 0.0,
2509
+ "mmlu_social_sciences": "N/A",
2510
+ "mmlu_sociology": 0.0,
2511
+ "mmlu_stem": "N/A",
2512
+ "mmlu_us_foreign_policy": 0.0,
2513
+ "mmlu_virology": 0.0,
2514
+ "mmlu_world_religions": 0.0
2515
+ },
2516
+ "n-shot": {
2517
+ "mmlu": 0,
2518
+ "mmlu_abstract_algebra": 0,
2519
+ "mmlu_anatomy": 0,
2520
+ "mmlu_astronomy": 0,
2521
+ "mmlu_business_ethics": 0,
2522
+ "mmlu_clinical_knowledge": 0,
2523
+ "mmlu_college_biology": 0,
2524
+ "mmlu_college_chemistry": 0,
2525
+ "mmlu_college_computer_science": 0,
2526
+ "mmlu_college_mathematics": 0,
2527
+ "mmlu_college_medicine": 0,
2528
+ "mmlu_college_physics": 0,
2529
+ "mmlu_computer_security": 0,
2530
+ "mmlu_conceptual_physics": 0,
2531
+ "mmlu_econometrics": 0,
2532
+ "mmlu_electrical_engineering": 0,
2533
+ "mmlu_elementary_mathematics": 0,
2534
+ "mmlu_formal_logic": 0,
2535
+ "mmlu_global_facts": 0,
2536
+ "mmlu_high_school_biology": 0,
2537
+ "mmlu_high_school_chemistry": 0,
2538
+ "mmlu_high_school_computer_science": 0,
2539
+ "mmlu_high_school_european_history": 0,
2540
+ "mmlu_high_school_geography": 0,
2541
+ "mmlu_high_school_government_and_politics": 0,
2542
+ "mmlu_high_school_macroeconomics": 0,
2543
+ "mmlu_high_school_mathematics": 0,
2544
+ "mmlu_high_school_microeconomics": 0,
2545
+ "mmlu_high_school_physics": 0,
2546
+ "mmlu_high_school_psychology": 0,
2547
+ "mmlu_high_school_statistics": 0,
2548
+ "mmlu_high_school_us_history": 0,
2549
+ "mmlu_high_school_world_history": 0,
2550
+ "mmlu_human_aging": 0,
2551
+ "mmlu_human_sexuality": 0,
2552
+ "mmlu_humanities": 0,
2553
+ "mmlu_international_law": 0,
2554
+ "mmlu_jurisprudence": 0,
2555
+ "mmlu_logical_fallacies": 0,
2556
+ "mmlu_machine_learning": 0,
2557
+ "mmlu_management": 0,
2558
+ "mmlu_marketing": 0,
2559
+ "mmlu_medical_genetics": 0,
2560
+ "mmlu_miscellaneous": 0,
2561
+ "mmlu_moral_disputes": 0,
2562
+ "mmlu_moral_scenarios": 0,
2563
+ "mmlu_nutrition": 0,
2564
+ "mmlu_other": 0,
2565
+ "mmlu_philosophy": 0,
2566
+ "mmlu_prehistory": 0,
2567
+ "mmlu_professional_accounting": 0,
2568
+ "mmlu_professional_law": 0,
2569
+ "mmlu_professional_medicine": 0,
2570
+ "mmlu_professional_psychology": 0,
2571
+ "mmlu_public_relations": 0,
2572
+ "mmlu_security_studies": 0,
2573
+ "mmlu_social_sciences": 0,
2574
+ "mmlu_sociology": 0,
2575
+ "mmlu_stem": 0,
2576
+ "mmlu_us_foreign_policy": 0,
2577
+ "mmlu_virology": 0,
2578
+ "mmlu_world_religions": 0
2579
+ },
2580
+ "config": {
2581
+ "model": "hf",
2582
+ "model_args": "pretrained=m8than/Finch-14B-Continued-10,dtype=bfloat16,trust_remote_code=True",
2583
+ "batch_size": "auto",
2584
+ "batch_sizes": [
2585
+ 32
2586
+ ],
2587
+ "device": null,
2588
+ "use_cache": null,
2589
+ "limit": null,
2590
+ "bootstrap_iters": 100000,
2591
+ "gen_kwargs": null
2592
+ },
2593
+ "git_hash": "97a2520"
2594
+ }
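All of the MMLU subtask configs above share a single Jinja doc_to_text template and the fixed doc_to_choice list ["A", "B", "C", "D"]; doc_to_target is the integer answer field, which indexes that list. A minimal sketch of how the template renders, using plain jinja2 and a made-up example row (real rows come from hails/mmlu_no_train; the harness renders through its own machinery, but plain Jinja suffices for this template):

    # Sketch: render the shared MMLU doc_to_text template with jinja2.
    # The example row is hypothetical; real rows come from hails/mmlu_no_train.
    from jinja2 import Template

    DOC_TO_TEXT = (
        "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\n"
        "C. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:"
    )

    doc = {
        "question": " Which organ secretes insulin? ",
        "choices": ["Liver", "Pancreas", "Kidney", "Spleen"],
        "answer": 1,  # doc_to_target indexes doc_to_choice, so 1 -> "B"
    }

    print(Template(DOC_TO_TEXT).render(**doc))
    # Which organ secretes insulin?
    # A. Liver
    # B. Pancreas
    # C. Kidney
    # D. Spleen
    # Answer:

The scored continuation is the chosen letter, appended after the prompt with target_delimiter (a single space).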
lm-eval-output/m8than/Finch-14B-Continued-10/mmlu/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34b8fd229d13f74a3422ed44e434abd2e88776291175d8c5a7c54708b41c86b2
+ size 96739
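The taskrun.log and result-jsonl.tar.gz entries in this commit are Git LFS pointer files: three lines giving the spec version, the sha256 oid of the real blob, and its size in bytes. A minimal sketch of validating a fetched blob against the pointer above (the local path is hypothetical):

    # Sketch: check a downloaded artifact against its Git LFS pointer fields.
    import hashlib

    POINTER = {
        "oid": "34b8fd229d13f74a3422ed44e434abd2e88776291175d8c5a7c54708b41c86b2",
        "size": 96739,
    }

    with open("taskrun.log", "rb") as f:  # hypothetical local download path
        blob = f.read()

    assert len(blob) == POINTER["size"], "size mismatch"
    assert hashlib.sha256(blob).hexdigest() == POINTER["oid"], "oid mismatch"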
lm-eval-output/m8than/Finch-14B-Continued-10/openbookqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08e4cac2e3eb5f5313dbbeab1135c0391d876f94e964b4efcf714ad237ffa58c
+ size 74609
lm-eval-output/m8than/Finch-14B-Continued-10/openbookqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,66 @@
+ {
+ "results": {
+ "openbookqa": {
+ "acc,none": 0.338,
+ "acc_stderr,none": 0.021175665695209407,
+ "acc_norm,none": 0.45,
+ "acc_norm_stderr,none": 0.022270877485360437,
+ "alias": "openbookqa"
+ }
+ },
+ "configs": {
+ "openbookqa": {
+ "task": "openbookqa",
+ "dataset_path": "openbookqa",
+ "dataset_name": "main",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "doc_to_text": "question_stem",
+ "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}",
+ "doc_to_choice": "{{choices.text}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "question_stem",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "openbookqa": 1.0
+ },
+ "n-shot": {
+ "openbookqa": 0
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=m8than/Finch-14B-Continued-10,dtype=bfloat16,trust_remote_code=True",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null
+ },
+ "git_hash": "97a2520"
+ }
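Unlike the MMLU tasks, openbookqa scores the full answer strings: doc_to_choice is the raw choices.text list, and doc_to_target resolves the letter answerKey to an index via choices.label.index(answerKey.lstrip()). The same resolution in plain Python, on a hypothetical row:

    # Sketch of openbookqa's doc_to_target / doc_to_choice resolution.
    # The row is hypothetical; real rows come from the openbookqa dataset.
    doc = {
        "question_stem": "Which of these conducts electricity best?",
        "choices": {
            "text": ["a wooden spoon", "a copper wire", "a glass rod", "a rubber band"],
            "label": ["A", "B", "C", "D"],
        },
        "answerKey": "B",
    }

    idx = doc["choices"]["label"].index(doc["answerKey"].lstrip())
    print(idx, doc["choices"]["text"][idx])  # 1 a copper wire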
lm-eval-output/m8than/Finch-14B-Continued-10/openbookqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de1ff1e7e313623869607929227ad148798b80bf380db5bfdc410f1de8641032
+ size 12033
lm-eval-output/m8than/Finch-14B-Continued-10/pawsx/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c871153c88d9aad6aaaea9e3d70b967443f706fec60b0664f5be0f0cec7a31ef
+ size 2133413
lm-eval-output/m8than/Finch-14B-Continued-10/pawsx/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,283 @@
+ {
+ "results": {
+ "pawsx": {
+ "acc,none": 0.43635714285714283,
+ "acc_stderr,none": 0.05805845343398072,
+ "alias": "pawsx"
+ },
+ "paws_de": {
+ "acc,none": 0.416,
+ "acc_stderr,none": 0.011024190055654281,
+ "alias": " - paws_de"
+ },
+ "paws_en": {
+ "acc,none": 0.336,
+ "acc_stderr,none": 0.010564459470410665,
+ "alias": " - paws_en"
+ },
+ "paws_es": {
+ "acc,none": 0.351,
+ "acc_stderr,none": 0.010675039964286672,
+ "alias": " - paws_es"
+ },
+ "paws_fr": {
+ "acc,none": 0.5415,
+ "acc_stderr,none": 0.011144549137930353,
+ "alias": " - paws_fr"
+ },
+ "paws_ja": {
+ "acc,none": 0.52,
+ "acc_stderr,none": 0.011174185930778312,
+ "alias": " - paws_ja"
+ },
+ "paws_ko": {
+ "acc,none": 0.4495,
+ "acc_stderr,none": 0.011125950223877365,
+ "alias": " - paws_ko"
+ },
+ "paws_zh": {
+ "acc,none": 0.4405,
+ "acc_stderr,none": 0.011103671499120343,
+ "alias": " - paws_zh"
+ }
+ },
+ "groups": {
+ "pawsx": {
+ "acc,none": 0.43635714285714283,
+ "acc_stderr,none": 0.05805845343398072,
+ "alias": "pawsx"
+ }
+ },
+ "configs": {
+ "paws_de": {
+ "task": "paws_de",
+ "group": "pawsx",
+ "dataset_path": "paws-x",
+ "dataset_name": "de",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "doc_to_text": "",
+ "doc_to_target": "label",
+ "doc_to_choice": "{{[sentence1+\", richtig? Ja, \"+sentence2, sentence1+\", richtig? Nein, \"+sentence2]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "paws_en": {
+ "task": "paws_en",
+ "group": "pawsx",
+ "dataset_path": "paws-x",
+ "dataset_name": "en",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "doc_to_text": "",
+ "doc_to_target": "label",
+ "doc_to_choice": "{{[sentence1+\", right? Yes, \"+sentence2, sentence1+\", right? No, \"+sentence2]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "paws_es": {
+ "task": "paws_es",
+ "group": "pawsx",
+ "dataset_path": "paws-x",
+ "dataset_name": "es",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "doc_to_text": "",
+ "doc_to_target": "label",
+ "doc_to_choice": "{{[sentence1+\", verdad? Sí, \"+sentence2, sentence1+\", verdad? No, \"+sentence2]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "paws_fr": {
+ "task": "paws_fr",
+ "group": "pawsx",
+ "dataset_path": "paws-x",
+ "dataset_name": "fr",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "doc_to_text": "",
+ "doc_to_target": "label",
+ "doc_to_choice": "{{[sentence1+\", n'est-ce pas? Oui, \"+sentence2, sentence1+\", n'est-ce pas? No, \"+sentence2]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "paws_ja": {
+ "task": "paws_ja",
+ "group": "pawsx",
+ "dataset_path": "paws-x",
+ "dataset_name": "ja",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "doc_to_text": "",
+ "doc_to_target": "label",
+ "doc_to_choice": "{{[sentence1+\", ですね? はい, \"+sentence2, sentence1+\", ですね? いいえ, \"+sentence2]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "paws_ko": {
+ "task": "paws_ko",
+ "group": "pawsx",
+ "dataset_path": "paws-x",
+ "dataset_name": "ko",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "doc_to_text": "",
+ "doc_to_target": "label",
+ "doc_to_choice": "{{[sentence1+\", 맞죠? 예, \"+sentence2, sentence1+\", 맞죠? 아니요, \"+sentence2]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "paws_zh": {
+ "task": "paws_zh",
+ "group": "pawsx",
+ "dataset_path": "paws-x",
+ "dataset_name": "zh",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "doc_to_text": "",
+ "doc_to_target": "label",
+ "doc_to_choice": "{{[sentence1+\", 对吧? 是, \"+sentence2, sentence1+\", 对吧? 不是, \"+sentence2]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ }
+ },
+ "versions": {
+ "paws_de": 0.0,
+ "paws_en": 0.0,
+ "paws_es": 0.0,
+ "paws_fr": 0.0,
+ "paws_ja": 0.0,
+ "paws_ko": 0.0,
+ "paws_zh": 0.0,
+ "pawsx": "N/A"
+ },
+ "n-shot": {
+ "paws_de": 0,
+ "paws_en": 0,
+ "paws_es": 0,
+ "paws_fr": 0,
+ "paws_ja": 0,
+ "paws_ko": 0,
+ "paws_zh": 0,
+ "pawsx": 0
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=m8than/Finch-14B-Continued-10,dtype=bfloat16,trust_remote_code=True",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null
+ },
+ "git_hash": "97a2520"
+ }
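The top-level pawsx entry is the group aggregate over the seven language subtasks; since each PAWS-X test split is the same size, it reduces to the simple mean of the per-language accuracies, which reproduces the reported value exactly:

    # Recompute the pawsx group accuracy from the subtask scores above.
    accs = [0.416, 0.336, 0.351, 0.5415, 0.52, 0.4495, 0.4405]
    print(sum(accs) / len(accs))  # 0.43635714285714283, as reported

The much larger group acc_stderr (0.058 versus roughly 0.011 per language) reflects the spread between languages rather than per-task sampling noise.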
lm-eval-output/m8than/Finch-14B-Continued-10/pawsx/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8ee152c496a75dda2bca1e03f0e11cb5f1f70d26c6136b6d1cc3aea3ff4d4b5
+ size 28205
lm-eval-output/m8than/Finch-14B-Continued-10/piqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9314e122db708bbb9824245e8e1d629e68ff805c6fca1c62c1ccabb67d107c29
+ size 238859
lm-eval-output/m8than/Finch-14B-Continued-10/piqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,64 @@
+ {
+ "results": {
+ "piqa": {
+ "acc,none": 0.8025027203482046,
+ "acc_stderr,none": 0.00928857810852327,
+ "acc_norm,none": 0.8035908596300326,
+ "acc_norm_stderr,none": 0.00926923223767992,
+ "alias": "piqa"
+ }
+ },
+ "configs": {
+ "piqa": {
+ "task": "piqa",
+ "dataset_path": "piqa",
+ "training_split": "train",
+ "validation_split": "validation",
+ "doc_to_text": "Question: {{goal}}\nAnswer:",
+ "doc_to_target": "label",
+ "doc_to_choice": "{{[sol1, sol2]}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "goal",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "piqa": 1.0
+ },
+ "n-shot": {
+ "piqa": 0
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=m8than/Finch-14B-Continued-10,dtype=bfloat16,trust_remote_code=True",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null
+ },
+ "git_hash": "97a2520"
+ }
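piqa reports both acc and acc_norm. In lm-eval-harness, acc takes the argmax of the raw summed log-likelihoods of the candidate solutions, while acc_norm (to my understanding) first divides each log-likelihood by the byte length of the choice string, offsetting the advantage that shorter completions get from having fewer tokens to score. A sketch with made-up log-likelihoods:

    # Sketch: acc vs acc_norm selection for one two-choice doc.
    # Log-likelihoods are made up; the harness gets them from the model.
    choices = ["turn the bottle upside down over the glass", "shake it"]
    logl = [-14.2, -6.1]
    gold = 0

    pred = max(range(2), key=lambda i: logl[i])
    pred_norm = max(range(2), key=lambda i: logl[i] / len(choices[i].encode("utf-8")))

    print(pred == gold, pred_norm == gold)  # False True: normalization flips the pick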
lm-eval-output/m8than/Finch-14B-Continued-10/piqa/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4248118222c9a95807af5f276617900a69a01ca5ea8eb4f8b3756d4c8cdc8857
+ size 16359
lm-eval-output/m8than/Finch-14B-Continued-10/pythia/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47e98b55c12181af66586f54f408411af0a07b71f8c7bd59c332d2feb1cde5a4
+ size 11980040
lm-eval-output/m8than/Finch-14B-Continued-10/pythia/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
The diff for this file is too large to render. See raw diff
lm-eval-output/m8than/Finch-14B-Continued-10/pythia/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/taskrun.log ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be4d055747640f9c02bfe93fcd367e27dfd0f7edd040225a5397f3857f40aaa8
+ size 437076
lm-eval-output/m8than/Finch-14B-Continued-10/record/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/result-jsonl.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39add6cd660c1d2cb82f0e8f2ca1956cc6d9c161f0994939a7be74a0bb7942fa
+ size 11106481
lm-eval-output/m8than/Finch-14B-Continued-10/record/dtype=bfloat16,trust_remote_code=True-num_fewshot=-1-nvidia-gpu/results.json ADDED
@@ -0,0 +1,67 @@
+ {
+ "results": {
+ "record": {
+ "f1,none": 0.2822200002551079,
+ "f1_stderr,none": 0.004461487034085861,
+ "em,none": 0.272,
+ "em_stderr,none": 0.004450121386888205,
+ "alias": "record"
+ }
+ },
+ "configs": {
+ "record": {
+ "task": "record",
+ "group": [
+ "super-glue-lm-eval-v1"
+ ],
+ "dataset_path": "super_glue",
+ "dataset_name": "record",
+ "training_split": "train",
+ "validation_split": "validation",
+ "doc_to_text": "def doc_to_text(doc):\n initial_text, *highlights = doc[\"passage\"].strip().split(\"\\n@highlight\\n\")\n text = initial_text + \"\\n\\n\"\n for highlight in highlights:\n text += f\" - {highlight}.\\n\"\n return text\n",
+ "doc_to_target": "{{answers}}",
+ "doc_to_choice": "{{entities}}",
+ "process_results": "def process_results(doc, results):\n # ReCoRD's evaluation is actually deceptively simple:\n # - Pick the maximum likelihood prediction entity\n # - Evaluate the accuracy and token F1 PER EXAMPLE\n # - Average over all examples\n max_idx = np.argmax(np.array([result[0] for result in results]))\n\n prediction = doc[\"entities\"][max_idx]\n gold_label_set = doc[\"answers\"]\n f1 = metric_max_over_ground_truths(\n squad_metrics.compute_f1, prediction, gold_label_set\n )\n em = metric_max_over_ground_truths(\n squad_metrics.compute_exact, prediction, gold_label_set\n )\n\n return {\n \"f1\": f1,\n \"em\": em,\n }\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "metric_list": [
+ {
+ "metric": "f1",
+ "aggregation": "mean"
+ },
+ {
+ "metric": "em",
+ "higher_is_better": true,
+ "aggregation": "mean"
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "record": 1.0
+ },
+ "n-shot": {
+ "record": 0
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=m8than/Finch-14B-Continued-10,dtype=bfloat16,trust_remote_code=True",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 32
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null
+ },
+ "git_hash": "97a2520"
+ }
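record is the one task here with a custom process_results: it picks the maximum-likelihood entity and scores it against every accepted gold answer string, keeping the best per-example EM/F1 before the mean aggregation. A sketch of the max-over-ground-truths pattern that function relies on (compute_exact below is a simplified stand-in for the SQuAD helper it imports, which also normalizes articles and punctuation before comparing):

    # Sketch of the max-over-ground-truths scoring used by ReCoRD above.
    def compute_exact(prediction: str, gold: str) -> float:
        # Simplified stand-in for transformers' squad_metrics.compute_exact.
        return float(prediction.strip().lower() == gold.strip().lower())

    def metric_max_over_ground_truths(metric_fn, prediction, gold_label_set):
        # An entity may match any one of several accepted answer spans.
        return max(metric_fn(prediction, gold) for gold in gold_label_set)

    print(metric_max_over_ground_truths(
        compute_exact, "Barack Obama", ["Obama", "Barack Obama"]))  # 1.0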