juewang commited on
Commit
322bda0
1 Parent(s): 1e711fd
notebooks/.ipynb_checkpoints/convert-lm-eval-harness-checkpoint.ipynb CHANGED
@@ -39,7 +39,7 @@
39
  "task_name = 'hellaswag'\n",
40
  "data = load_dataset(task_name)\n",
41
  "data.shuffle(seed=42)\n",
42
- "with open(f'../{task_name}.jsonl', 'w') as f:\n",
43
  " for i_item, item in enumerate(data['train']):\n",
44
  " text = item['ctx'] + item['endings'][int(item['label'])]\n",
45
  " f.write(\n",
@@ -117,7 +117,7 @@
117
  "task_name = 'boolq'\n",
118
  "data = load_dataset(task_name)\n",
119
  "data.shuffle(seed=42)\n",
120
- "with open(f'../{task_name}.jsonl', 'w') as f:\n",
121
  " for i_item, item in enumerate(data['train']):\n",
122
  " text = f\"{item['passage']}\\nQuestion: {item['question']}?\\nAnswer: {item['answer']}\"\n",
123
  " f.write(\n",
@@ -144,7 +144,7 @@
144
  "task_name = 'arc_challenge'\n",
145
  "data = load_dataset('ai2_arc', 'ARC-Challenge')\n",
146
  "data.shuffle(seed=42)\n",
147
- "with open(f'../{task_name}.jsonl', 'w') as f:\n",
148
  " for i_item, item in enumerate(data['train']):\n",
149
  " i_a = item['choices']['label'].index(item['answerKey'])\n",
150
  " q = item['question']\n",
@@ -174,7 +174,7 @@
174
  "task_name = 'arc_easy'\n",
175
  "data = load_dataset('ai2_arc', 'ARC-Easy')\n",
176
  "data.shuffle(seed=42)\n",
177
- "with open(f'../{task_name}.jsonl', 'w') as f:\n",
178
  " for i_item, item in enumerate(data['train']):\n",
179
  " i_a = item['choices']['label'].index(item['answerKey'])\n",
180
  " q = item['question']\n",
@@ -207,7 +207,7 @@
207
  "task_name = 'copa'\n",
208
  "data = load_dataset('super_glue', task_name)\n",
209
  "data.shuffle(seed=42)\n",
210
- "with open(f'../{task_name}.jsonl', 'w') as f:\n",
211
  " for i_item, item in enumerate(data['train']):\n",
212
  " i_a = item['label']\n",
213
  " premise = item['premise']\n",
@@ -248,7 +248,7 @@
248
  "task_name = 'piqa'\n",
249
  "data = load_dataset(task_name)\n",
250
  "data.shuffle(seed=42)\n",
251
- "with open(f'../{task_name}.jsonl', 'w') as f:\n",
252
  " for i_item, item in enumerate(data['train']):\n",
253
  " i_a = item['label']\n",
254
  " goal = item['goal']\n",
@@ -282,7 +282,7 @@
282
  "task_name = 'winogrande'\n",
283
  "data = load_dataset(task_name, 'winogrande_xl')\n",
284
  "data.shuffle(seed=42)\n",
285
- "with open(f'../{task_name}.jsonl', 'w') as f:\n",
286
  " for i_item, item in enumerate(data['train']):\n",
287
  " i_a = item['answer']\n",
288
  " sentence = item['sentence']\n",
@@ -309,7 +309,7 @@
309
  }
310
  ],
311
  "source": [
312
- "dataset = load_dataset(\"juewang/target-data\", data_files='*.jsonl', split='train')"
313
  ]
314
  },
315
  {
 
39
  "task_name = 'hellaswag'\n",
40
  "data = load_dataset(task_name)\n",
41
  "data.shuffle(seed=42)\n",
42
+ "with open(f'../seed_data/{task_name}.jsonl', 'w') as f:\n",
43
  " for i_item, item in enumerate(data['train']):\n",
44
  " text = item['ctx'] + item['endings'][int(item['label'])]\n",
45
  " f.write(\n",
 
117
  "task_name = 'boolq'\n",
118
  "data = load_dataset(task_name)\n",
119
  "data.shuffle(seed=42)\n",
120
+ "with open(f'../seed_data/{task_name}.jsonl', 'w') as f:\n",
121
  " for i_item, item in enumerate(data['train']):\n",
122
  " text = f\"{item['passage']}\\nQuestion: {item['question']}?\\nAnswer: {item['answer']}\"\n",
123
  " f.write(\n",
 
144
  "task_name = 'arc_challenge'\n",
145
  "data = load_dataset('ai2_arc', 'ARC-Challenge')\n",
146
  "data.shuffle(seed=42)\n",
147
+ "with open(f'../seed_data/{task_name}.jsonl', 'w') as f:\n",
148
  " for i_item, item in enumerate(data['train']):\n",
149
  " i_a = item['choices']['label'].index(item['answerKey'])\n",
150
  " q = item['question']\n",
 
174
  "task_name = 'arc_easy'\n",
175
  "data = load_dataset('ai2_arc', 'ARC-Easy')\n",
176
  "data.shuffle(seed=42)\n",
177
+ "with open(f'../seed_data/{task_name}.jsonl', 'w') as f:\n",
178
  " for i_item, item in enumerate(data['train']):\n",
179
  " i_a = item['choices']['label'].index(item['answerKey'])\n",
180
  " q = item['question']\n",
 
207
  "task_name = 'copa'\n",
208
  "data = load_dataset('super_glue', task_name)\n",
209
  "data.shuffle(seed=42)\n",
210
+ "with open(f'../seed_data/{task_name}.jsonl', 'w') as f:\n",
211
  " for i_item, item in enumerate(data['train']):\n",
212
  " i_a = item['label']\n",
213
  " premise = item['premise']\n",
 
248
  "task_name = 'piqa'\n",
249
  "data = load_dataset(task_name)\n",
250
  "data.shuffle(seed=42)\n",
251
+ "with open(f'../seed_data/{task_name}.jsonl', 'w') as f:\n",
252
  " for i_item, item in enumerate(data['train']):\n",
253
  " i_a = item['label']\n",
254
  " goal = item['goal']\n",
 
282
  "task_name = 'winogrande'\n",
283
  "data = load_dataset(task_name, 'winogrande_xl')\n",
284
  "data.shuffle(seed=42)\n",
285
+ "with open(f'../seed_data/{task_name}.jsonl', 'w') as f:\n",
286
  " for i_item, item in enumerate(data['train']):\n",
287
  " i_a = item['answer']\n",
288
  " sentence = item['sentence']\n",
 
309
  }
310
  ],
311
  "source": [
312
+ "dataset = load_dataset(\"juewang/target-data\", data_files='seed_data/*.jsonl', split='train')"
313
  ]
314
  },
315
  {
notebooks/convert-lm-eval-harness.ipynb CHANGED
@@ -39,7 +39,7 @@
39
  "task_name = 'hellaswag'\n",
40
  "data = load_dataset(task_name)\n",
41
  "data.shuffle(seed=42)\n",
42
- "with open(f'../{task_name}.jsonl', 'w') as f:\n",
43
  " for i_item, item in enumerate(data['train']):\n",
44
  " text = item['ctx'] + item['endings'][int(item['label'])]\n",
45
  " f.write(\n",
@@ -117,7 +117,7 @@
117
  "task_name = 'boolq'\n",
118
  "data = load_dataset(task_name)\n",
119
  "data.shuffle(seed=42)\n",
120
- "with open(f'../{task_name}.jsonl', 'w') as f:\n",
121
  " for i_item, item in enumerate(data['train']):\n",
122
  " text = f\"{item['passage']}\\nQuestion: {item['question']}?\\nAnswer: {item['answer']}\"\n",
123
  " f.write(\n",
@@ -144,7 +144,7 @@
144
  "task_name = 'arc_challenge'\n",
145
  "data = load_dataset('ai2_arc', 'ARC-Challenge')\n",
146
  "data.shuffle(seed=42)\n",
147
- "with open(f'../{task_name}.jsonl', 'w') as f:\n",
148
  " for i_item, item in enumerate(data['train']):\n",
149
  " i_a = item['choices']['label'].index(item['answerKey'])\n",
150
  " q = item['question']\n",
@@ -174,7 +174,7 @@
174
  "task_name = 'arc_easy'\n",
175
  "data = load_dataset('ai2_arc', 'ARC-Easy')\n",
176
  "data.shuffle(seed=42)\n",
177
- "with open(f'../{task_name}.jsonl', 'w') as f:\n",
178
  " for i_item, item in enumerate(data['train']):\n",
179
  " i_a = item['choices']['label'].index(item['answerKey'])\n",
180
  " q = item['question']\n",
@@ -207,7 +207,7 @@
207
  "task_name = 'copa'\n",
208
  "data = load_dataset('super_glue', task_name)\n",
209
  "data.shuffle(seed=42)\n",
210
- "with open(f'../{task_name}.jsonl', 'w') as f:\n",
211
  " for i_item, item in enumerate(data['train']):\n",
212
  " i_a = item['label']\n",
213
  " premise = item['premise']\n",
@@ -248,7 +248,7 @@
248
  "task_name = 'piqa'\n",
249
  "data = load_dataset(task_name)\n",
250
  "data.shuffle(seed=42)\n",
251
- "with open(f'../{task_name}.jsonl', 'w') as f:\n",
252
  " for i_item, item in enumerate(data['train']):\n",
253
  " i_a = item['label']\n",
254
  " goal = item['goal']\n",
@@ -282,7 +282,7 @@
282
  "task_name = 'winogrande'\n",
283
  "data = load_dataset(task_name, 'winogrande_xl')\n",
284
  "data.shuffle(seed=42)\n",
285
- "with open(f'../{task_name}.jsonl', 'w') as f:\n",
286
  " for i_item, item in enumerate(data['train']):\n",
287
  " i_a = item['answer']\n",
288
  " sentence = item['sentence']\n",
@@ -309,7 +309,7 @@
309
  }
310
  ],
311
  "source": [
312
- "dataset = load_dataset(\"juewang/target-data\", data_files='*.jsonl', split='train')"
313
  ]
314
  },
315
  {
 
39
  "task_name = 'hellaswag'\n",
40
  "data = load_dataset(task_name)\n",
41
  "data.shuffle(seed=42)\n",
42
+ "with open(f'../seed_data/{task_name}.jsonl', 'w') as f:\n",
43
  " for i_item, item in enumerate(data['train']):\n",
44
  " text = item['ctx'] + item['endings'][int(item['label'])]\n",
45
  " f.write(\n",
 
117
  "task_name = 'boolq'\n",
118
  "data = load_dataset(task_name)\n",
119
  "data.shuffle(seed=42)\n",
120
+ "with open(f'../seed_data/{task_name}.jsonl', 'w') as f:\n",
121
  " for i_item, item in enumerate(data['train']):\n",
122
  " text = f\"{item['passage']}\\nQuestion: {item['question']}?\\nAnswer: {item['answer']}\"\n",
123
  " f.write(\n",
 
144
  "task_name = 'arc_challenge'\n",
145
  "data = load_dataset('ai2_arc', 'ARC-Challenge')\n",
146
  "data.shuffle(seed=42)\n",
147
+ "with open(f'../seed_data/{task_name}.jsonl', 'w') as f:\n",
148
  " for i_item, item in enumerate(data['train']):\n",
149
  " i_a = item['choices']['label'].index(item['answerKey'])\n",
150
  " q = item['question']\n",
 
174
  "task_name = 'arc_easy'\n",
175
  "data = load_dataset('ai2_arc', 'ARC-Easy')\n",
176
  "data.shuffle(seed=42)\n",
177
+ "with open(f'../seed_data/{task_name}.jsonl', 'w') as f:\n",
178
  " for i_item, item in enumerate(data['train']):\n",
179
  " i_a = item['choices']['label'].index(item['answerKey'])\n",
180
  " q = item['question']\n",
 
207
  "task_name = 'copa'\n",
208
  "data = load_dataset('super_glue', task_name)\n",
209
  "data.shuffle(seed=42)\n",
210
+ "with open(f'../seed_data/{task_name}.jsonl', 'w') as f:\n",
211
  " for i_item, item in enumerate(data['train']):\n",
212
  " i_a = item['label']\n",
213
  " premise = item['premise']\n",
 
248
  "task_name = 'piqa'\n",
249
  "data = load_dataset(task_name)\n",
250
  "data.shuffle(seed=42)\n",
251
+ "with open(f'../seed_data/{task_name}.jsonl', 'w') as f:\n",
252
  " for i_item, item in enumerate(data['train']):\n",
253
  " i_a = item['label']\n",
254
  " goal = item['goal']\n",
 
282
  "task_name = 'winogrande'\n",
283
  "data = load_dataset(task_name, 'winogrande_xl')\n",
284
  "data.shuffle(seed=42)\n",
285
+ "with open(f'../seed_data/{task_name}.jsonl', 'w') as f:\n",
286
  " for i_item, item in enumerate(data['train']):\n",
287
  " i_a = item['answer']\n",
288
  " sentence = item['sentence']\n",
 
309
  }
310
  ],
311
  "source": [
312
+ "dataset = load_dataset(\"juewang/target-data\", data_files='seed_data/*.jsonl', split='train')"
313
  ]
314
  },
315
  {
seed_data/arc_challenge.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d64da34cc4eb901214d1c4317a73579c31d0d82d2cfca7d62ceff07f51e2e9c
3
+ size 240469
seed_data/arc_easy.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:376d29e4f199c6899ec3ea22f34f35e62a4b59dbf722d194b02f151b2dca7293
3
+ size 423646
seed_data/boolq.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a3f37660a3909415cdeb102c2dbe06187d607fe7e49bd6a6129bb6dfa5bdc3a
3
+ size 6332634
seed_data/copa.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:261a8413a7fa5af6b129dee1c435976f4c7086187c5ea3dd5fd14939ee14bb92
3
+ size 40851
seed_data/hellaswag.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2dc59132b7c9ed7513eb428d8bde1f5214c24aaa196a404c05acee36cc5c9838
3
+ size 15630002
seed_data/piqa.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68f23de850360818d0e3733f9fe1540929a9de22e53d25145d831c5932ad520b
3
+ size 3003394
seed_data/winogrande.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac8f07178644445f6421314b899ff976f49c25804d43cd163734cf139f380681
3
+ size 5711173