Datasets:
Tasks: Question Answering
Modalities: Text
Sub-tasks: extractive-qa
Languages: English
Size: 10K - 100K
ArXiv:
License:
"""Export the lmqg/qg_squad dataset to SQuAD-style JSONL files.

Downloads every split of ``lmqg/qg_squad`` from the HuggingFace hub and
writes one ``datasets/<split>.jsonl`` file per split, where each line is a
SQuAD-format record: id, title, context, question, and answers
(text + character offset of the answer inside the context).
"""
import json
import os

from datasets import load_dataset  # third-party: HuggingFace `datasets`

os.makedirs('datasets', exist_ok=True)

data = load_dataset("lmqg/qg_squad")

# Record ids are globally unique across splits (counter is never reset).
_id = 0
for _split in data:
    output = []
    for d in data[_split]:
        answer = d['answer']
        paragraph = d['paragraph']
        output.append({
            "id": str(_id),
            "title": "None",
            "context": paragraph,
            "question": d['question'],
            "answers": {
                "text": [answer],
                # Offset of the FIRST occurrence of the answer in the
                # paragraph. Raises ValueError if the answer is not a
                # substring — presumably qg_squad guarantees it is; confirm
                # upstream before relying on this for other datasets.
                "answer_start": [paragraph.index(answer)]
            }
        })
        _id += 1
    # Explicit UTF-8: SQuAD contexts contain non-ASCII text, so relying on
    # the platform default encoding could corrupt or crash the export.
    with open(f'datasets/{_split}.jsonl', 'w', encoding='utf-8') as f:
        f.write('\n'.join(json.dumps(record) for record in output))