Tasks: Text Generation
Modalities: Text
Sub-tasks: language-modeling
Languages: English
Size: 10K - 100K
Tags: question-generation
""" python -c "from datasets import load_dataset;load_dataset('.')" """ | |
import json
from itertools import chain

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """[SQuAD Shifts](https://modestyachts.github.io/squadshifts-website/index.html) dataset for the question generation (QG) task."""
_URL = 'https://huggingface.co/datasets/asahi417/qg_squadshift/raw/main/data/processed'
# Remote JSONL shards for each split and domain; larger domains are chunked
# into consecutively numbered files (00, 01, ...).
_FILES = {
    str(datasets.Split.TEST): {
        'new_wiki': [f'{_URL}/new_wiki.test{i:02d}.jsonl' for i in range(3)],
        'nyt': [f'{_URL}/nyt.test{i:02d}.jsonl' for i in range(4)],
        'reddit': [f'{_URL}/reddit.test{i:02d}.jsonl' for i in range(4)],
        'amazon': [f'{_URL}/amazon.test{i:02d}.jsonl' for i in range(4)]
    },
    str(datasets.Split.TRAIN): {
        'new_wiki': [f'{_URL}/new_wiki.train{i:02d}.jsonl' for i in range(2)],
        'nyt': [f'{_URL}/nyt.train{i:02d}.jsonl' for i in range(3)],
        'reddit': [f'{_URL}/reddit.train{i:02d}.jsonl' for i in range(3)],
        'amazon': [f'{_URL}/amazon.train{i:02d}.jsonl' for i in range(3)]
    },
    str(datasets.Split.VALIDATION): {
        'new_wiki': [f'{_URL}/new_wiki.validation{i:02d}.jsonl' for i in range(2)],
        'nyt': [f'{_URL}/nyt.validation{i:02d}.jsonl' for i in range(3)],
        'reddit': [f'{_URL}/reddit.validation{i:02d}.jsonl' for i in range(3)],
        'amazon': [f'{_URL}/amazon.validation{i:02d}.jsonl' for i in range(3)]
    },
}
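
# Note: str(datasets.Split.TEST) is simply 'test', so the keys of _FILES are
# the plain split names; e.g.
#   _FILES['test']['new_wiki'][0] == f'{_URL}/new_wiki.test00.jsonl'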

# Domain names, taken from the first split's keys (identical across splits).
_DOMAIN = list(next(iter(_FILES.values())).keys())


class QGSQuADShiftsConfig(datasets.BuilderConfig):
    """BuilderConfig for QGSQuADShifts."""

    def __init__(self, **kwargs):
        """BuilderConfig for QGSQuADShifts.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class QGSQuADShifts(datasets.GeneratorBasedBuilder):

    # One config per domain, plus a 'default' config covering all domains.
    BUILDER_CONFIGS = [QGSQuADShiftsConfig(name="default", description="All domains.")]
    BUILDER_CONFIGS += [QGSQuADShiftsConfig(name=i, description=f"Domain {i}.") for i in sorted(_DOMAIN)]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "answer": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    "paragraph": datasets.Value("string"),
                    "sentence_answer": datasets.Value("string"),
                    "paragraph_answer": datasets.Value("string"),
                    "paragraph_sentence": datasets.Value("string"),
                    "paragraph_id": datasets.Value("string")
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/asahi417/lm-question-generation"
        )
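
    # Illustrative record (field values are hypothetical, shown only to make
    # the schema concrete; the <hl> highlight convention is an assumption based
    # on the companion lm-question-generation repository):
    #   {"answer": "Denver Broncos",
    #    "question": "Which team won?",
    #    "sentence": "The Denver Broncos won.",
    #    "sentence_answer": "The <hl> Denver Broncos <hl> won.",
    #    ...}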
    def _split_generators(self, dl_manager):
        if self.config.name == 'default':
            # Merge the file lists of every domain into one list per split.
            downloaded_file = dl_manager.download_and_extract(
                {k: list(chain(*v.values())) for k, v in _FILES.items()})
        else:
            downloaded_file = dl_manager.download_and_extract(
                {k: v[self.config.name] for k, v in _FILES.items()})
        return [datasets.SplitGenerator(name=k, gen_kwargs={"filepaths": downloaded_file[k]}) for k in _FILES]
    def _generate_examples(self, filepaths):
        _key = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            with open(filepath, encoding="utf-8") as f:
                # One JSON object per line; skip blank lines (e.g. the trailing
                # newline at the end of the file).
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    yield _key, json.loads(line)
                    _key += 1
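

# Minimal local smoke test, a sketch assuming this script sits at the root of
# the dataset repository so that '.' resolves to it (mirrors the one-liner at
# the top of the file):
if __name__ == '__main__':
    from datasets import load_dataset

    dataset = load_dataset('.', 'new_wiki')
    print({split: dataset[split].num_rows for split in dataset})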