Datasets: lmqg / qg_squadshifts
Modalities: Text
Languages: English
Libraries: Datasets
File size: 4,806 Bytes
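A minimal usage sketch, assuming the dataset is published on the Hub as lmqg/qg_squadshifts (the repo id implied by _URL in the loading script below); valid config names are "all" plus the four domains:

from datasets import load_dataset

# Domain configs defined by the script: "all", "amazon", "new_wiki", "nyt", "reddit".
dataset = load_dataset("lmqg/qg_squadshifts", "new_wiki")
print(dataset["test"][0]["question"])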
""" python -c "from datasets import load_dataset;load_dataset('.')" """
import json
from itertools import chain
import datasets

logger = datasets.logging.get_logger(__name__)
_VERSION = "1.0.0"
_CITATION = """
TBA
"""
_DESCRIPTION = """[SQuAD Shifts](https://modestyachts.github.io/squadshifts-website/index.html) dataset for question generation (QG) task."""
_URL = 'https://huggingface.co/datasets/lmqg/qg_squadshifts/raw/main/data/processed'
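# Each split is sharded into several JSONL files; the two-digit suffix in each
# file name is the shard index.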
_FILES = {
    str(datasets.Split.TEST): {
            'new_wiki': [f'{_URL}/new_wiki.test{i:02d}.jsonl' for i in range(3)],
            'nyt': [f'{_URL}/nyt.test{i:02d}.jsonl' for i in range(4)],
            'reddit': [f'{_URL}/reddit.test{i:02d}.jsonl' for i in range(4)],
            'amazon': [f'{_URL}/amazon.test{i:02d}.jsonl' for i in range(4)]
        },
    str(datasets.Split.TRAIN): {
            'new_wiki': [f'{_URL}/new_wiki.train{i:02d}.jsonl' for i in range(2)],
            'nyt': [f'{_URL}/nyt.train{i:02d}.jsonl' for i in range(3)],
            'reddit': [f'{_URL}/reddit.train{i:02d}.jsonl' for i in range(3)],
            'amazon': [f'{_URL}/amazon.train{i:02d}.jsonl' for i in range(3)]
        },
    str(datasets.Split.VALIDATION): {
            'new_wiki': [f'{_URL}/new_wiki.validation{i:02d}.jsonl' for i in range(1)],
            'nyt': [f'{_URL}/nyt.validation{i:02d}.jsonl' for i in range(2)],
            'reddit': [f'{_URL}/reddit.validation{i:02d}.jsonl' for i in range(2)],
            'amazon': [f'{_URL}/amazon.validation{i:02d}.jsonl' for i in range(2)]
        },
}
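# Domain names ("amazon", "new_wiki", "nyt", "reddit") are derived from the file map above.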
_DOMAIN = list(next(iter(_FILES.values())).keys())


class QGSQuADShiftsConfig(datasets.BuilderConfig):
    """BuilderConfig for QGSQuADShifts."""

    def __init__(self, **kwargs):
        """BuilderConfig for QGSQuADShifts.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class QGSQuADShifts(datasets.GeneratorBasedBuilder):
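    """SQuAD Shifts question generation dataset: one config per domain plus "all"."""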

    BUILDER_CONFIGS = [QGSQuADShiftsConfig(name="all", version=datasets.Version(_VERSION), description="All domains combined.")]
    BUILDER_CONFIGS += [QGSQuADShiftsConfig(name=i, version=datasets.Version(_VERSION), description=f"Domain {i}.") for i in sorted(_DOMAIN)]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "answer": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    "paragraph": datasets.Value("string"),
                    "sentence_answer": datasets.Value("string"),
                    "paragraph_answer": datasets.Value("string"),
                    "paragraph_sentence": datasets.Value("string"),
                    "paragraph_id": datasets.Value("string")
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
            homepage="https://github.com/asahi417/lm-question-generation"
        )

    def _split_generators(self, dl_manager):
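        # For "all", flatten every domain's shard list per split; otherwise use one domain's shards.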
        if self.config.name == 'all':
            downloaded_file = dl_manager.download_and_extract({k: list(chain(*v.values())) for k, v in _FILES.items()})
        else:
            downloaded_file = dl_manager.download_and_extract({k: v[self.config.name] for k, v in _FILES.items()})
        return [datasets.SplitGenerator(name=k, gen_kwargs={"filepaths": downloaded_file[k]}) for k in _FILES.keys()]

    def _generate_examples(self, filepaths):
        _key = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            with open(filepath, encoding="utf-8") as f:
                # Stream the JSONL file line by line, skipping blank lines.
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    yield _key, json.loads(line)
                    _key += 1
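
Once the script and data are in place, a quick sanity check might look like this (a sketch; split sizes vary by domain):

from datasets import load_dataset

# "all" chains the four domains together within each split.
data = load_dataset("lmqg/qg_squadshifts", "all")
for split_name, split in data.items():
    print(split_name, split.num_rows, split.column_names)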