Dataset: lmqg/qg_squad
Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Source Datasets: squad
Tags: question-generation
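Below is the dataset's loading script. As a minimal usage sketch (assuming the `datasets` library is installed and the Hugging Face Hub is reachable; recent `datasets` releases may additionally require `trust_remote_code=True` for script-based datasets), the dataset can be loaded by its Hub name:

from datasets import load_dataset

# Downloads the sharded JSONL files and builds the train/validation/test splits.
dataset = load_dataset("lmqg/qg_squad")
print(dataset)
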
import json
import datasets

logger = datasets.logging.get_logger(__name__)
_VERSION = "5.0.1"
_NAME = "qg_squad"
_CITATION = """
@inproceedings{ushio-etal-2022-generative,
    title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration",
    author = "Ushio, Asahi  and
        Alva-Manchego, Fernando  and
        Camacho-Collados, Jose",
    booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates",
    publisher = "Association for Computational Linguistics",
}
"""
_DESCRIPTION = """
[SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) evaluation set for the question generation (QG) models. The split 
of test and development set follows the ["Neural Question Generation"](https://arxiv.org/abs/1705.00106) work and is 
compatible with the [leader board](https://paperswithcode.com/sota/question-generation-on-squad11).
"""
_URL = 'https://huggingface.co/datasets/lmqg/qg_squad/resolve/main/data/processed'
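# Each split is sharded into zero-padded JSONL files under data/processed
# (train00.jsonl ... train22.jsonl; four shards each for test and dev).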
_URLS = {
    'train': ['{}/train{:02d}.jsonl'.format(_URL, i) for i in range(23)],
    'test': ['{}/test{:02d}.jsonl'.format(_URL, i) for i in range(4)],
    'validation': ['{}/dev{:02d}.jsonl'.format(_URL, i) for i in range(4)]
}


class QGSquadConfig(datasets.BuilderConfig):
    """BuilderConfig for SquadQG"""

    def __init__(self, **kwargs):
        """BuilderConfig for SquadQG.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(QGSquadConfig, self).__init__(**kwargs)


class QGSquad(datasets.GeneratorBasedBuilder):
    """Dataset builder for the lmqg/qg_squad question generation dataset."""

    BUILDER_CONFIGS = [
        QGSquadConfig(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION),
    ]
    
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "answer": datasets.Value("string"), "paragraph_question": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    "paragraph": datasets.Value("string"),
                    "sentence_answer": datasets.Value("string"),
                    "paragraph_answer": datasets.Value("string"),
                    "paragraph_sentence": datasets.Value("string")
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/asahi417/lm-question-generation",
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
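        # Download (and cache) every shard of each split via the datasets
        # download manager; returns a dict mapping split name -> local paths.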
        downloaded_file = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_file["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": downloaded_file["validation"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": downloaded_file["test"]}),
        ]

    def _generate_examples(self, filepaths):
        _key = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            with open(filepath, encoding="utf-8") as f:
                # Each shard is JSONL: one JSON object per line. Skip blank
                # lines (e.g. the trailing newline at the end of a file).
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    yield _key, json.loads(line)
                    _key += 1
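
Each record exposes the eight string fields declared in `_info`. A short, self-contained sketch of inspecting one validation example (assuming the dataset loads as shown earlier; output previews are truncated for display):

from datasets import load_dataset

dataset = load_dataset("lmqg/qg_squad")
example = dataset["validation"][0]

# Print a short preview of every declared feature.
for field in ("answer", "paragraph_question", "question", "sentence",
              "paragraph", "sentence_answer", "paragraph_answer", "paragraph_sentence"):
    print(f"{field}: {example[field][:80]}")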