kleinay committed
Commit
5b94fa5
1 Parent(s): ef7e1f4

first commit

Files changed (1)
  1. qa_srl.py +147 -0
qa_srl.py ADDED
@@ -0,0 +1,147 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Dataset loading script for the Large-Scale QA-SRL training set (FitzGerald et al., ACL 2018), along with the QASRL-GS evaluation dataset (Roit et al., ACL 2020)."""
+
+
+ import datasets
+ from pathlib import Path
+ import gzip
+ import json
+
+
+ _CITATION = """\
+ @inproceedings{fitzgerald2018large,
+     title={Large-Scale QA-SRL Parsing},
+     author={FitzGerald, Nicholas and Michael, Julian and He, Luheng and Zettlemoyer, Luke},
+     booktitle={Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
+     pages={2051--2060},
+     year={2018}
+ }
+ @inproceedings{roit2020controlled,
+     title={Controlled Crowdsourcing for High-Quality QA-SRL Annotation},
+     author={Roit, Paul and Klein, Ayal and Stepanov, Daniela and Mamou, Jonathan and Michael, Julian and Stanovsky, Gabriel and Zettlemoyer, Luke and Dagan, Ido},
+     booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
+     pages={7008--7013},
+     year={2020}
+ }
+ """
+
+
+ _DESCRIPTION = """\
+ The dataset contains question-answer pairs that model verbal predicate-argument structure.
+ The questions start with wh-words (Who, What, Where, When, etc.) and contain a verb predicate from the sentence; the answers are phrases in the sentence.
+ This dataset loads the train split from "QASRL Bank", a.k.a. "QASRL-v2" or "QASRL-LS" (Large Scale),
+ which was constructed via crowdsourcing and presented in (FitzGerald et al., ACL 2018),
+ and the dev and test splits from QASRL-GS (Gold Standard), introduced in (Roit et al., ACL 2020).
+ """
+
+ _HOMEPAGE = "https://qasrl.org"
+
+ # TODO: Add the license for the dataset here if you can find it
+ _LICENSE = ""
+
+ # An answer span is a pair of [start, end] token indices within the sentence.
+ SpanFeatureType = datasets.Sequence(datasets.Value("int32"), length=2)
+
+ # Note: the name of the dataset class usually matches the script name, with CamelCase instead of snake_case.
+ class QaSrl(datasets.GeneratorBasedBuilder):
+     """QA-SRL: Question-Answer Driven Semantic Role Labeling corpus."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="plain_text", version=VERSION, description=""
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = (
+         "plain_text"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+     )
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "sentence": datasets.Value("string"),
+                 "sent_id": datasets.Value("string"),
+                 "predicate_idx": datasets.Value("int32"),
+                 "predicate": datasets.Value("string"),
+                 "is_verbal": datasets.Value("bool"),
+                 "verb_form": datasets.Value("string"),
+                 "question": datasets.Sequence(datasets.Value("string")),
+                 "answers": datasets.Sequence(datasets.Value("string")),
+                 "answer_ranges": datasets.Sequence(SpanFeatureType),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types.
+             features=features,
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation.
+             homepage=_HOMEPAGE,
+             # License for the dataset, if available.
+             license=_LICENSE,
+             # Citation for the dataset.
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
+         """Returns SplitGenerators."""
+
+         # Older version of the corpus (iterated over the corpus tar file; had some format errors):
+         # corpus_base_path = Path(dl_manager.download_and_extract(_URLs["qasrl_v2.0"]))
+         # corpus_orig = corpus_base_path / "qasrl-v2" / "orig"
+
+         # Delegate loading to the underlying dataset repositories:
+         # QASRL-v2 (2018) provides the train split; QASRL-GS (2020) provides dev and test.
+         self.qasrl2018 = datasets.load_dataset("biu-nlp/qa_srl2018")
+         self.qasrl2020 = datasets.load_dataset("biu-nlp/qa_srl2020")
+
+         # TODO: add an optional kwarg for genre (wikinews)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "dataset": self.qasrl2018["train"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "dataset": self.qasrl2020["validation"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "dataset": self.qasrl2020["test"],
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, dataset):
+         """Yields examples from the pre-loaded underlying dataset."""
+         for idx, instance in enumerate(dataset):
+             yield idx, instance
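
For reference, a minimal usage sketch of the script added by this commit. The repo id "biu-nlp/qa_srl" is an assumption inferred from the script name and the companion repositories it loads (biu-nlp/qa_srl2018, biu-nlp/qa_srl2020); the field names come from the features declared in _info above.

import datasets

# Load all three splits; train comes from QASRL-v2 (2018),
# validation and test from QASRL-GS (2020).
qasrl = datasets.load_dataset("biu-nlp/qa_srl")  # repo id is an assumption

example = qasrl["train"][0]
print(example["sentence"])       # the raw sentence text
print(example["predicate"])      # the verbal predicate this QA pair is about
print(example["question"])       # the question, as a sequence of slot strings
print(example["answers"])        # answer phrases from the sentence
print(example["answer_ranges"])  # [start, end] token spans, one per answer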