kleinay committed
Commit
038ae25
1 Parent(s): 3fb6b90

upload qa_discourse.py script

Files changed (1)
  1. qa_discourse.py +146 -0
qa_discourse.py ADDED
@@ -0,0 +1,146 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """A dataset loading script for the QA-Discourse dataset (Pyatkin et al., EMNLP 2020)."""
+ 
+ 
+ from pathlib import Path
+ from typing import List
+ 
+ import datasets
+ import pandas as pd
+ 
+ 
+ _CITATION = """\
+ @inproceedings{pyatkin2020qadiscourse,
+     title={QADiscourse - Discourse Relations as QA Pairs: Representation, Crowdsourcing and Baselines},
+     author={Pyatkin, Valentina and Klein, Ayal and Tsarfaty, Reut and Dagan, Ido},
+     booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
+     pages={2804--2819},
+     year={2020}
+ }"""
32
+
33
+
34
+ _DESCRIPTION = """\
35
+ The dataset contains question-answer pairs to model discourse relations.
36
+ While answers roughly correspond to spans of the sentence, these spans could have been freely adjusted by annotators to grammaticaly fit the question;
37
+ Therefore, answers are given just as text and not as identified spans of the original sentence.
38
+ See the paper for details: QADiscourse - Discourse Relations as QA Pairs: Representation, Crowdsourcing and Baselines, Pyatkin et. al., 2020
39
+ """
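+ 
+ # Illustrative example (hypothetical, not drawn from the dataset files): for the sentence
+ # "The event was cancelled because of the storm.", a QA pair expressing the causal
+ # discourse relation could be:
+ #     question: "Why was the event cancelled ?"
+ #     answer: "because of the storm"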
+ 
+ _HOMEPAGE = "https://github.com/ValentinaPy/QADiscourse"
+ 
+ _LICENSE = """Resources on this page are licensed CC-BY 4.0, a Creative Commons license requiring Attribution (https://creativecommons.org/licenses/by/4.0/)."""
+ 
+ 
+ _URLs = {
+     "wikinews.train": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_train.tsv",
+     "wikinews.dev": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_dev.tsv",
+     "wikinews.test": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_test.tsv",
+     "wikipedia.train": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_train.tsv",
+     "wikipedia.dev": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_dev.tsv",
+     "wikipedia.test": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_test.tsv",
+ }
+ 
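+ # Each URL above points to a TSV file holding one QA annotation per row; rows are read
+ # and yielded as examples in _generate_examples below.
+ 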
+ # Note: the name of the dataset class usually matches the script name, with CamelCase instead of snake_case.
+ class QaDiscourse(datasets.GeneratorBasedBuilder):
+     """QA-Discourse: Discourse Relations as Question-Answer Pairs."""
+ 
+     VERSION = datasets.Version("1.0.0")
+ 
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="plain_text", version=VERSION, description="This provides the QA-Discourse dataset"
+         ),
+     ]
+ 
+     DEFAULT_CONFIG_NAME = (
+         "plain_text"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+     )
+ 
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "sentence": datasets.Value("string"),
+                 "sent_id": datasets.Value("string"),
+                 "question": datasets.Sequence(datasets.Value("string")),
+                 "answers": datasets.Sequence(datasets.Value("string")),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+ 
+     def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
+         """Returns SplitGenerators."""
+ 
+         # Download and prepare all files - keep same structure as _URLs
+         corpora = {section: Path(dl_manager.download_and_extract(_URLs[section]))
+                    for section in _URLs}
+ 
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepaths": [corpora["wikinews.train"],
+                                   corpora["wikipedia.train"]],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepaths": [corpora["wikinews.dev"],
+                                   corpora["wikipedia.dev"]],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepaths": [corpora["wikinews.test"],
+                                   corpora["wikipedia.test"]],
+                 },
+             ),
+         ]
+ 
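+     # The downloaded TSV files are expected to provide at least the columns used below:
+     # "qasrl_id", "sentence", "question_start", "question_aux", "question_body", "answer".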
+     def _generate_examples(self, filepaths: List[str]):
+         """Yields QA-Discourse examples from TSV files."""
+ 
+         # merge annotations from all sections
+         df = pd.concat([pd.read_csv(fn, sep='\t') for fn in filepaths]).reset_index()
+         for counter, row in df.iterrows():
+             # Prepare the question as 3 "slots" plus a question mark
+             question = [row.question_start, row.question_aux, row.question_body.rstrip('?'), '?']
+ 
+             yield counter, {
+                 "sentence": row.sentence,
+                 "sent_id": row.qasrl_id,
+                 "question": question,
+                 "answers": [row.answer],
+             }
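
Once this script is in the dataset repository, the data can be loaded with the datasets library. A minimal usage sketch; the repository id "kleinay/qa_discourse" is an assumption based on the committer's namespace and may differ:

from datasets import load_dataset

# Repo id is assumed from the committer's namespace; recent versions of the
# datasets library may additionally require trust_remote_code=True for
# script-based datasets.
dataset = load_dataset("kleinay/qa_discourse")

example = dataset["train"][0]
print(example["sentence"])
print(" ".join(example["question"]))  # the 3 question "slots" followed by "?"
print(example["answers"])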