lintang committed on
Commit 43edbf4
1 Parent(s): 5e22f7a

Update coqa.py

Files changed (1):
  1. coqa.py +222 -155
coqa.py CHANGED
@@ -1,22 +1,30 @@
 """
-CoQA: A Conversational Question Answering Challenge
-https://arxiv.org/pdf/1808.07042.pdf

-CoQA is a large-scale dataset for building Conversational Question Answering
-systems. The goal of the CoQA challenge is to measure the ability of machines to
-understand a text passage and answer a series of interconnected questions that
-appear in a conversation.

-Homepage: https://stanfordnlp.github.io/coqa/
-"""
-import inspect
-import transformers.data.metrics.squad_metrics as squad_metrics
-import lm_eval.datasets.coqa.coqa
-from lm_eval.base import Task, rf, mean
-from itertools import zip_longest


-_CITATION = """
 @misc{reddy2018coqa,
     title={CoQA: A Conversational Question Answering Challenge},
     author={Siva Reddy and Danqi Chen and Christopher D. Manning},
@@ -27,152 +35,211 @@ _CITATION = """
 }
 """


-class CoQA(Task):
-    VERSION = 1
-    DATASET_PATH = inspect.getfile(lm_eval.datasets.coqa.coqa)
-    DATASET_NAME = None
-
-    def has_training_docs(self):
-        return True
-
-    def has_validation_docs(self):
-        return True
-
-    def has_test_docs(self):
-        return False
-
-    def training_docs(self):
-        return self.dataset["train"]
-
-    def validation_docs(self):
-        return self.dataset["validation"]
-
-    def test_docs(self):
-        pass
-
-    def doc_to_text(self, doc):
-        # Given a passage p, the conversation history {q1, a1, . . . qi−1, ai−1}
-        # and a question qi, the task is to predict the answer ai
-        doc_text = doc["story"] + "\n\n"
-        for (q, a) in zip_longest(
-            doc["questions"]["input_text"], doc["answers"]["input_text"][:-1]
-        ):  # omit target answer ai
-            question = f"Q: {q}\n\n"
-            answer = f"A: {a}\n\n" if a is not None else "A:"
-            doc_text += question + answer
-        return doc_text
-
-    def should_decontaminate(self):
-        return True

-    def doc_to_decontamination_query(self, doc):
-        return doc["story"] + " " + "\n".join(doc["questions"]["input_text"])

-    @classmethod
-    def get_answers(cls, doc, turn_id):
-        # Returns unique answers and valid alternatives (Some questions in CoQA have multiple valid answers).
-        answers = []
-        answer_forturn = doc["answers"]["input_text"][turn_id - 1]
-        answers.append(answer_forturn)

-        additional_answers = doc.get("additional_answers")
-        if additional_answers:
-            for key in additional_answers:
-                additional_answer_for_turn = additional_answers[key]["input_text"][
-                    turn_id - 1
-                ]
-                if additional_answer_for_turn.lower() not in map(str.lower, answers):
-                    answers.append(additional_answer_for_turn)
-        return answers
-
-    @classmethod
-    def get_answer_choice(self, raw_text):
-        # Function maps answers to CoQA answer categories
-        # ~ 1/5 of the CoQA answers are Yes/No
-        # ~ 2/3 of the CoQA answers are span-based
-        # (answers overlap with the passage ignoring punctuation and case mismatch)
-        if raw_text == "unknown":
-            return "0"
-        if squad_metrics.normalize_answer(raw_text) == "yes":
-            return "1"
-        if squad_metrics.normalize_answer(raw_text) == "no":
-            return "2"
-        return "3"  # Not a yes/no question
-
-    @staticmethod
-    def compute_scores(gold_list, pred):
-        # tests for exact match and on the normalised answer (compute_exact)
-        # test for overlap (compute_f1)
-        f1_sum = 0.0
-        em_sum = 0.0
-        if len(gold_list) > 1:
-            for i in range(len(gold_list)):
-                gold_answers = gold_list[0:i] + gold_list[i + 1 :]
-                # predictions compared against (n) golds and take maximum
-                em_sum += max(
-                    squad_metrics.compute_exact(a, pred) for a in gold_answers
-                )
-                f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_answers)
-        else:
-            em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list)
-            f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_list)
-
-        return {
-            "em": em_sum / max(1, len(gold_list)),
-            "f1": f1_sum / max(1, len(gold_list)),
         }
-
-    def doc_to_target(self, doc, turnid=None):
-        # Default to prediction of last turn.
-        if turnid is None:
-            turnid = len(doc["questions"]["input_text"])
-        raw_text = doc["answers"]["input_text"][turnid - 1]
-        return " " + raw_text
-
-    def construct_requests(self, doc, ctx):
-        """Uses RequestFactory to construct Requests and returns an iterable of
-        Requests which will be sent to the LM.
-
-        :param doc:
-            The document as returned from training_docs, validation_docs, or test_docs.
-        :param ctx: str
-            The context string, generated by fewshot_context. This includes the natural
-            language description, as well as the few shot examples, and the question
-            part of the document for `doc`.
-        """
-        cont_request = rf.greedy_until(ctx, {"until": ["\nQ:"]})
-        return cont_request
-
-    def process_results(self, doc, results):
-        """Take a single document and the LM results and evaluates, returning a
-        dict where keys are the names of submetrics and values are the values of
-        the metric for that one document
-
-        :param doc:
-            The document as returned from training_docs, validation_docs, or test_docs.
-        :param results:
-            The results of the requests created in construct_requests.
-        """
-        turn_id = len(doc["questions"]["input_text"])
-        gold_list = self.get_answers(doc, turn_id)
-        pred = results[0].strip().split("\n")[0]
-
-        scores = self.compute_scores(gold_list, pred)
-
-        return {
-            "f1": scores["f1"],
-            "em": scores["em"],
         }
-
-    def higher_is_better(self):
-        return {
-            "f1": True,
-            "em": True,
         }

-    def aggregation(self):
-        return {
-            "f1": mean,
-            "em": mean,
-        }
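
For reference, the removed compute_scores takes a leave-one-out maximum when a turn has more than one gold answer: each gold is held out in turn, the prediction is scored against the remaining golds, and the per-gold maxima are averaged. A minimal standalone sketch of that aggregation (the example strings are invented; it assumes transformers is installed so that squad_metrics is available):

import transformers.data.metrics.squad_metrics as squad_metrics

def leave_one_out_scores(gold_list, pred):
    # Hold out gold_list[i], score pred against the rest, then average the per-gold maxima.
    em_sum, f1_sum = 0.0, 0.0
    if len(gold_list) > 1:
        for i in range(len(gold_list)):
            rest = gold_list[:i] + gold_list[i + 1 :]
            em_sum += max(squad_metrics.compute_exact(g, pred) for g in rest)
            f1_sum += max(squad_metrics.compute_f1(g, pred) for g in rest)
    else:
        em_sum += max(squad_metrics.compute_exact(g, pred) for g in gold_list)
        f1_sum += max(squad_metrics.compute_f1(g, pred) for g in gold_list)
    return {"em": em_sum / max(1, len(gold_list)), "f1": f1_sum / max(1, len(gold_list))}

# Invented example: three gold variants for one turn, one model prediction.
print(leave_one_out_scores(["the white house", "white house", "in the white house"], "white house"))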
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""CoQA dataset.
+
+This `CoQA` adds the "additional_answers" feature that's missing in the original
+datasets version:
+https://github.com/huggingface/datasets/blob/master/datasets/coqa/coqa.py
 """


+import json
+
+import datasets


+_CITATION = """\
 @misc{reddy2018coqa,
     title={CoQA: A Conversational Question Answering Challenge},
     author={Siva Reddy and Danqi Chen and Christopher D. Manning},
 }
 """

+_DESCRIPTION = """\
+CoQA is a large-scale dataset for building Conversational Question Answering
+systems. The goal of the CoQA challenge is to measure the ability of machines to
+understand a text passage and answer a series of interconnected questions that
+appear in a conversation.
+"""

+_HOMEPAGE = "https://stanfordnlp.github.io/coqa/"

+# TODO: Add the licence for the dataset here if you can find it
+_LICENSE = ""

+_URLS = {
+    "train": "https://nlp.stanford.edu/data/coqa/coqa-train-v1.0.json",
+    "validation": "https://nlp.stanford.edu/data/coqa/coqa-dev-v1.0.json",
+}

+# `additional_answers` are not available in the train set so we fill them with
+# empty dicts of the same form.
+_EMPTY_ADDITIONAL_ANSWER = {
+    "0": [
+        {
+            "span_start": -1,
+            "span_end": -1,
+            "span_text": "",
+            "input_text": "",
+            "turn_id": -1,
         }
+    ],
+    "1": [
+        {
+            "span_start": -1,
+            "span_end": -1,
+            "span_text": "",
+            "input_text": "",
+            "turn_id": -1,
         }
+    ],
+    "2": [
+        {
+            "span_start": -1,
+            "span_end": -1,
+            "span_text": "",
+            "input_text": "",
+            "turn_id": -1,
         }
+    ],
+}

+
+class Coqa(datasets.GeneratorBasedBuilder):
+    """CoQA is a large-scale dataset for building Conversational Question Answering systems."""
+
+    VERSION = datasets.Version("0.0.1")
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name="coqa", version=VERSION, description="The CoQA dataset."
+        ),
+    ]
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "id": datasets.Value("string"),
+                "source": datasets.Value("string"),
+                "story": datasets.Value("string"),
+                "questions": datasets.features.Sequence(
+                    {
+                        "input_text": datasets.Value("string"),
+                        "turn_id": datasets.Value("int32"),
+                    }
+                ),
+                "answers": datasets.features.Sequence(
+                    {
+                        "span_start": datasets.Value("int32"),
+                        "span_end": datasets.Value("int32"),
+                        "span_text": datasets.Value("string"),
+                        "input_text": datasets.Value("string"),
+                        "turn_id": datasets.Value("int32"),
+                    }
+                ),
+                "additional_answers": {
+                    "0": datasets.features.Sequence(
+                        {
+                            "span_start": datasets.Value("int32"),
+                            "span_end": datasets.Value("int32"),
+                            "span_text": datasets.Value("string"),
+                            "input_text": datasets.Value("string"),
+                            "turn_id": datasets.Value("int32"),
+                        }
+                    ),
+                    "1": datasets.features.Sequence(
+                        {
+                            "span_start": datasets.Value("int32"),
+                            "span_end": datasets.Value("int32"),
+                            "span_text": datasets.Value("string"),
+                            "input_text": datasets.Value("string"),
+                            "turn_id": datasets.Value("int32"),
+                        }
+                    ),
+                    "2": datasets.features.Sequence(
+                        {
+                            "span_start": datasets.Value("int32"),
+                            "span_end": datasets.Value("int32"),
+                            "span_text": datasets.Value("string"),
+                            "input_text": datasets.Value("string"),
+                            "turn_id": datasets.Value("int32"),
+                        }
+                    ),
+                },
+            }
+        )
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        urls = {"train": _URLS["train"], "validation": _URLS["validation"]}
+        data_dirs = dl_manager.download_and_extract(urls)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": data_dirs["train"],
+                    "split": datasets.Split.TRAIN,
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": data_dirs["validation"],
+                    "split": datasets.Split.VALIDATION,
+                },
+            ),
+        ]
+
+    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    def _generate_examples(self, filepath, split):
+        with open(filepath, encoding="utf-8") as f:
+            data = json.load(f)
+            for row in data["data"]:
+                id = row["id"]
+                source = row["source"]
+                story = row["story"]
+                questions = [
+                    {"input_text": q["input_text"], "turn_id": q["turn_id"]}
+                    for q in row["questions"]
+                ]
+                answers = [
+                    {
+                        "span_start": a["span_start"],
+                        "span_end": a["span_end"],
+                        "span_text": a["span_text"],
+                        "input_text": a["input_text"],
+                        "turn_id": a["turn_id"],
+                    }
+                    for a in row["answers"]
+                ]
+                if split == datasets.Split.TRAIN:
+                    additional_answers = _EMPTY_ADDITIONAL_ANSWER
+                else:
+                    additional_answers = {
+                        "0": [
+                            {
+                                "span_start": a0["span_start"],
+                                "span_end": a0["span_end"],
+                                "span_text": a0["span_text"],
+                                "input_text": a0["input_text"],
+                                "turn_id": a0["turn_id"],
+                            }
+                            for a0 in row["additional_answers"]["0"]
+                        ],
+                        "1": [
+                            {
+                                "span_start": a1["span_start"],
+                                "span_end": a1["span_end"],
+                                "span_text": a1["span_text"],
+                                "input_text": a1["input_text"],
+                                "turn_id": a1["turn_id"],
+                            }
+                            for a1 in row["additional_answers"]["1"]
+                        ],
+                        "2": [
+                            {
+                                "span_start": a2["span_start"],
+                                "span_end": a2["span_end"],
+                                "span_text": a2["span_text"],
+                                "input_text": a2["input_text"],
+                                "turn_id": a2["turn_id"],
+                            }
+                            for a2 in row["additional_answers"]["2"]
+                        ],
+                    }
+                yield row["id"], {
+                    "id": id,
+                    "story": story,
+                    "source": source,
+                    "questions": questions,
+                    "answers": answers,
+                    "additional_answers": additional_answers,
+                }
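
Once this script is in place, it can be loaded like any local datasets builder. A hedged usage sketch, not part of the commit: the local path "coqa.py" and the trust_remote_code note are assumptions about how the script is used, and the field access follows the features declared above.

import datasets

# Assumes the script shown above is saved locally as "coqa.py"; recent `datasets`
# releases may also require trust_remote_code=True for script-based builders.
coqa = datasets.load_dataset("coqa.py")

example = coqa["validation"][0]
print(example["questions"]["input_text"][0])  # first question of the conversation
print(example["answers"]["input_text"][0])    # its gold answer
# Three alternative annotation sets ("0", "1", "2"); on the train split these are
# the _EMPTY_ADDITIONAL_ANSWER placeholders defined in the script.
print(example["additional_answers"]["0"]["input_text"][:1])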