Datasets:

Modalities:
Text
Languages:
English
Libraries:
Datasets
License:
parquet-converter committed on
Commit
ca6c0f7
·
1 Parent(s): 7e73244

Update parquet files

Browse files
Files changed (3) hide show
  1. bigbiohub.py +0 -556
  2. meqsum.py +0 -161
  3. meqsum_source/meqsum-train.parquet +3 -0
bigbiohub.py DELETED
@@ -1,556 +0,0 @@
1
- from collections import defaultdict
2
- from dataclasses import dataclass
3
- from enum import Enum
4
- import logging
5
- from pathlib import Path
6
- from types import SimpleNamespace
7
- from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple
8
-
9
- import datasets
10
-
11
- if TYPE_CHECKING:
12
- import bioc
13
-
14
- logger = logging.getLogger(__name__)
15
-
16
-
17
- BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")
18
-
19
-
20
- @dataclass
21
- class BigBioConfig(datasets.BuilderConfig):
22
- """BuilderConfig for BigBio."""
23
-
24
- name: str = None
25
- version: datasets.Version = None
26
- description: str = None
27
- schema: str = None
28
- subset_id: str = None
29
-
30
-
31
- class Tasks(Enum):
32
- NAMED_ENTITY_RECOGNITION = "NER"
33
- NAMED_ENTITY_DISAMBIGUATION = "NED"
34
- EVENT_EXTRACTION = "EE"
35
- RELATION_EXTRACTION = "RE"
36
- COREFERENCE_RESOLUTION = "COREF"
37
- QUESTION_ANSWERING = "QA"
38
- TEXTUAL_ENTAILMENT = "TE"
39
- SEMANTIC_SIMILARITY = "STS"
40
- TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
41
- PARAPHRASING = "PARA"
42
- TRANSLATION = "TRANSL"
43
- SUMMARIZATION = "SUM"
44
- TEXT_CLASSIFICATION = "TXTCLASS"
45
-
46
-
47
- entailment_features = datasets.Features(
48
- {
49
- "id": datasets.Value("string"),
50
- "premise": datasets.Value("string"),
51
- "hypothesis": datasets.Value("string"),
52
- "label": datasets.Value("string"),
53
- }
54
- )
55
-
56
- pairs_features = datasets.Features(
57
- {
58
- "id": datasets.Value("string"),
59
- "document_id": datasets.Value("string"),
60
- "text_1": datasets.Value("string"),
61
- "text_2": datasets.Value("string"),
62
- "label": datasets.Value("string"),
63
- }
64
- )
65
-
66
- qa_features = datasets.Features(
67
- {
68
- "id": datasets.Value("string"),
69
- "question_id": datasets.Value("string"),
70
- "document_id": datasets.Value("string"),
71
- "question": datasets.Value("string"),
72
- "type": datasets.Value("string"),
73
- "choices": [datasets.Value("string")],
74
- "context": datasets.Value("string"),
75
- "answer": datasets.Sequence(datasets.Value("string")),
76
- }
77
- )
78
-
79
- text_features = datasets.Features(
80
- {
81
- "id": datasets.Value("string"),
82
- "document_id": datasets.Value("string"),
83
- "text": datasets.Value("string"),
84
- "labels": [datasets.Value("string")],
85
- }
86
- )
87
-
88
- text2text_features = datasets.Features(
89
- {
90
- "id": datasets.Value("string"),
91
- "document_id": datasets.Value("string"),
92
- "text_1": datasets.Value("string"),
93
- "text_2": datasets.Value("string"),
94
- "text_1_name": datasets.Value("string"),
95
- "text_2_name": datasets.Value("string"),
96
- }
97
- )
98
-
99
- kb_features = datasets.Features(
100
- {
101
- "id": datasets.Value("string"),
102
- "document_id": datasets.Value("string"),
103
- "passages": [
104
- {
105
- "id": datasets.Value("string"),
106
- "type": datasets.Value("string"),
107
- "text": datasets.Sequence(datasets.Value("string")),
108
- "offsets": datasets.Sequence([datasets.Value("int32")]),
109
- }
110
- ],
111
- "entities": [
112
- {
113
- "id": datasets.Value("string"),
114
- "type": datasets.Value("string"),
115
- "text": datasets.Sequence(datasets.Value("string")),
116
- "offsets": datasets.Sequence([datasets.Value("int32")]),
117
- "normalized": [
118
- {
119
- "db_name": datasets.Value("string"),
120
- "db_id": datasets.Value("string"),
121
- }
122
- ],
123
- }
124
- ],
125
- "events": [
126
- {
127
- "id": datasets.Value("string"),
128
- "type": datasets.Value("string"),
129
- # refers to the text_bound_annotation of the trigger
130
- "trigger": {
131
- "text": datasets.Sequence(datasets.Value("string")),
132
- "offsets": datasets.Sequence([datasets.Value("int32")]),
133
- },
134
- "arguments": [
135
- {
136
- "role": datasets.Value("string"),
137
- "ref_id": datasets.Value("string"),
138
- }
139
- ],
140
- }
141
- ],
142
- "coreferences": [
143
- {
144
- "id": datasets.Value("string"),
145
- "entity_ids": datasets.Sequence(datasets.Value("string")),
146
- }
147
- ],
148
- "relations": [
149
- {
150
- "id": datasets.Value("string"),
151
- "type": datasets.Value("string"),
152
- "arg1_id": datasets.Value("string"),
153
- "arg2_id": datasets.Value("string"),
154
- "normalized": [
155
- {
156
- "db_name": datasets.Value("string"),
157
- "db_id": datasets.Value("string"),
158
- }
159
- ],
160
- }
161
- ],
162
- }
163
- )
164
-
165
-
166
- def get_texts_and_offsets_from_bioc_ann(ann: "bioc.BioCAnnotation") -> Tuple:
167
-
168
- offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]
169
-
170
- text = ann.text
171
-
172
- if len(offsets) > 1:
173
- i = 0
174
- texts = []
175
- for start, end in offsets:
176
- chunk_len = end - start
177
- texts.append(text[i : chunk_len + i])
178
- i += chunk_len
179
- while i < len(text) and text[i] == " ":
180
- i += 1
181
- else:
182
- texts = [text]
183
-
184
- return offsets, texts
185
-
186
-
187
- def remove_prefix(a: str, prefix: str) -> str:
188
- if a.startswith(prefix):
189
- a = a[len(prefix) :]
190
- return a
191
-
192
-
193
- def parse_brat_file(
194
- txt_file: Path,
195
- annotation_file_suffixes: List[str] = None,
196
- parse_notes: bool = False,
197
- ) -> Dict:
198
- """
199
- Parse a brat file into the schema defined below.
200
- `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
201
- Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
202
- e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
203
- Will include annotator notes, when `parse_notes == True`.
204
- brat_features = datasets.Features(
205
- {
206
- "id": datasets.Value("string"),
207
- "document_id": datasets.Value("string"),
208
- "text": datasets.Value("string"),
209
- "text_bound_annotations": [ # T line in brat, e.g. type or event trigger
210
- {
211
- "offsets": datasets.Sequence([datasets.Value("int32")]),
212
- "text": datasets.Sequence(datasets.Value("string")),
213
- "type": datasets.Value("string"),
214
- "id": datasets.Value("string"),
215
- }
216
- ],
217
- "events": [ # E line in brat
218
- {
219
- "trigger": datasets.Value(
220
- "string"
221
- ), # refers to the text_bound_annotation of the trigger,
222
- "id": datasets.Value("string"),
223
- "type": datasets.Value("string"),
224
- "arguments": datasets.Sequence(
225
- {
226
- "role": datasets.Value("string"),
227
- "ref_id": datasets.Value("string"),
228
- }
229
- ),
230
- }
231
- ],
232
- "relations": [ # R line in brat
233
- {
234
- "id": datasets.Value("string"),
235
- "head": {
236
- "ref_id": datasets.Value("string"),
237
- "role": datasets.Value("string"),
238
- },
239
- "tail": {
240
- "ref_id": datasets.Value("string"),
241
- "role": datasets.Value("string"),
242
- },
243
- "type": datasets.Value("string"),
244
- }
245
- ],
246
- "equivalences": [ # Equiv line in brat
247
- {
248
- "id": datasets.Value("string"),
249
- "ref_ids": datasets.Sequence(datasets.Value("string")),
250
- }
251
- ],
252
- "attributes": [ # M or A lines in brat
253
- {
254
- "id": datasets.Value("string"),
255
- "type": datasets.Value("string"),
256
- "ref_id": datasets.Value("string"),
257
- "value": datasets.Value("string"),
258
- }
259
- ],
260
- "normalizations": [ # N lines in brat
261
- {
262
- "id": datasets.Value("string"),
263
- "type": datasets.Value("string"),
264
- "ref_id": datasets.Value("string"),
265
- "resource_name": datasets.Value(
266
- "string"
267
- ), # Name of the resource, e.g. "Wikipedia"
268
- "cuid": datasets.Value(
269
- "string"
270
- ), # ID in the resource, e.g. 534366
271
- "text": datasets.Value(
272
- "string"
273
- ), # Human readable description/name of the entity, e.g. "Barack Obama"
274
- }
275
- ],
276
- ### OPTIONAL: Only included when `parse_notes == True`
277
- "notes": [ # # lines in brat
278
- {
279
- "id": datasets.Value("string"),
280
- "type": datasets.Value("string"),
281
- "ref_id": datasets.Value("string"),
282
- "text": datasets.Value("string"),
283
- }
284
- ],
285
- },
286
- )
287
- """
288
-
289
- example = {}
290
- example["document_id"] = txt_file.with_suffix("").name
291
- with txt_file.open() as f:
292
- example["text"] = f.read()
293
-
294
- # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
295
- # for event extraction
296
- if annotation_file_suffixes is None:
297
- annotation_file_suffixes = [".a1", ".a2", ".ann"]
298
-
299
- if len(annotation_file_suffixes) == 0:
300
- raise AssertionError(
301
- "At least one suffix for the to-be-read annotation files should be given!"
302
- )
303
-
304
- ann_lines = []
305
- for suffix in annotation_file_suffixes:
306
- annotation_file = txt_file.with_suffix(suffix)
307
- if annotation_file.exists():
308
- with annotation_file.open() as f:
309
- ann_lines.extend(f.readlines())
310
-
311
- example["text_bound_annotations"] = []
312
- example["events"] = []
313
- example["relations"] = []
314
- example["equivalences"] = []
315
- example["attributes"] = []
316
- example["normalizations"] = []
317
-
318
- if parse_notes:
319
- example["notes"] = []
320
-
321
- for line in ann_lines:
322
- line = line.strip()
323
- if not line:
324
- continue
325
-
326
- if line.startswith("T"): # Text bound
327
- ann = {}
328
- fields = line.split("\t")
329
-
330
- ann["id"] = fields[0]
331
- ann["type"] = fields[1].split()[0]
332
- ann["offsets"] = []
333
- span_str = remove_prefix(fields[1], (ann["type"] + " "))
334
- text = fields[2]
335
- for span in span_str.split(";"):
336
- start, end = span.split()
337
- ann["offsets"].append([int(start), int(end)])
338
-
339
- # Heuristically split text of discontiguous entities into chunks
340
- ann["text"] = []
341
- if len(ann["offsets"]) > 1:
342
- i = 0
343
- for start, end in ann["offsets"]:
344
- chunk_len = end - start
345
- ann["text"].append(text[i : chunk_len + i])
346
- i += chunk_len
347
- while i < len(text) and text[i] == " ":
348
- i += 1
349
- else:
350
- ann["text"] = [text]
351
-
352
- example["text_bound_annotations"].append(ann)
353
-
354
- elif line.startswith("E"):
355
- ann = {}
356
- fields = line.split("\t")
357
-
358
- ann["id"] = fields[0]
359
-
360
- ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
361
-
362
- ann["arguments"] = []
363
- for role_ref_id in fields[1].split()[1:]:
364
- argument = {
365
- "role": (role_ref_id.split(":"))[0],
366
- "ref_id": (role_ref_id.split(":"))[1],
367
- }
368
- ann["arguments"].append(argument)
369
-
370
- example["events"].append(ann)
371
-
372
- elif line.startswith("R"):
373
- ann = {}
374
- fields = line.split("\t")
375
-
376
- ann["id"] = fields[0]
377
- ann["type"] = fields[1].split()[0]
378
-
379
- ann["head"] = {
380
- "role": fields[1].split()[1].split(":")[0],
381
- "ref_id": fields[1].split()[1].split(":")[1],
382
- }
383
- ann["tail"] = {
384
- "role": fields[1].split()[2].split(":")[0],
385
- "ref_id": fields[1].split()[2].split(":")[1],
386
- }
387
-
388
- example["relations"].append(ann)
389
-
390
- # '*' seems to be the legacy way to mark equivalences,
391
- # but I couldn't find any info on the current way
392
- # this might have to be adapted dependent on the brat version
393
- # of the annotation
394
- elif line.startswith("*"):
395
- ann = {}
396
- fields = line.split("\t")
397
-
398
- ann["id"] = fields[0]
399
- ann["ref_ids"] = fields[1].split()[1:]
400
-
401
- example["equivalences"].append(ann)
402
-
403
- elif line.startswith("A") or line.startswith("M"):
404
- ann = {}
405
- fields = line.split("\t")
406
-
407
- ann["id"] = fields[0]
408
-
409
- info = fields[1].split()
410
- ann["type"] = info[0]
411
- ann["ref_id"] = info[1]
412
-
413
- if len(info) > 2:
414
- ann["value"] = info[2]
415
- else:
416
- ann["value"] = ""
417
-
418
- example["attributes"].append(ann)
419
-
420
- elif line.startswith("N"):
421
- ann = {}
422
- fields = line.split("\t")
423
-
424
- ann["id"] = fields[0]
425
- ann["text"] = fields[2]
426
-
427
- info = fields[1].split()
428
-
429
- ann["type"] = info[0]
430
- ann["ref_id"] = info[1]
431
- ann["resource_name"] = info[2].split(":")[0]
432
- ann["cuid"] = info[2].split(":")[1]
433
- example["normalizations"].append(ann)
434
-
435
- elif parse_notes and line.startswith("#"):
436
- ann = {}
437
- fields = line.split("\t")
438
-
439
- ann["id"] = fields[0]
440
- ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL
441
-
442
- info = fields[1].split()
443
-
444
- ann["type"] = info[0]
445
- ann["ref_id"] = info[1]
446
- example["notes"].append(ann)
447
-
448
- return example
449
-
450
-
451
- def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
452
- """
453
- Transform a brat parse (conforming to the standard brat schema) obtained with
454
- `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
455
- :param brat_parse:
456
- """
457
-
458
- unified_example = {}
459
-
460
- # Prefix all ids with document id to ensure global uniqueness,
461
- # because brat ids are only unique within their document
462
- id_prefix = brat_parse["document_id"] + "_"
463
-
464
- # identical
465
- unified_example["document_id"] = brat_parse["document_id"]
466
- unified_example["passages"] = [
467
- {
468
- "id": id_prefix + "_text",
469
- "type": "abstract",
470
- "text": [brat_parse["text"]],
471
- "offsets": [[0, len(brat_parse["text"])]],
472
- }
473
- ]
474
-
475
- # get normalizations
476
- ref_id_to_normalizations = defaultdict(list)
477
- for normalization in brat_parse["normalizations"]:
478
- ref_id_to_normalizations[normalization["ref_id"]].append(
479
- {
480
- "db_name": normalization["resource_name"],
481
- "db_id": normalization["cuid"],
482
- }
483
- )
484
-
485
- # separate entities and event triggers
486
- unified_example["events"] = []
487
- non_event_ann = brat_parse["text_bound_annotations"].copy()
488
- for event in brat_parse["events"]:
489
- event = event.copy()
490
- event["id"] = id_prefix + event["id"]
491
- trigger = next(
492
- tr
493
- for tr in brat_parse["text_bound_annotations"]
494
- if tr["id"] == event["trigger"]
495
- )
496
- if trigger in non_event_ann:
497
- non_event_ann.remove(trigger)
498
- event["trigger"] = {
499
- "text": trigger["text"].copy(),
500
- "offsets": trigger["offsets"].copy(),
501
- }
502
- for argument in event["arguments"]:
503
- argument["ref_id"] = id_prefix + argument["ref_id"]
504
-
505
- unified_example["events"].append(event)
506
-
507
- unified_example["entities"] = []
508
- anno_ids = [ref_id["id"] for ref_id in non_event_ann]
509
- for ann in non_event_ann:
510
- entity_ann = ann.copy()
511
- entity_ann["id"] = id_prefix + entity_ann["id"]
512
- entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
513
- unified_example["entities"].append(entity_ann)
514
-
515
- # massage relations
516
- unified_example["relations"] = []
517
- skipped_relations = set()
518
- for ann in brat_parse["relations"]:
519
- if (
520
- ann["head"]["ref_id"] not in anno_ids
521
- or ann["tail"]["ref_id"] not in anno_ids
522
- ):
523
- skipped_relations.add(ann["id"])
524
- continue
525
- unified_example["relations"].append(
526
- {
527
- "arg1_id": id_prefix + ann["head"]["ref_id"],
528
- "arg2_id": id_prefix + ann["tail"]["ref_id"],
529
- "id": id_prefix + ann["id"],
530
- "type": ann["type"],
531
- "normalized": [],
532
- }
533
- )
534
- if len(skipped_relations) > 0:
535
- example_id = brat_parse["document_id"]
536
- logger.info(
537
- f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
538
- f" Skip (for now): "
539
- f"{list(skipped_relations)}"
540
- )
541
-
542
- # get coreferences
543
- unified_example["coreferences"] = []
544
- for i, ann in enumerate(brat_parse["equivalences"], start=1):
545
- is_entity_cluster = True
546
- for ref_id in ann["ref_ids"]:
547
- if not ref_id.startswith("T"): # not textbound -> no entity
548
- is_entity_cluster = False
549
- elif ref_id not in anno_ids: # event trigger -> no entity
550
- is_entity_cluster = False
551
- if is_entity_cluster:
552
- entity_ids = [id_prefix + i for i in ann["ref_ids"]]
553
- unified_example["coreferences"].append(
554
- {"id": id_prefix + str(i), "entity_ids": entity_ids}
555
- )
556
- return unified_example
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
meqsum.py DELETED
@@ -1,161 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- """
17
- Dataset for medical question summarization introduced in the ACL 2019 paper "On the Summarization of Consumer Health
18
- Questions". Question understanding is one of the main challenges in question answering. In real world applications,
19
- users often submit natural language questions that are longer than needed and include peripheral information that
20
- increases the complexity of the question, leading to substantially more false positives in answer retrieval. In this
21
- paper, we study neural abstractive models for medical question summarization. We introduce the MeQSum corpus of 1,000
22
- summarized consumer health questions.
23
- """
24
-
25
- import os
26
- from typing import Dict, List, Tuple
27
-
28
- import datasets
29
- import pandas as pd
30
-
31
- from .bigbiohub import text2text_features
32
- from .bigbiohub import BigBioConfig
33
- from .bigbiohub import Tasks
34
-
35
- _LANGUAGES = ['English']
36
- _PUBMED = False
37
- _LOCAL = False
38
- _CITATION = """\
39
- @inproceedings{ben-abacha-demner-fushman-2019-summarization,
40
- title = "On the Summarization of Consumer Health Questions",
41
- author = "Ben Abacha, Asma and
42
- Demner-Fushman, Dina",
43
- booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
44
- month = jul,
45
- year = "2019",
46
- address = "Florence, Italy",
47
- publisher = "Association for Computational Linguistics",
48
- url = "https://aclanthology.org/P19-1215",
49
- doi = "10.18653/v1/P19-1215",
50
- pages = "2228--2234",
51
- abstract = "Question understanding is one of the main challenges in question answering. In real world applications, users often submit natural language questions that are longer than needed and include peripheral information that increases the complexity of the question, leading to substantially more false positives in answer retrieval. In this paper, we study neural abstractive models for medical question summarization. We introduce the MeQSum corpus of 1,000 summarized consumer health questions. We explore data augmentation methods and evaluate state-of-the-art neural abstractive models on this new task. In particular, we show that semantic augmentation from question datasets improves the overall performance, and that pointer-generator networks outperform sequence-to-sequence attentional models on this task, with a ROUGE-1 score of 44.16{\%}. We also present a detailed error analysis and discuss directions for improvement that are specific to question summarization.",
52
- }
53
- """
54
-
55
- _DATASETNAME = "meqsum"
56
- _DISPLAYNAME = "MeQSum"
57
-
58
- _DESCRIPTION = """\
59
- Dataset for medical question summarization introduced in the ACL 2019 paper "On the Summarization of Consumer Health
60
- Questions". Question understanding is one of the main challenges in question answering. In real world applications,
61
- users often submit natural language questions that are longer than needed and include peripheral information that
62
- increases the complexity of the question, leading to substantially more false positives in answer retrieval. In this
63
- paper, we study neural abstractive models for medical question summarization. We introduce the MeQSum corpus of 1,000
64
- summarized consumer health questions.
65
- """
66
-
67
- _HOMEPAGE = "https://github.com/abachaa/MeQSum"
68
-
69
- _LICENSE = 'License information unavailable'
70
-
71
- _URLS = {
72
- _DATASETNAME: "https://github.com/abachaa/MeQSum/raw/master/MeQSum_ACL2019_BenAbacha_Demner-Fushman.xlsx",
73
- }
74
-
75
- _SUPPORTED_TASKS = [Tasks.SUMMARIZATION]
76
-
77
- _SOURCE_VERSION = "1.0.0"
78
-
79
- _BIGBIO_VERSION = "1.0.0"
80
-
81
-
82
- class MeQSumDataset(datasets.GeneratorBasedBuilder):
83
- """Dataset containing 1000 summarized consumer health questions."""
84
-
85
- SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
86
- BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
87
-
88
- BUILDER_CONFIGS = [
89
- BigBioConfig(
90
- name="meqsum_source",
91
- version=SOURCE_VERSION,
92
- description="MeQSum source schema",
93
- schema="source",
94
- subset_id="meqsum",
95
- ),
96
- BigBioConfig(
97
- name="meqsum_bigbio_t2t",
98
- version=BIGBIO_VERSION,
99
- description="MeQSum BigBio schema",
100
- schema="bigbio_t2t",
101
- subset_id="meqsum",
102
- ),
103
- ]
104
-
105
- DEFAULT_CONFIG_NAME = "meqsum_source"
106
-
107
- def _info(self) -> datasets.DatasetInfo:
108
-
109
- if self.config.schema == "source":
110
- features = datasets.Features(
111
- {
112
- "File": datasets.Value("string"),
113
- "CHQ": datasets.Value("string"),
114
- "Summary": datasets.Value("string"),
115
- }
116
- )
117
- elif self.config.schema == "bigbio_t2t":
118
- features = text2text_features
119
-
120
- return datasets.DatasetInfo(
121
- description=_DESCRIPTION,
122
- features=features,
123
- homepage=_HOMEPAGE,
124
- license=str(_LICENSE),
125
- citation=_CITATION,
126
- )
127
-
128
- def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
129
- """Returns SplitGenerators."""
130
-
131
- urls = _URLS[_DATASETNAME]
132
- file_path = dl_manager.download(urls)
133
-
134
- return [
135
- datasets.SplitGenerator(
136
- name=datasets.Split.TRAIN,
137
- gen_kwargs={
138
- "filepath": os.path.join(file_path),
139
- },
140
- ),
141
- ]
142
-
143
- def _generate_examples(self, filepath) -> Tuple[int, Dict]:
144
- """Yields examples as (key, example) tuples."""
145
-
146
- corpus = pd.read_excel(filepath)
147
-
148
- if self.config.schema == "source":
149
- for idx, example in corpus.iterrows():
150
- yield idx, example.to_dict()
151
-
152
- elif self.config.schema == "bigbio_t2t":
153
- corpus["id"] = corpus.index
154
- corpus.rename(
155
- columns={"File": "document_id", "CHQ": "text_1", "Summary": "text_2"},
156
- inplace=True,
157
- )
158
- corpus["text_1_name"] = ""
159
- corpus["text_2_name"] = ""
160
- for idx, example in corpus.iterrows():
161
- yield example["id"], example.to_dict()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
meqsum_source/meqsum-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3b2354877082438411990825008819ec4054208d26dceaa0ed718ef587dbbab
3
+ size 262329