Languages: English
Multilinguality: monolingual
ArXiv: 2010.14235
License: mit
gabrielaltay committed 1b0f43f (1 parent: 9839018)

upload hubscripts/multi_xscience_hub.py to hub from bigbio repo

Files changed (1): multi_xscience.py (+205, -0)
multi_xscience.py ADDED
@@ -0,0 +1,205 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
from typing import List

import datasets

from .bigbiohub import BigBioConfig, Tasks, text2text_features

_LANGUAGES = ["English"]
_PUBMED = False
_LOCAL = False
_CITATION = """\
@misc{https://doi.org/10.48550/arxiv.2010.14235,
  doi       = {10.48550/ARXIV.2010.14235},
  url       = {https://arxiv.org/abs/2010.14235},
  author    = {Lu, Yao and Dong, Yue and Charlin, Laurent},
  keywords  = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences},
  title     = {Multi-XScience: A Large-scale Dataset for Extreme Multi-document Summarization of Scientific Articles},
  publisher = {arXiv},
  year      = {2020},
  copyright = {arXiv.org perpetual, non-exclusive license}
}
"""

_DATASETNAME = "multi_xscience"
_DISPLAYNAME = "Multi-XScience"

_DESCRIPTION = """\
Multi-document summarization is a challenging task for which there exist few large-scale datasets.
We propose Multi-XScience, a large-scale multi-document summarization dataset created from scientific articles.
Multi-XScience introduces a challenging multi-document summarization task: writing the related-work section
of a paper based on its abstract and the articles it references. Our work is inspired by extreme summarization,
a dataset construction protocol that favours abstractive modeling approaches. Descriptive statistics and
empirical results---using several state-of-the-art models trained on the Multi-XScience dataset---reveal
that Multi-XScience is well suited for abstractive models.
"""
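
# Data layout sketch (field names follow the loader below; the "@cite_N"-style
# keys of "ref_abstract" are an assumption about the raw JSON, and "aid"/"mid"
# are treated here as opaque id strings):
#   {
#       "aid": "...",
#       "mid": "...",
#       "abstract": "...",
#       "ref_abstract": {
#           "@cite_1": {"mid": "...", "abstract": "..."},
#       },
#   }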

_HOMEPAGE = "https://github.com/yaolu/Multi-XScience"

_LICENSE = "MIT License"

_URLS = {
    _DATASETNAME: [
        "https://github.com/yaolu/Multi-XScience/blob/master/data/train.json.gz?raw=true",
        "https://github.com/yaolu/Multi-XScience/blob/master/data/test.json.gz?raw=true",
        "https://github.com/yaolu/Multi-XScience/blob/master/data/val.json.gz?raw=true",
    ],
}

_SUPPORTED_TASKS = [Tasks.PARAPHRASING, Tasks.SUMMARIZATION]

_SOURCE_VERSION = "1.0.0"

_BIGBIO_VERSION = "1.0.0"

class MultiXScience(datasets.GeneratorBasedBuilder):
    """
    Dataset for the EMNLP 2020 paper, Multi-XScience:
    A Large-scale Dataset for Extreme Multi-document Summarization
    of Scientific Articles.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="multi_xscience_source",
            version=SOURCE_VERSION,
            description="multi_xscience source schema",
            schema="source",
            subset_id="multi_xscience",
        ),
        BigBioConfig(
            name="multi_xscience_bigbio_t2t",
            version=BIGBIO_VERSION,
            description="multi_xscience BigBio schema",
            schema="bigbio_t2t",
            subset_id="multi_xscience",
        ),
    ]

    DEFAULT_CONFIG_NAME = "multi_xscience_source"
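
    # Either config can be requested by name when loading; the source schema is
    # the default. A sketch: load_dataset(<path to this script>, name="multi_xscience_bigbio_t2t")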

    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "aid": datasets.Value("string"),
                    "mid": datasets.Value("string"),
                    "abstract": datasets.Value("string"),
                    "ref_abstract": datasets.Sequence(
                        {
                            "mid": datasets.Value("string"),
                            "abstract": datasets.Value("string"),
                        }
                    ),
                }
            )
        elif self.config.schema == "bigbio_t2t":
            features = text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:

        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # Whatever you put in gen_kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir[0],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir[1],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir[2],
                    "split": "val",
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`

    def _generate_examples(self, filepath, split):
        # Each split is a single JSON file containing a list of records.
        with open(filepath, "r") as j_file:
            j_json = json.load(j_file)

        if self.config.schema == "source":
            for key, example in enumerate(j_json):
                yield key, {
                    "aid": example["aid"],
                    "mid": example["mid"],
                    "abstract": example["abstract"],
                    "ref_abstract": [
                        {
                            "mid": example["ref_abstract"][ref_key]["mid"],
                            "abstract": example["ref_abstract"][ref_key]["abstract"],
                        }
                        for ref_key in example["ref_abstract"].keys()
                    ],
                }

        elif self.config.schema == "bigbio_t2t":
            uid = 0

            for key, example in enumerate(j_json):
                uid += 1
                yield key, {
                    "id": str(uid),
                    "document_id": str(key),
                    "text_1": example["abstract"],
                    "text_2": " ".join(
                        [e["abstract"] for e in example["ref_abstract"].values()]
                    ),
                    "text_1_name": "Abstract of query paper",
                    "text_2_name": "Cite abstracts",
                }
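
A minimal usage sketch (the "bigbio/multi_xscience" Hub repo id is an assumption,
not stated in this commit; newer versions of datasets may also require
trust_remote_code=True for script-based datasets):

    from datasets import load_dataset

    # Source schema: nested records (aid, mid, abstract, ref_abstract).
    source = load_dataset("bigbio/multi_xscience", name="multi_xscience_source")

    # BigBio text-to-text schema: flat (text_1, text_2) pairs for summarization.
    t2t = load_dataset("bigbio/multi_xscience", name="multi_xscience_bigbio_t2t")

    print(source["train"][0]["abstract"])
    print(t2t["validation"][0]["text_2"])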