lovodkin93 committed
Commit d533c09
1 Parent(s): 7e98ad1

update the dataset loading script

Files changed (1):
  1. controlled_text_reduction.py +204 -0
controlled_text_reduction.py ADDED (+204 lines)

# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A dataset loading script for the Controlled Text Reduction dataset."""


import datasets
from pathlib import Path

import pandas as pd


_CITATION = """"""


_DESCRIPTION = """\
The dataset contains document-summary pairs with document spans (referred to as "highlights"), indicating the "pre-selected" spans that lead to the creation of the summary.
The evaluation and test sets were constructed via controlled crowdsourcing.
The training sets were automatically generated using the summary-source proposition-level alignment model SuperPAL (Ernst et al., 2021).
"""

_HOMEPAGE = "https://huggingface.co/datasets/lovodkin93/Controlled-Text-Reduction-dataset"

_LICENSE = """MIT License

Copyright (c) 2022 lovodkin93

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""


_URLs = {
    "DUC-2001-2002": {
        # "resolve" (rather than "blob") is required so that the raw CSV files
        # are downloaded, not the HTML pages that render them on the Hub.
        "dev": "https://huggingface.co/datasets/lovodkin93/Controlled-Text-Reduction-dataset/resolve/main/data/dev_DUC-2001-2002.csv",
        "test": "https://huggingface.co/datasets/lovodkin93/Controlled-Text-Reduction-dataset/resolve/main/data/test_DUC-2001-2002.csv",
        "train": "https://huggingface.co/datasets/lovodkin93/Controlled-Text-Reduction-dataset/resolve/main/data/train_DUC-2001-2002.csv",
    },
    "CNN-DM": {
        "train": "https://huggingface.co/datasets/lovodkin93/Controlled-Text-Reduction-dataset/resolve/main/data/train_CNNDM.csv"
    },
}

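# Example usage (illustrative; assumes the script is hosted on the Hub under
# the homepage repository above):
#
#   from datasets import load_dataset
#   duc = load_dataset("lovodkin93/Controlled-Text-Reduction-dataset", "DUC-2001-2002")
#   cnn = load_dataset("lovodkin93/Controlled-Text-Reduction-dataset", "CNN-DM")
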
class ControlledTextReduction(datasets.GeneratorBasedBuilder):
    """Controlled Text Reduction: a dataset for the Controlled Text Reduction task.

    Each data point consists of a document, a summary, and a list of document
    spans ("highlights") marking the pre-selected content that the summary covers."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="DUC-2001-2002",
            version=VERSION,
            description="The Controlled Text Reduction dataset extracted from the DUC 2001-2002 Single Document Summarization benchmark",
        ),
        datasets.BuilderConfig(
            name="CNN-DM",
            version=VERSION,
            description="The Controlled Text Reduction dataset extracted from the CNN-DailyMail dataset (train split only)",
        ),
    ]

    # A default configuration is optional; when set, it must name one of the
    # BUILDER_CONFIGS above.
    DEFAULT_CONFIG_NAME = "DUC-2001-2002"

    def _info(self):
        features = datasets.Features(
            {
                "doc_text": datasets.Value("string"),
                "summary_text": datasets.Value("string"),
                "highlight_spans": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the columns of the dataset and their types
            # (the same features are used for both configurations).
            features=features,
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

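    # A single yielded example looks roughly like the following (values
    # abridged and hypothetical; "highlight_spans" is kept as a raw string
    # whose exact encoding is determined by the CSV files):
    #
    #   {"doc_text": "The hurricane made landfall early Friday ...",
    #    "summary_text": "A hurricane struck the coast ...",
    #    "highlight_spans": "0, 45;102, 180"}
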
    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators."""

        URLs = _URLs[self.config.name]
        # Download and cache all files, keeping the same structure as _URLs.
        corpora = {
            section: Path(dl_manager.download_and_extract(URLs[section]))
            for section in URLs
        }

        if self.config.name == "CNN-DM":
            # CNN-DM ships only a train split.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs are passed to _generate_examples.
                    gen_kwargs={"filepath": corpora["train"]},
                ),
            ]
        else:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": corpora["train"]},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"filepath": corpora["dev"]},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": corpora["test"]},
                ),
            ]

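    # Illustrative check of the resulting split structure (assumes the dataset
    # has been loaded as in the usage example near _URLs above):
    #
    #   assert set(duc.keys()) == {"train", "validation", "test"}
    #   assert set(cnn.keys()) == {"train"}
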
    def _generate_examples(self, filepath: Path):
        """Yields Controlled Text Reduction examples from a csv file. Each instance
        contains the document, the summary and the pre-selected spans."""

        df = pd.read_csv(filepath, index_col=False)
        for counter, dic in enumerate(df.to_dict("records")):
            columns_to_load_into_object = ["doc_text", "summary_text", "highlight_spans"]
            for key in columns_to_load_into_object:
                # Assumes each cell holds a Python literal (e.g. a quoted
                # string); eval will fail on plain, unquoted text.
                dic[key] = eval(dic[key])
            yield counter, dic
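
A quick way to sanity-check a loading script like this before committing it is to point load_dataset at the local file. A minimal sketch, assuming a datasets version that still supports standalone loading scripts (pre-3.0) and network access to the CSV files referenced in _URLs:

from datasets import load_dataset

# Build both configurations from the local script and peek at one example.
duc = load_dataset("./controlled_text_reduction.py", "DUC-2001-2002")
cnn = load_dataset("./controlled_text_reduction.py", "CNN-DM")

print(duc)  # expect train / validation / test splits
print(cnn)  # expect a train split only

sample = duc["validation"][0]
print(sample["summary_text"])
print(sample["highlight_spans"])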