# coding=utf-8
# Copyright 2021 The HuggingFace Datasets Authors and
# the current dataset script contributor (Mathias Creutz).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Data loader for the Opusparcus paraphrase corpus."""

import bz2
import json
import re

import datasets

# BibTeX citation for the Opusparcus corpus:

_CITATION = """\
@InProceedings{creutz:lrec2018,
  title = {Open Subtitles Paraphrase Corpus for Six Languages},
  author={Mathias Creutz},
  booktitle={Proceedings of the 11th edition of the Language Resources
  and Evaluation Conference (LREC 2018)},
  year={2018},
  month = {May 7-12},
  address = {Miyazaki, Japan},
  editor = {Nicoletta Calzolari (Conference chair) and Khalid Choukri
  and Christopher Cieri and Thierry Declerck and Sara Goggi and Koiti
  Hasida and Hitoshi Isahara and Bente Maegaard and Joseph Mariani and
  Hélène Mazo and Asuncion Moreno and Jan Odijk and Stelios Piperidis
  and Takenobu Tokunaga},
  publisher = {European Language Resources Association (ELRA)},
  isbn = {979-10-95546-00-9},
  language = {english},
  url={http://www.lrec-conf.org/proceedings/lrec2018/pdf/131.pdf}
}
"""

_DESCRIPTION = """\
Opusparcus is a paraphrase corpus for six European languages: German,
English, Finnish, French, Russian, and Swedish. The paraphrases are
extracted from the OpenSubtitles2016 corpus, which contains subtitles
from movies and TV shows.
"""

_HOMEPAGE = "http://urn.fi/urn:nbn:fi:lb-2018021221"

_LICENSE = "CC-BY-NC"

# The HuggingFace dataset library doesn't host the datasets but only
# points to the original files. This can be an arbitrary nested
# dict/list of URLs (see below in `_split_generators` method):
_URLs = {
    "validation": "validation.jsonl",
    "test": "test.jsonl",
    "validation.full": "validation.jsonl",
    "test.full": "test.jsonl",
    # NB: the "train" split file is defined dynamically inside the
    # `_split_generators` method
}

_VERSION = datasets.Version("1.0.0", "")


def detokenize(text):
    """
    Detokenizing a text undoes the tokenization, restoring punctuation
    and spaces to the places that people expect them to be. Ideally,
    `detokenize(tokenize(text))` should be identical to `text`, except
    for line breaks.
    """
    step1 = text.replace("`` ", '"').replace(" ''", '"').replace('. . .',  '...')
    step2 = step1.replace(" ( ", " (").replace(" ) ", ") ")
    step3 = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", step2)
    step4 = re.sub(r' ([.,:;?!%]+)$', r"\1", step3)
    step5 = step4.replace(" '", "'").replace(" n't", "n't").replace(
         "can not", "cannot").replace(" 've", "'ve")
    step6 = step5.replace(" ` ", " '")
    return step6.strip()


class OpusparcusConfig(datasets.BuilderConfig):
    """BuilderConfig for Opusparcus."""

    def __init__(self, lang=None, quality=100, **kwargs):
        """BuilderConfig for Wikipedia.
        Args:
          lang: string, two letter language code:
                de, en, fi, fr, ru, sv
          quality: int, filter training set according to quality:
                   [ 60, 65, 70, 75, 80, 85, 90, 95, 100 ]
          **kwargs: keyword arguments forwarded to super.
        """
        super(OpusparcusConfig, self).__init__(
            name="{0}.{1}".format(lang, quality),
            description="Opusparcus datasets for '{:s}', training set quality: {:d}".format(
                lang, quality
            ),
            **kwargs,
        )
        self.lang = lang
        self.quality = quality


# Languages in Opusparcus: German (de), English (en), Finnish (fi),
# French (fr), Russian (ru), Swedish (sv):
LANGS = [ "de", "en", "fi", "fr", "ru", "sv" ]

# The training sets (train splits) come in eight sizes (95 .. 60),
# where the number indicates the estimated proportion [%] of true
# paraphrases in the set. The higher the number, the smaller but
# cleaner the set; the lower the number, the larger but noisier the
# set. The smaller sets are included as subsets of the larger ones.
# The special value 100 matches no training data at all, so if you are
# only interested in the validation and test sets, you can use 100 to
# save time and space. (The quality value is irrelevant for the
# validation and test sets, which have been annotated manually; each
# of their examples has an annotation score attached to it.)
QUALITIES = [ 100, 95, 90, 85, 80, 75, 70, 65, 60 ]
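# For example, the config "en.95" loads only the highest-ranked English
# training pairs (estimated to be at least 95% true paraphrases), whereas
# "en.100" skips the training data entirely and yields only the manually
# annotated validation and test splits.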

class Opusparcus(datasets.GeneratorBasedBuilder):
    """Opusparcus is a paraphrase corpus for six European languages:
    German, English, Finnish, French, Russian, and Swedish. The
    paraphrases are extracted from the OpenSubtitles2016 corpus, which
    contains subtitles from movies and TV shows.

    The data in Opusparcus has been extracted from OpenSubtitles2016
    (http://opus.nlpl.eu/OpenSubtitles2016.php), which is in turn
    based on data from http://www.opensubtitles.org/.

    For each target language, the Opusparcus data have been
    partitioned into three types of data sets: training, validation
    and test sets. The training sets are large, consisting of millions
    of sentence pairs, and have been compiled automatically, with the
    help of probabilistic ranking functions. The validation and test
    sets consist of sentence pairs that have been annotated manually;
    each set contains approximately 1000 sentence pairs that have been
    verified to be acceptable paraphrases by two independent
    annotators.
    """

    # This is a dataset with multiple configurations.
    BUILDER_CONFIG_CLASS = OpusparcusConfig

    # You can load configurations as follows:
    # data = datasets.load_dataset('GEM/opusparcus', lang='de')
    # data = datasets.load_dataset('GEM/opusparcus', lang='fr', quality=75)
    # etc.
    #
    # The language parameter is compulsory, whereas the quality
    # parameter is not (the default value being 100).
    #
    # The above commands can alternatively be expressed as:
    # data = datasets.load_dataset('GEM/opusparcus', 'de.100')
    # data = datasets.load_dataset('GEM/opusparcus', 'fr.75')

    BUILDER_CONFIGS = [
        OpusparcusConfig(lang=lang, quality=quality, version=_VERSION) \
        for lang in LANGS for quality in QUALITIES
    ]
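
    # The list comprehension above yields one configuration per
    # (language, quality) pair, i.e. 6 languages x 9 quality values
    # = 54 configurations, named "de.100", "de.95", ..., "sv.60".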

    # There is no default configuration. The user always needs to specify one:
    # DEFAULT_CONFIG_NAME = None

    def _info(self):
        # This method specifies the datasets.DatasetInfo object, which
        # contains information about the dataset and the types of its features.
        features = datasets.Features(
            {
                "lang": datasets.Value("string"),
                "input": datasets.Value("string"),
                "target": datasets.Value("string"),
                "annot_score": datasets.Value("float"),
                "gem_id": datasets.Value("string"),
                "references": [datasets.Value("string")]
            }
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset:
            supervised_keys=("input", "target"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,

            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # This method is tasked with downloading/extracting the data
        # and defining the splits depending on the configuration.
        # Several configurations are possible (listed in
        # BUILDER_CONFIGS), and the configuration selected by the user
        # is in self.config.name, which consists of two fields
        # separated by a period, containing the values of
        # self.config.lang and self.config.quality.
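        # For example, the config name "fr.75" corresponds to
        # self.config.lang == "fr" and self.config.quality == 75.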

        if self.config.lang is None:
            # This is an error: nothing to do here if no language
            # has been defined:
            return []

        # Select which file of the training data contains the matching data:
        if self.config.quality < 70:
            # We need to retrieve the largest training set file
            # containing the full training set for the desired language
            _URLs["train"] = "train_{0}.60.jsonl.bz2".format(self.config.lang)

        elif self.config.quality <= 95:
            # We can do with a smaller version of the training set
            # for the desired language
            _URLs["train"] = "train_{0}.70.jsonl.bz2".format(self.config.lang)

        # Otherwise, if the desired quality is above 95, we do not
        # download any training data, because there is no matching data.
        # The validation and test sets are so small that we do not perform
        # any filtering or optimization at this stage.
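        #
        # To summarize the file choice made above:
        #   quality 60 or 65   -> train_<lang>.60.jsonl.bz2
        #   quality 70 ... 95  -> train_<lang>.70.jsonl.bz2
        #   quality 100        -> no training file is downloaded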

        # dl_manager is a datasets.download.DownloadManager, which
        # downloads and extracts the URLs
        # (It can accept any type or nested list/dict and will give
        # back the same structure with the url replaced with path to
        # local files.  By default the archives will be extracted and
        # a path to a cached folder where they are extracted is
        # returned instead of the archive.)
        data_dir = dl_manager.download_and_extract(_URLs)

        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["test"],
                    "split": "test"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["validation"],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name="test.full",
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["test.full"],
                    "split": "test.full"
                },
            ),
            datasets.SplitGenerator(
                name="validation.full",
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["validation.full"],
                    "split": "validation.full",
                },
            ),
        ]

        # If the desired quality value is 100, no subset of the
        # training set is good enough, and we only produce validation
        # and test sets, in order to save space and time:

        if self.config.quality <= 95:
            # In this case there is matching training data, so we produce
            # a train split.
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "lang": self.config.lang,
                        "quality": self.config.quality,
                        "filepath": data_dir["train"],
                        "split": "train",
                    },
                )
            )

        return splits

    def _generate_examples(
            self, lang, quality, filepath, split
            # method parameters are unpacked from `gen_kwargs` as given in
            # `_split_generators`
    ):
        """ Yields examples as (key, example) tuples. """
        # This method handles input defined in _split_generators to
        # yield (key, example) tuples from the dataset.
        # The `key` is here for legacy reason (tfds) and is not important in itself.

        if split == datasets.Split.TRAIN:
            # Training sets are in jsonl files that have been compressed using bzip2.
            # They contain a "quality" field that is missing from the validation and test sets.
            # We also know that this file only contains the desired language,
            # because for the training sets the languages are in separate
            # files, and only the desired language has been downloaded.
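            #
            # An illustrative line (made-up values; the field names are the
            # ones read below) might look like:
            #   {"lang": "en", "sent1": "...", "sent2": "...",
            #    "quality": 96.5, "gem_id": "..."}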
            with bz2.open(filepath, "rt", encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    data = json.loads(row)
                    if data["quality"] < quality:
                        # The rest of this file only contains lower-quality data,
                        # because the file is sorted best first.
                        break
                    yield id_, {
                        "lang": data["lang"],
                        "input": detokenize(data["sent1"]),
                        "target": detokenize(data["sent2"]),
                        "annot_score": 0.0,   # means there is no annotation
                        "gem_id": data["gem_id"],
                        "references": [detokenize(data["sent2"])]
                    }
        else:
            # The validation and test sets are in jsonl files.
            # They contain the fields "lang" and "annot_score" that we
            # filter on.  If we ask for the full sets, we will keep
            # all data entries for the desired language, including the
            # sentence pairs that were not considered paraphrases by
            # the annotators:
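            #
            # An illustrative line (made-up values; the field names are the
            # ones read below) might look like:
            #   {"lang": "en", "sent1": "...", "sent2": "...",
            #    "annot_score": 3.5, "gem_id": "..."}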
            keep_all = (split == "validation.full" or split == "test.full")
            with open(filepath, encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    data = json.loads(row)
                    if data["lang"] == lang: # only keep desired language
                        if keep_all or data["annot_score"] >= 3.0:
                            # for full sets keep all;
                            # for standard test and validation sets, keep only
                            # the actual paraphrases (annot_score >= 3.0 means
                            # "good or mostly good example of paraphrases")
                            yield id_, {
                                "lang": data["lang"],
                                "input": detokenize(data["sent1"]),
                                "target": detokenize(data["sent2"]),
                                "annot_score": data["annot_score"],
                                "gem_id": data["gem_id"],
                                "references": [detokenize(data["sent2"])]
                            }