"""Data loader for the Opusparcus paraphrase corpus."""


import bz2
import json
import re

import datasets


_CITATION = """\
@InProceedings{creutz:lrec2018,
  title = {Open Subtitles Paraphrase Corpus for Six Languages},
  author = {Mathias Creutz},
  booktitle = {Proceedings of the 11th edition of the Language Resources
               and Evaluation Conference (LREC 2018)},
  year = {2018},
  month = {May 7-12},
  address = {Miyazaki, Japan},
  editor = {Nicoletta Calzolari (Conference chair) and Khalid Choukri
            and Christopher Cieri and Thierry Declerck and Sara Goggi and Koiti
            Hasida and Hitoshi Isahara and Bente Maegaard and Joseph Mariani and
            Hélène Mazo and Asuncion Moreno and Jan Odijk and Stelios Piperidis
            and Takenobu Tokunaga},
  publisher = {European Language Resources Association (ELRA)},
  isbn = {979-10-95546-00-9},
  language = {english},
  url = {http://www.lrec-conf.org/proceedings/lrec2018/pdf/131.pdf}
}
"""

_DESCRIPTION = """\
Opusparcus is a paraphrase corpus for six European languages: German,
English, Finnish, French, Russian, and Swedish. The paraphrases are
extracted from the OpenSubtitles2016 corpus, which contains subtitles
from movies and TV shows.
"""

_HOMEPAGE = "http://urn.fi/urn:nbn:fi:lb-2018021221"

_LICENSE = "CC-BY-NC"

# Validation and test data are shared by all configurations; the training
# file for a given language and quality threshold is chosen in
# _split_generators, since its name depends on the selected configuration.
_URLs = {
    "validation": "validation.jsonl",
    "test": "test.jsonl",
    "validation.full": "validation.jsonl",
    "test.full": "test.jsonl",
}

_VERSION = datasets.Version("1.0.0", "")


def detokenize(text):
    """
    Untokenizing a text undoes the tokenizing operation, restoring
    punctuation and spaces to the places that people expect them to be.
    Ideally, `untokenize(tokenize(text))` should be identical to `text`,
    except for line breaks.
    """
    step1 = text.replace("`` ", '"').replace(" ''", '"').replace('. . .', '...')
    step2 = step1.replace(" ( ", " (").replace(" ) ", ") ")
    step3 = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", step2)
    step4 = re.sub(r' ([.,:;?!%]+)$', r"\1", step3)
    step5 = step4.replace(" '", "'").replace(" n't", "n't").replace(
        "can not", "cannot").replace(" 've", "'ve")
    step6 = step5.replace(" ` ", " '")
    return step6.strip()
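
# Illustrative behavior (examples only; not exercised by the loader itself):
#     detokenize("do n't you think ?")      ->  "don't you think?"
#     detokenize("`` Hello , world ! ''")   ->  '"Hello, world!"'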


class OpusparcusConfig(datasets.BuilderConfig):
    """BuilderConfig for Opusparcus."""

    def __init__(self, lang=None, quality=100, **kwargs):
        """BuilderConfig for Opusparcus.

        Args:
          lang: string, two-letter language code:
            de, en, fi, fr, ru, sv
          quality: int, filter the training set by quality threshold:
            [60, 65, 70, 75, 80, 85, 90, 95, 100]
          **kwargs: keyword arguments forwarded to super.
        """
        super(OpusparcusConfig, self).__init__(
            name="{0}.{1}".format(lang, quality),
            description="Opusparcus datasets for '{:s}', "
                        "training set quality: {:d}".format(lang, quality),
            **kwargs,
        )
        self.lang = lang
        self.quality = quality
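
# For example (illustrative), OpusparcusConfig(lang="en", quality=95,
# version=_VERSION) produces a configuration named "en.95".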


# The six languages covered by Opusparcus.
LANGS = ["de", "en", "fi", "fr", "ru", "sv"]

# Quality thresholds available for filtering the training sets.
QUALITIES = [100, 95, 90, 85, 80, 75, 70, 65, 60]


class Opusparcus(datasets.GeneratorBasedBuilder):

    """Opusparcus is a paraphrase corpus for six European languages:
    German, English, Finnish, French, Russian, and Swedish. The
    paraphrases are extracted from the OpenSubtitles2016 corpus, which
    contains subtitles from movies and TV shows.

    The data in Opusparcus has been extracted from OpenSubtitles2016
    (http://opus.nlpl.eu/OpenSubtitles2016.php), which is in turn
    based on data from http://www.opensubtitles.org/.

    For each target language, the Opusparcus data have been
    partitioned into three types of data sets: training, validation
    and test sets. The training sets are large, consisting of millions
    of sentence pairs, and have been compiled automatically, with the
    help of probabilistic ranking functions. The validation and test
    sets consist of sentence pairs that have been annotated manually;
    each set contains approximately 1000 sentence pairs that have been
    verified to be acceptable paraphrases by two independent
    annotators.
    """

    BUILDER_CONFIG_CLASS = OpusparcusConfig

    # One configuration per (language, quality) pair, named e.g. "en.95".
    BUILDER_CONFIGS = [
        OpusparcusConfig(lang=lang, quality=quality, version=_VERSION)
        for lang in LANGS for quality in QUALITIES
    ]
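
    # Usage sketch (assuming the jsonl/bz2 data files referenced in _URLs
    # are available relative to this script):
    #
    #     from datasets import load_dataset
    #     data = load_dataset("path/to/this/script", "de.90")
    #
    # data["train"] then holds pairs of estimated quality >= 90, while
    # "validation" and "test" contain the manually annotated pairs.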

    def _info(self):
        # Each example is a sentence pair: "input" and "target" hold the
        # two detokenized sentences; "annot_score" is the manual annotation
        # score (0.0 for the automatically compiled training data);
        # "references" repeats the target for reference-based evaluation.
        features = datasets.Features(
            {
                "lang": datasets.Value("string"),
                "input": datasets.Value("string"),
                "target": datasets.Value("string"),
                "annot_score": datasets.Value("float"),
                "gem_id": datasets.Value("string"),
                "references": [datasets.Value("string")],
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # The original sentence fields "sent1"/"sent2" are renamed to
            # "input"/"target" on generation, so those are the keys here.
            supervised_keys=("input", "target"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
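
    # A yielded example thus has the following shape (illustrative values):
    #     {"lang": "en", "input": "Don't you think?",
    #      "target": "Do you not think so?", "annot_score": 4.0,
    #      "gem_id": "...", "references": ["Do you not think so?"]}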

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        if self.config.lang is None:
            # No language given; nothing to generate.
            return []

        # Work on a copy, so that the module-level dict is not mutated
        # across configurations. The training files are prefiltered at two
        # thresholds: pairs with estimated quality below 70 appear only in
        # the ".60" files, all others in the ".70" files; finer-grained
        # filtering happens in _generate_examples. Quality 100 means no
        # training set at all, only the manually annotated data.
        urls = dict(_URLs)
        if self.config.quality < 70:
            urls["train"] = "train_{0}.60.jsonl.bz2".format(self.config.lang)
        elif self.config.quality <= 95:
            urls["train"] = "train_{0}.70.jsonl.bz2".format(self.config.lang)

        data_dir = dl_manager.download_and_extract(urls)

        # The plain "validation" and "test" splits keep only sentence pairs
        # annotated as good paraphrases (annot_score >= 3.0); the ".full"
        # variants keep every annotated pair.
        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["test"],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["validation"],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name="test.full",
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["test.full"],
                    "split": "test.full",
                },
            ),
            datasets.SplitGenerator(
                name="validation.full",
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["validation.full"],
                    "split": "validation.full",
                },
            ),
        ]

        if self.config.quality <= 95:
            # Quality levels below 100 additionally provide a training split.
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "lang": self.config.lang,
                        "quality": self.config.quality,
                        "filepath": data_dir["train"],
                        "split": "train",
                    },
                )
            )

        return splits

    def _generate_examples(self, lang, quality, filepath, split):
        """Yields examples as (key, example) tuples."""
        if split == datasets.Split.TRAIN:
            # The training files are bzip2-compressed jsonl, sorted by
            # descending quality, so generation can stop at the first pair
            # that falls below the requested threshold.
            with bz2.open(filepath, "rt", encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    data = json.loads(row)
                    if data["quality"] < quality:
                        break
                    yield id_, {
                        "lang": data["lang"],
                        "input": detokenize(data["sent1"]),
                        "target": detokenize(data["sent2"]),
                        # Training pairs carry no manual annotations.
                        "annot_score": 0.0,
                        "gem_id": data["gem_id"],
                        "references": [detokenize(data["sent2"])],
                    }
        else:
            # The validation and test files contain the annotated data for
            # all six languages, so filter by language. Unless a ".full"
            # variant was requested, keep only pairs the annotators judged
            # to be good paraphrases (annot_score >= 3.0).
            keep_all = (split == "validation.full" or split == "test.full")
            with open(filepath, encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    data = json.loads(row)
                    if data["lang"] == lang:
                        if keep_all or data["annot_score"] >= 3.0:
                            yield id_, {
                                "lang": data["lang"],
                                "input": detokenize(data["sent1"]),
                                "target": detokenize(data["sent2"]),
                                "annot_score": data["annot_score"],
                                "gem_id": data["gem_id"],
                                "references": [detokenize(data["sent2"])],
                            }
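

if __name__ == "__main__":
    # Ad-hoc smoke test (a sketch, not required by the datasets library):
    # instantiate a configuration directly and show its generated name.
    cfg = OpusparcusConfig(lang="en", quality=95, version=_VERSION)
    print(cfg.name)         # -> "en.95"
    print(cfg.description)  # -> "Opusparcus datasets for 'en', training set quality: 95"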