# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WikiLingua: A benchmark dataset for multilingual abstractive summarization."""
import os
import glob
import pickle
import datasets
_CITATION = """\
@article{ladhak-wiki-2020,
title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},
author = {Faisal Ladhak and Esin Durmus and Claire Cardie and Kathleen McKeown},
journal = {arXiv preprint arXiv:2010.03093},
year = {2020},
url = {https://arxiv.org/abs/2010.03093}
}
"""
_DESCRIPTION = """\
WikiLingua is a large-scale multilingual dataset for the evaluation of
crosslingual abstractive summarization systems. The dataset includes ~770k
article and summary pairs in 18 languages from WikiHow. The gold-standard
article-summary alignments across languages were obtained by aligning the images
that are used to describe each how-to step in an article.
"""
_HOMEPAGE = "https://github.com/esdurmus/Wikilingua"
_LICENSE = "CC BY-NC-SA 3.0"
_URL = "wikilingua_cleaned.tar.gz"
VERSION = datasets.Version("2.0.0")
valid_language_codes = {
"ar",
"cs",
"de",
"en",
"es",
"fr",
"hi",
"id",
"it",
"ja",
"ko",
"nl",
"pt",
"ru",
"th",
"tr",
"vi",
"zh",
}
valid_config_names = (
# multilingual
list(valid_language_codes)
+ [
# crosslingual / bridge
f"{src}_{tgt}"
for src in valid_language_codes
for tgt in valid_language_codes
if src != tgt
]
# load all multilingual / all crosslingual
+ ["multilingual", "crosslingual"]
)
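# Examples of the config names generated above: the 18 bare language codes
# ("en", "de", ...) load multilingual (L -> L) data, the 18 * 17 = 306 ordered
# pairs ("de_fr", "en_es", ...) load crosslingual / bridge data, and
# "multilingual" / "crosslingual" load all data for the respective setup.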
class WikilinguaModes:
MULTILINGUAL = "multilingual" # L -> L
CROSSLINGUAL = "crosslingual" # L1 -> L1, L2 -> L2, L1 -> L2, L2 -> L1
BRIDGE = "bridge" # L -> en, en -> L, L -> L
class WikilinguaConfig(datasets.BuilderConfig):
"""BuilderConfig for WikiLingua."""
def __init__(self, name, **kwargs):
"""
Args:
name (string): configuration name that indicates task setup and languages.
1. multilingual - <lang>
2. crosslingual - <lang1>_<lang2>
                3. English as bridge - en_<lang>
4. load all multilingual - multilingual
5. load all crosslingual - crosslingual
            <lang> refers to the corresponding two-letter language code.
            Note that the order of lang1/lang2 does not matter:
            for a language pair (L1, L2), we load L1 <-> L2 as well as L1 -> L1 and L2 -> L2.
"""
if name not in valid_config_names:
raise ValueError(
f"Expected config name to be one of: {', '.join(valid_config_names)}"
)
eles = name.split("_")
if name in (WikilinguaModes.MULTILINGUAL, WikilinguaModes.CROSSLINGUAL):
self.mode = name
self.source_lang = None
self.target_lang = None
description = f"Wikilingua summarization data ({self.mode}; all instances)"
else:
if len(eles) == 1:
mode = WikilinguaModes.MULTILINGUAL
source_lang, target_lang = name, name
elif len(eles) == 2:
source_lang, target_lang = eles
if source_lang == "en" or target_lang == "en":
mode = WikilinguaModes.BRIDGE
else:
mode = WikilinguaModes.CROSSLINGUAL
self.source_lang = source_lang
self.target_lang = target_lang
self.mode = mode
description = (
                f"Wikilingua summarization data ({mode}; {source_lang}, {target_lang})"
)
        self.languages = {self.source_lang, self.target_lang}
super().__init__(
name=name,
description=description,
**kwargs,
)
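# A small sketch of how config names are parsed by WikilinguaConfig above
# (attribute values follow directly from __init__; not an exhaustive list):
#
#   WikilinguaConfig(name="de")            -> mode="multilingual", source_lang="de", target_lang="de"
#   WikilinguaConfig(name="de_fr")         -> mode="crosslingual", source_lang="de", target_lang="fr"
#   WikilinguaConfig(name="en_tr")         -> mode="bridge",       source_lang="en", target_lang="tr"
#   WikilinguaConfig(name="crosslingual")  -> mode="crosslingual", source_lang=None, target_lang=None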
class WikiLingua(datasets.GeneratorBasedBuilder):
"""WikiLingua: A benchmark dataset for multilingual abstractive summarization."""
BUILDER_CONFIG_CLASS = WikilinguaConfig
BUILDER_CONFIGS = [
WikilinguaConfig(
name=config_name,
version=VERSION,
)
for config_name in valid_config_names
]
DEFAULT_CONFIG_NAME = "en"
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"gem_id": datasets.Value("string"),
"gem_parent_id": datasets.Value("string"),
"source_language": datasets.Value("string"),
"target_language": datasets.Value("string"),
"source": datasets.Value("string"),
"target": datasets.Value("string"),
"references": [datasets.Value("string")],
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
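    # Each example yielded by _generate_examples below is a flat dict matching
    # the features declared above; the field values here are illustrative
    # placeholders only:
    #
    #   {
    #       "gem_id": "<instance id>",
    #       "gem_parent_id": "<instance id>",       # identical to gem_id
    #       "source_language": "es",
    #       "target_language": "en",
    #       "source": "<full how-to article text>",
    #       "target": "<reference summary>",
    #       "references": ["<reference summary>"],  # singleton list wrapping the target
    #   }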
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "cleaned")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepaths": glob.glob(
                        os.path.join(data_dir, "wikilingua_*.train.pk")
)
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepaths": glob.glob(
                        os.path.join(data_dir, "wikilingua_*lingual.val.pk")
)
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepaths": glob.glob(
                        os.path.join(data_dir, "wikilingua_*lingual.test.pk")
)
},
),
datasets.SplitGenerator(
name=f"sampled_{datasets.Split.VALIDATION}",
gen_kwargs={
"filepaths": glob.glob(
                        os.path.join(data_dir, "wikilingua_*_sampled.val.pk")
)
},
),
datasets.SplitGenerator(
name=f"sampled_{datasets.Split.TEST}",
gen_kwargs={
"filepaths": glob.glob(
                        os.path.join(data_dir, "wikilingua_*_sampled.test.pk")
)
},
),
]
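    # The glob patterns above expect pickled files in the extracted archive's
    # "cleaned/" directory, one per task setup and split. Based on the patterns
    # (the exact file names are an assumption, not verified), e.g.:
    #   cleaned/wikilingua_multilingual.train.pk
    #   cleaned/wikilingua_crosslingual.val.pk
    #   cleaned/wikilingua_crosslingual_sampled.test.pk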
def _generate_examples(self, filepaths):
"""Yields examples."""
        for filepath in filepaths:
            # when loading all multilingual / all crosslingual data, skip
            # files that belong to the other task setup
            if (
self.config.name == WikilinguaModes.MULTILINGUAL
and WikilinguaModes.CROSSLINGUAL in filepath
) or (
self.config.name == WikilinguaModes.CROSSLINGUAL
and WikilinguaModes.MULTILINGUAL in filepath
):
yield from []
else:
with open(filepath, "rb") as f:
data = pickle.load(f)
for d in data:
idx = d["id"].replace(".", "-")
src = d["document"].strip()
tgt = d["summary"].strip()
src_lang = d["source"]
tgt_lang = d["target"]
                    # when loading a specific language or language pair,
                    # keep only instances in those languages
if any(self.config.languages):
if not (
src_lang in self.config.languages
and tgt_lang in self.config.languages
):
continue
                    # in bridge mode, we are interested in L <-> en and L -> L, but not en -> en
if self.config.mode == WikilinguaModes.BRIDGE:
if src_lang == "en" and tgt_lang == "en":
continue
yield idx, {
"gem_id": idx,
"gem_parent_id": idx,
"source_language": src_lang,
"target_language": tgt_lang,
"source": src,
"target": tgt,
"references": [tgt],
}
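# Minimal usage sketch (hypothetical; assumes this script and the
# "wikilingua_cleaned.tar.gz" archive sit in the same local directory):
#
#   import datasets
#
#   # crosslingual Spanish <-> French (plus es -> es and fr -> fr)
#   ds = datasets.load_dataset("path/to/wiki_lingua.py", "es_fr", split="train")
#   print(ds[0]["source_language"], "->", ds[0]["target_language"])
#
# Besides train/validation/test, the "sampled_validation" and "sampled_test"
# splits defined in _split_generators are also available.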