"""WikiLingua: A benchmark dataset for multilingual abstractive summarization.""" |
|
|
|
import os |
|
import datasets |
|
|
|
|
|
_CITATION = """\ |
|
@article{ladhak-wiki-2020, |
|
title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization}, |
|
authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown}, |
|
journal = {arXiv preprint arXiv:2010.03093}, |
|
year = {2020}, |
|
url = {https://arxiv.org/abs/2010.03093} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\
WikiLingua is a large-scale multilingual dataset for the evaluation of
cross-lingual abstractive summarization systems. The dataset includes ~770k
article-summary pairs in 18 languages from WikiHow. The gold-standard
article-summary alignments across languages were obtained by aligning the
images used to describe each how-to step in an article.
"""
|
|
|
_HOMEPAGE = "https://github.com/esdurmus/Wikilingua"

_LICENSE = "CC BY-NC-SA 3.0"
|
|
|
|
|
_URLs = {
    "wiki_lingua_es_en_v0": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
    },
    "wiki_lingua_ru_en_v0": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
    },
    "wiki_lingua_tr_en_v0": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
    },
    "wiki_lingua_vi_en_v0": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
    },
    "wiki_lingua_arabic_ar": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/arabic.zip",
    },
    "wiki_lingua_chinese_zh": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/chinese.zip",
    },
    "wiki_lingua_czech_cs": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/czech.zip",
    },
    "wiki_lingua_dutch_nl": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/dutch.zip",
    },
    "wiki_lingua_english_en": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/english.zip",
    },
    "wiki_lingua_french_fr": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/french.zip",
    },
    "wiki_lingua_german_de": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/german.zip",
    },
    "wiki_lingua_hindi_hi": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/hindi.zip",
    },
    "wiki_lingua_indonesian_id": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/indonesian.zip",
    },
    "wiki_lingua_italian_it": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/italian.zip",
    },
    "wiki_lingua_japanese_ja": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/japanese.zip",
    },
    "wiki_lingua_korean_ko": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/korean.zip",
    },
    "wiki_lingua_portuguese_pt": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/portuguese.zip",
    },
    "wiki_lingua_russian_ru": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/russian.zip",
    },
    "wiki_lingua_spanish_es": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/spanish.zip",
    },
    "wiki_lingua_thai_th": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/thai.zip",
    },
    "wiki_lingua_turkish_tr": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/turkish.zip",
    },
    "wiki_lingua_vietnamese_vi": {
        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/vietnamese.zip",
    },
} |
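
# Note: the "*_v0" configs above are the GEM v0 cross-lingual subsets (e.g. Spanish
# documents with English summaries) and all share a single archive, while the
# "wiki_lingua_<language>_<code>" configs each download a per-language archive of
# the full dataset.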
|
|
|
VERSION = datasets.Version("1.1.0") |
|
|
|
|
|
class WikilinguaConfig(datasets.BuilderConfig):
    """BuilderConfig for WikiLingua."""

    def __init__(self, name, **kwargs):
        # Config names follow two patterns:
        #   "wiki_lingua_<source>_<target>_v0"   -> cross-lingual GEM v0 subset
        #   "wiki_lingua_<language>_<lang_code>" -> full per-language data
        eles = name.split("_")
        is_v0 = "v0" in name
        if is_v0:
            source_lang, target_lang = eles[-3], eles[-2]
        else:
            target_lang = eles[-1]
            source_lang = target_lang

        super().__init__(
            name=name,
            description=f"Wikilingua summarisation data ({source_lang} to {target_lang})",
            **kwargs,
        )
        self.is_v0 = is_v0
        self.source_lang = source_lang
        self.target_lang = target_lang
|
|
|
|
|
class WikiLingua(datasets.GeneratorBasedBuilder):
    """WikiLingua: A benchmark dataset for multilingual abstractive summarization."""

    BUILDER_CONFIG_CLASS = WikilinguaConfig

    BUILDER_CONFIGS = [
        WikilinguaConfig(
            name=lang,
            version=VERSION,
        )
        for lang in _URLs
    ]

    DEFAULT_CONFIG_NAME = "wiki_lingua_es_en_v0"
|
|
|
    def _info(self):
        if self.config.is_v0:
            # v0 cross-lingual configs only carry the flat source/target strings.
            features = datasets.Features(
                {
                    "gem_id": datasets.Value("string"),
                    "gem_parent_id": datasets.Value("string"),
                    "source": datasets.Value("string"),
                    "target": datasets.Value("string"),
                    "references": [datasets.Value("string")],
                }
            )
        else:
            # Full per-language configs additionally expose the documents and
            # summaries aligned with their English counterparts.
            lang = self.config.source_lang
            features = datasets.Features(
                {
                    "gem_id": datasets.Value("string"),
                    "gem_parent_id": datasets.Value("string"),
                    "source_aligned": datasets.Translation(languages=[lang, "en"]),
                    "target_aligned": datasets.Translation(languages=[lang, "en"]),
                    "source": datasets.Value("string"),
                    "target": datasets.Value("string"),
                    "references": [datasets.Value("string")],
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
|
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_dir = dl_manager.download_and_extract(_URLs[self.config.name])
        if self.config.is_v0:
            # v0 archives keep each language pair under GEM_data_crosslingual/<lang>_en.
            lang = self.config.source_lang
            base_dir = os.path.join(dl_dir["data"], "GEM_data_crosslingual", f"{lang}_en")
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": base_dir,
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": base_dir,
                        "split": "val",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": base_dir,
                        "split": "test",
                    },
                ),
            ]
        else:
            # Full archives extract into a directory named after the language
            # (e.g. "spanish"), taken from the config name.
            lang = self.config.source_lang
            lang_name = self.config.name.split("_")[-2]
            base_dir = os.path.join(dl_dir["data"], lang_name)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": base_dir,
                        "split": "train",
                        "lang": lang,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": base_dir,
                        "split": "val",
                        "lang": lang,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": base_dir,
                        "split": "test",
                        "lang": lang,
                    },
                ),
            ]
|
|
|
    def _generate_examples(self, filepath, split, lang=None):
        """Yields examples."""
        if self.config.is_v0:
            # v0 data ships as parallel line-aligned files: <split>.src / <split>.tgt.
            source_path = os.path.join(filepath, f"{split}.src")
            target_path = os.path.join(filepath, f"{split}.tgt")
            with open(source_path, encoding="utf-8") as f_in:
                with open(target_path, encoding="utf-8") as f_out:
                    for id_, (src, tgt) in enumerate(zip(f_in, f_out)):
                        yield id_, {
                            "gem_id": f"{self.config.name}-{split}-{id_}",
                            "gem_parent_id": f"{self.config.name}-{split}-{id_}",
                            "source": src.strip(),
                            "target": tgt.strip(),
                            # References are only provided for evaluation splits.
                            "references": [] if split == "train" else [tgt.strip()],
                        }
        else:
            # Full data ships four line-aligned files per split: sources and targets
            # in the config language plus their English counterparts.
            source_path = os.path.join(filepath, f"{split}.src.{lang}")
            source_path_en = os.path.join(filepath, f"{split}.src.en")
            target_path = os.path.join(filepath, f"{split}.tgt.{lang}")
            target_path_en = os.path.join(filepath, f"{split}.tgt.en")
            with open(source_path, encoding="utf-8") as f_in_ln:
                with open(source_path_en, encoding="utf-8") as f_in_en:
                    with open(target_path, encoding="utf-8") as f_out_ln:
                        with open(target_path_en, encoding="utf-8") as f_out_en:
                            for id_, (src_ln, src_en, tgt_ln, tgt_en) in enumerate(
                                zip(f_in_ln, f_in_en, f_out_ln, f_out_en)
                            ):
                                yield id_, {
                                    "gem_id": f"{self.config.name}-{split}-{id_}",
                                    "gem_parent_id": f"{self.config.name}-{split}-{id_}",
                                    "source_aligned": {
                                        lang: src_ln.strip(),
                                        "en": src_en.strip(),
                                    },
                                    "target_aligned": {
                                        lang: tgt_ln.strip(),
                                        "en": tgt_en.strip(),
                                    },
                                    "source": src_ln.strip(),
                                    "target": tgt_en.strip(),
                                    "references": []
                                    if split == "train"
                                    else [tgt_en.strip()],
                                }
|
|
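# A minimal local smoke-test sketch (not part of the loader itself). It assumes
# network access to the GCS archives above and a `datasets` version that still
# supports loading from a local script; the config name is one of the keys in _URLs.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, "wiki_lingua_es_en_v0", split="validation")
    print(dataset[0])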