"""WikiLingua.""" |
|
|
|
|
|
import json |
|
|
|
import datasets |
|
|
|
|
|
|
|


_CITATION = """\
@inproceedings{ladhak-etal-2020-wikilingua,
    title = "{W}iki{L}ingua: A New Benchmark Dataset for Cross-Lingual Abstractive Summarization",
    author = "Ladhak, Faisal  and
      Durmus, Esin  and
      Cardie, Claire  and
      McKeown, Kathleen",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.findings-emnlp.360",
    doi = "10.18653/v1/2020.findings-emnlp.360",
    pages = "4034--4048",
}
"""

_DESCRIPTION = """\
WikiLingua is a large-scale multilingual dataset for the evaluation of
cross-lingual abstractive summarization systems. The dataset includes ~770k
article and summary pairs in 18 languages from WikiHow. The gold-standard
article-summary alignments across languages were produced by aligning the
images used to describe each how-to step in an article.
"""

_HOMEPAGE = "https://github.com/esdurmus/Wikilingua"

_LICENSE = "CC BY-NC-SA 3.0"

_URL = "data/{language}.jsonl.gz"

_LANGUAGES = [
    "arabic",
    "chinese",
    "czech",
    "dutch",
    "english",
    "french",
    "german",
    "hindi",
    "indonesian",
    "italian",
    "japanese",
    "korean",
    "portuguese",
    "russian",
    "spanish",
    "thai",
    "turkish",
    "vietnamese",
]
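
# Each data/{language}.jsonl.gz file is expected to hold one JSON object per line,
# shaped as `_generate_examples` and `_process_article` below assume (illustrative
# values; the parser only reads the two english_* fields for non-English configs):
#
#     {
#         "url": "https://www.wikihow.com/...",
#         "article": {
#             "<section name>": {
#                 "document": "...",
#                 "summary": "...",
#                 "english_url": "https://www.wikihow.com/...",
#                 "english_section_name": "..."
#             }
#         }
#     }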


class WikiLingua(datasets.GeneratorBasedBuilder):
    """WikiLingua dataset."""

    VERSION = datasets.Version("1.1.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            version=datasets.Version("1.1.1"),
            description=f"A subset of article-summary pairs in {lang.capitalize()}",
        )
        for lang in _LANGUAGES
    ]

    DEFAULT_CONFIG_NAME = "english"

    def _info(self):
        if self.config.name == "english":
            features = datasets.Features(
                {
                    "url": datasets.Value("string"),
                    "article": datasets.Sequence(
                        {
                            "section_name": datasets.Value("string"),
                            "document": datasets.Value("string"),
                            "summary": datasets.Value("string"),
                        }
                    ),
                }
            )
        else:
            features = datasets.Features(
                {
                    "url": datasets.Value("string"),
                    "article": datasets.Sequence(
                        {
                            "section_name": datasets.Value("string"),
                            "document": datasets.Value("string"),
                            "summary": datasets.Value("string"),
                            "english_url": datasets.Value("string"),
                            "english_section_name": datasets.Value("string"),
                        }
                    ),
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        filepath = dl_manager.download_and_extract(_URL.format(language=self.config.name))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": filepath,
                },
            ),
        ]

    def _process_article(self, article):
        """Parse the article and convert it into a list of dicts."""
        processed_article = []
        for key, value in article.items():
            row = {"section_name": key, "document": value["document"], "summary": value["summary"]}

            if self.config.name != "english":
                row["english_url"] = value["english_url"]
                row["english_section_name"] = value["english_section_name"]
            processed_article.append(row)

        return processed_article

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, "rb") as f:
            for id_, line in enumerate(f):
                row = json.loads(line)
                yield id_, {"url": row["url"], "article": self._process_article(row["article"])}
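
# Note on the loaded format (a sketch, not authoritative): because "article" is
# declared as a `datasets.Sequence` of a dict of features, `datasets` exposes it
# as a dict of parallel lists, so a loaded example looks roughly like:
#
#     {
#         "url": "https://www.wikihow.com/...",
#         "article": {
#             "section_name": ["...", "..."],
#             "document": ["...", "..."],
#             "summary": ["...", "..."],
#         },
#     }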