Tasks: Summarization
Multilinguality: multilingual
Language Creators: crowdsourced
Annotations Creators: crowdsourced
Source Datasets: original
ArXiv:
Tags:
License: CC BY-NC-SA 3.0
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WikiLingua."""

import json

import datasets
# Citation for the dataset (see the paper or the dataset repo/website).
_CITATION = """\
@inproceedings{ladhak-etal-2020-wikilingua,
    title = "{W}iki{L}ingua: A New Benchmark Dataset for Cross-Lingual Abstractive Summarization",
    author = "Ladhak, Faisal  and
      Durmus, Esin  and
      Cardie, Claire  and
      McKeown, Kathleen",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.findings-emnlp.360",
    doi = "10.18653/v1/2020.findings-emnlp.360",
    pages = "4034--4048",
}
"""
_DESCRIPTION = """\
WikiLingua is a large-scale multilingual dataset for the evaluation of
cross-lingual abstractive summarization systems. The dataset includes ~770k
article and summary pairs in 18 languages from WikiHow. The gold-standard
article-summary alignments across languages were obtained by aligning the
images used to describe each how-to step in an article.
"""
_HOMEPAGE = "https://github.com/esdurmus/Wikilingua"

_LICENSE = "CC BY-NC-SA 3.0"

# Relative path to the per-language data files shipped with the dataset repository.
_URL = "data/{language}.jsonl.gz"

_LANGUAGES = [
    "arabic",
    "chinese",
    "czech",
    "dutch",
    "english",
    "french",
    "german",
    "hindi",
    "indonesian",
    "italian",
    "japanese",
    "korean",
    "portuguese",
    "russian",
    "spanish",
    "thai",
    "turkish",
    "vietnamese",
]
class WikiLingua(datasets.GeneratorBasedBuilder):
    """WikiLingua dataset."""

    VERSION = datasets.Version("1.1.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            version=datasets.Version("1.1.1"),
            description=f"A subset of article-summary pairs in {lang.capitalize()}",
        )
        for lang in _LANGUAGES
    ]

    DEFAULT_CONFIG_NAME = "english"
    def _info(self):
        # Non-English configs carry two extra per-section fields that point back to
        # the aligned English article (see _process_article below).
        if self.config.name == "english":
            features = datasets.Features(
                {
                    "url": datasets.Value("string"),
                    "article": datasets.Sequence(
                        {
                            "section_name": datasets.Value("string"),
                            "document": datasets.Value("string"),
                            "summary": datasets.Value("string"),
                        }
                    ),
                }
            )
        else:
            features = datasets.Features(
                {
                    "url": datasets.Value("string"),
                    "article": datasets.Sequence(
                        {
                            "section_name": datasets.Value("string"),
                            "document": datasets.Value("string"),
                            "summary": datasets.Value("string"),
                            "english_url": datasets.Value("string"),
                            "english_section_name": datasets.Value("string"),
                        }
                    ),
                }
            )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # Features are defined above because they differ between the English and
            # non-English configurations.
            features=features,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            # License for the dataset if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        filepath = dl_manager.download_and_extract(_URL.format(language=self.config.name))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={
                    "filepath": filepath,
                },
            ),
        ]
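    # Each line of the JSONL file is a JSON object of the following shape (keys
    # inferred from the accesses below; the data files may contain more fields):
    #
    #     {
    #         "url": "<wikiHow article url>",
    #         "article": {
    #             "<section name>": {
    #                 "document": "...",
    #                 "summary": "...",
    #                 # non-English configs only:
    #                 "english_url": "...",
    #                 "english_section_name": "...",
    #             },
    #             ...
    #         },
    #     }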
    def _process_article(self, article):
        """Parse the article and convert it into a list of dicts, one per section."""
        processed_article = []
        for key, value in article.items():
            row = {"section_name": key, "document": value["document"], "summary": value["summary"]}
            if self.config.name != "english":
                row["english_url"] = value["english_url"]
                row["english_section_name"] = value["english_section_name"]
            processed_article.append(row)
        return processed_article
    def _generate_examples(self, filepath):
        """Yields examples."""
        # download_and_extract has already decompressed the .jsonl.gz file, so the
        # path points at plain JSON Lines that can be read line by line.
        with open(filepath, "rb") as f:
            for id_, line in enumerate(f):
                row = json.loads(line)
                yield id_, {"url": row["url"], "article": self._process_article(row["article"])}
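# A hedged post-processing sketch (not part of the loading script): flattening each
# article's sections into individual (document, summary) pairs, which is how the data
# is typically consumed for summarization. Assumes the dataset was loaded with
# load_dataset as in the usage sketch near the top of this file.
#
#     def iter_pairs(dataset):
#         for example in dataset:
#             article = example["article"]
#             for doc, summ in zip(article["document"], article["summary"]):
#                 yield {"document": doc, "summary": summ}
#
#     pairs = list(iter_pairs(ds))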