# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""CNN/Dailymail Dutch summarization dataset."""
import csv
import datasets
_DESCRIPTION = """\
This dataset is the CNN/Dailymail dataset translated to Dutch.
This is the original dataset:
```
load_dataset("cnn_dailymail", '3.0.0')
```
And this is the HuggingFace translation pipeline:
```
pipeline(
task='translation_en_to_nl',
model='Helsinki-NLP/opus-mt-en-nl',
tokenizer='Helsinki-NLP/opus-mt-en-nl')
```
"""
# The second citation introduces the source data, while the first
# introduces the specific form (non-anonymized) we use here.
_CITATION = """\
@article{DBLP:journals/corr/SeeLM17,
author = {Abigail See and
Peter J. Liu and
Christopher D. Manning},
title = {Get To The Point: Summarization with Pointer-Generator Networks},
journal = {CoRR},
volume = {abs/1704.04368},
year = {2017},
url = {http://arxiv.org/abs/1704.04368},
archivePrefix = {arXiv},
eprint = {1704.04368},
timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/SeeLM17},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{hermann2015teaching,
title={Teaching machines to read and comprehend},
author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},
booktitle={Advances in neural information processing systems},
pages={1693--1701},
year={2015}
}
"""
_TRAIN_DOWNLOAD_URLS = [
    "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl/resolve/main/cnn_dailymail_nl_train_000000000000.csv.gz",
    "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl/resolve/main/cnn_dailymail_nl_train_000000000001.csv.gz",
    "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl/resolve/main/cnn_dailymail_nl_train_000000000002.csv.gz",
]
_VALIDATION_DOWNLOAD_URL = "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl/resolve/main/cnn_dailymail_nl_validation.csv.gz"
_TEST_DOWNLOAD_URL = "https://huggingface.co/datasets/ml6team/cnn_dailymail_nl/resolve/main/cnn_dailymail_nl_test.csv.gz"
_ID = "id"
_HIGHLIGHTS = "highlights"
_ARTICLE = "article"


class CnnDailymailNl(datasets.GeneratorBasedBuilder):
    """CNN/Dailymail Dutch summarization dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    _ARTICLE: datasets.Value("string"),
                    _HIGHLIGHTS: datasets.Value("string"),
                    _ID: datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/ml6team/cnn_dailymail_nl",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The training split is sharded over several gzipped CSV files; the
        # download manager caches and decompresses them to local paths.
        train_paths = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URLS)
        validation_path = dl_manager.download_and_extract(_VALIDATION_DOWNLOAD_URL)
        test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_paths}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepaths": [validation_path]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepaths": [test_path]}
            ),
        ]

    def _generate_examples(self, filepaths):
        """Generate Dutch CNN/Dailymail examples."""
        for filepath in filepaths:  # training data is divided over multiple shards
            with open(filepath, encoding="utf-8") as csv_file:
                csv_reader = csv.reader(
                    csv_file,
                    quotechar='"',
                    delimiter=",",
                    quoting=csv.QUOTE_ALL,
                    skipinitialspace=True,
                )
                next(csv_reader)  # skip the header row
                for row in csv_reader:
                    article_id, article, highlights = row
                    yield article_id, {
                        _ARTICLE: article,
                        _HIGHLIGHTS: highlights,
                        _ID: article_id,
                    }
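

# A minimal smoke test, a sketch only: it assumes this file is used as a local
# loading script and that the CSV shards can be downloaded from the Hub.
if __name__ == "__main__":
    dutch_cnn = datasets.load_dataset(__file__, split="validation")
    example = dutch_cnn[0]
    print(example[_ID])
    print(example[_HIGHLIGHTS])
    print(example[_ARTICLE][:200])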