Tasks: Summarization
Languages: English
Multilinguality: unknown
Size Categories: unknown
Language Creators: unknown
Annotations Creators: automatically-created
Source Datasets: original
ArXiv:
Tags:
License: CC BY-SA 3.0
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WikiCatSum: summarise the most important facts of a given entity in the Film, Company, and Animal domains from a cluster of related documents."""
import json
import re

import datasets

_CITATION = """\
@inproceedings{perez2019generating,
  title={Generating Summaries with Topic Templates and Structured Convolutional Decoders},
  author={Perez-Beltrachini, Laura and Liu, Yang and Lapata, Mirella},
  booktitle={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},
  pages={5107--5116},
  year={2019}
}
"""

_DESCRIPTION = """\
Summarise the most important facts of a given entity in the Film, Company, and Animal domains from a cluster of related documents.
"""

_HOMEPAGE = "https://datashare.ed.ac.uk/handle/10283/3368"

_LICENSE = "CC BY-SA 3.0"

# The HuggingFace datasets library doesn't host the data; these paths only
# point to the original files. The structure can be an arbitrary nested
# dict/list of URLs (see the `_split_generators` method below).
_URLs = {
    "animal": {
        "train": "main_splits/train-animal.jsonl",
        "validation": "main_splits/valid-animal.jsonl",
        "test": "main_splits/test-animal.jsonl",
        "cs_abs": [
            "cs_abs/test-animal_nv_0.jsonl",
            "cs_abs/test-animal_nv_1.jsonl",
            "cs_abs/test-animal_nv_2.jsonl",
            "cs_abs/test-animal_nv_3.jsonl",
            "cs_abs/test-animal_nv_4.jsonl",
            "cs_abs/test-animal_nv_6.jsonl",
            "cs_abs/test-animal_nv_7.jsonl",
            "cs_abs/test-animal_nv_8.jsonl",
            "cs_abs/test-animal_nv_9.jsonl",
        ],
        "cs_tdiv": [
            "cs_tdiv/test-animal_tdiv_0.jsonl",
            "cs_tdiv/test-animal_tdiv_1.jsonl",
            "cs_tdiv/test-animal_tdiv_2.jsonl",
            "cs_tdiv/test-animal_tdiv_3.jsonl",
        ],
    },
    "company": {
        "train": "main_splits/train-company.jsonl",
        "validation": "main_splits/valid-company.jsonl",
        "test": "main_splits/test-company.jsonl",
        "cs_abs": [
            "cs_abs/test-company_nv_0.jsonl",
            "cs_abs/test-company_nv_1.jsonl",
            "cs_abs/test-company_nv_2.jsonl",
            "cs_abs/test-company_nv_3.jsonl",
            "cs_abs/test-company_nv_4.jsonl",
            "cs_abs/test-company_nv_6.jsonl",
            "cs_abs/test-company_nv_7.jsonl",
            "cs_abs/test-company_nv_8.jsonl",
            "cs_abs/test-company_nv_9.jsonl",
        ],
        "cs_tdiv": [
            "cs_tdiv/test-company_tdiv_0.jsonl",
            "cs_tdiv/test-company_tdiv_1.jsonl",
            "cs_tdiv/test-company_tdiv_2.jsonl",
            "cs_tdiv/test-company_tdiv_3.jsonl",
        ],
    },
    "film": {
        "train": "main_splits/train-film.jsonl",
        "validation": "main_splits/valid-film.jsonl",
        "test": "main_splits/test-film.jsonl",
        "cs_abs": [
            "cs_abs/test-film_nv_0.jsonl",
            "cs_abs/test-film_nv_1.jsonl",
            "cs_abs/test-film_nv_2.jsonl",
            "cs_abs/test-film_nv_3.jsonl",
            "cs_abs/test-film_nv_4.jsonl",
            "cs_abs/test-film_nv_6.jsonl",
            "cs_abs/test-film_nv_7.jsonl",
            "cs_abs/test-film_nv_8.jsonl",
            "cs_abs/test-film_nv_9.jsonl",
        ],
        "cs_tdiv": [
            "cs_tdiv/test-film_tdiv_0.jsonl",
            "cs_tdiv/test-film_tdiv_1.jsonl",
            "cs_tdiv/test-film_tdiv_2.jsonl",
            "cs_tdiv/test-film_tdiv_3.jsonl",
        ],
    },
}
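
# Each configuration maps the three main splits plus two groups of challenge
# sets to JSONL files: "cs_abs" holds the abstractivity challenge sets (note
# that level 5 is absent from the released files) and "cs_tdiv" the
# topic-diversity challenge sets.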


def detokenize(text):
    """
    Detokenizing a text undoes the tokenizing operation, restoring
    punctuation and spaces to the places that people expect them to be.
    Ideally, `detokenize(tokenize(text))` should be identical to `text`,
    except for line breaks.
    """
    step1 = text.replace("`` ", '"').replace(" ''", '"').replace(". . .", "...")
    step2 = step1.replace(" ( ", " (").replace(" ) ", ") ")
    step3 = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", step2)
    step4 = re.sub(r" ([.,:;?!%]+)$", r"\1", step3)
    step5 = (
        step4.replace(" '", "'")
        .replace(" n't", "n't")
        .replace("can not", "cannot")
        .replace(" 've", "'ve")
    )
    step6 = step5.replace(" ` ", " '")
    return step6.strip()
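
# A quick illustration of `detokenize` (the input is a made-up example of
# PTB-style tokenizer output, not a string taken from the dataset):
#
#     >>> detokenize("the movie `` Jaws '' , released in 1975 , ca n't be missed .")
#     'the movie "Jaws", released in 1975, can\'t be missed.'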


class WikiCatSum(datasets.GeneratorBasedBuilder):
    """A summarization dataset with multiple domains."""

    VERSION = datasets.Version("0.1.0")

    # Each domain (animal, company, film) is exposed as a separate
    # configuration; see the loading sketch below.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="animal", version=VERSION, description="Animal domain"
        ),
        datasets.BuilderConfig(
            name="company", version=VERSION, description="Company domain"
        ),
        datasets.BuilderConfig(name="film", version=VERSION, description="Film domain"),
    ]

    DEFAULT_CONFIG_NAME = "animal"  # A default configuration is optional; use one only if it makes sense.
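
    # A minimal loading sketch (this assumes the script is saved locally as
    # `wiki_cat_sum.py`; the file name is an assumption, not part of the release):
    #
    #     import datasets
    #     ds = datasets.load_dataset("./wiki_cat_sum.py", "film", split="train")
    #     print(ds[0]["title"])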

    def _info(self):
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "gem_parent_id": datasets.Value("string"),
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "paragraphs": datasets.features.Sequence(datasets.Value("string")),
                "summary": datasets.features.Sequence(
                    {
                        "text": datasets.Value("string"),
                        "topic": datasets.Value("int16"),
                    }
                ),
                "target": datasets.Value("string"),
                "references": [
                    datasets.Value("string"),
                ],
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            # There is no single canonical (input, target) pair in the features,
            # so no supervised keys are set (they would only be used with
            # as_supervised=True in builder.as_dataset).
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            # License for the dataset.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )
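
    # As yielded by `_generate_examples` below, `summary` is a list of
    # {"text", "topic"} dicts (one per summary sentence), and `target` and
    # `references` hold the detokenized summary. An illustrative shape, with
    # placeholder values:
    #
    #     {
    #         "id": "...",
    #         "title": "...",
    #         "paragraphs": ["detokenized source paragraph", ...],
    #         "summary": [{"text": "...", "topic": 0}, ...],
    #         "target": "detokenized summary",
    #         "references": ["detokenized summary"],
    #         "gem_id": "animal-train-1",
    #         "gem_parent_id": "animal-train-1",
    #     }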

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager is a datasets.download.DownloadManager. It accepts any
        # nested list/dict of URLs and returns the same structure with each URL
        # replaced by a path to a local (cached, extracted) file.
        my_urls = _URLs[self.config.name]
        d_conf = dl_manager.download_and_extract(my_urls)
        # Note: abstractivity level 5 is absent from the released files, so
        # split labels above 4 are offset by one relative to the file names.
        challenge_sets = [
            ("challenge_test_abstractivity_%d" % (lvl), fname)
            for lvl, fname in enumerate(d_conf["cs_abs"])
        ] + [
            ("challenge_test_topic_diversity_%d" % (lvl), fname)
            for lvl, fname in enumerate(d_conf["cs_tdiv"])
        ]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={
                    "filepath": d_conf["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": d_conf["test"], "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": d_conf["validation"],
                    "split": "validation",
                },
            ),
        ] + [
            datasets.SplitGenerator(
                name=challenge_split,
                gen_kwargs={
                    "filepath": filename,
                    "split": challenge_split,
                },
            )
            for challenge_split, filename in challenge_sets
        ]

    def _generate_examples(
        self,
        filepath,
        split,  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    ):
        """Yields examples as (key, example) tuples."""
        # This method reads the file assigned in `_split_generators` and yields
        # (key, example) tuples. The `key` is only there for legacy reasons
        # (tfds) and is not important in itself.
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                data["paragraphs"] = [detokenize(p) for p in data["paragraphs"]]
                if isinstance(data["summary"], list):
                    # The summary is a list of sentence dicts; detokenize each
                    # sentence and join them into a single reference string.
                    detok_targets = " ".join(
                        detokenize(s["text"]) for s in data["summary"]
                    )
                    data["target"] = detok_targets
                    data["references"] = [detok_targets]
                else:
                    raise ValueError(
                        "Unexpected `summary` format: %r" % (data["summary"],)
                    )
                data["gem_parent_id"] = f"{self.config.name}-{split}-{id_ + 1}"
                data["gem_id"] = f"{self.config.name}-{split}-{id_ + 1}"
                yield id_, data
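
# Loading one of the challenge splits defined in `_split_generators`, e.g. the
# first topic-diversity set (again assuming a local copy of this script):
#
#     ds = datasets.load_dataset("./wiki_cat_sum.py", "animal",
#                                split="challenge_test_topic_diversity_0")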