import json
import os

import datasets

_CITATION = """\
@inproceedings{narayan-etal-2018-dont,
    title = "Don{'}t Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization",
    author = "Narayan, Shashi and
      Cohen, Shay B. and
      Lapata, Mirella",
    booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
    month = oct # "-" # nov,
    year = "2018",
    address = "Brussels, Belgium",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/D18-1206",
    doi = "10.18653/v1/D18-1206",
    pages = "1797--1807",
    abstract = "We introduce {``}extreme summarization{''}, a new single-document summarization task which does not favor extractive strategies and calls for an abstractive modeling approach. The idea is to create a short, one-sentence news summary answering the question {``}What is the article about?{''}. We collect a real-world, large-scale dataset for this task by harvesting online articles from the British Broadcasting Corporation (BBC). We propose a novel abstractive model which is conditioned on the article{'}s topics and based entirely on convolutional neural networks. We demonstrate experimentally that this architecture captures long-range dependencies in a document and recognizes pertinent content, outperforming an oracle extractive system and state-of-the-art abstractive approaches when evaluated automatically and by humans.",
}
"""

_DESCRIPTION = """\
This is the XSUM subset of the GEM benchmark: an extreme-summarization task in
which a BBC news article is compressed into a single-sentence summary.
"""
_URLs = {
    "data": "http://bollin.inf.ed.ac.uk/public/direct/XSUM-EMNLP18-Summary-Data-Original.tar.gz",
    "splits": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_xsum_confidence_0.8.json",
    "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/xsum.zip",
}
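
# Social-sharing and navigation boilerplate that the BBC scrape leaves inside
# article bodies; these lines are dropped before the document text is built.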
_XSUM_REMOVE_LINES = {
    "Share this with\n",
    "Email\n",
    "Facebook\n",
    "Messenger\n",
    "Twitter\n",
    "Pinterest\n",
    "WhatsApp\n",
    "Linkedin\n",
    "LinkedIn\n",
    "Copy this link\n",
    "These are external links and will open in a new window\n",
}


class Xsum(datasets.GeneratorBasedBuilder):
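    """XSUM subset of the GEM benchmark, rebuilt from the original BBC data."""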

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="xsum",
            version=datasets.Version("1.0.0"),
            description="XSUM subset of the GEM benchmark",
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "gem_id": datasets.Value("string"),
                    "gem_parent_id": datasets.Value("string"),
                    "xsum_id": datasets.Value("string"),
                    "document": datasets.Value("string"),
                    "target": datasets.Value("string"),
                    "references": [datasets.Value("string")],
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_dir = dl_manager.download_and_extract(_URLs)
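        # Challenge-set files shipped in the xsum.zip archive: random samples
        # of train/validation, plus test-set variants built with back-translation,
        # "butter fingers" typo perturbation (p=0.02 and p=0.05), punctuation
        # removal, and an out-of-domain COVID-19 test set.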
        challenge_sets = [
            ("challenge_train_sample", "train_xsum_RandomSample500.json"),
            ("challenge_validation_sample", "validation_xsum_RandomSample500.json"),
            ("challenge_test_backtranslation", "test_xsum_BackTranslation500.json"),
            (
                "challenge_test_bfp_02",
                "test_xsum_ButterFingersPerturbation_p=0.02_500.json",
            ),
            (
                "challenge_test_bfp_05",
                "test_xsum_ButterFingersPerturbation_p=0.05_500.json",
            ),
            ("challenge_test_nopunc", "test_xsum_WithoutPunctuation500.json"),
            ("challenge_test_covid", "en_test_covid19.jsonl"),
        ]
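        # The three standard splits share one directory of .summary files; the
        # splits JSON maps each split name to the list of XSUM article ids.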
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": dl_dir["splits"],
                    "split": "train",
                    "filepaths": os.path.join(dl_dir["data"], "bbc-summary-data"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": dl_dir["splits"],
                    "split": "validation",
                    "filepaths": os.path.join(dl_dir["data"], "bbc-summary-data"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": dl_dir["splits"],
                    "split": "test",
                    "filepaths": os.path.join(dl_dir["data"], "bbc-summary-data"),
                },
            ),
        ] + [
            datasets.SplitGenerator(
                name=challenge_split,
                gen_kwargs={
                    "filepath": os.path.join(dl_dir["challenge_set"], "xsum", filename),
                    "split": challenge_split,
                },
            )
            for challenge_split, filename in challenge_sets
        ]

    def _generate_examples(self, filepath, split, filepaths=None):
        """Yields examples."""
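        # Three cases: the COVID challenge set ships as JSON Lines, the other
        # challenge sets as a single JSON file, and the standard splits are
        # rebuilt from the raw .summary files listed in the splits JSON.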
        if "challenge" in split:
            if "covid" in split:
                with open(filepath, encoding="utf-8") as f:
                    for id_, line in enumerate(f):
                        data = json.loads(line)
                        yield id_, {
                            "gem_id": f"{self.config.name}-{split}-{id_}",
                            "gem_parent_id": f"{self.config.name}-{split}-{id_}",
                            "xsum_id": data["url"],
                            "document": data["text"],
                            "target": data["summary"],
                            "references": [] if split == "train" else [data["summary"]],
                        }
            else:
                with open(filepath, encoding="utf-8") as f:
                    examples = json.load(f)
                # Some challenge files wrap the example list in a one-entry dict.
                if isinstance(examples, dict):
                    assert len(examples) == 1, "multiple entries found"
                    examples = list(examples.values())[0]
                for id_, example in enumerate(examples):
                    example["gem_parent_id"] = example["gem_id"]
                    example["gem_id"] = f"{self.config.name}-{split}-{id_}"
                    yield id_, example
        else:
            with open(filepath, "r", encoding="utf-8") as f:
                split_ids = json.load(f)
            for id_, i in enumerate(split_ids[split]):
                with open(
                    os.path.join(filepaths, i + ".summary"), "r", encoding="utf-8"
                ) as f:
                    # Keep only non-empty lines that are not scrape boilerplate.
                    text = "".join(
                        line
                        for line in f
                        if line not in _XSUM_REMOVE_LINES and line.strip()
                    )
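                # The .summary files delimit sections with [SN] markers (URL,
                # TITLE, FIRST-SENTENCE, RESTBODY); after splitting, segs[6]
                # holds the one-sentence summary and segs[8] the article body.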
                segs = text.split("[SN]")
                yield id_, {
                    "gem_id": f"{self.config.name}-{split}-{id_}",
                    "gem_parent_id": f"{self.config.name}-{split}-{id_}",
                    "xsum_id": i,
                    "document": segs[8].strip(),
                    "target": segs[6].strip(),
                    "references": [] if split == "train" else [segs[6].strip()],
                }
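

# Minimal usage sketch (illustrative, not part of the original script): save this
# file as xsum.py and point datasets.load_dataset at it; the download URLs above
# must still be reachable for the builder to work.
#
#   from datasets import load_dataset
#   ds = load_dataset("./xsum.py", "xsum", split="validation")
#   print(ds[0]["target"])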