"""TODO: Add a description here.""" |
|
|
|
|
|
import csv |
|
import json |
|
import re |
|
|
|
import datasets |

_CITATION = """\
@inproceedings{perez2019generating,
  title={Generating Summaries with Topic Templates and Structured Convolutional Decoders},
  author={Perez-Beltrachini, Laura and Liu, Yang and Lapata, Mirella},
  booktitle={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},
  pages={5107--5116},
  year={2019}
}
"""

_DESCRIPTION = """\
Summarise the most important facts of a given entity in the Film, Company, and Animal domains from a cluster of related documents.
"""

_HOMEPAGE = "https://datashare.ed.ac.uk/handle/10283/3368"

_LICENSE = "CC BY-SA 3.0"
|
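# Relative paths to the data files for each domain config. "cs_abs" and
# "cs_tdiv" hold the challenge test sets, binned by abstractivity level and
# by topic diversity respectively (see the split names in _split_generators).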
_URLs = {
    "animal": {
        "train": "main_splits/train-animal.jsonl",
        "validation": "main_splits/valid-animal.jsonl",
        "test": "main_splits/test-animal.jsonl",
        "cs_abs": [
            "cs_abs/test-animal_nv_0.jsonl",
            "cs_abs/test-animal_nv_1.jsonl",
            "cs_abs/test-animal_nv_2.jsonl",
            "cs_abs/test-animal_nv_3.jsonl",
            "cs_abs/test-animal_nv_4.jsonl",
            "cs_abs/test-animal_nv_6.jsonl",
            "cs_abs/test-animal_nv_7.jsonl",
            "cs_abs/test-animal_nv_8.jsonl",
            "cs_abs/test-animal_nv_9.jsonl",
        ],
        "cs_tdiv": [
            "cs_tdiv/test-animal_tdiv_0.jsonl",
            "cs_tdiv/test-animal_tdiv_1.jsonl",
            "cs_tdiv/test-animal_tdiv_2.jsonl",
            "cs_tdiv/test-animal_tdiv_3.jsonl",
        ],
    },
    "company": {
        "train": "main_splits/train-company.jsonl",
        "validation": "main_splits/valid-company.jsonl",
        "test": "main_splits/test-company.jsonl",
        "cs_abs": [
            "cs_abs/test-company_nv_0.jsonl",
            "cs_abs/test-company_nv_1.jsonl",
            "cs_abs/test-company_nv_2.jsonl",
            "cs_abs/test-company_nv_3.jsonl",
            "cs_abs/test-company_nv_4.jsonl",
            "cs_abs/test-company_nv_6.jsonl",
            "cs_abs/test-company_nv_7.jsonl",
            "cs_abs/test-company_nv_8.jsonl",
            "cs_abs/test-company_nv_9.jsonl",
        ],
        "cs_tdiv": [
            "cs_tdiv/test-company_tdiv_0.jsonl",
            "cs_tdiv/test-company_tdiv_1.jsonl",
            "cs_tdiv/test-company_tdiv_2.jsonl",
            "cs_tdiv/test-company_tdiv_3.jsonl",
        ],
    },
    "film": {
        "train": "main_splits/train-film.jsonl",
        "validation": "main_splits/valid-film.jsonl",
        "test": "main_splits/test-film.jsonl",
        "cs_abs": [
            "cs_abs/test-film_nv_0.jsonl",
            "cs_abs/test-film_nv_1.jsonl",
            "cs_abs/test-film_nv_2.jsonl",
            "cs_abs/test-film_nv_3.jsonl",
            "cs_abs/test-film_nv_4.jsonl",
            "cs_abs/test-film_nv_6.jsonl",
            "cs_abs/test-film_nv_7.jsonl",
            "cs_abs/test-film_nv_8.jsonl",
            "cs_abs/test-film_nv_9.jsonl",
        ],
        "cs_tdiv": [
            "cs_tdiv/test-film_tdiv_0.jsonl",
            "cs_tdiv/test-film_tdiv_1.jsonl",
            "cs_tdiv/test-film_tdiv_2.jsonl",
            "cs_tdiv/test-film_tdiv_3.jsonl",
        ],
    },
}


def detokenize(text):
    """
    Untokenizing a text undoes the tokenizing operation, restoring
    punctuation and spaces to the places that people expect them to be.
    Ideally, `untokenize(tokenize(text))` should be identical to `text`,
    except for line breaks.
    """
    step1 = text.replace("`` ", '"').replace(" ''", '"').replace(". . .", "...")
    step2 = step1.replace(" ( ", " (").replace(" ) ", ") ")
    step3 = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", step2)
    step4 = re.sub(r" ([.,:;?!%]+)$", r"\1", step3)
    step5 = (
        step4.replace(" '", "'")
        .replace(" n't", "n't")
        .replace("can not", "cannot")
        .replace(" 've", "'ve")
    )
    step6 = step5.replace(" ` ", " '")
    return step6.strip()
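
# Example on a hypothetical tokenized input, showing what detokenize restores:
#   detokenize("the film , released in 1999 , was n't a hit .")
#   -> "the film, released in 1999, wasn't a hit."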


class WikiCatSum(datasets.GeneratorBasedBuilder):
    """A summarization dataset with multiple domains."""

    VERSION = datasets.Version("0.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="animal", version=VERSION, description="Animal domain"
        ),
        datasets.BuilderConfig(
            name="company", version=VERSION, description="Company domain"
        ),
        datasets.BuilderConfig(name="film", version=VERSION, description="Film domain"),
    ]

    DEFAULT_CONFIG_NAME = "animal"

    def _info(self):
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "gem_parent_id": datasets.Value("string"),
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "paragraphs": datasets.features.Sequence(datasets.Value("string")),
                "summary": datasets.features.Sequence(
                    {
                        "text": datasets.Value("string"),
                        "topic": datasets.Value("int16"),
                    }
                ),
                "target": datasets.Value("string"),
                "references": [
                    datasets.Value("string"),
                ],
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        my_urls = _URLs[self.config.name]
        d_conf = dl_manager.download_and_extract(my_urls)
        challenge_sets = [
            ("challenge_test_abstractivity_%d" % (lvl), fname)
            for lvl, fname in enumerate(d_conf["cs_abs"])
        ] + [
            ("challenge_test_topic_diversity_%d" % (lvl), fname)
            for lvl, fname in enumerate(d_conf["cs_tdiv"])
        ]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": d_conf["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": d_conf["test"], "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": d_conf["validation"],
                    "split": "validation",
                },
            ),
        ] + [
            datasets.SplitGenerator(
                name=challenge_split,
                gen_kwargs={
                    "filepath": filename,
                    "split": challenge_split,
                },
            )
            for challenge_split, filename in challenge_sets
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                data["paragraphs"] = [detokenize(p) for p in data["paragraphs"]]

                if isinstance(data["summary"], list):
                    detok_targets = " ".join(
                        [detokenize(s["text"]) for s in data["summary"]]
                    )
                    data["target"] = detok_targets
                    data["references"] = [detok_targets]
                else:
                    raise ValueError(
                        f"Expected 'summary' to be a list of sentences, got: {data['summary']!r}"
                    )
                data["gem_parent_id"] = f"{self.config.name}-{split}-{id_+1}"
                data["gem_id"] = f"{self.config.name}-{split}-{id_+1}"
                yield id_, data
|
|
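# Usage sketch (hypothetical path; assumes this script sits next to the data
# files referenced in _URLs and is loaded as a local `datasets` script):
#
#   from datasets import load_dataset
#   ds = load_dataset("path/to/wikicatsum.py", "film")
#   print(ds["train"][0]["title"])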