# Acknowledgement: dataset builder script adapted from https://huggingface.co/datasets/glue/blob/main/glue.py
import datasets
import jsonlines
CITATION_BLOB = '''
@inproceedings{krishna2023usb,
title={USB: A Unified Summarization Benchmark Across Tasks and Domains},
author={Krishna, Kundan and Gupta, Prakhar and Ramprasad, Sanjana and Wallace, Byron C and Bigham, Jeffrey P and Lipton, Zachary C},
booktitle={Findings of the Association for Computational Linguistics: EMNLP 2023},
year={2023}
}
'''
DESCRIPTION_BLOB = '''
The USB benchmark consists of labeled datasets for a collection of 8 text summarization tasks,
with a particular focus on the factuality and controllability of summary generation.
The paper is available at https://arxiv.org/abs/2305.14296
'''
class USBConfig(datasets.BuilderConfig):
def __init__(
self,
text_features,
label_column,
citation=CITATION_BLOB,
data_url="processed_data.tar.gz",
label_classes=None,
process_label=lambda x: x,
**kwargs,
):
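        """BuilderConfig for one USB task.

        Args:
            text_features: mapping from feature name to a dtype tag ("int", "sent",
                "listsent", "listint", or "listlistint") that `_info` converts into
                `datasets` feature types.
            label_column: name of the feature containing the target output.
            citation: BibTeX citation for the benchmark.
            data_url: path of the archive holding one directory per task, each with
                train/validation/test JSONL files.
            label_classes: optional list of label class names.
            process_label: optional callable for post-processing label values.
            **kwargs: forwarded to `datasets.BuilderConfig`.
        """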
super(USBConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
self.text_features = text_features
self.label_column = label_column
self.citation = citation
self.label_classes = label_classes
self.process_label = process_label
self.url = "https://github.com/kukrishna/usb"
        self.data_url = data_url
class USB(datasets.GeneratorBasedBuilder):
"""The Unified Summarization Benchmark."""
BUILDER_CONFIGS = [
USBConfig(
name="topicbased_summarization",
description="Generate a short summary of the given article covering the given topic",
text_features={"summ_idx": "int", "input_lines": "listsent", "topic_name": "sent", "output_lines":"listsent"},
label_column="output_lines",
),
USBConfig(
name="fixing_factuality",
description="Given a summary sentence (claim) and presented evidence from the article, edit the summary to remove unsupported or contradicting facts",
text_features={"summ_idx": "int", "input_lines": "listsent", "initial_summary": "sent", "fixed_summary":"sent"},
label_column="fixed_summary",
),
USBConfig(
name="unsupported_span_prediction",
description="Given a summary sentence (claim) and presented evidence from the article, mark the parts of the summary which are not supported by the evidence by surrounding them with [] and [/] tags.",
text_features={"summ_idx": "int", "input_lines": "listsent", "summary": "sent", "annotated_summary":"sent"},
label_column="annotated_summary",
),
USBConfig(
name="evidence_extraction",
description="Given an article and its summary, for each summary sentence, produce a minimal list of sentences from the article which provide sufficient evidence for all facts in the summary sentence.",
text_features={"input_lines": "listsent", "summary_lines": "listsent", "evidence_labels":"listlistint"},
label_column="evidence_labels",
),
USBConfig(
name="multisentence_compression",
description="Given a list of sentences from an article, generate a single sentence summary of the presented cluster of sentences.",
text_features={"summ_idx": "int", "input_lines": "listsent", "output_lines": "listsent"},
label_column="output_lines",
),
USBConfig(
name="extractive_summarization",
description="Given an article, generate an extractive summary by producing a subset o the article's sentences",
text_features={"input_lines": "listsent", "labels": "listint"},
label_column="labels",
),
USBConfig(
name="abstractive_summarization",
description="Given an article, generate its abstractive summary",
text_features={"input_lines": "listsent", "output_lines": "listsent"},
label_column="output_lines",
),
USBConfig(
name="factuality_classification",
description="Given a summary sentence (claim) and presented evidence from the article, predict whether all facts of the claim are supported by and in agreement with the presented evidence, or not.",
text_features={"summ_idx": "int", "input_lines": "listsent", "summary_sent": "sent", "label":"int"},
label_column="label",
),
]
def _split_generators(self, dl_manager):
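        """Download and extract the data archive, then point each split at the
        task-specific JSONL file (e.g. <data_root>/<config name>/train.jsonl)."""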
data_root = dl_manager.download_and_extract(self.config.data_url)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_file": f"{data_root}/{self.config.name}/train.jsonl",
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"data_file": f"{data_root}/{self.config.name}/validation.jsonl",
"split": "validation",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data_file": f"{data_root}/{self.config.name}/test.jsonl",
"split": "test",
},
),
]
def _generate_examples(self, data_file, split):
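        """Yield (key, example) pairs, one per JSONL line. The id is made unique by
        appending the line index, and `domain` is derived from the id's "/" prefix."""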
with jsonlines.open(data_file) as f:
for ex_idx,example in enumerate(f):
example["id"] = example["id"]+":"+str(ex_idx)
example["domain"] = example["id"].split("/")[0]
yield example["id"], example
def _info(self):
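        """Construct the `datasets.Features` schema by translating the config's dtype
        tags ("int", "listint", "listlistint", "sent", "listsent") into feature types."""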
features = {}
features["id"] = datasets.Value("string")
features["domain"] = datasets.Value("string")
for (text_feature,dtype) in self.config.text_features.items():
hf_dtype = None
if dtype=="int":
hf_dtype = datasets.Value("int32")
elif dtype=="listint":
hf_dtype = datasets.Sequence(datasets.Value("int32"))
elif dtype=="listlistint":
hf_dtype = datasets.Sequence(datasets.Sequence(datasets.Value("int32")))
elif dtype=="sent":
hf_dtype = datasets.Value("string")
elif dtype=="listsent":
hf_dtype = datasets.Sequence(datasets.Value("string"))
            else:
                raise NotImplementedError(f"Unsupported dtype tag: {dtype}")
features[text_feature] = hf_dtype
return datasets.DatasetInfo(
description=DESCRIPTION_BLOB,
features=datasets.Features(features),
homepage=self.config.url,
citation=self.config.citation,
)
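

# Example usage (a minimal sketch; the "kundank/usb" Hub path below is an assumption --
# point `load_dataset` at wherever this builder script / dataset is actually hosted):
#
#   from datasets import load_dataset
#   ds = load_dataset("kundank/usb", "abstractive_summarization")
#   print(ds["train"][0]["input_lines"])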