# coding=utf-8
# Lint as: python3
"""The SCROLLS benchmark."""
import json
import os
from abc import abstractmethod
import datasets
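# The citation and description strings below come from `citations_and_descriptions`,
# presumably a sibling file shipped alongside this loading script.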
from citations_and_descriptions import (
_SUMM_SCREEN_DESCRIPTION, _SUMM_SCREEN_CITATION,
_GOV_REPORT_CITATION, _GOV_REPORT_DESCRIPTION,
_ARXIV_CITATION, _ARXIV_DESCRIPTION,
_FS_DESCRIPTION, _FS_CITATION,
)
class FSConfig(datasets.BuilderConfig):
"""BuilderConfig for FS."""
    def __init__(self, data_url, citation, url, max_source_length, tokenizer, **kwargs):
        """BuilderConfig for FS.
        Args:
            data_url: `string`, url to download the zip file from.
            citation: `string`, citation for the data set.
            url: `string`, url for information about the data set.
            max_source_length: `int` or `None`, maximum number of source tokens
                allowed after the prompt suffix is appended.
            tokenizer: tokenizer used to measure and truncate the source text.
            **kwargs: keyword arguments forwarded to super.
        """
super(FSConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
self.features = ["pid", self.source_key, self.source_key]
self.data_url = data_url
self.citation = citation
self.url = url
self.max_source_length = max_source_length
self.tokenizer = tokenizer
def remove_redundant_fields(self, example):
for field in self.redundant_fields:
del example[field]
@property
@abstractmethod
def original_source_key(self):
pass
@property
@abstractmethod
def original_target_key(self):
pass
@property
@abstractmethod
def train_file(self):
pass
@property
@abstractmethod
def validation_file(self):
pass
@property
@abstractmethod
def test_file(self):
pass
@property
def source_key(self):
return "source"
@property
def target_key(self):
return "target"
@property
@abstractmethod
def id_key(self):
pass
@property
def redundant_fields(self):
return []
def preprocess(self, example): # TODO perhaps we can use this for base
example[self.source_key] = example[self.original_source_key].strip()
example[self.target_key] = example[self.original_target_key].strip()
def prompt(self, example):
pass # TODO
# prompt = get_prompt(self.dataset_name,
# self.template_name)
# row = prompt.apply(row)
def postprocess(self, example): # TODO truncate source
pass
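    # The hooks above are applied to every raw example by Fs._generate_examples,
    # in this order: preprocess -> prompt -> postprocess -> remove_redundant_fields.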
class ScrollsConfig(FSConfig):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@property
def original_source_key(self):
return "input"
@property
def original_target_key(self):
return "output"
@property
def train_file(self):
return "train.jsonl"
@property
def validation_file(self):
return "validation.jsonl"
@property
def test_file(self):
return "test.jsonl"
@property
def id_key(self):
return "pid"
@property
def redundant_fields(self):
return [self.original_source_key, self.original_target_key, "id"]
def process_input(self, s):
prefix = s.strip()
suffix = "\nSummarize the above:"
prefix = _truncate_prefix(prefix, suffix, self.max_source_length, self.tokenizer)
return prefix + suffix
class ArxivConfig(FSConfig):
    # The arXiv-specific keys are exposed as properties so that they override the
    # abstract properties of FSConfig (plain attribute assignment in __init__ would
    # fail, because the base class defines these names as read-only properties).
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    @property
    def original_source_key(self):
        return "article_text"
    @property
    def original_target_key(self):
        return "abstract_text"
    @property
    def train_file(self):
        return "train.txt"
    @property
    def validation_file(self):
        return "val.txt"
    @property
    def test_file(self):
        return "test.txt"
    @property
    def id_key(self):
        return "article_id"
    @property
    def redundant_fields(self):
        return [self.original_source_key, self.original_target_key, self.id_key,
                "labels", "section_names", "sections"]
    def process_input(self, s):
        # The article text arrives as a list of sentences; join it before truncation.
        prefix = ' '.join(s)
        suffix = "\nSummarize the above:"
        prefix = _truncate_prefix(prefix, suffix, self.max_source_length, self.tokenizer)
        return prefix + suffix
    def process_output(self, s):
        # The abstract arrives as a list of sentences wrapped in <S> ... </S> markers.
        return ' '.join(s).replace("<S>", "").replace("</S>", "")
def _truncate_prefix(prefix, suffix, max_source_length, tokenizer):
    # Iteratively trim tokens off the end of the prefix until prefix + suffix fits
    # within max_source_length tokens; the suffix itself is never truncated.
    encoded_input = tokenizer.encode(prefix + suffix)
    while len(encoded_input) > max_source_length:
        overflow = len(encoded_input) - max_source_length
        tokenized_prefix = tokenizer.encode(prefix, add_special_tokens=False)
        tokenized_prefix = tokenized_prefix[:-overflow]
        prefix = tokenizer.decode(tokenized_prefix, skip_special_tokens=False).strip()
        encoded_input = tokenizer.encode(prefix + suffix)
    return prefix
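# A minimal sketch of the intended use of `_truncate_prefix` (the tokenizer below is
# illustrative; any Hugging Face tokenizer exposing `encode`/`decode` should work):
#
#     from transformers import AutoTokenizer
#     tok = AutoTokenizer.from_pretrained("t5-small")
#     suffix = "\nSummarize the above:"
#     prefix = _truncate_prefix("a very long document " * 1000, suffix, 512, tok)
#     assert len(tok.encode(prefix + suffix)) <= 512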
class Fs(datasets.GeneratorBasedBuilder):
"""The SCROLLS benchmark."""
    DEFAULT_WRITER_BATCH_SIZE = 1000  # the source documents are long, so keep Arrow writer batches small
BUILDER_CONFIGS = [
ScrollsConfig(
name="summ_screen_fd_debug",
description=_SUMM_SCREEN_DESCRIPTION,
data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/summ_screen_fd_debug.zip",
citation=_SUMM_SCREEN_CITATION,
url="https://github.com/mingdachen/SummScreen",
max_source_length=None,
tokenizer=None,
),
        ScrollsConfig(
            name="gov_report",
            description=_GOV_REPORT_DESCRIPTION,
            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/gov_report.zip",
            citation=_GOV_REPORT_CITATION,
            url="https://gov-report-data.github.io/",
            max_source_length=None,
            tokenizer=None,
        ),
        # ArxivConfig(
        #     name="arxiv_debug",
        #     description=_ARXIV_DESCRIPTION,
        #     data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/arxiv_debug.zip",
        #     citation=_ARXIV_CITATION,
        #     url="https://github.com/armancohan/long-summarization",
        #     max_source_length=None,
        #     tokenizer=None,
        # ),
]
def _info(self):
features = {feature: datasets.Value("string") for feature in self.config.features}
return datasets.DatasetInfo(
description=_FS_DESCRIPTION + self.config.description,
features=datasets.Features(features),
homepage=self.config.url,
citation=self.config.citation + "\n" + _FS_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(self.config.data_url)
        data_files = None
        if self.config.data_files is not None:
            data_files = {split: paths[0] for split, paths in self.config.data_files.items()}
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_file": os.path.join(dl_dir, self.config.train_file),
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"data_file": os.path.join(dl_dir, self.config.validation_file),
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data_file": os.path.join(dl_dir, self.config.test_file) if data_files is None else data_files[
"test"],
},
),
]
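    # Each data file is read as JSON Lines: one JSON object per line, keyed by the
    # config's original source/target/id fields.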
def _generate_examples(self, data_file):
with open(data_file, encoding="utf-8") as f:
for line in f:
row = json.loads(line)
row["pid"] = row[self.config.id_key]
self.config.preprocess(row)
self.config.prompt(row)
self.config.postprocess(row)
self.config.remove_redundant_fields(row)
yield row["pid"], row
def _get_task_name_from_data_url(data_url):
return data_url.split("/")[-1].split(".")[0]
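# A minimal usage sketch (assumption: this script is served as the `tau/fs` dataset
# on the Hugging Face Hub and the selected config's data_url is reachable):
if __name__ == "__main__":
    fs = datasets.load_dataset("tau/fs", "summ_screen_fd_debug")
    example = fs["train"][0]
    print(example["pid"])
    print(example["source"][:200])
    print(example["target"][:200])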