# personachat_truecased.py
import json
import datasets
from datasets.features import Sequence

_BASE_URL = "https://huggingface.co/datasets/bavard/personachat_truecased/raw/main"
_URLS = {
"full": {
"train": _BASE_URL + "/persona_chat_truecased_full_train.json",
"valid": _BASE_URL + "/persona_chat_truecased_full_valid.json"
},
"sample": {
"train": _BASE_URL + "/persona_chat_truecased_sample_train.json",
"valid": _BASE_URL + "/persona_chat_truecased_sample_valid.json"
}
}

_DESCRIPTION = """\
A version of the PersonaChat dataset that has been true-cased and given normalized punctuation.
The original PersonaChat dataset is in all lower case and has extra whitespace around each clause-
or sentence-separating punctuation mark. This version reads more like natural language, with
sentence capitalization, proper-noun capitalization, and normalized whitespace. In addition, each
dialogue turn includes a pool of distractor candidate responses, which can be used by a
multiple-choice regularization loss during training.
"""
_CITATION = """\
@article{zhang2018personalizing,
title={Personalizing dialogue agents: I have a dog, do you have pets too?},
author={Zhang, Saizheng and Dinan, Emily and Urbanek, Jack and Szlam, Arthur and Kiela, Douwe and Weston, Jason},
journal={arXiv preprint arXiv:1801.07243},
year={2018}
}
"""


class PersonachatTruecased(datasets.GeneratorBasedBuilder):
    """
    A version of the PersonaChat dataset with true-casing, normalized punctuation, and distractor
    candidate responses for each dialogue turn, for use with a multiple-choice regularization loss
    during training.
    """
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="full", version=VERSION, description="The full dataset."),
datasets.BuilderConfig(name="sample", version=VERSION, description="A sample sample of the dataset, useful for testing.")
]
DEFAULT_CONFIG_NAME = "full"

    def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({
"personality": Sequence(datasets.Value("string")),
"candidates": Sequence(datasets.Value("string")),
"history": Sequence(datasets.Value("string")),
"conv_id": datasets.Value("int32"),
"utterance_idx": datasets.Value("int32")
}),
citation=_CITATION
)
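
    # Illustrative shape of a single example produced by this builder (values are
    # invented placeholders, not actual dataset content). By the original
    # PersonaChat convention, the final entry in "candidates" is the ground-truth
    # response and the preceding entries are distractors:
    #
    # {
    #     "personality": ["I like to ski.", "I have two dogs."],
    #     "history": ["Hi, how are you today?", "Great, just got back from the slopes."],
    #     "candidates": ["<distractor>", "<distractor>", "<ground-truth response>"],
    #     "conv_id": 0,
    #     "utterance_idx": 1,
    # }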

    def _split_generators(self, dl_manager: datasets.DownloadManager):
split_paths = dl_manager.download(_URLS[self.config.name])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"data_path": split_paths["train"]}
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"data_path": split_paths["valid"]}
)
]

    def _generate_examples(self, data_path: str):
        # Each example is a single dialogue turn, keyed by conversation index and turn index.
        with open(data_path, encoding="utf-8") as f:
data = json.load(f)
for conv_id, conv in enumerate(data):
personality = conv["personality"]
for utterance_idx, utterance in enumerate(conv["utterances"]):
id_ = f"{conv_id}-{utterance_idx}"
yield id_, {
"personality": personality,
"candidates": utterance["candidates"],
"history": utterance["history"],
"conv_id": conv_id,
"utterance_idx": utterance_idx
}
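

# A minimal usage sketch, not part of the loading script itself. It assumes this
# script is hosted on the Hugging Face Hub as bavard/personachat_truecased; the
# field names follow the Features declared in _info() above.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load the small "sample" config for a quick sanity check.
    dataset = load_dataset("bavard/personachat_truecased", "sample")
    example = dataset["train"][0]
    print(example["personality"])  # list of persona sentences
    print(example["history"])      # dialogue history up to this turn
    print(example["candidates"])   # pool of candidate responses for this turn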