# schema_guided_dialog.py: GEM loading script for the Schema-Guided Dialogue (SGD) dataset
import json
import os
import datasets
_CITATION = """\
@inproceedings{rastogi2020towards,
title={Towards scalable multi-domain conversational agents: The schema-guided dialogue dataset},
author={Rastogi, Abhinav and Zang, Xiaoxue and Sunkara, Srinivas and Gupta, Raghav and Khaitan, Pranav},
booktitle={Proceedings of the AAAI Conference on Artificial Intelligence},
volume={34},
number={05},
pages={8689--8696},
year={2020}
}
"""
_DESCRIPTION = """\
The Schema-Guided Dialogue (SGD) dataset contains 18K multi-domain task-oriented
dialogues between a human and a virtual assistant, covering 17 domains that range
from banks and events to media, calendar, travel, and weather. The only language
present in the dataset is English. The SGD dataset provides a challenging testbed
for a number of tasks in task-oriented dialogue, including language understanding,
slot filling, dialogue state tracking and response generation. To create the SGD
dataset, the authors developed a multi-domain dialogue simulator that generates
dialogue outlines over an arbitrary combination of APIs, dialogue states and
system actions. They then used a crowd-sourcing procedure to paraphrase these
outlines into natural language utterances. This novel crowd-sourcing procedure
preserves all annotations obtained from the simulator and does not require any
extra annotations after dialogue collection.
"""

_URLs = {
    "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_sgd_context.zip",
    "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/schema_guided_dialog.zip",
}

# Dialogue act names; the ClassLabel feature stores acts as indices into this list.
_SGD_ACTS = [
    "AFFIRM",
    "AFFIRM_INTENT",
    "CONFIRM",
    "GOODBYE",
    "INFORM",
    "INFORM_COUNT",
    "INFORM_INTENT",
    "NEGATE",
    "NEGATE_INTENT",
    "NOTIFY_FAILURE",
    "NOTIFY_SUCCESS",
    "OFFER",
    "OFFER_INTENT",
    "REQUEST",
    "REQUEST_ALTS",
    "REQ_MORE",
    "SELECT",
    "THANK_YOU",
]


def process_sgd(example):
    """Linearize a single SGD turn (prompt, dialogue acts, service) into a flat string."""
    prompt = example["prompt"]
    inp = f'Prompt: "{prompt}", '
    for da in example["dialog_acts"]:
        # Acts are stored as ClassLabel indices; map back to the lower-cased act name.
        act = _SGD_ACTS[da["act"]].lower()
        slot = da["slot"]
        values = " or ".join(da["values"])
        inp += f"Response Type: {act}"
        if slot:
            inp += f", Type of Slot: {slot}"
        if values:
            inp += f", Values: {values}"
        inp += ", "
    inp += f'Agent: {example["service"]}'
    return inp
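
# Illustrative example (hypothetical turn, not taken from the dataset): for a turn with
# prompt "I want to book a table.", a single INFORM act with slot "city" and value
# "San Jose", and service "Restaurants_1", process_sgd returns:
#   Prompt: "I want to book a table.", Response Type: inform, Type of Slot: city, Values: San Jose, Agent: Restaurants_1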


class SchemaGuidedDialog(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "schema_guided_dialog"

    def _info(self):
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "gem_parent_id": datasets.Value("string"),
                "dialog_acts": [
                    {
                        "act": datasets.ClassLabel(names=_SGD_ACTS),
                        "slot": datasets.Value("string"),
                        "values": [datasets.Value("string")],
                    }
                ],
                "context": [datasets.Value("string")],
                "dialog_id": datasets.Value("string"),
                "service": datasets.Value("string"),
                "turn_id": datasets.Value("int32"),
                "prompt": datasets.Value("string"),
                "target": datasets.Value("string"),
                "references": [datasets.Value("string")],
                "linearized_input": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )
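
    # For orientation, a single generated record roughly follows this shape
    # (hypothetical values, shown only to illustrate the feature schema above):
    # {
    #     "gem_id": "schema_guided_dialog-train-0",
    #     "gem_parent_id": "schema_guided_dialog-train-0",
    #     "dialog_acts": [{"act": 4, "slot": "city", "values": ["San Jose"]}],
    #     "context": ["Hi, could you get me a restaurant booking?"],
    #     "dialog_id": "1_00000",
    #     "service": "Restaurants_1",
    #     "turn_id": 1,
    #     "prompt": "I want to book a table.",
    #     "target": "Which city should I search in?",
    #     "references": ["Which city should I search in?"],
    #     "linearized_input": 'Prompt: "I want to book a table.", ..., Agent: Restaurants_1',
    # }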

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_dir = dl_manager.download_and_extract(_URLs)
        challenge_sets = [
            (
                "challenge_train_sample",
                "train_schema_guided_dialog_RandomSample500_reformatted.json",
            ),
            (
                "challenge_validation_sample",
                "validation_schema_guided_dialog_RandomSample500_reformatted.json",
            ),
            (
                "challenge_test_backtranslation",
                "test_schema_guided_dialog_BackTranslation500_reformatted.json",
            ),
            (
                "challenge_test_bfp02",
                "test_schema_guided_dialog_ButterFingersPerturbation_p=0.02_500_reformatted.json",
            ),
            (
                "challenge_test_bfp05",
                "test_schema_guided_dialog_ButterFingersPerturbation_p=0.05_500_reformatted.json",
            ),
            (
                "challenge_test_nopunc",
                "test_schema_guided_dialog_WithoutPunctuation500_reformatted.json",
            ),
            (
                "challenge_test_scramble",
                "test_schema_guided_dialog_ScrambleInputStructure500_reformatted.json",
            ),
        ]
        return [
            datasets.SplitGenerator(
                name=spl,
                gen_kwargs={
                    "filepath": os.path.join(dl_dir["data"], "gem_sgd.json"),
                    "split": spl,
                },
            )
            for spl in ["train", "validation", "test"]
        ] + [
            datasets.SplitGenerator(
                name=challenge_split,
                gen_kwargs={
                    "filepath": os.path.join(
                        dl_dir["challenge_set"], "schema_guided_dialog", filename
                    ),
                    "split": challenge_split,
                },
            )
            for challenge_split, filename in challenge_sets
        ]

    def _generate_examples(self, filepath, split, filepaths=None, lang=None):
        """Yields examples."""
        if "challenge" in split:
            # Challenge sets ship as a single JSON file that is either a list of
            # examples or a one-entry dict wrapping that list.
            with open(filepath, encoding="utf-8") as f:
                exples = json.load(f)
            if isinstance(exples, dict):
                assert len(exples) == 1, "multiple entries found"
                exples = list(exples.values())[0]
            for id_, exple in enumerate(exples):
                if len(exple) == 0:
                    continue
                exple["gem_parent_id"] = exple["gem_id"]
                exple["gem_id"] = f"schema_guided_dialog-{split}-{id_}"
                exple["linearized_input"] = process_sgd(exple)
                yield id_, exple
        else:
            with open(filepath, encoding="utf-8") as f:
                examples = json.load(f)[split]
            for id_, example in enumerate(examples):
                # Fix the one example that has an empty target.
                if not example["target"]:
                    example["target"] = "Thank you, goodbye."
                exple = {
                    "gem_id": f"schema_guided_dialog-{split}-{id_}",
                    "gem_parent_id": f"schema_guided_dialog-{split}-{id_}",
                    "dialog_acts": [
                        {
                            "act": act_id,
                            "slot": slot,
                            "values": values,
                        }
                        for act_id, slot, values in example["da"]
                    ],
                    "context": example["context"],
                    "dialog_id": example["dialog_id"],
                    "service": example["service"],
                    "turn_id": example["turn_ix"],
                    "prompt": example["prompt"],
                    "target": example["target"],
                    "references": [example["target"]],
                }
                exple["linearized_input"] = process_sgd(exple)
                yield id_, exple
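

# Minimal local usage sketch (an assumption, not part of the original script): with a
# `datasets` release that still supports script-based loading, the builder above can be
# exercised by pointing `load_dataset` at this file. Running the module directly prints
# one linearized training input.
if __name__ == "__main__":
    dsets = datasets.load_dataset(__file__)
    print(dsets["train"][0]["linearized_input"])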