import json
import os
import datasets

_CITATION = """\
@inproceedings{rastogi2020towards,
  title={Towards scalable multi-domain conversational agents: The schema-guided dialogue dataset},
  author={Rastogi, Abhinav and Zang, Xiaoxue and Sunkara, Srinivas and Gupta, Raghav and Khaitan, Pranav},
  booktitle={Proceedings of the AAAI Conference on Artificial Intelligence},
  volume={34},
  number={05},
  pages={8689--8696},
  year={2020}
}
"""

_DESCRIPTION = """\
The Schema-Guided Dialogue (SGD) dataset contains 18K multi-domain task-oriented
dialogues between a human and a virtual assistant, covering 17 domains ranging
from banks and events to media, calendar, travel, and weather. The only language
present in the dataset is English. The SGD dataset provides a challenging testbed
for a number of tasks in task-oriented dialogue, including language understanding,
slot filling, dialogue state tracking and response generation. To create the SGD
dataset, the authors developed a multi-domain dialogue simulator that generates
dialogue outlines over an arbitrary combination of APIs, dialogue states and
system actions. They then used a crowd-sourcing procedure to paraphrase these
outlines into natural language utterances. This crowd-sourcing procedure preserves
all annotations obtained from the simulator and does not require any extra
annotations after dialogue collection.

"""

_URLs = {
    "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_sgd_context.zip",
    "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/schema_guided_dialog.zip",
}

_SGD_ACTS = [
    "AFFIRM",
    "AFFIRM_INTENT",
    "CONFIRM",
    "GOODBYE",
    "INFORM",
    "INFORM_COUNT",
    "INFORM_INTENT",
    "NEGATE",
    "NEGATE_INTENT",
    "NOTIFY_FAILURE",
    "NOTIFY_SUCCESS",
    "OFFER",
    "OFFER_INTENT",
    "REQUEST",
    "REQUEST_ALTS",
    "REQ_MORE",
    "SELECT",
    "THANK_YOU",
]
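# Note: the integer act ids found in the raw data index into the list above;
# _info() reuses the same names for the "act" ClassLabel feature.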


def process_sgd(example):
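    """Linearize one example into a single input string.

    The prompt, each dialogue act (act name plus its optional slot and values)
    and the service name are concatenated into one string, which is stored in
    the "linearized_input" field of every yielded example.
    """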
    prompt = example["prompt"]
    inp = f'Prompt: "{prompt}", '
    for da in example["dialog_acts"]:
        act = _SGD_ACTS[da["act"]].lower()
        slot = da["slot"]
        values = " or ".join(da["values"])
        inp += f"Response Type: {act}"
        if slot:
            inp += f", Type of Slot: {slot}"
        if values:
            inp += f", Values: {values}"
        inp += ", "
    inp += f'Agent: {example["service"]}'

    return inp
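

# Illustrative only (the prompt, slot and service below are invented, not taken
# from the dataset): for an example with prompt "I want to book a table.", a
# single REQUEST act with slot "city" and no values, and service "Restaurants_1",
# process_sgd returns:
#   Prompt: "I want to book a table.", Response Type: request, Type of Slot: city, Agent: Restaurants_1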


class SchemaGuidedDialog(datasets.GeneratorBasedBuilder):
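    """Dataset builder for the GEM Schema-Guided Dialog response generation task."""
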
    VERSION = datasets.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "schema_guided_dialog"

    def _info(self):
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "gem_parent_id": datasets.Value("string"),
                "dialog_acts": [
                    {
                        "act": datasets.ClassLabel(names=_SGD_ACTS),
                        "slot": datasets.Value("string"),
                        "values": [datasets.Value("string")],
                    }
                ],
                "context": [datasets.Value("string")],
                "dialog_id": datasets.Value("string"),
                "service": datasets.Value("string"),
                "turn_id": datasets.Value("int32"),
                "prompt": datasets.Value("string"),
                "target": datasets.Value("string"),
                "references": [datasets.Value("string")],
                "linearized_input": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_dir = dl_manager.download_and_extract(_URLs)
        challenge_sets = [
            (
                "challenge_train_sample",
                "train_schema_guided_dialog_RandomSample500_reformatted.json",
            ),
            (
                "challenge_validation_sample",
                "validation_schema_guided_dialog_RandomSample500_reformatted.json",
            ),
            (
                "challenge_test_backtranslation",
                "test_schema_guided_dialog_BackTranslation500_reformatted.json",
            ),
            (
                "challenge_test_bfp02",
                "test_schema_guided_dialog_ButterFingersPerturbation_p=0.02_500_reformatted.json",
            ),
            (
                "challenge_test_bfp05",
                "test_schema_guided_dialog_ButterFingersPerturbation_p=0.05_500_reformatted.json",
            ),
            (
                "challenge_test_nopunc",
                "test_schema_guided_dialog_WithoutPunctuation500_reformatted.json",
            ),
            (
                "challenge_test_scramble",
                "test_schema_guided_dialog_ScrambleInputStructure500_reformatted.json",
            ),
        ]
        return [
            datasets.SplitGenerator(
                name=spl,
                gen_kwargs={
                    "filepath": os.path.join(dl_dir["data"], "gem_sgd.json"),
                    "split": spl,
                },
            )
            for spl in ["train", "validation", "test"]
        ] + [
            datasets.SplitGenerator(
                name=challenge_split,
                gen_kwargs={
                    "filepath": os.path.join(
                        dl_dir["challenge_set"], "schema_guided_dialog", filename
                    ),
                    "split": challenge_split,
                },
            )
            for challenge_split, filename in challenge_sets
        ]

    def _generate_examples(self, filepath, split, filepaths=None, lang=None):
        """Yields examples."""
        if "challenge" in split:
            with open(filepath, encoding="utf-8") as f:
                exples = json.load(f)
            if isinstance(exples, dict):
                assert len(exples) == 1, "multiple entries found"
                exples = list(exples.values())[0]
            for id_, exple in enumerate(exples):
                if len(exple) == 0:
                    continue
                exple["gem_parent_id"] = exple["gem_id"]
                exple["gem_id"] = f"schema_guided_dialog-{split}-{id_}"
                exple["linearized_input"] = process_sgd(exple)
                yield id_, exple
        else:
            with open(filepath, encoding="utf-8") as f:
                examples = json.load(f)[split]
            for id_, example in enumerate(examples):
                # Fix the one example that has an empty target.
                if not example["target"]:
                    example["target"] = "Thank you, goodbye."

                exple = {
                    "gem_id": f"schema_guided_dialog-{split}-{id_}",
                    "gem_parent_id": f"schema_guided_dialog-{split}-{id_}",
                    "dialog_acts": [
                        {
                            "act": act_id,
                            "slot": slot,
                            "values": values,
                        }
                        for act_id, slot, values in example["da"]
                    ],
                    "context": example["context"],
                    "dialog_id": example["dialog_id"],
                    "service": example["service"],
                    "turn_id": example["turn_ix"],
                    "prompt": example["prompt"],
                    "target": example["target"],
                    "references": [example["target"]],
                }
                exple["linearized_input"] = process_sgd(exple)

                yield id_, exple
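

# Usage sketch (assumption: this script is published as "GEM/schema_guided_dialog"
# on the Hugging Face Hub; the exact dataset path may differ):
#
#   import datasets
#   sgd = datasets.load_dataset("GEM/schema_guided_dialog", split="validation")
#   print(sgd[0]["linearized_input"])
#   print(sgd[0]["target"])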