Languages: Indonesian
Commit de28af9 by holylovenia
Parent(s): 65f42c0

Upload indosmd.py with huggingface_hub
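For context, a minimal sketch of what such a programmatic upload typically looks like with the huggingface_hub client; the repo id below is a placeholder assumption, not taken from this commit.

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="indosmd.py",      # local copy of the loading script
    path_in_repo="indosmd.py",         # destination path inside the repo
    repo_id="SEACrowd/indosmd",        # hypothetical dataset repo id
    repo_type="dataset",
    commit_message="Upload indosmd.py with huggingface_hub",
)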

Files changed (1)
  1. indosmd.py +273 -0
indosmd.py ADDED
@@ -0,0 +1,273 @@
+ import json
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Tasks, Licenses
+
+ _CITATION = """\
+ @article{kautsar2023indotod,
+   author={Kautsar, Muhammad Dehan Al and Nurdini, Rahmah Khoirussyifa' and Cahyawijaya, Samuel and Winata, Genta Indra and Purwarianti, Ayu},
+   title={IndoToD: A Multi-Domain Indonesian Benchmark For End-to-End Task-Oriented Dialogue Systems},
+   journal={arXiv preprint arXiv:2311.00958},
+   year={2023},
+ }
+ """
+
+ _LANGUAGES = ["ind"]
+ _LOCAL = False
+
+ _DATASETNAME = "indosmd"
+
+ _DESCRIPTION = """\
+ IndoSMD is a synthetic task-oriented dialogue dataset translated from the In-Car Assistant (SMD) dataset (Eric et al., 2017) into Indonesian using a translation pipeline
+ consisting of delexicalization, translation, and relexicalization. The dataset consists of 323 dialogues in the POI Navigation, Calendar Scheduling, and Weather Information Retrieval domains, with a user and an agent talking to each other.
+ It also includes the slots and dialogue acts of both the user and the agent.
+ """
+
+ _HOMEPAGE = "https://github.com/dehanalkautsar/IndoToD/tree/main/IndoSMD"
+
+ _LICENSE = Licenses.CC_BY_SA_4_0.value
+
+ _URLS = {
+     _DATASETNAME: {
+         "train": "https://raw.githubusercontent.com/dehanalkautsar/IndoToD/main/IndoSMD/IndoSMD_split/IndoSMD_train.json",
+         "validation": "https://raw.githubusercontent.com/dehanalkautsar/IndoToD/main/IndoSMD/IndoSMD_split/IndoSMD_dev.json",
+         "test": "https://raw.githubusercontent.com/dehanalkautsar/IndoToD/main/IndoSMD/IndoSMD_split/IndoSMD_test.json",
+     },
+ }
+
+ _SUPPORTED_TASKS = [Tasks.E2E_TASK_ORIENTED_DIALOGUE]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class IndoSMDDataset(datasets.GeneratorBasedBuilder):
+     """IndoToD: A Multi-Domain Indonesian Benchmark For End-to-End Task-Oriented Dialogue Systems"""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description="IndoToD: IndoSMD source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_tod",
+             version=SEACROWD_VERSION,
+             description="IndoToD: IndoSMD SEACrowd End-to-end Task Oriented Dialogue schema",
+             schema="seacrowd_tod",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "indosmd_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "index": datasets.Value("string"),
+                     "dialogue": [
+                         {
+                             "turn": datasets.Value("string"),
+                             "data": {
+                                 "end_dialogue": datasets.Value("string"),
+                                 "utterance": datasets.Value("string"),
+                                 "delex_utterance": datasets.Value("string"),
+                                 "requested": {
+                                     "distance": datasets.Value("string"),
+                                     "traffic_info": datasets.Value("string"),
+                                     "poi_type": datasets.Value("string"),
+                                     "address": datasets.Value("string"),
+                                     "poi": datasets.Value("string"),
+                                     "room": datasets.Value("string"),
+                                     "agenda": datasets.Value("string"),
+                                     "time": datasets.Value("string"),
+                                     "date": datasets.Value("string"),
+                                     "party": datasets.Value("string"),
+                                     "event": datasets.Value("string"),
+                                     "weather_attribute": datasets.Value("string"),
+                                     "location": datasets.Value("string"),
+                                 },
+                                 "slots": {
+                                     "distance": datasets.Value("string"),
+                                     "traffic_info": datasets.Value("string"),
+                                     "poi_type": datasets.Value("string"),
+                                     "address": datasets.Value("string"),
+                                     "poi": datasets.Value("string"),
+                                     "room": datasets.Value("string"),
+                                     "agenda": datasets.Value("string"),
+                                     "time": datasets.Value("string"),
+                                     "date": datasets.Value("string"),
+                                     "party": datasets.Value("string"),
+                                     "event": datasets.Value("string"),
+                                     "weather_attribute": datasets.Value("string"),
+                                     "location": datasets.Value("string"),
+                                 },
+                             },
+                         }
+                     ],
+                     "scenario": {
+                         "kb": {
+                             "items": [
+                                 {
+                                     "distance": datasets.Value("string"),
+                                     "traffic_info": datasets.Value("string"),
+                                     "poi_type": datasets.Value("string"),
+                                     "address": datasets.Value("string"),
+                                     "poi": datasets.Value("string"),
+                                     "room": datasets.Value("string"),
+                                     "agenda": datasets.Value("string"),
+                                     "time": datasets.Value("string"),
+                                     "date": datasets.Value("string"),
+                                     "party": datasets.Value("string"),
+                                     "event": datasets.Value("string"),
+                                     "monday": datasets.Value("string"),
+                                     "tuesday": datasets.Value("string"),
+                                     "wednesday": datasets.Value("string"),
+                                     "thursday": datasets.Value("string"),
+                                     "friday": datasets.Value("string"),
+                                     "saturday": datasets.Value("string"),
+                                     "sunday": datasets.Value("string"),
+                                     "today": datasets.Value("string"),
+                                     "location": datasets.Value("string"),
+                                 }
+                             ],
+                             "column_names": [datasets.Value("string")],
+                             "kb_title": datasets.Value("string"),
+                         },
+                         "task": {"intent": datasets.Value("string")},
+                         "uuid": datasets.Value("string"),
+                     },
+                 }
+             )
+         elif self.config.schema == "seacrowd_tod":
+             features = schemas.tod_features
+         else:
+             raise NotImplementedError(f"Schema {self.config.schema} has not been implemented")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         urls = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_dir["train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": data_dir["validation"],
+                     "split": "validation",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": data_dir["test"],
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         key_slot_constant = ["distance", "traffic_info", "poi_type", "address", "poi", "room", "agenda", "time", "date", "party", "event", "weather_attribute", "location"]
+         key_kb_constant = ["distance", "traffic_info", "poi_type", "address", "poi", "room", "agenda", "time", "date", "party", "event", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday", "today", "location"]
+
+         with open(filepath, "r+") as fw:
+             data = json.loads(fw.read())
+
+         if self.config.schema == "source":
+             for idx, example in enumerate(data):
+                 example["index"] = str(idx)
+                 for i in range(len(example["dialogue"])):
+                     if "requested" not in example["dialogue"][i]["data"]:  # distinguishes user from system turns (user and system utterances are stored as separate dicts in the original dataset)
+                         example["dialogue"][i]["data"]["requested"] = {}
+                         example["dialogue"][i]["data"]["slots"] = {}
+                         for key in key_slot_constant:
+                             example["dialogue"][i]["data"]["requested"][key] = ""
+                             example["dialogue"][i]["data"]["slots"][key] = ""
+                     else:
+                         for key in key_slot_constant:
+                             if key not in example["dialogue"][i]["data"]["requested"]:
+                                 example["dialogue"][i]["data"]["requested"][key] = ""
+                             if key not in example["dialogue"][i]["data"]["slots"]:
+                                 example["dialogue"][i]["data"]["slots"][key] = ""
+
+                 if not example["scenario"]["kb"].get("items"):
+                     example["scenario"]["kb"]["items"] = []
+
+                 for i in range(len(example["scenario"]["kb"]["items"])):
+                     for key in key_kb_constant:
+                         if key not in example["scenario"]["kb"]["items"][i]:
+                             example["scenario"]["kb"]["items"][i][key] = ""
+
+                 yield str(idx), example
+
+         elif self.config.schema == "seacrowd_tod":
+             for idx, tod_dialogue in enumerate(data):
+                 example = {}
+                 example["dialogue_idx"] = idx
+
+                 dialogue = []
+                 # NOTE: the dialogue always starts with `driver` as the first utterance
+                 for turn, i in enumerate(range(0, len(tod_dialogue["dialogue"]) + 2, 2)):
+                     dial = {}
+                     dial["turn_idx"] = turn
+
+                     # system_utterance properties
+                     dial["system_utterance"] = ""
+                     dial["system_acts"] = []
+                     if turn != 0:
+                         dial["system_utterance"] = tod_dialogue["dialogue"][i - 1]["data"]["utterance"]
+                     if i < len(tod_dialogue["dialogue"]):
+                         # NOTE: system_acts is filled with every slot that has a 'True' value in the `requested` field of the original dataset
+                         for act in tod_dialogue["dialogue"][i + 1]["data"]["requested"]:
+                             if tod_dialogue["dialogue"][i + 1]["data"]["requested"][act]:
+                                 dial["system_acts"].append([act])
+
+                     # user_utterance properties
+                     dial["turn_label"] = []
+                     dial["belief_state"] = []
+                     if i == len(tod_dialogue["dialogue"]):
+                         # case if turn_idx > len(dialogue) --> add dummy user_utterance
+                         dial["user_utterance"] = ""
+                     else:
+                         dial["user_utterance"] = tod_dialogue["dialogue"][i]["data"]["utterance"]
+                         # NOTE: belief_state is filled with request acts from the `requested` field & inform acts from the `slots` field of the original dataset
+                         for act in tod_dialogue["dialogue"][i + 1]["data"]["requested"]:
+                             if tod_dialogue["dialogue"][i + 1]["data"]["requested"][act]:
+                                 dial["belief_state"].append({"slots": [["slot", act]], "act": "request"})
+                         for slot, slot_value in tod_dialogue["dialogue"][i + 1]["data"]["slots"].items():
+                             dial["belief_state"].append({"slots": [[slot, slot_value]], "act": "inform"})
+
+                     # append to dialogue
+                     dialogue.append(dial)
+                 example["dialogue"] = dialogue
+                 yield str(idx), example
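For reference, a minimal usage sketch: loading the script above with the `datasets` library, using the config names declared in BUILDER_CONFIGS. This assumes the file is saved locally as indosmd.py and that the seacrowd helper package it imports is installed.

import datasets

# "indosmd_source" keeps the original SMD-style structure;
# "indosmd_seacrowd_tod" yields the SEACrowd task-oriented dialogue schema.
# Recent versions of `datasets` may additionally require trust_remote_code=True.
dset = datasets.load_dataset("indosmd.py", name="indosmd_source")

print(dset)  # DatasetDict with train / validation / test splits
print(dset["train"][0]["dialogue"][0]["data"]["utterance"])  # first driver utterance of the first dialogue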