Datasets: GEM /

Languages: Chinese
Multilinguality: unknown
Size Categories: unknown
Language Creators: unknown
Annotations Creators: none
Source Datasets: original
License:
zqwerty committed on
Commit
39d73d2
1 Parent(s): 8838d5e

add CrossWOZ script

Files changed (1)
  1. CrossWOZ.py +298 -0
CrossWOZ.py ADDED
@@ -0,0 +1,298 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """CrossWOZ: A Large-Scale Chinese Cross-Domain Task-Oriented Dialogue Dataset"""
+
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @article{zhu2020crosswoz,
+     author = {Qi Zhu and Kaili Huang and Zheng Zhang and Xiaoyan Zhu and Minlie Huang},
+     title = {Cross{WOZ}: A Large-Scale Chinese Cross-Domain Task-Oriented Dialogue Dataset},
+     journal = {Transactions of the Association for Computational Linguistics},
+     year = {2020}
+ }
+ """
+
+ _DESCRIPTION = """\
+ CrossWOZ is the first large-scale Chinese Cross-Domain Wizard-of-Oz task-oriented dataset. \
+ It contains 6K dialogue sessions and 102K utterances for 5 domains, including hotel, \
+ restaurant, attraction, metro, and taxi. Moreover, the corpus contains rich annotation of \
+ dialogue states and dialogue acts at both user and system sides.
+ """
+
+ _HOMEPAGE = "https://github.com/thu-coai/CrossWOZ"
+
+ _LICENSE = "Apache License, Version 2.0"
+
+ _URLs = {  # raw file URLs, so download_and_extract fetches the zip archives rather than GitHub HTML pages
+     "train": "https://github.com/thu-coai/CrossWOZ/raw/master/data/crosswoz/train.json.zip",
+     "val": "https://github.com/thu-coai/CrossWOZ/raw/master/data/crosswoz/val.json.zip",
+     "test": "https://github.com/thu-coai/CrossWOZ/raw/master/data/crosswoz/test.json.zip",
+ }
+
+
+ class CrossWOZ(datasets.GeneratorBasedBuilder):
+     """CrossWOZ: A Large-Scale Chinese Cross-Domain Task-Oriented Dialogue Dataset"""
+
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "gem_id": datasets.Value("string"),
+                 "dialog_id": datasets.Value("string"),
+                 "sys_id": datasets.Value("int32"),
+                 "usr_id": datasets.Value("int32"),
+                 "goal": datasets.Sequence(
+                     {
+                         "sub_goal_id": datasets.Value("int32"),
+                         "domain": datasets.Value("string"),
+                         "slot": datasets.Value("string"),
+                         "value": datasets.Value("string"),
+                         "has_mentioned": datasets.Value("bool"),
+                     }
+                 ),
+                 "task description": datasets.Value("string"),
+                 "type": datasets.ClassLabel(names=["单领域", "独立多领域", "独立多领域+交通", "不独立多领域", "不独立多领域+交通"]),
+                 "messages": datasets.Sequence(
+                     {
+                         "content": datasets.Value("string"),
+                         "role": datasets.ClassLabel(names=["usr", "sys"]),
+                         "dialog_act": datasets.Sequence(
+                             {
+                                 "intent": datasets.Value("string"),
+                                 "domain": datasets.Value("string"),
+                                 "slot": datasets.Value("string"),
+                                 "value": datasets.Value("string"),
+                             }
+                         ),
+                         "user_state": datasets.Sequence(
+                             {
+                                 "sub_goal_id": datasets.Value("int32"),
+                                 "domain": datasets.Value("string"),
+                                 "slot": datasets.Value("string"),
+                                 "value": datasets.Value("string"),
+                                 "has_mentioned": datasets.Value("bool"),
+                             }
+                         ),
+                         "sys_state": {
+                             "景点": {
+                                 "名称": datasets.Value("string"),
+                                 "门票": datasets.Value("string"),
+                                 "游玩时间": datasets.Value("string"),
+                                 "评分": datasets.Value("string"),
+                                 "周边景点": datasets.Value("string"),
+                                 "周边餐馆": datasets.Value("string"),
+                                 "周边酒店": datasets.Value("string"),
+                                 "selectedResults": datasets.Sequence(datasets.Value("string")),
+                             },
+                             "餐馆": {
+                                 "名称": datasets.Value("string"),
+                                 "推荐菜": datasets.Value("string"),
+                                 "人均消费": datasets.Value("string"),
+                                 "评分": datasets.Value("string"),
+                                 "周边景点": datasets.Value("string"),
+                                 "周边餐馆": datasets.Value("string"),
+                                 "周边酒店": datasets.Value("string"),
+                                 "selectedResults": datasets.Sequence(datasets.Value("string")),
+                             },
+                             "酒店": {
+                                 "名称": datasets.Value("string"),
+                                 "酒店类型": datasets.Value("string"),
+                                 "酒店设施": datasets.Value("string"),
+                                 "价格": datasets.Value("string"),
+                                 "评分": datasets.Value("string"),
+                                 "周边景点": datasets.Value("string"),
+                                 "周边餐馆": datasets.Value("string"),
+                                 "周边酒店": datasets.Value("string"),
+                                 "selectedResults": datasets.Sequence(datasets.Value("string")),
+                             },
+                             "地铁": {
+                                 "出发地": datasets.Value("string"),
+                                 "目的地": datasets.Value("string"),
+                                 "selectedResults": datasets.Sequence(datasets.Value("string")),
+                             },
+                             "出租": {
+                                 "出发地": datasets.Value("string"),
+                                 "目的地": datasets.Value("string"),
+                                 "selectedResults": datasets.Sequence(datasets.Value("string")),
+                             },
+                         },
+                         "sys_state_init": {
+                             "景点": {
+                                 "名称": datasets.Value("string"),
+                                 "门票": datasets.Value("string"),
+                                 "游玩时间": datasets.Value("string"),
+                                 "评分": datasets.Value("string"),
+                                 "周边景点": datasets.Value("string"),
+                                 "周边餐馆": datasets.Value("string"),
+                                 "周边酒店": datasets.Value("string"),
+                                 "selectedResults": datasets.Sequence(datasets.Value("string")),
+                             },
+                             "餐馆": {
+                                 "名称": datasets.Value("string"),
+                                 "推荐菜": datasets.Value("string"),
+                                 "人均消费": datasets.Value("string"),
+                                 "评分": datasets.Value("string"),
+                                 "周边景点": datasets.Value("string"),
+                                 "周边餐馆": datasets.Value("string"),
+                                 "周边酒店": datasets.Value("string"),
+                                 "selectedResults": datasets.Sequence(datasets.Value("string")),
+                             },
+                             "酒店": {
+                                 "名称": datasets.Value("string"),
+                                 "酒店类型": datasets.Value("string"),
+                                 "酒店设施": datasets.Value("string"),
+                                 "价格": datasets.Value("string"),
+                                 "评分": datasets.Value("string"),
+                                 "周边景点": datasets.Value("string"),
+                                 "周边餐馆": datasets.Value("string"),
+                                 "周边酒店": datasets.Value("string"),
+                                 "selectedResults": datasets.Sequence(datasets.Value("string")),
+                             },
+                             "地铁": {
+                                 "出发地": datasets.Value("string"),
+                                 "目的地": datasets.Value("string"),
+                                 "selectedResults": datasets.Sequence(datasets.Value("string")),
+                             },
+                             "出租": {
+                                 "出发地": datasets.Value("string"),
+                                 "目的地": datasets.Value("string"),
+                                 "selectedResults": datasets.Sequence(datasets.Value("string")),
+                             },
+                         },
+                     }
+                 ),
+                 "final_goal": datasets.Sequence(
+                     {
+                         "sub_goal_id": datasets.Value("int32"),
+                         "domain": datasets.Value("string"),
+                         "slot": datasets.Value("string"),
+                         "value": datasets.Value("string"),
+                         "has_mentioned": datasets.Value("bool"),
+                     }
+                 ),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types.
+             features=features,
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
+         # It accepts any type or nested list/dict and returns the same structure with each URL replaced by a local path.
+         # By default the archives are extracted, and the path to the cached folder where they were extracted is returned.
+         downloaded_files = dl_manager.download_and_extract(_URLs)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(downloaded_files["train"], "train.json"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(downloaded_files["test"], "test.json"),
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(downloaded_files["val"], "val.json"),
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(
+         self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     ):
+         """Yields examples as (key, example) tuples."""
+         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is here for legacy reasons (tfds) and is not important in itself.
+         def convert_goal(raw_goal):  # each sub-goal is a [sub_goal_id, domain, slot, value, has_mentioned] list
+             goal = []
+             for subgoal in raw_goal:
+                 goal.append({
+                     "sub_goal_id": subgoal[0],
+                     "domain": subgoal[1],
+                     "slot": subgoal[2],
+                     "value": str(subgoal[3]),
+                     "has_mentioned": subgoal[4],
+                 })
+             return goal
+
+         key = 0
+         with open(filepath, encoding="utf-8") as f:
+             data = json.load(f)
+             for dialog_id, dialog in data.items():
+                 messages = []
+                 for turn in dialog["messages"]:
+                     dialog_act = []
+                     for da in turn["dialog_act"]:
+                         dialog_act.append({
+                             "intent": da[0],
+                             "domain": da[1],
+                             "slot": da[2],
+                             "value": da[3],
+                         })
+                     turn["dialog_act"] = dialog_act
+                     if "user_state" not in turn:  # system turns carry no user_state
+                         turn["user_state"] = []
+                     else:
+                         turn["user_state"] = convert_goal(turn["user_state"])
+                     if "sys_state" not in turn:  # user turns carry no sys_state / sys_state_init
+                         turn["sys_state"] = {}
+                     if "sys_state_init" not in turn:
+                         turn["sys_state_init"] = {}
+                     messages.append(turn)
+
+                 yield key, {
+                     "gem_id": f"{self.config.name}-{split}-{key}",
+                     "dialog_id": dialog_id,
+                     "sys_id": dialog["sys-usr"][0],
+                     "usr_id": dialog["sys-usr"][1],
+                     "goal": convert_goal(dialog["goal"]),
+                     "task description": dialog["task description"],
+                     "type": dialog["type"],
+                     "messages": messages,
+                     "final_goal": convert_goal(dialog["final_goal"]),
+                 }
+                 key += 1
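
For reference, a minimal loading sketch for the script added in this commit. It assumes the dataset is hosted under the repository id GEM/CrossWOZ (the id is not shown in this diff) and that the installed `datasets` version still supports script-based datasets; newer releases may additionally require passing trust_remote_code=True to load_dataset.

    import datasets

    # Load the splits defined by CrossWOZ.py (train / validation / test).
    dataset = datasets.load_dataset("GEM/CrossWOZ")

    # Each example follows the features declared in _info().
    example = dataset["train"][0]
    print(example["dialog_id"])
    print(example["goal"])                      # user goal: sub_goal_id / domain / slot / value / has_mentioned
    print(example["messages"]["content"][:2])   # first two utterances of the dialogue

Because "goal" and "messages" are declared as datasets.Sequence features, they decode as dicts of aligned lists, so example["messages"]["content"] is the list of all utterances in the dialogue.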