Upload preprocess.py
Browse files- preprocess.py +276 -0
preprocess.py
ADDED
@@ -0,0 +1,276 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import zipfile
|
2 |
+
import json
|
3 |
+
import os
|
4 |
+
import copy
|
5 |
+
from shutil import copy2, rmtree
|
6 |
+
from zipfile import ZipFile, ZIP_DEFLATED
|
7 |
+
|
# Unified-format ontology for the single-domain CamRest corpus:
# restaurant slot definitions, the three dialogue-act intents, the empty
# belief state, and a dialogue-act inventory that preprocess() populates
# while converting the corpus.
ontology = {
    'domains': {
        'restaurant': {
            'description': 'find a restaurant to eat',
            'slots': {
                # Categorical slots have a closed value set.
                'area': {
                    'description': 'area where the restaurant is located',
                    'is_categorical': True,
                    'possible_values': ["centre", "north", "west", "south", "east"],
                },
                'price range': {
                    'description': 'price range of the restaurant',
                    'is_categorical': True,
                    'possible_values': ["cheap", "moderate", "expensive"],
                },
                # Non-categorical slots: possible_values is informative only.
                'food': {
                    'description': 'the cuisine of the restaurant',
                    'is_categorical': False,
                    'possible_values': ["afghan","african","afternoon tea","asian oriental","australasian","australian","austrian","barbeque","basque","belgian","bistro","brazilian","british","canapes","cantonese","caribbean","catalan","chinese","christmas","corsica","creative","crossover","cuban","danish","eastern european","english","eritrean","european","french","fusion","gastropub","german","greek","halal","hungarian","indian","indonesian","international","irish","italian","jamaican","japanese","korean","kosher","latin american","lebanese","light bites","malaysian","mediterranean","mexican","middle eastern","modern american","modern eclectic","modern european","modern global","molecular gastronomy","moroccan","new zealand","north african","north american","north indian","northern european","panasian","persian","polish","polynesian","portuguese","romanian","russian","scandinavian","scottish","seafood","singaporean","south african","south indian","spanish","sri lankan","steakhouse","swedish","swiss","thai","the americas","traditional","turkish","tuscan","unusual","vegetarian","venetian","vietnamese","welsh","world"],
                },
                'name': {
                    'description': 'name of the restaurant',
                    'is_categorical': False,
                    'possible_values': [],
                },
                'phone': {
                    'description': 'phone number of the restaurant',
                    'is_categorical': False,
                    'possible_values': [],
                },
                'address': {
                    'description': 'exact location of the restaurant',
                    'is_categorical': False,
                    'possible_values': [],
                },
                'postcode': {
                    'description': 'postcode of the restaurant',
                    'is_categorical': False,
                    'possible_values': [],
                },
            },
        },
    },
    'intents': {
        'inform': {
            'description': 'inform the value of a slot',
        },
        'request': {
            'description': 'ask for the value of a slot',
        },
        'nooffer': {
            'description': 'inform the user that there is no result satisfies user requirements',
        },
    },
    # Template for an empty belief state; convert_state() deep-copies it.
    'state': {
        'restaurant': {
            'price range': '',
            'area': '',
            'food': '',
        },
    },
    # Filled during preprocessing with every observed (intent, domain, slot).
    'dialogue_acts': {
        "categorical": {},
        "non-categorical": {},
        "binary": {},
    },
}
75 |
+
|
76 |
+
|
def convert_da(utt, da):
    """Convert a CamRest dialogue-act annotation to the unified format.

    Args:
        utt: the annotated utterance; used to locate character spans for
            non-categorical slot values.
        da: mapping of intent name -> list of [slot, value] pairs.

    Returns:
        dict with 'binary', 'categorical' and 'non-categorical' act lists.
        Non-categorical acts carry 'start'/'end' character offsets when the
        value is found verbatim (case-insensitively) in the utterance.
    """
    global ontology
    converted_da = {
        'binary': [],
        'categorical': [],
        'non-categorical': []
    }

    for intent, svs in da.items():
        assert intent in ontology['intents']
        if intent == 'nooffer':
            # 'nooffer' carries no slot-value payload in CamRest.
            assert svs == [['none', 'none']]
            converted_da['binary'].append({
                'intent': intent,
                'domain': 'restaurant',
                'slot': '',
            })
            continue

        for s, v in svs:
            if 'care' in v:
                # The only expected '*care*' value is the literal 'dontcare'.
                # (Fixed: was `assert ..., print(v)`, whose message is None.)
                assert v == 'dontcare', v
            assert s == s.lower()
            if s == 'pricerange':
                # Normalize the CamRest slot name to the unified ontology name.
                # (Fixed: removed the dead no-op statement `v = v`.)
                s = 'price range'
            if intent == 'request':
                # Requests have no value; they become binary acts.
                assert v == '?'
                converted_da['binary'].append({
                    'intent': intent,
                    'domain': 'restaurant',
                    'slot': s
                })
                continue

            if s in ['price range', 'area']:
                # Categorical slot: value must come from the closed set.
                assert v.lower() in ontology['domains']['restaurant']['slots'][s]['possible_values'] + ['dontcare'], (s, v)
                converted_da['categorical'].append({
                    'intent': intent,
                    'domain': 'restaurant',
                    'slot': s,
                    'value': v
                })
            else:
                # Non-categorical slot: find the value span in the utterance.
                start_ch = utt.lower().find(v.lower())

                if start_ch == -1:
                    # Value not present verbatim; keep the act without a span.
                    # 'dontcare' is expected to be absent, so don't report it.
                    if not v == 'dontcare':
                        print('non-categorical slot value not found')
                        print('value: {}'.format(v))
                        print('sentence: {}'.format(utt))
                        print()

                    converted_da['non-categorical'].append({
                        'intent': intent,
                        'domain': 'restaurant',
                        'slot': s,
                        'value': v,
                    })
                else:
                    converted_da['non-categorical'].append({
                        'intent': intent,
                        'domain': 'restaurant',
                        'slot': s,
                        'value': utt[start_ch: start_ch + len(v)],
                        'start': start_ch,
                        'end': start_ch + len(v)
                    })
                    assert utt[start_ch: start_ch + len(v)].lower() == v.lower()

    return converted_da
150 |
+
|
151 |
+
|
def convert_state(slu):
    """Build a unified belief state from one user turn's SLU annotation.

    Only 'inform' acts contribute. Slots that the ontology state template
    does not know are reported to stdout and skipped.

    Args:
        slu: list of dicts with 'act' and 'slots' (list of [slot, value]).

    Returns:
        A fresh deep copy of the ontology's empty state, filled in.
    """
    global ontology
    state = copy.deepcopy(ontology['state'])
    for act_item in slu:
        if act_item['act'] != 'inform':
            continue

        for slot, value in act_item['slots']:
            # Normalize the CamRest slot name to the unified ontology name.
            if slot == 'pricerange':
                slot = 'price range'
            if slot not in state['restaurant']:
                print('slot not in state')
                print(act_item)
                print()
                continue
            state['restaurant'][slot] = value

    return state
169 |
+
|
170 |
+
|
def preprocess():
    """Convert the ConvLab-2 CamRest corpus to the unified data format.

    Reads {train,val,test}.json.zip from the original data directory,
    converts every dialogue (goal, per-turn dialogue acts, user belief
    state), records the observed dialogue acts into the module-level
    ontology, then writes dummy_data.json and a data.zip archive holding
    ontology.json, dialogues.json and the restaurant database.

    Returns:
        (dialogues, ontology): the flat converted dialogue list and the
        completed ontology dict.
    """
    # use convlab-2 version camrest which already has dialog act annotation
    original_data_dir = '../../camrest/'
    new_data_dir = 'data'

    os.makedirs(new_data_dir, exist_ok=True)

    copy2(f'{original_data_dir}/db/CamRestDB.json', new_data_dir)

    dataset = 'camrest'
    domain = 'restaurant'
    splits = ['train', 'validation', 'test']
    dialogues_by_split = {split: [] for split in splits}

    for split in ['train', 'val', 'test']:
        data = json.load(zipfile.ZipFile(os.path.join(original_data_dir, f'{split}.json.zip'), 'r').open(f'{split}.json'))
        if split == 'val':
            # the unified format names this split 'validation'
            split = 'validation'

        cur_domains = [domain]

        for ori_dialog in data:
            dialogue_id = f'{dataset}-{split}-{len(dialogues_by_split[split])}'

            # Convert the user goal, normalizing the 'pricerange' slot name.
            goal = {
                'description': ori_dialog['goal']['text'],
                'inform': {'restaurant': {}},
                'request': {'restaurant': {}}
            }
            for slot, value in ori_dialog['goal']['info'].items():
                if slot == 'pricerange':
                    slot = 'price range'
                goal['inform'][domain][slot] = value
            for slot in ori_dialog['goal']['reqt']:
                if slot == 'pricerange':
                    slot = 'price range'
                goal['request'][domain][slot] = ''

            dialogue = {
                'dataset': dataset,
                'data_split': split,
                'dialogue_id': dialogue_id,
                'original_id': ori_dialog['dialogue_id'],
                'domains': cur_domains,
                'goal': goal,
                'finished': ori_dialog['finished'],
                'turns': []
            }

            # Each CamRest turn bundles a user and a system utterance.
            for turn in ori_dialog['dial']:
                usr_text = turn['usr']['transcript']
                usr_da = turn['usr']['dialog_act']

                sys_text = turn['sys']['sent']
                sys_da = turn['sys']['dialog_act']

                # Belief state is annotated on the user side only.
                cur_state = convert_state(turn['usr']['slu'])
                cur_user_da = convert_da(usr_text, usr_da)

                usr_turn = {
                    'speaker': 'user',
                    'utterance': usr_text,
                    'utt_idx': len(dialogue['turns']),
                    'dialogue_acts': cur_user_da,
                    'state': cur_state,
                }

                sys_turn = {
                    'speaker': 'system',
                    'utterance': sys_text,
                    'utt_idx': len(dialogue['turns']) + 1,
                    'dialogue_acts': convert_da(sys_text, sys_da),
                }

                dialogue['turns'].append(usr_turn)
                dialogue['turns'].append(sys_turn)

            for turn in dialogue['turns']:
                speaker = turn['speaker']
                dialogue_acts = turn['dialogue_acts']

                # add to dialogue_acts dictionary in the ontology
                for da_type in dialogue_acts:
                    das = dialogue_acts[da_type]
                    for da in das:
                        ontology["dialogue_acts"][da_type].setdefault((da['intent'], da['domain'], da['slot']), {})
                        ontology["dialogue_acts"][da_type][(da['intent'], da['domain'], da['slot'])][speaker] = True
            dialogues_by_split[split].append(dialogue)

    dialogues = []
    for split in splits:
        dialogues += dialogues_by_split[split]
    # Serialize the act inventory: tuple keys become sorted string entries.
    for da_type in ontology['dialogue_acts']:
        ontology["dialogue_acts"][da_type] = sorted([str({'user': speakers.get('user', False), 'system': speakers.get('system', False), 'intent': da[0], 'domain': da[1], 'slot': da[2]}) for da, speakers in ontology["dialogue_acts"][da_type].items()])
    # Use context managers so the output files are closed deterministically
    # (the originals leaked the handles returned by open()).
    with open('dummy_data.json', 'w', encoding='utf-8') as f:
        json.dump(dialogues[:10], f, indent=2, ensure_ascii=False)
    with open(f'{new_data_dir}/ontology.json', 'w', encoding='utf-8') as f:
        json.dump(ontology, f, indent=2, ensure_ascii=False)
    with open(f'{new_data_dir}/dialogues.json', 'w', encoding='utf-8') as f:
        json.dump(dialogues, f, indent=2, ensure_ascii=False)
    with ZipFile('data.zip', 'w', ZIP_DEFLATED) as zf:
        for filename in os.listdir(new_data_dir):
            # Fixed: archive each listed file; the original wrote a garbled
            # literal path and never used the loop variable.
            zf.write(f'{new_data_dir}/{filename}')
    rmtree(new_data_dir)
    return dialogues, ontology
273 |
+
|
274 |
+
|
# Script entry point: run the full corpus conversion when executed directly.
if __name__ == '__main__':
    preprocess()