AnAutomaticPencil
committed on
Commit
•
a6326c7
1
Parent(s):
9373819
data preprocessing update
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- README.md +11 -345
- src/ABSA.py +41 -0
- src/CC.py +42 -0
- src/CI.py +43 -0
- src/DCRG.py +144 -0
- src/DS.py +39 -0
- src/DST.py +43 -0
- src/DT.py +41 -0
- src/ER.py +38 -0
- src/ID.py +32 -0
- src/MCQA.py +41 -0
- src/MRC.py +35 -0
- src/NLI.py +33 -0
- src/QCR.py +40 -0
- src/README.md +13 -0
- src/RRR.py +36 -0
- src/SF.py +36 -0
- src/SP.py +38 -0
- src/T2S.py +54 -0
- src/modules/preprocess/__pycache__/config.cpython-312.pyc +0 -0
- src/modules/preprocess/__pycache__/config.cpython-38.pyc +0 -0
- src/modules/preprocess/__pycache__/const.cpython-312.pyc +0 -0
- src/modules/preprocess/__pycache__/const.cpython-38.pyc +0 -0
- src/modules/preprocess/__pycache__/logger.cpython-312.pyc +0 -0
- src/modules/preprocess/__pycache__/logger.cpython-38.pyc +0 -0
- src/modules/preprocess/config.py +68 -0
- src/modules/preprocess/const.py +73 -0
- src/modules/preprocess/logger.py +30 -0
- src/modules/preprocess/preprocess.py +343 -0
- src/modules/preprocess/preprocessor/SerialPreprocessor.py +428 -0
- src/modules/preprocess/preprocessor/__pycache__/SerialPreprocessor.cpython-312.pyc +0 -0
- src/modules/preprocess/preprocessor/__pycache__/SerialPreprocessor.cpython-38.pyc +0 -0
- src/modules/preprocess/preprocessor/__pycache__/base.cpython-312.pyc +0 -0
- src/modules/preprocess/preprocessor/__pycache__/base.cpython-38.pyc +0 -0
- src/modules/preprocess/preprocessor/__pycache__/knowledge_funcs.cpython-312.pyc +0 -0
- src/modules/preprocess/preprocessor/__pycache__/knowledge_funcs.cpython-38.pyc +0 -0
- src/modules/preprocess/preprocessor/__pycache__/label_funs.cpython-312.pyc +0 -0
- src/modules/preprocess/preprocessor/__pycache__/label_funs.cpython-38.pyc +0 -0
- src/modules/preprocess/preprocessor/__pycache__/process_turn_funcs.cpython-38.pyc +0 -0
- src/modules/preprocess/preprocessor/__pycache__/prompt_funcs.cpython-312.pyc +0 -0
- src/modules/preprocess/preprocessor/__pycache__/prompt_funcs.cpython-38.pyc +0 -0
- src/modules/preprocess/preprocessor/base.py +150 -0
- src/modules/preprocess/preprocessor/knowledge_funcs.py +505 -0
- src/modules/preprocess/preprocessor/label_funs.py +324 -0
- src/modules/preprocess/preprocessor/process_turn_funcs.py +22 -0
- src/modules/preprocess/preprocessor/prompt_funcs.py +5 -0
- src/preprocess.sh +11 -0
- src/preprocess/ASTE.py +97 -0
- src/preprocess/AlphaNLI.py +84 -0
- src/preprocess/Banking77.py +32 -0
README.md
CHANGED
@@ -1,347 +1,13 @@
|
|
1 |
-
#
|
2 |
-
|
3 |
-
To replicate data construction, three steps are required:
|
4 |
-
* Download data: ```bash scripts/download.sh```
|
5 |
-
* Convert origin data into our unified format: ```bash scripts/convert_to_unified.sh```
|
6 |
```
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
# The dialogue, represented as a list where each element is a dictionary for a single turn.
|
18 |
-
"dialog": [
|
19 |
-
{
|
20 |
-
# The roles involved in each turn. Some datasets may have multiple roles per turn, so it's a list.
|
21 |
-
# For datasets without role annotations:
|
22 |
-
# * Use `ROLE` for single-turn data.
|
23 |
-
# * Use `ROLE1`, `ROLE2`, etc., for multi-turn data.
|
24 |
-
"roles": [str, ...],
|
25 |
-
|
26 |
-
# The text of the current turn.
|
27 |
-
"utterance": str,
|
28 |
-
|
29 |
-
# Used for the "answer" in QA tasks.
|
30 |
-
"start": int,
|
31 |
-
"end": int,
|
32 |
-
"dialog_turn": int
|
33 |
-
|
34 |
-
# Rewritten text corresponding to the current turn.
|
35 |
-
"rewritten": str,
|
36 |
-
|
37 |
-
# Dialogue state, represented as a list where each element includes:
|
38 |
-
# Domain: Some datasets constrain slot-value pairs within specific domains.
|
39 |
-
# Intent: Some datasets constrain slot-value pairs within specific intents.
|
40 |
-
# Slot-value pairs: A list where each element includes a slot and its corresponding values.
|
41 |
-
# Slot name: A string.
|
42 |
-
# Values: A list where a slot may have multiple values.
|
43 |
-
# Each value includes four parts: the value itself, the normalized value,
|
44 |
-
# the character index in the current turn's text, and more.
|
45 |
-
# Relation: Some slots are equal to a value, while others are greater than a value.
|
46 |
-
# Defaults to "equal" if not specified.
|
47 |
-
# Requested slots: A list of slots that need to be queried but are not filled in the current state.
|
48 |
-
"belief_state": [
|
49 |
-
{
|
50 |
-
# Intent
|
51 |
-
"intent": str,
|
52 |
-
# Slot-value pairs
|
53 |
-
"informed_slot_value_table": [
|
54 |
-
{
|
55 |
-
# Slot name
|
56 |
-
"slot": str,
|
57 |
-
# Values
|
58 |
-
"values": [{
|
59 |
-
# Actual value
|
60 |
-
"value": str,
|
61 |
-
# Normalized value
|
62 |
-
"cononical_value": str
|
63 |
-
}, ...],
|
64 |
-
# Slot-value relation
|
65 |
-
"relation": str,
|
66 |
-
},
|
67 |
-
...
|
68 |
-
],
|
69 |
-
# Requested slots
|
70 |
-
"requested_slots": [],
|
71 |
-
# Domain
|
72 |
-
"domain": str,
|
73 |
-
}, ...
|
74 |
-
],
|
75 |
-
|
76 |
-
# Dialogue actions, represented as a list where each element includes:
|
77 |
-
# Domain: Some datasets constrain slot-value pairs within specific domains.
|
78 |
-
# Action: The actions involved in the current turn.
|
79 |
-
# Slot-value pairs: Same as in dialogue state.
|
80 |
-
"dialog_acts": [
|
81 |
-
{
|
82 |
-
# Action
|
83 |
-
"act": str,
|
84 |
-
# Slot-value pairs
|
85 |
-
"slot_value_table": [
|
86 |
-
{
|
87 |
-
# Slot name
|
88 |
-
"slot": str,
|
89 |
-
# Slot-value relation
|
90 |
-
"relation": str,
|
91 |
-
# Values
|
92 |
-
"values": [
|
93 |
-
{
|
94 |
-
# Actual value
|
95 |
-
"value": str,
|
96 |
-
# Normalized value
|
97 |
-
"cononical_value": str,
|
98 |
-
# Start position
|
99 |
-
"start": int,
|
100 |
-
# End position
|
101 |
-
"end": int,
|
102 |
-
},...
|
103 |
-
]
|
104 |
-
},
|
105 |
-
...
|
106 |
-
],
|
107 |
-
# Domain
|
108 |
-
"domain": str,
|
109 |
-
},
|
110 |
-
...
|
111 |
-
],
|
112 |
-
|
113 |
-
# Slot filling
|
114 |
-
"slots_to_fill": {
|
115 |
-
"intent": str,
|
116 |
-
"slot_value_table": [
|
117 |
-
{
|
118 |
-
"slot": str,
|
119 |
-
"values": [
|
120 |
-
{
|
121 |
-
"value": str,
|
122 |
-
"start": int,
|
123 |
-
"end": int
|
124 |
-
}
|
125 |
-
],
|
126 |
-
"relation": str, # '=', '<=', and so on
|
127 |
-
}
|
128 |
-
]
|
129 |
-
},
|
130 |
-
|
131 |
-
# Named entity recognition
|
132 |
-
"named_entity_recognition": [
|
133 |
-
{
|
134 |
-
"type": str,
|
135 |
-
"values": [
|
136 |
-
{
|
137 |
-
"value": str,
|
138 |
-
"start": int,
|
139 |
-
"end": int
|
140 |
-
}, ...
|
141 |
-
]
|
142 |
-
}, ...
|
143 |
-
],
|
144 |
-
|
145 |
-
"characters": [
|
146 |
-
{
|
147 |
-
"value": str,
|
148 |
-
"start": int,
|
149 |
-
"end": int
|
150 |
-
}
|
151 |
-
]
|
152 |
-
|
153 |
-
# Intent detection
|
154 |
-
"active_intents": [str],
|
155 |
-
|
156 |
-
# Query
|
157 |
-
"query" {
|
158 |
-
...
|
159 |
-
},
|
160 |
-
|
161 |
-
# Query result
|
162 |
-
"querying_result": {
|
163 |
-
...
|
164 |
-
},
|
165 |
-
|
166 |
-
# Recorded satisfied main items
|
167 |
-
"main_items": [],
|
168 |
-
|
169 |
-
# Aspect Sentiment Triplet Extraction task, represented as a list where each element includes three parts:
|
170 |
-
# Target entity.
|
171 |
-
# Related sentiment.
|
172 |
-
# Words reflecting the sentiment.
|
173 |
-
"aspects": [
|
174 |
-
{
|
175 |
-
# Target entity
|
176 |
-
"target": {
|
177 |
-
# Entity value
|
178 |
-
"value": str,
|
179 |
-
# Start position in the current turn's text
|
180 |
-
"start": int,
|
181 |
-
# End position in the current turn's text
|
182 |
-
"end": int
|
183 |
-
},
|
184 |
-
|
185 |
-
# Category of the target entity
|
186 |
-
"category": str,
|
187 |
-
|
188 |
-
# Words reflecting the sentiment
|
189 |
-
"opinion": {
|
190 |
-
# Sentiment word
|
191 |
-
"value": str,
|
192 |
-
# Start position in the current turn's text
|
193 |
-
"start": int,
|
194 |
-
# End position in the current turn's text
|
195 |
-
"end": int
|
196 |
-
},
|
197 |
-
# Related sentiment
|
198 |
-
"sentiment": str
|
199 |
-
}
|
200 |
-
],
|
201 |
-
|
202 |
-
"emotions": [
|
203 |
-
{
|
204 |
-
"emotion": str,
|
205 |
-
"sentiment": "positive", "negative", or "ambiguous",
|
206 |
-
"evidences": [
|
207 |
-
{
|
208 |
-
"turn": int,
|
209 |
-
"span": str,
|
210 |
-
"start": int,
|
211 |
-
"end": int
|
212 |
-
}
|
213 |
-
],
|
214 |
-
"evidence_types": [str]
|
215 |
-
}
|
216 |
-
],
|
217 |
-
|
218 |
-
"kg_label": str,
|
219 |
-
|
220 |
-
# Knowledge that may be required for each turn, used to select knowledge.
|
221 |
-
"knowledge_to_select": str,
|
222 |
-
|
223 |
-
# SQL
|
224 |
-
"sql": str,
|
225 |
-
|
226 |
-
# Rewritten text
|
227 |
-
"rewritten": str,
|
228 |
-
|
229 |
-
"roles_to_select": [str],
|
230 |
-
},
|
231 |
-
|
232 |
-
],
|
233 |
-
|
234 |
-
# Summary derived from the entire dialogue.
|
235 |
-
"summary": str,
|
236 |
-
|
237 |
-
# Entity relations determined from the entire dialogue.
|
238 |
-
"instance_relations": [
|
239 |
-
{
|
240 |
-
"instance1": str,
|
241 |
-
"instance2": str,
|
242 |
-
"relations": [
|
243 |
-
{
|
244 |
-
"relation": str,
|
245 |
-
"trigger": str
|
246 |
-
}, ...
|
247 |
-
]
|
248 |
-
}, ...
|
249 |
-
]
|
250 |
-
|
251 |
-
# Role relations determined from the entire dialogue.
|
252 |
-
"role_relations": [
|
253 |
-
{
|
254 |
-
"turn": int,
|
255 |
-
"relation": str
|
256 |
-
}
|
257 |
-
],
|
258 |
-
|
259 |
-
# Used in FriendsPersona to determine a character's persona based on the entire dialogue.
|
260 |
-
"role_personas": [
|
261 |
-
{
|
262 |
-
"name": str,
|
263 |
-
"personas": [
|
264 |
-
{
|
265 |
-
"persona": str,
|
266 |
-
"sentiment": int
|
267 |
-
}, ...
|
268 |
-
]
|
269 |
-
}
|
270 |
-
],
|
271 |
-
|
272 |
-
# External knowledge required for the dialogue.
|
273 |
-
"knowledge": {
|
274 |
-
# `text`, `persona`, `kg`, or `schema`.
|
275 |
-
"type": str,
|
276 |
-
|
277 |
-
# For `text`.
|
278 |
-
"value": str,
|
279 |
-
|
280 |
-
# For `persona`, persona of all roles, used for personachat.
|
281 |
-
"value": [
|
282 |
-
{
|
283 |
-
# Role name, matching the dialogue turn.
|
284 |
-
"role": str,
|
285 |
-
|
286 |
-
# Persona description, which may include several sentences.
|
287 |
-
"description": []
|
288 |
-
},
|
289 |
-
...
|
290 |
-
]
|
291 |
-
|
292 |
-
# For `kg`.
|
293 |
-
"value": {
|
294 |
-
# `directed` or `undirected`.
|
295 |
-
"direction": str,
|
296 |
-
|
297 |
-
# Graph.
|
298 |
-
"graph": [
|
299 |
-
{
|
300 |
-
# Source node.
|
301 |
-
"source": str,
|
302 |
-
|
303 |
-
# Target node.
|
304 |
-
"target": str,
|
305 |
-
|
306 |
-
# Relation.
|
307 |
-
"relation": str
|
308 |
-
},
|
309 |
-
...
|
310 |
-
]
|
311 |
-
}
|
312 |
-
|
313 |
-
# For `schema`.
|
314 |
-
"value": {
|
315 |
-
...
|
316 |
-
}
|
317 |
-
|
318 |
-
# For `dialogue`.
|
319 |
-
"value": {
|
320 |
-
"dialog": [],
|
321 |
-
"relations": []
|
322 |
-
}
|
323 |
-
|
324 |
-
# For `wiki`.
|
325 |
-
"value": {
|
326 |
-
...
|
327 |
-
}
|
328 |
-
|
329 |
-
# For `sql`.
|
330 |
-
"value": [
|
331 |
-
{
|
332 |
-
"turn": int,
|
333 |
-
"sql": str,
|
334 |
-
"result": ...
|
335 |
-
}, ...
|
336 |
-
],
|
337 |
-
|
338 |
-
# For dialogues based on specific article excerpts, this field indicates the article and section titles.
|
339 |
-
"value": {
|
340 |
-
"article title": str,
|
341 |
-
"section title": str
|
342 |
-
},
|
343 |
-
}
|
344 |
-
}
|
345 |
-
|
346 |
```
|
347 |
-
* Linearize: ```bash scripts/convert_to_seq.sh```
|
|
|
1 |
+
# Code directory
|
|
|
|
|
|
|
|
|
2 |
```
|
3 |
+
.
|
4 |
+
|-- pretrain: pre-training package
|
5 |
+
|-- utils
|
6 |
+
| |-- data
|
7 |
+
| |-- logger
|
8 |
+
| |-- model
|
9 |
+
| |-- tokenizer
|
10 |
+
| `-- trainer
|
11 |
+
|-- 😀[TODO: some other tasks directories]😀
|
12 |
+
`-- README.md
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
13 |
```
|
|
src/ABSA.py
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Convert Aspect-Based Sentiment Analysis (ABSA) data into the unified serial format.

Usage: python ABSA.py <input_data_path> <output_data_path>
"""
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    ABSA_TERM_CATEGORY_SENTIMENT, ABSA_TERM_OPINION_SENTIMENT, ABSA_CATEGORY_SENTIMENT, ABSA_TERM_SENTIMENT
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import (
    None_knowledge,
)
from preprocessor.label_funs import (
    extract_aspects_wrapper,
)

if __name__ == "__main__":
    # Which ABSA sub-task to serialize; the task name doubles as the
    # logger name and the task BOS token below.
    TASK = ABSA_TERM_SENTIMENT
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]

    serial_proc = SerialPreprocessor(
        SerialConfig(
            input_data_path,
            output_data_path,
            TASK,
            logger_name=TASK,
            task_bos_token=f"[{TASK}]",
            prompt_func=const_prompt_func_wrapper(
                "Extract all the aspects."
            ),
            # ABSA uses no external knowledge source.
            knowledge_func=None_knowledge,
            label_func=extract_aspects_wrapper(" | ", ", "),
        )
    )

    serial_proc.launch()
src/CC.py
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Convert chit-chat (persona-grounded response generation) data into the unified serial format.

Usage: python CC.py <input_data_path> <output_data_path>
"""
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    CHIT_CHAT
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import (
    concat_list_knowledge_wrapper,
)
from preprocessor.label_funs import (
    extract_turn_utterance,
)

if __name__ == "__main__":
    TASK = CHIT_CHAT
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]

    serial_proc = SerialPreprocessor(
        SerialConfig(
            input_data_path,
            output_data_path,
            TASK,
            logger_name=TASK,
            task_bos_token=f"[{TASK}]",
            prompt_func=const_prompt_func_wrapper(
                "Response based on the dialogue context and given self persona"
            ),
            # Persona sentences are concatenated into one knowledge string.
            knowledge_func=concat_list_knowledge_wrapper("person 2 persona: ", " | "),
            label_func=extract_turn_utterance,
            # Only "person 2" turns become training targets.
            roles_to_build_example=[["person 2"]],
        )
    )

    serial_proc.launch()
src/CI.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Convert character identification data into the unified serial format.

Usage: python CI.py <input_data_path> <output_data_path>
"""
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    CHARACTER_IDENTIFICATION
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import (
    None_knowledge,
)
from preprocessor.label_funs import (
    extract_characters,
)
from preprocessor.process_turn_funcs import introduce_mention_to_utterance_wrapper

if __name__ == "__main__":
    TASK = CHARACTER_IDENTIFICATION
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]

    serial_proc = SerialPreprocessor(
        SerialConfig(
            input_data_path,
            output_data_path,
            TASK,
            logger_name=TASK,
            task_bos_token=f"[{TASK}]",
            prompt_func=const_prompt_func_wrapper(
                "Predict all characters mentioned in the given dialogue marked by [ mention ] tag based on the context."
            ),
            knowledge_func=None_knowledge,
            label_func=extract_characters,
            # Wraps each mention span in the utterance with " [ " / " ] "
            # markers so the model can locate it.
            all_turns_process_func=introduce_mention_to_utterance_wrapper(" [ ", " ] "),
        )
    )

    serial_proc.launch()
src/DCRG.py
ADDED
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Convert (document-)grounded conversation data into the unified serial format.

Usage:
    python DCRG.py <input_data_path> <output_data_path> [based_on] [multi_ref_flag]

The optional 3rd argument selects the grounding source ("turn-document",
"document", "None", or — presumably — anything else for knowledge-graph
grounding; default "dialogue"). Passing a 4th argument switches to the
multi-reference configuration.
"""
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    DIALOGUE_CONTEXT_TO_RESPONSE_GENERATION,
    DOCUMENT_GROUNDED_CONVERSATION,
    MULTI_REF_SEP,
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import (
    extract_dialogue_knowledge_wrapper,
    origin_knowledge,
    None_knowledge,
    extract_kg_knowledge_wrapper,
    extract_turn_knowledge_wrapper,
)
from preprocessor.label_funs import (
    extract_turn_utterance,
)

if __name__ == "__main__":
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]
    TASK = DOCUMENT_GROUNDED_CONVERSATION

    based_on = sys.argv[3] if len(sys.argv) > 3 else "dialogue"

    # Keyword arguments shared by every configuration below.
    common = dict(
        logger_name=TASK,
        task_bos_token=f"[{TASK}]",
        prompt_func=const_prompt_func_wrapper(
            "Response based on the dialogue context and given knowledge"
        ),
        label_func=extract_turn_utterance,
    )

    if len(sys.argv) < 5:
        if based_on == "turn-document":
            # Per-turn document knowledge; whole-dialogue knowledge passed through.
            config = SerialConfig(
                input_data_path,
                output_data_path,
                TASK,
                knowledge_func=origin_knowledge,
                turn_knowledge_func=extract_turn_knowledge_wrapper(
                    ": ", " | ", ", "
                ),
                roles_to_build_example=[["user1"], ["user2"]],
                roles_in_history=None,
                multi_ref_sep=None,
                **common,
            )
        elif based_on == "document":
            config = SerialConfig(
                input_data_path,
                output_data_path,
                TASK,
                knowledge_func=extract_dialogue_knowledge_wrapper(
                    ": ", " | ", ", "
                ),
                roles_to_build_example=[
                    ["third-person"],
                    ["Listener"],
                    ["Speaker"],
                ],
                dev_and_test_roles_to_build_example=[
                    ["third-person"],
                    ["Listener"],
                ],
                **common,
            )
        elif based_on == "None":
            # No external grounding at all.
            config = SerialConfig(
                input_data_path,
                output_data_path,
                TASK,
                knowledge_func=None_knowledge,
                roles_to_build_example=[["SYSTEM"]],
                **common,
            )
        else:
            # Default branch ("dialogue" and anything else): KG grounding.
            config = SerialConfig(
                input_data_path,
                output_data_path,
                TASK,
                knowledge_func=extract_kg_knowledge_wrapper(": ", " | ", "; ", " "),
                roles_to_build_example=[["SYSTEM"], ["USER"]],
                dev_and_test_roles_to_build_example=[["SYSTEM"]],
                **common,
            )
    else:
        # Multi-reference configuration (a 4th CLI argument was given).
        config = SerialConfig(
            input_data_path,
            output_data_path,
            TASK,
            knowledge_func=extract_dialogue_knowledge_wrapper(": ", " | ", ", "),
            roles_to_build_example=[["SYSTEM"]],
            roles_in_history=[["USER"]],
            multi_ref_sep=MULTI_REF_SEP,
            **common,
        )

    serial_proc = SerialPreprocessor(config)
    serial_proc.launch()
src/DS.py
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Convert dialogue summarization data into the unified serial format.

Usage: python DS.py <input_data_path> <output_data_path>
"""
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    DIALOGUE_SUMMARY,
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import (
    None_knowledge,
)
from preprocessor.label_funs import (
    extract_summary,
)

if __name__ == "__main__":
    TASK = DIALOGUE_SUMMARY
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]

    serial_proc = SerialPreprocessor(
        SerialConfig(
            input_data_path,
            output_data_path,
            TASK,
            logger_name=TASK,
            task_bos_token=f"[{TASK}]",
            prompt_func=const_prompt_func_wrapper(
                "Summarize the dialogue."
            ),
            # Summaries need no external knowledge.
            knowledge_func=None_knowledge,
            label_func=extract_summary,
        )
    )

    serial_proc.launch()
src/DST.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Convert dialogue state tracking (DST) data into the unified serial format.

Usage: python DST.py <input_data_path> <output_data_path>

After serialization, any per-split ontology files present in the input
directory are copied alongside the output data.
"""
import os
import shutil
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    DIALOGUE_STATE_TRACKING,
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import None_knowledge, extract_turn_domains_wrapper
from preprocessor.label_funs import (
    extract_belief_state_wrapper,
)

if __name__ == "__main__":
    TASK = DIALOGUE_STATE_TRACKING
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]
    serial_proc = SerialPreprocessor(
        SerialConfig(
            input_data_path,
            output_data_path,
            TASK,
            logger_name=TASK,
            task_bos_token=f"[{TASK}]",
            prompt_func=const_prompt_func_wrapper(
                "Generate the dialogue state based on the given dialogue context."
            ),
            knowledge_func=None_knowledge,
            label_func=extract_belief_state_wrapper(", ", " | ", "; ", ": "),
            # States are labeled on USER turns only.
            roles_to_build_example=[["USER"]],
        )
    )

    serial_proc.launch()

    # Ship the ontology files (if present) with the converted data so
    # downstream evaluation can find them next to each split.
    for split in ["train", "dev", "test"]:
        ontology_name = f"{split}_ontology.json"
        src = os.path.join(input_data_path, ontology_name)
        if os.path.isfile(src):
            shutil.copyfile(src, os.path.join(output_data_path, ontology_name))
src/DT.py
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Convert data-to-text generation data into the unified serial format.

Usage: python DT.py <input_data_path> <output_data_path>
"""
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    DATA_TO_TEXT, MULTI_REF_SEP
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import (
    extract_dict_knowledge_wrapper,
)
from preprocessor.label_funs import (
    extract_turn_utterance,
)

if __name__ == "__main__":
    TASK = DATA_TO_TEXT
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]

    serial_proc = SerialPreprocessor(
        SerialConfig(
            input_data_path,
            output_data_path,
            TASK,
            logger_name=TASK,
            task_bos_token=f"[{TASK}]",
            prompt_func=const_prompt_func_wrapper(
                "Generate the corresponding text based on the given knowledge."
            ),
            # The structured input record is flattened to "key: value | ...".
            knowledge_func=extract_dict_knowledge_wrapper(": ", " | "),
            label_func=extract_turn_utterance,
            # No dialogue history for data-to-text; references may be multiple.
            roles_in_history=[],
            multi_ref_sep=MULTI_REF_SEP
        )
    )

    serial_proc.launch()
src/ER.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Convert emotion recognition data into the unified serial format.

Usage: python ER.py <input_data_path> <output_data_path>
"""
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    EMOTION_RECOGNITION,
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import (
    None_knowledge,
)
from preprocessor.label_funs import (
    extract_turn_emotion_wrapper,
)

if __name__ == "__main__":
    TASK = EMOTION_RECOGNITION
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]

    serial_proc = SerialPreprocessor(
        SerialConfig(
            input_data_path,
            output_data_path,
            TASK,
            logger_name=TASK,
            task_bos_token=f"[{TASK}]",
            prompt_func=const_prompt_func_wrapper(
                "Recognize correct emotions based on the dialogue context."
            ),
            knowledge_func=None_knowledge,
            # Multiple emotions per turn are joined with ", ".
            label_func=extract_turn_emotion_wrapper(", "),
        )
    )

    serial_proc.launch()
src/ID.py
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Convert intent detection data into the unified serial format.

Usage: python ID.py <input_data_path> <output_data_path>
"""
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import INTENT_DETECTION, MULTI_REF_SEP
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import None_knowledge
from preprocessor.label_funs import (
    extract_intents_wrapper,
)

if __name__ == "__main__":
    TASK = INTENT_DETECTION
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]

    # Build the serialization config up front, then run the preprocessor.
    config = SerialConfig(
        input_data_path,
        output_data_path,
        TASK,
        logger_name=TASK,
        task_bos_token=f"[{TASK}]",
        prompt_func=const_prompt_func_wrapper(
            "Detect the intent based on the given dialogue context."
        ),
        knowledge_func=None_knowledge,
        # Multiple active intents are joined with " | ".
        label_func=extract_intents_wrapper(" | "),
    )
    SerialPreprocessor(config).launch()
src/MCQA.py
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Convert multiple-choice question answering data into the unified serial format.

Usage: python MCQA.py <input_data_path> <output_data_path>
"""
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    MULTIPLE_CHOICE_QUESTION_ANSWERING,
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import (
    extract_dialogue_knowledge_wrapper,
)
from preprocessor.label_funs import (
    extract_options,
)

if __name__ == "__main__":
    TASK = MULTIPLE_CHOICE_QUESTION_ANSWERING
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]
    # NOTE(review): the original read an unused `sort_or_not` flag from
    # sys.argv; it never affected behavior and has been dropped.

    prompt = "Generate the best choice."

    serial_proc = SerialPreprocessor(
        SerialConfig(
            input_data_path,
            output_data_path,
            TASK,
            logger_name=TASK,
            task_bos_token=f"[{TASK}]",
            prompt_func=const_prompt_func_wrapper(prompt),
            knowledge_func=extract_dialogue_knowledge_wrapper(": ", " | ", ", "),
            label_func=extract_options,
        )
    )

    serial_proc.launch()
src/MRC.py
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import MACHINE_READING_COMPREHENSION, MULTI_REF_SEP
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import extract_dialogue_knowledge_wrapper
from preprocessor.label_funs import (
    extract_turn_utterance,
)

if __name__ == "__main__":
    # Serialize Machine Reading Comprehension data: answer a question given
    # a grounding document plus the dialogue context.
    TASK = MACHINE_READING_COMPREHENSION
    input_data_path, output_data_path = sys.argv[1], sys.argv[2]

    mrc_config = SerialConfig(
        input_data_path,
        output_data_path,
        TASK,
        logger_name=TASK,
        task_bos_token=f"[{TASK}]",
        prompt_func=const_prompt_func_wrapper(
            "Answer the question based on the given document and dialogue context."
        ),
        knowledge_func=extract_dialogue_knowledge_wrapper(": ", " | ", ", "),
        label_func=extract_turn_utterance,
        # USER turns form the history; SYSTEM turns become the targets,
        # with multiple references joined by MULTI_REF_SEP.
        roles_in_history=[["USER"]],
        roles_to_build_example=[["SYSTEM"]],
        multi_ref_sep=MULTI_REF_SEP,
    )
    SerialPreprocessor(mrc_config).launch()
|
src/NLI.py
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    NATURAL_LANGUAGE_INFERENCE,
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import extract_dict_knowledge_wrapper
from preprocessor.label_funs import (
    extract_options,
)

if __name__ == "__main__":
    # Serialize Natural Language Inference data as hypothesis selection.
    TASK = NATURAL_LANGUAGE_INFERENCE
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]

    nli_config = SerialConfig(
        input_data_path,
        output_data_path,
        TASK,
        logger_name=TASK,
        task_bos_token=f"[{TASK}]",
        prompt_func=const_prompt_func_wrapper("Generate the better hypothesis."),
        # Render the knowledge dict as "key: value | key: value" text.
        knowledge_func=extract_dict_knowledge_wrapper(": ", " | "),
        label_func=extract_options,
    )
    SerialPreprocessor(nli_config).launch()
|
src/QCR.py
ADDED
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    QUESTION_IN_CONTEXT_REWRITING,
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import extract_dict_knowledge_wrapper, None_knowledge
from preprocessor.label_funs import (
    extrac_rewritten,
)

if __name__ == "__main__":
    # Serialize Question-in-Context Rewriting data.
    # Usage: python QCR.py <input_data_path> <output_data_path> [use_knowledge]
    TASK = QUESTION_IN_CONTEXT_REWRITING
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]
    # Any third CLI argument switches on dict-style knowledge extraction.
    has_knowledge = len(sys.argv) > 3

    if has_knowledge:
        kf = extract_dict_knowledge_wrapper(": ", " | ")
    else:
        kf = None_knowledge

    SerialPreprocessor(
        SerialConfig(
            input_data_path,
            output_data_path,
            TASK,
            logger_name=TASK,
            task_bos_token=f"[{TASK}]",
            prompt_func=const_prompt_func_wrapper(
                "Rewrite the user utterance of current turn based on the given dialogue context."
            ),
            knowledge_func=kf,
            label_func=extrac_rewritten,
        )
    ).launch()
|
src/README.md
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Code directory
|
2 |
+
```
|
3 |
+
.
|
4 |
+
|-- pretrain: pre-training package
|
5 |
+
|-- utils
|
6 |
+
| |-- data
|
7 |
+
| |-- logger
|
8 |
+
| |-- model
|
9 |
+
| |-- tokenizer
|
10 |
+
| `-- trainer
|
11 |
+
|-- [TODO: add directories for the remaining tasks]
|
12 |
+
`-- README.md
|
13 |
+
```
|
src/RRR.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    ROLE_RELATION_RECOGNITION,
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import (
    None_knowledge,
)
from preprocessor.label_funs import (
    extract_role_relation_without_turn_wrapper,
)

if __name__ == "__main__":
    # Serialize Role Relation Recognition data: predict the relation
    # between two speakers from the whole dialogue.
    TASK = ROLE_RELATION_RECOGNITION
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]

    rrr_config = SerialConfig(
        input_data_path,
        output_data_path,
        TASK,
        logger_name=TASK,
        task_bos_token=f"[{TASK}]",
        prompt_func=const_prompt_func_wrapper(
            "Judge the relation of two roles in the given dialogue."
        ),
        # This task attaches no external knowledge.
        knowledge_func=None_knowledge,
        label_func=extract_role_relation_without_turn_wrapper("; "),
    )
    SerialPreprocessor(rrr_config).launch()
|
src/SF.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    SLOT_FILLING,
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import (
    None_knowledge,
)
from preprocessor.label_funs import (
    extract_slots_without_intents_wrapper,
)

if __name__ == "__main__":
    # Serialize Slot Filling data: extract all slot values from an utterance.
    TASK = SLOT_FILLING
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]

    sf_config = SerialConfig(
        input_data_path,
        output_data_path,
        TASK,
        logger_name=TASK,
        task_bos_token=f"[{TASK}]",
        prompt_func=const_prompt_func_wrapper(
            "Fill all the slots based on the given utterance."
        ),
        # No external knowledge is attached for slot filling.
        knowledge_func=None_knowledge,
        label_func=extract_slots_without_intents_wrapper(", ", "; "),
    )
    SerialPreprocessor(sf_config).launch()
|
src/SP.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    # NOTE(review): DIALOGUE_SUMMARY is imported but unused here — looks like
    # a copy-paste leftover; a SEMANTIC_PARSING constant should probably be
    # added to const.py instead of the literal below.
    DIALOGUE_SUMMARY,
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import (
    None_knowledge,
)
from preprocessor.label_funs import (
    extract_sql,
)

if __name__ == "__main__":
    # Serialize Semantic Parsing data (intent + slot parse as target text).
    # Usage: python SP.py <input_data_path> <output_data_path>
    TASK = "Semantic Parsing"
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]

    serial_proc = SerialPreprocessor(
        SerialConfig(
            input_data_path,
            output_data_path,
            TASK,
            logger_name=TASK,
            task_bos_token=f"[{TASK}]",
            prompt_func=const_prompt_func_wrapper(
                "Parse the sentence into intents and slots."
            ),
            knowledge_func=None_knowledge,
            label_func=extract_sql,
        )
    )

    serial_proc.launch()
|
src/T2S.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
import os

sys.path.append("modules/preprocess")

from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    TEXT2SQL,
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import (
    origin_knowledge,
    extract_schema_knowledge_wrapper,
)
from preprocessor.label_funs import (
    extract_sql,
)

import shutil

if __name__ == "__main__":
    # 8. Text2SQL
    # Serialize Text2SQL data and carry the schema/database assets along.
    TASK = TEXT2SQL
    input_data_path = sys.argv[1]
    output_data_path = sys.argv[2]

    t2s_config = SerialConfig(
        input_data_path,
        output_data_path,
        TASK,
        logger_name=TASK,
        task_bos_token=f"[{TASK}]",
        prompt_func=const_prompt_func_wrapper(
            "Parse the SQL based on the given dialogue context and schema."
        ),
        # Keep the raw knowledge on the example; schemas are rendered per turn.
        knowledge_func=origin_knowledge,
        turn_knowledge_func=extract_schema_knowledge_wrapper(),
        label_func=extract_sql,
    )
    SerialPreprocessor(t2s_config).launch()

    # Downstream evaluation needs the schema file and the databases next to
    # the serialized data, so copy them over verbatim.
    shutil.copyfile(
        os.path.join(input_data_path, "tables.json"),
        os.path.join(output_data_path, "tables.json"),
    )

    database_dst = os.path.join(output_data_path, "database")
    if not os.path.exists(database_dst):
        shutil.copytree(os.path.join(input_data_path, "database"), database_dst)
|
src/modules/preprocess/__pycache__/config.cpython-312.pyc
ADDED
Binary file (3.54 kB). View file
|
|
src/modules/preprocess/__pycache__/config.cpython-38.pyc
ADDED
Binary file (2.6 kB). View file
|
|
src/modules/preprocess/__pycache__/const.cpython-312.pyc
ADDED
Binary file (2.59 kB). View file
|
|
src/modules/preprocess/__pycache__/const.cpython-38.pyc
ADDED
Binary file (2.37 kB). View file
|
|
src/modules/preprocess/__pycache__/logger.cpython-312.pyc
ADDED
Binary file (1.21 kB). View file
|
|
src/modules/preprocess/__pycache__/logger.cpython-38.pyc
ADDED
Binary file (764 Bytes). View file
|
|
src/modules/preprocess/config.py
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Base config supporting save and load. Refer to https://github.com/huggingface/transformers/blob/main/src/transformers/configuration_utils.py.
|
3 |
+
Author: md
|
4 |
+
"""
|
5 |
+
|
6 |
+
from typing import Dict, Any
|
7 |
+
import copy
|
8 |
+
import json
|
9 |
+
|
10 |
+
|
11 |
+
class BaseConfig(object):
    """Lightweight config object with dict/JSON round-tripping.

    Modeled after HuggingFace's configuration_utils (see
    https://github.com/huggingface/transformers/blob/main/src/transformers/configuration_utils.py).
    """

    def __init__(
        self,
        logger_name: str = None,
        log_file: str = None,
        log_mode: str = "a",
        formatter: str = "%(asctime)s | %(levelname)s | %(message)s",
    ) -> None:
        """
        params:
        ------

        logger_name: logger name
        log_file: the file to output log. If `None`, output to stdout
        log_mode: mode to write to the log file, `a` is appending.
        formatter: logging formatter.
        """
        self.logger_name = logger_name
        self.log_file = log_file
        self.log_mode = log_mode
        self.formatter = formatter

    def to_json_string(self) -> str:
        """Serialize this instance to a JSON string.

        Fix: the return annotation previously claimed ``Dict[str, Any]``
        although ``json.dumps`` returns a ``str``.
        """
        output = self.to_dict()
        return json.dumps(output)

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes this instance to a Python dictionary.

        Returns:
            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        # Deep copy so callers cannot mutate this config through the dict.
        return copy.deepcopy(self.__dict__)

    def from_dict(self, config_dict: Dict[str, Any]) -> None:
        """Update this instance's attributes in place from *config_dict*."""
        self.__dict__.update(config_dict)

    def save(self, save_path: str, indent: int = 4) -> None:
        """Write this config to *save_path* as pretty-printed JSON."""
        with open(save_path, "w") as writer:
            json.dump(self.to_dict(), writer, indent=indent)

    def load(self, load_path: str) -> None:
        """Merge attributes read from the JSON file at *load_path*."""
        with open(load_path, "r") as reader:
            self.__dict__.update(json.load(reader))
|
60 |
+
|
61 |
+
|
62 |
+
if __name__ == "__main__":
|
63 |
+
config = BaseConfig()
|
64 |
+
|
65 |
+
config.from_dict({"a": 1, "b": 2, "c": "test", "d": True, "e": 3.2})
|
66 |
+
config.save("../../test/test1.json")
|
67 |
+
config.load("../../test/test1.json")
|
68 |
+
config.save("../../test/test2.json")
|
src/modules/preprocess/const.py
ADDED
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Some constant variables.
|
3 |
+
Author: md
|
4 |
+
"""
|
5 |
+
|
6 |
+
# The split names
|
7 |
+
TRAIN_SPLIT = "train"
|
8 |
+
DEV_SPLIT = "dev"
|
9 |
+
TEST_SPLIT = "test"
|
10 |
+
|
11 |
+
# Universal dialogue format keywords
|
12 |
+
DIALOG = "dialog"
|
13 |
+
ROLES = "roles"
|
14 |
+
TARGET = "target"
|
15 |
+
SUMMARY = "summary"
|
16 |
+
KNOWLEDGE = "knowledge"
|
17 |
+
UTTERANCE = "utterance"
|
18 |
+
EMOTIONS = "emotions"
|
19 |
+
EMOTION = "emotion"
|
20 |
+
VALUE = "value"
|
21 |
+
ASPECTS = "aspects"
|
22 |
+
CATEGORY = "category"
|
23 |
+
OPINION = "opinion"
|
24 |
+
SENTIMENT = "sentiment"
|
25 |
+
CHARACTERS = "characters"
|
26 |
+
START = "start"
|
27 |
+
END = "end"
|
28 |
+
BELIEF_STATE = "belief_state"
|
29 |
+
DOMAIN = "domain"
|
30 |
+
INFORMED_SLOT_VALUE_TABLE = "informed_slot_value_table"
|
31 |
+
SLOT = "slot"
|
32 |
+
VALUES = "values"
|
33 |
+
RELATION = "relation"
|
34 |
+
KNOWLEDGE_TO_SELECT = "knowledge_to_select"
|
35 |
+
SQL = "sql"
|
36 |
+
SLOT_VALUE_TABLE = "slot_value_table"
|
37 |
+
SLOTS_TO_FILL = "slots_to_fill"
|
38 |
+
ROLE_RELATIONS = "role_relations"
|
39 |
+
REWRITTEN = "rewritten"
|
40 |
+
ROLES_TO_SELECT = "roles_to_select"
|
41 |
+
ACTIVE_INTENTS = "active_intents"
|
42 |
+
|
43 |
+
|
44 |
+
# TASK NAMES
|
45 |
+
DIALOGUE_SUMMARY = "Dialogue Summary"
|
46 |
+
EMOTION_RECOGNITION = "Emotion Recognition"
|
47 |
+
DIALOGUE_CONTEXT_TO_RESPONSE_GENERATION = "Dialogue Context-to-Response Generation"
|
48 |
+
ABSA = "ABSA"
|
49 |
+
ABSA_TERM_OPINION_SENTIMENT = "ABSA: term opinion sentiment"
|
50 |
+
ABSA_TERM_CATEGORY_SENTIMENT = "ABSA: term category sentiment"
|
51 |
+
ABSA_TERM_SENTIMENT = "ABSA: term sentiment"
|
52 |
+
ABSA_CATEGORY_SENTIMENT = "ABSA: category sentiment"
|
53 |
+
CHARACTER_IDENTIFICATION = "Character Identification"
|
54 |
+
DIALOGUE_STATE_TRACKING = "Dialogue State Tracking"
|
55 |
+
DOCUMENT_GROUNDED_CONVERSATION = "Document Grounded Conversation"
|
56 |
+
TEXT2SQL = "Text2SQL"
|
57 |
+
SLOT_FILLING = "Slot Filling"
|
58 |
+
ROLE_RELATION_RECOGNITION = "Role Relation Recognition"
|
59 |
+
QUESTION_IN_CONTEXT_REWRITING = "Question in Context Rewriting"
|
60 |
+
NATURAL_LANGUAGE_INFERENCE = "Natural Language Inference"
|
61 |
+
MACHINE_READING_COMPREHENSION = "Machine Reading Comprehension"
|
62 |
+
MULTIPLE_CHOICE_QUESTION_ANSWERING = "Multiple Choice Question Answering"
|
63 |
+
INTENT_DETECTION = "Intent Detection"
|
64 |
+
DATA_TO_TEXT = "Data-to-Text"
|
65 |
+
CHIT_CHAT = "Chit-Chat"
|
66 |
+
|
67 |
+
# Seq2Seq
|
68 |
+
MULTI_REF_SEP = "__multi_ref_sep__"
|
69 |
+
OPTION_LABEL = "option_label"
|
70 |
+
CANDIDATES = "candidates"
|
71 |
+
|
72 |
+
# MENTION
|
73 |
+
MENTION = "mention"
|
src/modules/preprocess/logger.py
ADDED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Author: md
|
3 |
+
"""
|
4 |
+
import logging
|
5 |
+
import sys
|
6 |
+
|
7 |
+
|
8 |
+
def build_logger(
    logger_name: str,
    level: int,
    log_file: str = None,
    log_mode: str = "a",
    formatter: str = "%(asctime)s | [%(name)s] | %(levelname)s | %(message)s",
) -> logging.Logger:
    """Create (or reconfigure) a named logger with exactly one handler.

    params:
    ------

    logger_name: name passed to ``logging.getLogger``.
    level: logging level, e.g. ``logging.INFO``.
    log_file: the file to output log. If `None`, output to stdout.
    log_mode: mode to write to the log file, `a` is appending.
    formatter: logging format string.
    """
    logger = logging.getLogger(logger_name)

    logger.setLevel(level)

    # Drop handlers from any previous call with the same name so records
    # are not emitted more than once.
    logger.handlers.clear()

    if log_file is not None:
        handler = logging.FileHandler(log_file, log_mode)
    else:
        handler = logging.StreamHandler(sys.stdout)

    # Build the Formatter under its own name instead of rebinding the
    # `formatter` parameter (the original shadowed the format string).
    log_formatter = logging.Formatter(formatter)
    handler.setFormatter(log_formatter)
    logger.addHandler(handler)

    return logger
|
src/modules/preprocess/preprocess.py
ADDED
@@ -0,0 +1,343 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from preprocessor.SerialPreprocessor import SerialConfig, SerialPreprocessor
from const import (
    DIALOGUE_SUMMARY,
    EMOTION_RECOGNITION,
    # BUG FIX: const.py defines DIALOGUE_CONTEXT_TO_RESPONSE_GENERATION;
    # the previously imported DIALOGUE_CONTEXT_TO_TEXT_GENERATION does not
    # exist there and raised ImportError at module load.
    DIALOGUE_CONTEXT_TO_RESPONSE_GENERATION,
    ABSA_TERM_OPINION_SENTIMENT,
    ABSA_TERM_SENTIMENT,
    ABSA_CATEGORY_SENTIMENT,
    ABSA_TERM_CATEGORY_SENTIMENT,
    CHARACTER_IDENTIFICATION,
    DIALOGUE_STATE_TRACKING,
    DOCUMENT_GROUNDED_CONVERSATION,
    TEXT2SQL,
    SLOT_FILLING,
)
from preprocessor.prompt_funcs import const_prompt_func_wrapper
from preprocessor.knowledge_funcs import (
    None_knowledge,
    concat_list_knowledge_wrapper,
    extract_turn_knowledge_wrapper,
    origin_knowledge,
    extract_schema_knowledge_wrapper,
)
from preprocessor.label_funs import (
    extract_summary,
    extract_turn_emotion_wrapper,
    extract_turn_utterance,
    extract_aspects_wrapper,
    rebuild_utterance_with_characters,
    extract_belief_state_wrapper,
    extract_sql,
    extract_slots_without_intents_wrapper,
)
import os


def _serialize_all(task, input_path, output_path, **config_kwargs):
    """Serialize every dataset directory under *input_path* for *task*.

    Replaces the per-task loop the original script repeated nine times:
    each sub-directory of *input_path* is one dataset, and its serialized
    output goes to the same-named sub-directory of *output_path*.
    Extra SerialConfig keyword arguments are passed through unchanged.
    """
    for dataset in os.listdir(input_path):
        serial_proc = SerialPreprocessor(
            SerialConfig(
                os.path.join(input_path, dataset),
                os.path.join(output_path, dataset),
                task,
                logger_name=task,
                task_bos_token=f"[{task}]",
                **config_kwargs,
            )
        )
        serial_proc.launch()


if __name__ == "__main__":
    # 1. Dialogue Summary
    _serialize_all(
        DIALOGUE_SUMMARY,
        r"E:\research\processed\DialogueSummary",
        r"E:\research\seq\DialogueSummary",
        prompt_func=const_prompt_func_wrapper("Give a summary of this dialogue."),
        knowledge_func=None_knowledge,
        label_func=extract_summary,
    )

    # 2. Emotion Recognition
    _serialize_all(
        EMOTION_RECOGNITION,
        r"E:\research\processed\EmotionRecognition",
        r"E:\research\seq\EmotionRecognition",
        prompt_func=const_prompt_func_wrapper(
            "With given possible emotions, select the correct answer."
        ),
        # The candidate emotions travel as list-style knowledge.
        knowledge_func=concat_list_knowledge_wrapper("possible choices: ", " | "),
        label_func=extract_turn_emotion_wrapper(", "),
    )

    # 3. Dialogue Context-to-Response Generation
    _serialize_all(
        DIALOGUE_CONTEXT_TO_RESPONSE_GENERATION,
        r"E:\research\processed\Dialogue-Context-to-Text Generation",
        r"E:\research\seq\Dialogue-Context-to-Text Generation",
        prompt_func=const_prompt_func_wrapper(
            "With given dialogue context, give the response."
        ),
        knowledge_func=None_knowledge,
        label_func=extract_turn_utterance,
        roles_to_build_example=[["Listener"], ["third-person"]],
    )

    # 4. Aspect-based Sentiment Analysis (four variants, same config shape)
    # 4.1 ABSA: term opinion sentiment
    _serialize_all(
        ABSA_TERM_OPINION_SENTIMENT,
        r"E:\research\processed\ABSA-term opinion sentiment\ASTE",
        r"E:\research\seq\Aspect-based Sentiment Analysis\ASTE",
        prompt_func=const_prompt_func_wrapper("Give all the aspects."),
        knowledge_func=None_knowledge,
        label_func=extract_aspects_wrapper(" | ", ", "),
    )

    # 4.2 ABSA: term sentiment
    _serialize_all(
        ABSA_TERM_SENTIMENT,
        r"E:\research\processed\ABSA-term sentiment",
        r"E:\research\seq\Aspect-based Sentiment Analysis",
        prompt_func=const_prompt_func_wrapper("Give all the aspects."),
        knowledge_func=None_knowledge,
        label_func=extract_aspects_wrapper(" | ", ", "),
    )

    # 4.3 ABSA: category sentiment
    _serialize_all(
        ABSA_CATEGORY_SENTIMENT,
        r"E:\research\processed\ABSA-category sentiment",
        r"E:\research\seq\Aspect-based Sentiment Analysis",
        prompt_func=const_prompt_func_wrapper("Give all the aspects."),
        knowledge_func=None_knowledge,
        label_func=extract_aspects_wrapper(" | ", ", "),
    )

    # 4.4 ABSA: term category sentiment
    _serialize_all(
        ABSA_TERM_CATEGORY_SENTIMENT,
        r"E:\research\processed\ABSA-term category sentiment",
        r"E:\research\seq\Aspect-based Sentiment Analysis",
        prompt_func=const_prompt_func_wrapper("Give all the aspects."),
        knowledge_func=None_knowledge,
        label_func=extract_aspects_wrapper(" | ", ", "),
    )

    # 5. Character Identification
    _serialize_all(
        CHARACTER_IDENTIFICATION,
        r"E:\research\processed\CharacterIdentification",
        r"E:\research\seq\CharacterIdentification",
        prompt_func=const_prompt_func_wrapper("Generate with all characters."),
        knowledge_func=concat_list_knowledge_wrapper("all speakers: ", " | "),
        label_func=rebuild_utterance_with_characters,
    )

    # 6. Dialogue State Tracking
    _serialize_all(
        DIALOGUE_STATE_TRACKING,
        r"E:\research\processed\DialogueStateTracking",
        r"E:\research\seq\DialogueStateTracking",
        prompt_func=const_prompt_func_wrapper(
            "With given dialogue context, give the dialogue state."
        ),
        knowledge_func=None_knowledge,
        label_func=extract_belief_state_wrapper(", ", " | ", "; ", ": "),
        # Only USER turns yield DST training examples.
        roles_to_build_example=[["USER"]],
    )

    # 7. Document Grounded Conversation
    _serialize_all(
        DOCUMENT_GROUNDED_CONVERSATION,
        r"E:\research\processed\DocumentGroundedConversations",
        r"E:\research\seq\DocumentGroundedConversation",
        prompt_func=const_prompt_func_wrapper(
            "With given dialogue context, give the response."
        ),
        # Keep raw knowledge on the example; render it per turn.
        knowledge_func=origin_knowledge,
        turn_knowledge_func=extract_turn_knowledge_wrapper(": ", " | ", "; "),
        label_func=extract_turn_utterance,
    )

    # 8. Text2SQL
    _serialize_all(
        TEXT2SQL,
        r"E:\research\processed\Text2SQL",
        r"E:\research\seq\Text2SQL",
        prompt_func=const_prompt_func_wrapper(
            "With given dialogue context, give the sql."
        ),
        knowledge_func=origin_knowledge,
        turn_knowledge_func=extract_schema_knowledge_wrapper(),
        label_func=extract_sql,
    )

    # 9. Slot Filling (MultiDoGo)
    _serialize_all(
        SLOT_FILLING,
        r"E:\research\processed\SlotFilling\MultiDoGo",
        r"E:\research\seq\SlotFilling\MultiDoGo",
        prompt_func=const_prompt_func_wrapper("With given utterance, fill the slots."),
        knowledge_func=None_knowledge,
        label_func=extract_slots_without_intents_wrapper(", ", " | "),
    )
|
src/modules/preprocess/preprocessor/SerialPreprocessor.py
ADDED
@@ -0,0 +1,428 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Several preprocessor classes.
|
3 |
+
Author: md
|
4 |
+
"""
|
5 |
+
|
6 |
+
from preprocessor.base import BasePreprocessorConfig, BasePreprocessor
|
7 |
+
from const import (
|
8 |
+
DIALOGUE_SUMMARY,
|
9 |
+
DIALOGUE_CONTEXT_TO_RESPONSE_GENERATION,
|
10 |
+
DIALOG,
|
11 |
+
KNOWLEDGE,
|
12 |
+
UTTERANCE,
|
13 |
+
ROLES,
|
14 |
+
EMOTION_RECOGNITION,
|
15 |
+
VALUE,
|
16 |
+
ABSA,
|
17 |
+
CHARACTER_IDENTIFICATION,
|
18 |
+
DIALOGUE_STATE_TRACKING,
|
19 |
+
DOCUMENT_GROUNDED_CONVERSATION,
|
20 |
+
TEXT2SQL,
|
21 |
+
SLOT_FILLING,
|
22 |
+
ROLE_RELATION_RECOGNITION,
|
23 |
+
QUESTION_IN_CONTEXT_REWRITING,
|
24 |
+
NATURAL_LANGUAGE_INFERENCE,
|
25 |
+
MACHINE_READING_COMPREHENSION,
|
26 |
+
MULTIPLE_CHOICE_QUESTION_ANSWERING,
|
27 |
+
INTENT_DETECTION,
|
28 |
+
DATA_TO_TEXT,
|
29 |
+
CHIT_CHAT,
|
30 |
+
TRAIN_SPLIT,
|
31 |
+
)
|
32 |
+
from typing import Dict, List, Callable
|
33 |
+
from copy import deepcopy
|
34 |
+
|
35 |
+
|
36 |
+
class SerialConfig(BasePreprocessorConfig):
    """Configuration for :class:`SerialPreprocessor`.

    Bundles the special tokens used to serialize an example and the
    task-specific callables that extract prompts, knowledge and labels.
    All callables are optional; ``None`` disables the corresponding step.
    """

    def __init__(
        self,
        input_dir: str,
        output_dir: str,
        task: str,
        task_bos_token: str = "<s>",
        knowledge_bos_token: str = "[EK]",
        prompt_bos_token: str = "[C]",
        use_role: bool = True,
        turn_sep: str = None,
        roles_to_build_example: List = None,
        dev_and_test_roles_to_build_example: List = None,
        prompt_func: Callable = None,
        knowledge_func: Callable = None,
        label_func: Callable = None,
        turn_knowledge_func: Callable = None,
        roles_in_history: List[List] = None,
        cur_turn_process_func: Callable = None,
        all_turns_process_func: Callable = None,
        multi_ref_sep: str = None,
        *args,
        **kwargs,
    ) -> None:
        super().__init__(input_dir, output_dir, task, *args, **kwargs)

        # Prefix each utterance in the history with "role(s): " when True.
        self.use_role = use_role
        # Separator placed between turns (joined as f" {turn_sep} "); None -> single space.
        self.turn_sep = turn_sep
        # Role lists for which an example is emitted; None -> every turn qualifies.
        self.roles_to_build_example = roles_to_build_example
        # Callable producing the natural-language task prompt (expected 0-ary).
        self.prompt_func = prompt_func
        self.task_bos_token = task_bos_token
        self.knowledge_bos_token = knowledge_bos_token
        self.prompt_bos_token = prompt_bos_token
        # Callable turning example-level knowledge into a string.
        self.knowledge_func = knowledge_func
        # Callable extracting the target label from an example/turn.
        self.label_func = label_func
        # Callable refining the example knowledge per turn (e.g. section selection).
        self.turn_knowledge_func = turn_knowledge_func
        # Role lists allowed into the serialized history; None -> keep all turns.
        self.roles_in_history = roles_in_history
        # Separator used to merge multiple references on dev/test splits.
        self.multi_ref_sep = multi_ref_sep
        # Role filter override applied on non-train splits (falls back to
        # roles_to_build_example when None).
        self.dev_and_test_roles_to_build_example = dev_and_test_roles_to_build_example
        # Hook applied to the current (last) history turn before serialization.
        self.cur_turn_process_func = cur_turn_process_func
        # Hook applied to the labeled turn in label-level preprocessing.
        self.all_turns_process_func = all_turns_process_func
|
77 |
+
|
78 |
+
|
79 |
+
def concat_roles(roles):
    """Render a turn's role list as a single comma-separated string."""
    separator = ", "
    return separator.join(roles)
|
81 |
+
|
82 |
+
|
83 |
+
def concat_dial_history(config: SerialConfig, history: List[Dict]):
    """Render a dialogue history as one flat string.

    Turns whose role list is not in ``config.roles_in_history`` (when that
    filter is set) are dropped. Each kept utterance is stripped and, when
    ``config.use_role`` is True, prefixed with its comma-joined roles.

    Returns the literal string "None" when no turn survives filtering, so
    downstream concatenation always has a history segment.
    """
    # (Removed dead commented-out comprehension that predated role filtering.)
    utterance_list = []
    for turn in history:
        # Keep only turns spoken by an allowed role combination, if configured.
        if (
            config.roles_in_history is not None
            and turn[ROLES] not in config.roles_in_history
        ):
            continue

        if config.use_role:
            utterance_list.append(
                f"{concat_roles(turn[ROLES])}: {turn[UTTERANCE].strip()}"
            )
        else:
            utterance_list.append(turn[UTTERANCE].strip())

    if not utterance_list:
        return "None"

    # Pad the configured separator with spaces; default to a single space.
    turn_sep = " " if config.turn_sep is None else f" {config.turn_sep} "
    return turn_sep.join(utterance_list)
|
114 |
+
|
115 |
+
|
116 |
+
def concat_history_knowledge_prompt(
    config: SerialConfig, history: str, knowledge: str = "", prompt: str = ""
):
    """Assemble the source sequence: task token, history, knowledge, prompt.

    NOTE: the segment order is fixed now. Passing ``None`` for knowledge or
    prompt omits that segment entirely, whereas "" keeps the marker token
    with empty content.
    """
    pieces = []

    if config.task_bos_token is not None:
        pieces.append(f"{config.task_bos_token} ")

    pieces.append(history)

    if knowledge is not None:
        pieces.append(f" {config.knowledge_bos_token} {knowledge}")

    if prompt is not None:
        pieces.append(f" {config.prompt_bos_token} {prompt}")

    return "".join(pieces)
|
137 |
+
|
138 |
+
|
139 |
+
def clean(text):
    """Flatten every newline variant (\\r\\n, \\n, \\r) into a single space."""
    for newline in ("\r\n", "\n", "\r"):
        text = text.replace(newline, " ")
    return text
|
141 |
+
|
142 |
+
|
143 |
+
def add_prefix_to_label(prefix, split, label):
    """Prepend *prefix* to *label* on the train split only.

    Dev/test gold files keep the raw label so evaluation compares
    un-prefixed text.
    """
    if split == "train":
        return f"{prefix} {label}"
    return label
|
146 |
+
|
147 |
+
|
148 |
+
class SerialPreprocessor(BasePreprocessor):
    """Serializes dialogue examples into flat (src, tgt) text pairs.

    ``preprocess_line`` dispatches on ``config.task`` to one of three
    granularities: dialogue-level (one example per dialogue), turn-level
    (one example per qualifying turn) or label-level (one example per
    label yielded by ``label_func``).
    """

    def __init__(self, config: SerialConfig) -> None:
        super().__init__(config)

    def extract_knowledge(self, example: Dict):
        """Return the example-level knowledge string (or None).

        A ``knowledge_func`` taking no parameters — or an example without a
        KNOWLEDGE field — is invoked without arguments; otherwise it
        receives the raw knowledge value.
        """
        if self.config.knowledge_func is None:
            knowledge = None

        elif (
            KNOWLEDGE not in example
            or not self.config.knowledge_func.__code__.co_argcount
        ):
            knowledge = self.config.knowledge_func()
        else:
            knowledge = self.config.knowledge_func(example[KNOWLEDGE][VALUE])

        return knowledge

    def preprocess_for_dialogue_level(self, split: str, example: Dict, knowledge: str):
        """One example per dialogue: full history as src, label_func(example) as tgt."""
        label = self.config.label_func(example)
        tgt = add_prefix_to_label(self.config.task_bos_token, split, label)

        history = concat_dial_history(self.config, example[DIALOG])

        if self.config.prompt_func is None:
            prompt = ""
        elif not self.config.prompt_func.__code__.co_argcount:
            prompt = self.config.prompt_func()
        # NOTE(review): a prompt_func that takes arguments leaves `prompt`
        # unbound here (UnboundLocalError) — confirm all prompt_funcs are 0-ary.

        src = concat_history_knowledge_prompt(self.config, history, knowledge, prompt)

        return [{"src": clean(src), "tgt": clean(tgt)}]

    def preprocess_for_label_level(self, split: str, example: Dict, knowledge: str):
        """One example per (turn_id, label, extra_args) yielded by label_func."""
        label_generator = self.config.label_func(example)

        examples = []
        for turn_id, label, extra_args in label_generator:
            tgt = add_prefix_to_label(self.config.task_bos_token, split, label)

            # Deep-copy so the per-label turn rewrite never leaks into the
            # next iteration over the same dialogue.
            hist = deepcopy(example[DIALOG])
            if self.config.all_turns_process_func is not None:
                hist[turn_id] = self.config.all_turns_process_func(
                    hist[turn_id], *extra_args
                )

            history = concat_dial_history(self.config, hist)

            if self.config.prompt_func is None:
                prompt = ""
            elif not self.config.prompt_func.__code__.co_argcount:
                prompt = self.config.prompt_func()
            # NOTE(review): same unbound-`prompt` risk as dialogue-level above.

            src = concat_history_knowledge_prompt(
                self.config, history, knowledge, prompt
            )

            examples.append({"src": clean(src), "tgt": clean(tgt)})

        return examples

    def get_label(
        self, turn, include_current_turn, turn_idx, split, origin_knowledge=None
    ):
        """Return the label for one turn, or None when no example is built."""
        # skip the roles not requiring to build examples
        if (
            split != TRAIN_SPLIT
            and self.config.dev_and_test_roles_to_build_example is not None
        ):
            roles_to_build_example = self.config.dev_and_test_roles_to_build_example
        else:
            roles_to_build_example = self.config.roles_to_build_example
        if (
            roles_to_build_example is not None
            and turn[ROLES] not in roles_to_build_example
        ):
            return None

        # skip the first turn if not including current turn
        if not include_current_turn and turn_idx == 0:
            return None

        if self.config.task != DIALOGUE_STATE_TRACKING:
            try:
                label = self.config.label_func(turn, split=split)
            except:
                # NOTE(review): bare except retries with the knowledge-taking
                # signature, but it also swallows genuine label_func errors —
                # confirm this dual-signature fallback is intended.
                label = self.config.label_func(turn, origin_knowledge, split=split)
        else:
            # DST labels are resolved against the split-specific ontology.
            label = self.config.label_func(
                turn, self.ontologies[split], do_train=(split == TRAIN_SPLIT)
            )

        return label

    def preprocess_for_turn_level(
        self,
        split: str,
        example: Dict,
        knowledge: str,
        include_current_turn=False,
        origin_knowledge=None,
    ):
        """One example per qualifying turn.

        On dev/test with ``multi_ref_sep`` set, labels of consecutive
        qualifying turns are merged into one multi-reference target.
        """
        examples = []
        multiref = []
        for turn_idx, turn in enumerate(example[DIALOG]):
            label = self.get_label(
                turn, include_current_turn, turn_idx, split, origin_knowledge
            )

            if label is None:
                continue

            multiref.append(label)
            # Defer emission while the NEXT turn also yields a label, so a
            # run of consecutive labels collapses into one merged reference.
            if (
                self.config.multi_ref_sep is not None
                and split != "train"
                and turn_idx < len(example[DIALOG]) - 1
                and self.get_label(
                    example[DIALOG][turn_idx + 1],
                    include_current_turn,
                    turn_idx + 1,
                    split,
                )
                is not None
            ):
                continue

            if self.config.multi_ref_sep is not None and split != "train":
                label = self.config.multi_ref_sep.join(multiref)

            tgt = add_prefix_to_label(self.config.task_bos_token, split, label)

            # History window: up to and including the current turn only when
            # the task needs the current utterance as input.
            end = (turn_idx + 1) if include_current_turn else turn_idx

            hist = deepcopy(example[DIALOG][:end])
            if self.config.cur_turn_process_func is not None:
                hist[-1] = self.config.cur_turn_process_func(hist[-1])

            history = concat_dial_history(self.config, hist)

            if self.config.prompt_func is None:
                prompt = ""
            elif not self.config.prompt_func.__code__.co_argcount:
                prompt = self.config.prompt_func()
            # NOTE(review): same unbound-`prompt` risk as dialogue-level above.

            if self.config.turn_knowledge_func is not None:
                knowledge_to_use = self.config.turn_knowledge_func(knowledge, turn)
            else:
                knowledge_to_use = knowledge

            src = concat_history_knowledge_prompt(
                self.config, history, knowledge_to_use, prompt
            )

            examples.append({"src": clean(src), "tgt": clean(tgt)})

            multiref = []

        return examples

    def preprocess_line(self, split: str, example: Dict) -> List[Dict]:
        """Dispatch one raw example to the task-appropriate serializer.

        NOTE(review): an unrecognized task falls through and returns None
        implicitly, which would break the caller's iteration — confirm every
        configured task name is covered below.
        """
        knowledge = self.extract_knowledge(example)

        # 1. Dialogue Summary
        if self.config.task == DIALOGUE_SUMMARY:
            return self.preprocess_for_dialogue_level(split, example, knowledge)

        # 2. Emotion Recognition
        if self.config.task == EMOTION_RECOGNITION:
            return self.preprocess_for_turn_level(
                split, example, knowledge, include_current_turn=True
            )

        # 3. Dialogue Context-to-Text Generation
        if self.config.task == DIALOGUE_CONTEXT_TO_RESPONSE_GENERATION:
            return self.preprocess_for_turn_level(
                split, example, knowledge, include_current_turn=False
            )

        # 4. ABSA (prefix match: covers task variants sharing the ABSA stem)
        if self.config.task.startswith(ABSA):
            return self.preprocess_for_turn_level(
                split, example, knowledge, include_current_turn=True
            )

        # 5. Character Identification (label-level: one example per mention)
        if self.config.task == CHARACTER_IDENTIFICATION:
            return self.preprocess_for_label_level(split, example, knowledge)

        # 6. Dialogue State Tracking
        if self.config.task == DIALOGUE_STATE_TRACKING:
            return self.preprocess_for_turn_level(
                split, example, knowledge, include_current_turn=True
            )

        # 7. Document Grounded Conversation
        if self.config.task == DOCUMENT_GROUNDED_CONVERSATION:
            return self.preprocess_for_turn_level(
                split, example, knowledge, include_current_turn=False
            )

        # 8. Text2SQL — db_id is carried along for execution-based evaluation.
        if self.config.task == TEXT2SQL:
            seq_examples = self.preprocess_for_turn_level(
                split, example, knowledge, include_current_turn=True
            )

            for idx in range(len(seq_examples)):
                seq_examples[idx]["db_id"] = knowledge["db_id"]

            return seq_examples

        # 9. Slot Filling
        if self.config.task == SLOT_FILLING:
            return self.preprocess_for_turn_level(
                split, example, knowledge, include_current_turn=True
            )

        # 10. Relation Recognition
        if self.config.task == ROLE_RELATION_RECOGNITION:
            return self.preprocess_for_dialogue_level(split, example, knowledge)

        # 11. Question in Context Rewriting
        if self.config.task == QUESTION_IN_CONTEXT_REWRITING:
            return self.preprocess_for_turn_level(
                split, example, knowledge, include_current_turn=True
            )

        # 12. Natural Language Inference (raw knowledge forwarded for labels)
        if self.config.task == NATURAL_LANGUAGE_INFERENCE:
            return self.preprocess_for_turn_level(
                split,
                example,
                knowledge,
                include_current_turn=True,
                origin_knowledge=example[KNOWLEDGE][VALUE],
            )

        # 13. Machine Reading Comprehension
        if self.config.task == MACHINE_READING_COMPREHENSION:
            return self.preprocess_for_turn_level(split, example, knowledge)

        # 14. Multiple Choice Question Answering (raw knowledge forwarded)
        if self.config.task == MULTIPLE_CHOICE_QUESTION_ANSWERING:
            return self.preprocess_for_turn_level(
                split,
                example,
                knowledge,
                include_current_turn=True,
                origin_knowledge=example[KNOWLEDGE][VALUE],
            )

        # 15. Intent Detection
        if self.config.task == INTENT_DETECTION:
            return self.preprocess_for_turn_level(
                split, example, knowledge, include_current_turn=True
            )

        # 16. Data-to-Text
        if self.config.task == DATA_TO_TEXT:
            return self.preprocess_for_turn_level(
                split, example, knowledge, include_current_turn=True
            )

        # 17. Chit-Chat
        if self.config.task == CHIT_CHAT:
            return self.preprocess_for_turn_level(
                split, example, knowledge, include_current_turn=False
            )

        # 18. Semantic Parsing — literal task name; no const defined for it.
        if self.config.task == "Semantic Parsing":
            seq_examples = self.preprocess_for_turn_level(
                split, example, knowledge, include_current_turn=True
            )

            return seq_examples
|
src/modules/preprocess/preprocessor/__pycache__/SerialPreprocessor.cpython-312.pyc
ADDED
Binary file (15 kB). View file
|
|
src/modules/preprocess/preprocessor/__pycache__/SerialPreprocessor.cpython-38.pyc
ADDED
Binary file (7.83 kB). View file
|
|
src/modules/preprocess/preprocessor/__pycache__/base.cpython-312.pyc
ADDED
Binary file (8.5 kB). View file
|
|
src/modules/preprocess/preprocessor/__pycache__/base.cpython-38.pyc
ADDED
Binary file (4.48 kB). View file
|
|
src/modules/preprocess/preprocessor/__pycache__/knowledge_funcs.cpython-312.pyc
ADDED
Binary file (20.9 kB). View file
|
|
src/modules/preprocess/preprocessor/__pycache__/knowledge_funcs.cpython-38.pyc
ADDED
Binary file (15.1 kB). View file
|
|
src/modules/preprocess/preprocessor/__pycache__/label_funs.cpython-312.pyc
ADDED
Binary file (14 kB). View file
|
|
src/modules/preprocess/preprocessor/__pycache__/label_funs.cpython-38.pyc
ADDED
Binary file (9.78 kB). View file
|
|
src/modules/preprocess/preprocessor/__pycache__/process_turn_funcs.cpython-38.pyc
ADDED
Binary file (937 Bytes). View file
|
|
src/modules/preprocess/preprocessor/__pycache__/prompt_funcs.cpython-312.pyc
ADDED
Binary file (497 Bytes). View file
|
|
src/modules/preprocess/preprocessor/__pycache__/prompt_funcs.cpython-38.pyc
ADDED
Binary file (468 Bytes). View file
|
|
src/modules/preprocess/preprocessor/base.py
ADDED
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Base preprocessor class.
|
3 |
+
Author: md
|
4 |
+
"""
|
5 |
+
|
6 |
+
from config import BaseConfig
|
7 |
+
import os
|
8 |
+
from logger import build_logger
|
9 |
+
import logging
|
10 |
+
import json
|
11 |
+
from const import TRAIN_SPLIT, DEV_SPLIT, TEST_SPLIT, DIALOGUE_STATE_TRACKING
|
12 |
+
from typing import Dict
|
13 |
+
import shutil
|
14 |
+
|
15 |
+
|
16 |
+
class BasePreprocessorConfig(BaseConfig):
    """Common preprocessor settings: I/O directories, task name, log format."""

    def __init__(
        self,
        input_dir: str,
        output_dir: str,
        task: str,
        formatter="%(asctime)s | [%(name)s] | %(levelname)s | %(message)s",
        *args,
        **kwargs,
    ) -> None:
        super().__init__(*args, **kwargs)

        # Directory containing the *.jsonl files for this dataset.
        self.input_dir = input_dir
        # Directory receiving the serialized {split}.src / .tgt / .gold files.
        self.output_dir = output_dir
        # Task name; drives dispatch in SerialPreprocessor.preprocess_line.
        self.task = task
        # logging.Formatter pattern used by build_logger.
        self.formatter = formatter
|
32 |
+
|
33 |
+
|
34 |
+
class BasePreprocessor(object):
    """Base class: walks the input dir split by split, feeding every JSONL
    line through ``preprocess_line`` and writing src/tgt (or gold) files."""

    def __init__(self, config: BasePreprocessorConfig) -> None:
        self.config = config
        self.logger = build_logger(
            config.logger_name,
            logging.INFO,
            config.log_file,
            config.log_mode,
            config.formatter,
        )

        # DST needs the per-split ontology to resolve state labels.
        if self.config.task == DIALOGUE_STATE_TRACKING:
            self.ontologies = {
                split: self.load_ontology(split)
                for split in [TRAIN_SPLIT, DEV_SPLIT, TEST_SPLIT]
            }

    def load_ontology(self, split: str) -> Dict:
        """
        Load the ontology file for *split*; None when it does not exist.
        """
        ontology_file = os.path.join(self.config.input_dir, f"{split}_ontology.json")
        if not os.path.exists(ontology_file):
            return None
        # NOTE(review): file handle is never closed — consider a with-block.
        return json.load(open(ontology_file, "r", encoding="utf8"))

    def preprocess_line(self, split: str, example: Dict) -> Dict:
        """
        Every preprocessor should customize this function for all `train`, `dev` and `test` split.
        """
        raise NotImplementedError("The preprocess line procedure is required!")

    def _preprocess_file(
        self, start, infile, src_writer, tgt_writer, split, encoding="UTF-8"
    ):
        """Stream one JSONL file into the split writers.

        *start* tracks whether anything has been written yet across files;
        on dev/test a blank line separates each dialogue's reference block
        (multi-reference gold format). Returns the updated *start* flag.
        """
        with open(infile, "r", encoding=encoding) as reader:
            for line in reader:
                if line.strip():
                    example = json.loads(line)
                    if start:
                        start = False
                    elif split != "train":
                        # Blank-line separator between dialogues in gold files.
                        tgt_writer.write("\n")
                    for processed_example in self.preprocess_line(split, example):
                        src_writer.write(f"{processed_example['src']}\n")
                        tgt_writer.write(f"{processed_example['tgt']}")

                        # Text2SQL gold lines carry the database id after a tab.
                        if "db_id" in processed_example and split != "train":
                            tgt_writer.write(f"\t{processed_example['db_id']}")
                        tgt_writer.write("\n")
        return start

    def preprocess(self, split: str) -> bool:
        """Serialize every *.jsonl file of *split*; True if any file matched."""
        if not os.path.exists(self.config.output_dir):
            os.makedirs(self.config.output_dir)

        src_file = os.path.join(self.config.output_dir, f"{split}.src")
        # Train targets go to .tgt; dev/test references go to .gold.
        tgt_file = os.path.join(
            self.config.output_dir,
            f"{split}.tgt" if split == "train" else f"{split}.gold",
        )
        exist = False

        with open(src_file, "w") as src_writer, open(tgt_file, "w") as tgt_writer:
            start = True
            for filename in os.listdir(self.config.input_dir):
                # A file belongs to the split when the split name appears in it.
                if split not in filename or not filename.endswith(".jsonl"):
                    continue

                exist = True
                infile = os.path.join(self.config.input_dir, filename)

                self.logger.info(f"preprocessing {infile}")
                try:
                    start = self._preprocess_file(
                        start, infile, src_writer, tgt_writer, split
                    )
                except UnicodeDecodeError:
                    # Some datasets are Latin-1 encoded; retry once.
                    # NOTE(review): a partially-written file is retried from
                    # scratch without truncating prior output — confirm the
                    # decode error can only happen on the first read.
                    start = self._preprocess_file(
                        start, infile, src_writer, tgt_writer, split, "ISO-8859-1"
                    )

        return exist

    def launch(self) -> None:
        """Run all three splits; mirror dev<->test when one is missing."""
        self.logger.info(f"Start to preprocess: {TRAIN_SPLIT}")
        train = self.preprocess(TRAIN_SPLIT)
        # A dataset without a train split is a hard error.
        assert train

        self.logger.info(f"Start to preprocess: {DEV_SPLIT}")
        dev = self.preprocess(DEV_SPLIT)
        self.logger.info(f"Start to preprocess: {TEST_SPLIT}")
        test = self.preprocess(TEST_SPLIT)

        if dev and not test:
            self.logger.info("Copy dev to test")
            shutil.copyfile(
                os.path.join(self.config.output_dir, "dev.src"),
                os.path.join(self.config.output_dir, "test.src"),
            )
            shutil.copyfile(
                os.path.join(self.config.output_dir, "dev.gold"),
                os.path.join(self.config.output_dir, "test.gold"),
            )

        if test and not dev:
            self.logger.info("Copy test to dev")
            shutil.copyfile(
                os.path.join(self.config.output_dir, "test.src"),
                os.path.join(self.config.output_dir, "dev.src"),
            )
            shutil.copyfile(
                os.path.join(self.config.output_dir, "test.gold"),
                os.path.join(self.config.output_dir, "dev.gold"),
            )

        self.logger.info("Preprocess successfully!")
|
src/modules/preprocess/preprocessor/knowledge_funcs.py
ADDED
@@ -0,0 +1,505 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import List, Dict, Optional, Tuple
|
2 |
+
import random
|
3 |
+
import difflib
|
4 |
+
from rapidfuzz import fuzz
|
5 |
+
import sqlite3
|
6 |
+
import functools
|
7 |
+
from const import KNOWLEDGE_TO_SELECT, UTTERANCE, ROLES, BELIEF_STATE, DOMAIN
|
8 |
+
|
9 |
+
|
10 |
+
def None_knowledge():
    """Placeholder knowledge function: always yields the literal string "None"."""
    placeholder = "None"
    return placeholder
|
12 |
+
|
13 |
+
|
14 |
+
def concat_list_knowledge_wrapper(prompt: str = "", sep: str = " | "):
    """Build a knowledge function joining a list of strings with *sep*,
    prefixed by *prompt*."""

    def get_list_knowledge(str_list: List[str]):
        joined = sep.join(str_list)
        return f"{prompt}{joined}"

    return get_list_knowledge
|
19 |
+
|
20 |
+
|
21 |
+
def origin_knowledge(knowledge):
    """Identity knowledge function: pass the raw knowledge value through."""
    passthrough = knowledge
    return passthrough
|
23 |
+
|
24 |
+
|
25 |
+
def extract_turn_knowledge(
    knowledge, section_prompt_op, section_sep, section_value_sep
):
    """Serialize structured knowledge into one flat string.

    dict  -> "key<op>value" fragments (keys sorted) joined by *section_sep*;
             list values are first joined by *section_value_sep*.
    str   -> returned unchanged.
    list  -> each element serialized recursively, joined by ";; ".
    Any other type falls through and yields None.
    """
    if isinstance(knowledge, str):
        return knowledge

    if isinstance(knowledge, dict):
        fragments = []
        for key in sorted(knowledge):
            value = knowledge[key]
            rendered = f"{key}{section_prompt_op}"
            if isinstance(value, str):
                rendered = f"{rendered}{value}"
            elif isinstance(value, list):
                rendered = rendered + section_value_sep.join(value)
            fragments.append(rendered)
        return section_sep.join(fragments)

    if isinstance(knowledge, list):
        parts = [
            extract_turn_knowledge(
                item, section_prompt_op, section_sep, section_value_sep
            )
            for item in knowledge
        ]
        return ";; ".join(parts)
|
52 |
+
|
53 |
+
|
54 |
+
def extract_turn_domains_wrapper(prompt: str = "", sep: str = ", "):
    """Build a turn-knowledge function listing the distinct domains of the
    turn's belief state (first-seen order), prefixed by *prompt*."""

    def extract_turn_domains(knowledge, turn):
        seen = []
        for state in turn[BELIEF_STATE]:
            name = state[DOMAIN]
            if name not in seen:
                seen.append(name)

        return prompt + sep.join(seen)

    return extract_turn_domains
|
66 |
+
|
67 |
+
|
68 |
+
def extract_turn_knowledge_wrapper(section_prompt_op, section_sep, section_value_sep):
    """Build a turn-knowledge function that serializes only the knowledge
    sections the current turn selects via its KNOWLEDGE_TO_SELECT key list."""

    def extract_turn_knowledge_func(knowledge, turn):
        selected = [knowledge[key] for key in turn[KNOWLEDGE_TO_SELECT]]
        return extract_turn_knowledge(
            selected,
            section_prompt_op,
            section_sep,
            section_value_sep,
        )

    return extract_turn_knowledge_func
|
78 |
+
|
79 |
+
|
80 |
+
# Text2SQL
# Dataset names with a usable database — presumably ones whose DB content can
# be queried for value matching; TODO confirm against the dataset loaders.
EXIST = {"atis", "geo", "advising", "yelp", "restaurants", "imdb", "academic"}

# fmt: off
# English stopword list (NLTK-style) used to reject spurious value matches.
_stopwords = {'who', 'ourselves', 'down', 'only', 'were', 'him', 'at', "weren't", 'has', 'few', "it's", 'm', 'again',
              'd', 'haven', 'been', 'other', 'we', 'an', 'own', 'doing', 'ma', 'hers', 'all', "haven't", 'in', 'but',
              "shouldn't", 'does', 'out', 'aren', 'you', "you'd", 'himself', "isn't", 'most', 'y', 'below', 'is',
              "wasn't", 'hasn', 'them', 'wouldn', 'against', 'this', 'about', 'there', 'don', "that'll", 'a', 'being',
              'with', 'your', 'theirs', 'its', 'any', 'why', 'now', 'during', 'weren', 'if', 'should', 'those', 'be',
              'they', 'o', 't', 'of', 'or', 'me', 'i', 'some', 'her', 'do', 'will', 'yours', 'for', 'mightn', 'nor',
              'needn', 'the', 'until', "couldn't", 'he', 'which', 'yourself', 'to', "needn't", "you're", 'because',
              'their', 'where', 'it', "didn't", 've', 'whom', "should've", 'can', "shan't", 'on', 'had', 'have',
              'myself', 'am', "don't", 'under', 'was', "won't", 'these', 'so', 'as', 'after', 'above', 'each', 'ours',
              'hadn', 'having', 'wasn', 's', 'doesn', "hadn't", 'than', 'by', 'that', 'both', 'herself', 'his',
              "wouldn't", 'into', "doesn't", 'before', 'my', 'won', 'more', 'are', 'through', 'same', 'how', 'what',
              'over', 'll', 'yourselves', 'up', 'mustn', "mustn't", "she's", 're', 'such', 'didn', "you'll", 'shan',
              'when', "you've", 'themselves', "mightn't", 'she', 'from', 'isn', 'ain', 'between', 'once', 'here',
              'shouldn', 'our', 'and', 'not', 'too', 'very', 'further', 'while', 'off', 'couldn', "hasn't", 'itself',
              'then', 'did', 'just', "aren't"}
# fmt: on

# Frequent answer words that should likewise never be treated as DB values.
_commonwords = {"no", "yes", "many"}
|
102 |
+
|
103 |
+
|
104 |
+
def is_number(s: str) -> bool:
    """Return True if *s* parses as a float after dropping thousands commas.

    Note: "nan"/"inf" also parse as floats and therefore count as numbers.
    """
    try:
        float(s.replace(",", ""))
        return True
    except (AttributeError, TypeError, ValueError):
        # Narrowed from a bare `except:` — AttributeError/TypeError cover
        # non-string input, ValueError covers non-numeric text; anything
        # else (e.g. KeyboardInterrupt) should propagate.
        return False
|
110 |
+
|
111 |
+
|
112 |
+
def is_stopword(s: str) -> bool:
    """True if the trimmed token is an English stopword."""
    token = s.strip()
    return token in _stopwords
|
114 |
+
|
115 |
+
|
116 |
+
def is_commonword(s: str) -> bool:
    """True if the trimmed token is one of the over-frequent answer words."""
    token = s.strip()
    return token in _commonwords
|
118 |
+
|
119 |
+
|
120 |
+
def is_common_db_term(s: str) -> bool:
    """True for generic database terms (currently only "id") that are too
    common to anchor a value match."""
    return s.strip() == "id"
|
122 |
+
|
123 |
+
|
124 |
+
class Match(object):
    """Lightweight span record: character offset *start* and length *size*."""

    def __init__(self, start: int, size: int) -> None:
        # Offset of the first character of the matched span.
        self.start = start
        # Number of characters in the span.
        self.size = size
|
128 |
+
|
129 |
+
|
130 |
+
def is_span_separator(c: str) -> bool:
    """True if *c* is punctuation/whitespace that may delimit a value span."""
    separators = "'\"()`,.?! "
    return c in separators
|
132 |
+
|
133 |
+
|
134 |
+
def split(s: str) -> List[str]:
    """Per-character lowercased list of the trimmed string.

    (NOTE: shadows the builtin-like name `split`; lowering is applied per
    character, which matters for rare multi-char case mappings.)
    """
    trimmed = s.strip()
    return [ch.lower() for ch in trimmed]
|
136 |
+
|
137 |
+
|
138 |
+
def prefix_match(s1: str, s2: str) -> bool:
    """True if the first non-separator characters of *s1* and *s2* match.

    Boundary behavior: the middle branch is only reachable when a string is
    empty (the for-loop otherwise leaves its index on the last character),
    so two empty strings count as a match. A string made entirely of
    separators leaves its index on its final separator, and that separator
    itself gets compared — NOTE(review): confirm that quirk is intended.
    """
    i, j = 0, 0
    # Advance each index to the first non-separator character (if any).
    for i in range(len(s1)):
        if not is_span_separator(s1[i]):
            break
    for j in range(len(s2)):
        if not is_span_separator(s2[j]):
            break
    if i < len(s1) and j < len(s2):
        return s1[i] == s2[j]
    elif i >= len(s1) and j >= len(s2):
        # Both strings empty: treat as matching.
        return True
    else:
        return False
|
152 |
+
|
153 |
+
|
154 |
+
def get_effective_match_source(s: str, start: int, end: int) -> Optional[Match]:
    """Widen [start, end) to nearby separator boundaries, then trim separators.

    Scans at most 2 characters left of *start* and a few characters right of
    *end* looking for a span separator (or the string edge). Returns None if
    no boundary is found on either side; otherwise a Match covering the
    separator-trimmed span inside *s*.
    """
    _start = -1

    # Look left (up to 2 chars) for a separator or the start of the string.
    for i in range(start, start - 2, -1):
        if i < 0:
            _start = i + 1
            break
        if is_span_separator(s[i]):
            _start = i
            break

    if _start < 0:
        return None

    _end = -1
    # Look right (from end-1 onward) for a separator or the end of the string.
    for i in range(end - 1, end + 3):
        if i >= len(s):
            _end = i - 1
            break
        if is_span_separator(s[i]):
            _end = i
            break

    if _end < 0:
        return None

    # Shrink the widened span so it neither starts nor ends on a separator.
    while _start < len(s) and is_span_separator(s[_start]):
        _start += 1
    while _end >= 0 and is_span_separator(s[_end]):
        _end -= 1

    return Match(_start, _end - _start + 1)
|
186 |
+
|
187 |
+
|
188 |
+
def get_matched_entries(
    s: str, field_values: List[str], m_theta: float = 0.85, s_theta: float = 0.85
) -> Optional[List[Tuple[str, Tuple[str, str, float, float, int]]]]:
    """Fuzzily match database cell values against question text *s*.

    Uses difflib's longest common block between the character-split question
    and each field value, filters out numbers, stopwords, common words and
    generic DB terms, and scores the remainder with fuzz.ratio. Returns None
    when nothing clears both thresholds; otherwise (match_str, (field_value,
    source_match_str, match_score, s_match_score, match_size)) items sorted
    best-first.
    """
    if not field_values:
        return None

    if isinstance(s, str):
        n_grams = split(s)
    else:
        n_grams = s

    matched = dict()
    for field_value in field_values:
        if not isinstance(field_value, str):
            continue
        fv_tokens = split(field_value)
        sm = difflib.SequenceMatcher(None, n_grams, fv_tokens)
        match = sm.find_longest_match(0, len(n_grams), 0, len(fv_tokens))
        if match.size > 0:
            # Expand the raw character match to word-ish boundaries in s.
            source_match = get_effective_match_source(
                n_grams, match.a, match.a + match.size
            )
            if source_match and source_match.size > 1:
                match_str = field_value[match.b : match.b + match.size]
                source_match_str = s[
                    source_match.start : source_match.start + source_match.size
                ]
                c_match_str = match_str.lower().strip()
                c_source_match_str = source_match_str.lower().strip()
                c_field_value = field_value.lower().strip()
                if (
                    c_match_str
                    and not is_number(c_match_str)
                    and not is_common_db_term(c_match_str)
                ):
                    if (
                        is_stopword(c_match_str)
                        or is_stopword(c_source_match_str)
                        or is_stopword(c_field_value)
                    ):
                        continue
                    # Possessive form ("X's") counts as a perfect mention of X.
                    if c_source_match_str.endswith(c_match_str + "'s"):
                        match_score = 1.0
                    else:
                        if prefix_match(c_field_value, c_source_match_str):
                            match_score = (
                                fuzz.ratio(c_field_value, c_source_match_str) / 100
                            )
                        else:
                            match_score = 0
                    # Common words must match perfectly to be kept.
                    if (
                        is_commonword(c_match_str)
                        or is_commonword(c_source_match_str)
                        or is_commonword(c_field_value)
                    ) and match_score < 1:
                        continue
                    s_match_score = match_score
                    if match_score >= m_theta and s_match_score >= s_theta:
                        # All-caps field values (e.g. codes) require exactness.
                        if field_value.isupper() and match_score * s_match_score < 1:
                            continue
                        matched[match_str] = (
                            field_value,
                            source_match_str,
                            match_score,
                            s_match_score,
                            match.size,
                        )

    if not matched:
        return None
    else:
        # Rank by match_score, then s_match_score, then span size.
        return sorted(
            matched.items(),
            key=lambda x: (1e16 * x[1][2] + 1e8 * x[1][3] + x[1][4]),
            reverse=True,
        )
|
264 |
+
|
265 |
+
|
266 |
+
@functools.lru_cache(maxsize=1000, typed=False)
def get_column_picklist(table_name: str, column_name: str, db_path: str) -> list:
    """Return the distinct values of `table_name`.`column_name` in the SQLite db.

    bytes rows are decoded (utf-8, falling back to latin-1); str rows are
    encoded to bytes, mirroring the connection's bytes text_factory. Results
    are memoized per (table, column, db_path).
    """
    fetch_sql = "SELECT DISTINCT `{}` FROM `{}`".format(column_name, table_name)
    picklist = set()
    # Connect OUTSIDE the try block: if connect() itself fails there is no
    # `conn` to close, and the original `finally: conn.close()` raised
    # UnboundLocalError, masking the real exception.  Likewise `picklist` is
    # now always bound before the return.
    conn = sqlite3.connect(db_path)
    try:
        conn.text_factory = bytes
        c = conn.cursor()
        c.execute(fetch_sql)
        for x in c.fetchall():
            if isinstance(x[0], str):
                picklist.add(x[0].encode("utf-8"))
            elif isinstance(x[0], bytes):
                try:
                    picklist.add(x[0].decode("utf-8"))
                except UnicodeDecodeError:
                    picklist.add(x[0].decode("latin-1"))
            else:
                picklist.add(x[0])
    finally:
        conn.close()
    return list(picklist)
|
289 |
+
|
290 |
+
|
291 |
+
def get_database_matches(
    question: str,
    table_name: str,
    column_name: str,
    db_path: str,
    top_k_matches: int = 2,
    match_threshold: float = 0.85,
) -> List[str]:
    """Return up to *top_k_matches* values of a column fuzzily mentioned in *question*.

    Columns whose name contains "name" only accept perfect-score matches;
    sqlite_sequence (a Spider artifact table) is skipped entirely.
    """
    picklist = get_column_picklist(
        table_name=table_name, column_name=column_name, db_path=db_path
    )
    matches = []
    # Only string picklists can be matched against the question text.
    if picklist and isinstance(picklist[0], str):
        matched_entries = get_matched_entries(
            s=question,
            field_values=picklist,
            m_theta=match_threshold,
            s_theta=match_threshold,
        )
        if matched_entries:
            num_values_inserted = 0
            for _match_str, (
                field_value,
                _s_match_str,
                match_score,
                s_match_score,
                _match_size,
            ) in matched_entries:
                # "name"-like columns require a perfect combined score.
                if "name" in column_name and match_score * s_match_score < 1:
                    continue
                if table_name != "sqlite_sequence":  # Spider database artifact
                    matches.append(field_value)
                    num_values_inserted += 1
                    if num_values_inserted >= top_k_matches:
                        break
    return matches
|
327 |
+
|
328 |
+
|
329 |
+
def serialize_schema(
    question: str,
    db_path: str,
    db_id: str,
    db_column_names: Dict[str, str],
    db_table_names: List[str],
    schema_serialization_type: str = "peteshaw",
    schema_serialization_randomized: bool = False,
    schema_serialization_with_db_id: bool = True,
    schema_serialization_with_db_content: bool = False,
    normalize_query: bool = True,
) -> str:
    """Linearize a relational schema into a single string for text-to-SQL input.

    Two layouts are supported: "verbose" ("Table: t. Columns: ...") and
    "peteshaw" (" | t : c1 , c2"). With db-content enabled, cell values that
    fuzzily match *question* are appended after their column. Tables may be
    shuffled (training augmentation) and the db id optionally prefixed.
    """
    if schema_serialization_type == "verbose":
        db_id_str = "Database: {db_id}. "
        table_sep = ". "
        table_str = "Table: {table}. Columns: {columns}"
        column_sep = ", "
        column_str_with_values = "{column} ({values})"
        column_str_without_values = "{column}"
        value_sep = ", "
    elif schema_serialization_type == "peteshaw":
        # see https://github.com/google-research/language/blob/master/language/nqg/tasks/spider/append_schema.py#L42
        db_id_str = "{db_id}"
        table_sep = ""
        table_str = " | {table} : {columns}"
        column_sep = " , "
        column_str_with_values = "{column} ( {values} )"
        column_str_without_values = "{column}"
        value_sep = " , "
    else:
        raise NotImplementedError

    def get_column_str(table_name: str, column_name: str) -> str:
        # Render one column, optionally appending DB values mentioned in the
        # question (content-enhanced serialization).
        column_name_str = column_name.lower() if normalize_query else column_name
        if schema_serialization_with_db_content:
            matches = get_database_matches(
                question=question,
                table_name=table_name,
                column_name=column_name,
                db_path=(db_path + "/" + db_id + "/" + db_id + ".sqlite"),
            )
            if matches:
                return column_str_with_values.format(
                    column=column_name_str, values=value_sep.join(matches)
                )
            else:
                return column_str_without_values.format(column=column_name_str)
        else:
            return column_str_without_values.format(column=column_name_str)

    # One entry per table: its (possibly lower-cased) name plus the columns
    # whose parallel table_id matches this table's index.
    tables = [
        table_str.format(
            table=table_name.lower() if normalize_query else table_name,
            columns=column_sep.join(
                map(
                    lambda y: get_column_str(table_name=table_name, column_name=y[1]),
                    filter(
                        lambda y: y[0] == table_id,
                        zip(
                            db_column_names["table_id"],
                            db_column_names["column_name"],
                        ),
                    ),
                )
            ),
        )
        for table_id, table_name in enumerate(db_table_names)
    ]
    if schema_serialization_randomized:
        random.shuffle(tables)
    if schema_serialization_with_db_id:
        serialized_schema = db_id_str.format(db_id=db_id) + table_sep.join(tables)
    else:
        serialized_schema = table_sep.join(tables)
    return serialized_schema
|
404 |
+
|
405 |
+
|
406 |
+
def extract_schema_knowledge_wrapper(
    schema_serialization_type: str = "peteshaw",
    schema_serialization_randomized: bool = False,
    schema_serialization_with_db_id: bool = True,
    schema_serialization_with_db_content: bool = False,
    normalize_query: bool = True,
):
    """Bind serialization options and return a (knowledge, turn) -> str callable."""

    def extract_turn_schema_knowledge_func(knowledge, turn):
        schema = knowledge["schema"]
        # column_names_original is a list of (table_id, column_name) pairs;
        # split it into the two parallel lists serialize_schema expects.
        table_ids = [pair[0] for pair in schema["column_names_original"]]
        names = [pair[1] for pair in schema["column_names_original"]]
        db_column_names = {"table_id": table_ids, "column_name": names}
        return serialize_schema(
            turn[UTTERANCE],
            knowledge["db_path"],
            knowledge["db_id"],
            db_column_names,
            schema["table_names_original"],
            schema_serialization_type,
            schema_serialization_randomized,
            schema_serialization_with_db_id,
            schema_serialization_with_db_content,
            normalize_query,
        )

    return extract_turn_schema_knowledge_func
|
435 |
+
|
436 |
+
|
437 |
+
def extract_dict_knowledge(knowledge, key_prompt_op, pair_sep):
    """Serialize a flat dict as `key{key_prompt_op}value` pairs joined by *pair_sep*.

    Returns the literal string "None" when the dict is empty.
    """
    pairs = [f"{key}{key_prompt_op}{value}" for key, value in knowledge.items()]
    return pair_sep.join(pairs) if pairs else "None"
|
446 |
+
|
447 |
+
|
448 |
+
def extract_dict_knowledge_wrapper(key_prompt_op, pair_sep):
    """Pre-bind the separators and return a one-argument dict serializer."""
    return lambda knowledge: extract_dict_knowledge(knowledge, key_prompt_op, pair_sep)
|
453 |
+
|
454 |
+
|
455 |
+
def extract_dialogue_knowledge(knowledge, key_prompt_op, pair_sep, role_sep):
    """Serialize a knowledge dict whose values are strings or dialog-turn lists.

    String values become `key{op}value`; list values are rendered as
    "ROLE# utterance" turns joined by single spaces. Returns "None" when
    nothing serializable is present.
    """
    pair_list = []
    for key in knowledge:
        if isinstance(knowledge[key], str):
            pair_list.append(f"{key}{key_prompt_op}{knowledge[key]}")
        elif isinstance(knowledge[key], list):
            # A list value is a mini-dialog: render each turn as "roles# text".
            turns = []
            for turn in knowledge[key]:
                role_str = role_sep.join(turn[ROLES])
                turns.append(f"{role_str}# {turn[UTTERANCE]}")
            dial_str = " ".join(turns)
            pair_list.append(f"{key}{key_prompt_op}{dial_str}")
    if not pair_list:
        return "None"
    return pair_sep.join(pair_list)
|
470 |
+
|
471 |
+
|
472 |
+
def extract_dialogue_knowledge_wrapper(key_prompt_op, pair_sep, role_sep):
    """Pre-bind the separator configuration for dialogue-knowledge serialization."""

    def _serialize(knowledge):
        return extract_dialogue_knowledge(knowledge, key_prompt_op, pair_sep, role_sep)

    return _serialize
|
477 |
+
|
478 |
+
|
479 |
+
def extract_kg_knowledge(
    knowledge, key_prompt_op, pair_sep, intra_edge_sep, inner_edge_sep
):
    """Serialize knowledge-graph entries.

    String values become `key{op}value`; list values are edge lists, where
    each edge's parts are joined by *inner_edge_sep* and edges by
    *intra_edge_sep*. Returns "None" when nothing serializable is present.
    """
    rendered = []
    for key, value in knowledge.items():
        if isinstance(value, str):
            rendered.append(f"{key}{key_prompt_op}{value}")
        elif isinstance(value, list):
            kg_str = intra_edge_sep.join(inner_edge_sep.join(edge) for edge in value)
            rendered.append(f"{key}{key_prompt_op}{kg_str}")
    return pair_sep.join(rendered) if rendered else "None"
|
495 |
+
|
496 |
+
|
497 |
+
def extract_kg_knowledge_wrapper(
    key_prompt_op, pair_sep, intra_edge_sep, inner_edge_sep
):
    """Pre-bind the separators and return a one-argument KG serializer."""
    return lambda knowledge: extract_kg_knowledge(
        knowledge, key_prompt_op, pair_sep, intra_edge_sep, inner_edge_sep
    )
|
src/modules/preprocess/preprocessor/label_funs.py
ADDED
@@ -0,0 +1,324 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from const import (
|
2 |
+
SUMMARY,
|
3 |
+
EMOTIONS,
|
4 |
+
EMOTION,
|
5 |
+
UTTERANCE,
|
6 |
+
ASPECTS,
|
7 |
+
TARGET,
|
8 |
+
VALUE,
|
9 |
+
OPINION,
|
10 |
+
SENTIMENT,
|
11 |
+
CATEGORY,
|
12 |
+
CHARACTERS,
|
13 |
+
DIALOG,
|
14 |
+
START,
|
15 |
+
END,
|
16 |
+
BELIEF_STATE,
|
17 |
+
DOMAIN,
|
18 |
+
INFORMED_SLOT_VALUE_TABLE,
|
19 |
+
SLOT,
|
20 |
+
VALUES,
|
21 |
+
RELATION,
|
22 |
+
SQL,
|
23 |
+
SLOT_VALUE_TABLE,
|
24 |
+
SLOTS_TO_FILL,
|
25 |
+
ROLE_RELATIONS,
|
26 |
+
REWRITTEN,
|
27 |
+
ROLES_TO_SELECT,
|
28 |
+
ACTIVE_INTENTS,
|
29 |
+
TRAIN_SPLIT,
|
30 |
+
OPTION_LABEL,
|
31 |
+
CANDIDATES,
|
32 |
+
)
|
33 |
+
from typing import Dict
|
34 |
+
import re
|
35 |
+
import random
|
36 |
+
import copy
|
37 |
+
import json
|
38 |
+
|
39 |
+
|
40 |
+
def extract_summary(dial: Dict, **kwargs):
    """Return the gold summary stored on *dial* (the full dialog dict)."""
    return dial[SUMMARY]
|
45 |
+
|
46 |
+
|
47 |
+
def extract_turn_emotion(turn: Dict, sep: str, **kwargs):
    """Join the turn's emotion labels with *sep*; None if the turn has no emotions."""
    if EMOTIONS not in turn:
        return None
    labels = (entry[EMOTION] for entry in turn[EMOTIONS])
    return sep.join(labels)
|
51 |
+
|
52 |
+
|
53 |
+
def extract_turn_emotion_wrapper(sep: str):
    """Pre-bind the separator and return a one-turn emotion extractor."""
    return lambda turn, **kwargs: extract_turn_emotion(turn, sep)
|
58 |
+
|
59 |
+
|
60 |
+
def extract_turn_utterance(turn: Dict, **kwargs):
    """Return the raw utterance text of a single turn."""
    return turn[UTTERANCE]
|
62 |
+
|
63 |
+
|
64 |
+
def extract_aspects(turn: Dict, ext_aspect_sep: str, int_aspect_sep: str):
    """Linearize a turn's ABSA aspects.

    Each aspect's available parts (target value, category, opinion value,
    sentiment) are joined by *int_aspect_sep*; aspects are joined by
    *ext_aspect_sep*. Returns "None" when the turn has no aspects.
    """
    if not turn[ASPECTS]:
        return "None"

    aspects = turn[ASPECTS]

    tgt_seq = []
    for aspect in aspects:
        aspect_seq = []
        # Every component is optional; include only the ones present.
        if TARGET in aspect:
            aspect_seq.append(aspect[TARGET][VALUE])
        if CATEGORY in aspect:
            aspect_seq.append(aspect[CATEGORY])

        if OPINION in aspect:
            aspect_seq.append(aspect[OPINION][VALUE])

        if SENTIMENT in aspect:
            aspect_seq.append(aspect[SENTIMENT])

        tgt_seq.append(int_aspect_sep.join(aspect_seq))

    return ext_aspect_sep.join(tgt_seq)
|
87 |
+
|
88 |
+
|
89 |
+
def extract_aspects_wrapper(ext_aspect_sep: str, int_aspect_sep: str):
    """Pre-bind the aspect separators and return a one-turn extractor."""
    return lambda turn, **kwargs: extract_aspects(turn, ext_aspect_sep, int_aspect_sep)
|
94 |
+
|
95 |
+
|
96 |
+
def rebuild_utterance_with_characters(turn: Dict, split):
    """Render a turn's character annotations for training or evaluation.

    For the "train" split, the utterance is rewritten with each character
    mention bracketed as "[mention | value]". For other splits, a
    "value, start, end" tuple list joined by " | " (or "None" when empty)
    is returned instead. Assumes the CHARACTERS list is sorted by START.
    """
    if split == "train":
        utterance = turn[UTTERANCE]
        parts = []
        pre = 0  # end offset of the last copied plain-text segment

        for character in turn[CHARACTERS]:
            parts.append(utterance[pre : character[START]])
            parts.append(
                f"[{utterance[character[START]: character[END]]} | {character[VALUE]}]"
            )
            pre = character[END]

        parts.append(utterance[pre:])
        return "".join(parts)

    else:
        tuples = []
        for character in turn[CHARACTERS]:
            tuples.append(f"{character[VALUE]}, {character[START]}, {character[END]}")

        if not tuples:
            return "None"
        return " | ".join(tuples)
|
120 |
+
|
121 |
+
|
122 |
+
def extract_characters(example):
    """Yield (turn_id, character_value, (end_offset,)) for every annotated character.

    NOTE(review): only the END offset is yielded, wrapped in a 1-tuple —
    presumably the consumer re-locates the mention itself; confirm against
    callers.
    """
    for turn_id, turn in enumerate(example[DIALOG]):
        if CHARACTERS not in turn:
            continue

        for character in turn[CHARACTERS]:
            yield turn_id, character[VALUE], (character[END],)
|
129 |
+
|
130 |
+
|
131 |
+
def extract_belief_state(
    turn,
    value_sep,
    domain_sep,
    slot_sep,
    domain_prompt_op,
    ontology=None,
    do_train=True,
):
    """Linearize a turn's belief state as domain-grouped slot/relation/values text.

    Slots inside each domain are shuffled when *do_train* is True (order
    augmentation). *ontology* is currently unused; it belongs to the
    commented-out "spare slot" variant kept below. Returns "None" when the
    state is empty.
    """
    domain_bs = dict()
    bs = turn[BELIEF_STATE]

    # spare_bs = {domain: {slot for slot in ontology[domain]} for domain in ontology}

    # Group informed slot/value pairs per domain, merging repeated slots.
    for state in bs:
        domain = state[DOMAIN]
        if domain not in domain_bs:
            domain_bs[domain] = dict()

        if INFORMED_SLOT_VALUE_TABLE not in state:
            continue

        for svp in state[INFORMED_SLOT_VALUE_TABLE]:
            slot = svp[SLOT]
            values = svp[VALUES]
            relation = svp[RELATION]

            if slot not in domain_bs[domain]:
                domain_bs[domain][slot] = {"relation": relation, "values": []}
            domain_bs[domain][slot]["values"] += list(map(lambda x: x[VALUE], values))

            # spare_bs[domain].remove(slot)

    domain_bs_list = []
    for domain in domain_bs:
        svp_list = []
        for slot in domain_bs[domain]:
            val_str = value_sep.join(domain_bs[domain][slot]["values"])
            svp_list.append(f"{slot} {domain_bs[domain][slot]['relation']} {val_str}")

        # control whether to add spare slots
        # for slot in sorted(spare_bs[domain]):
        #     svp_list.append(f"{slot} = None")
        if not svp_list:
            continue
        if do_train:
            # shuffle for training
            random.shuffle(svp_list)

        # append a slot separator at the end to alleviate the problem of end point prediction of T5
        svt_str = slot_sep.join(svp_list) + slot_sep

        domain_bs_list.append(f"{domain}{domain_prompt_op}{svt_str.strip()}")

    if not domain_bs_list:
        return "None"

    return domain_sep.join(domain_bs_list)
|
189 |
+
|
190 |
+
|
191 |
+
def extract_belief_state_wrapper(value_sep, domain_sep, slot_sep, domain_prompt_op):
    """Pre-bind separator configuration for belief-state linearization."""

    def _extract(turn, ontology, do_train=True, **kwargs):
        return extract_belief_state(
            turn,
            value_sep,
            domain_sep,
            slot_sep,
            domain_prompt_op,
            ontology,
            do_train=do_train,
        )

    return _extract
|
204 |
+
|
205 |
+
|
206 |
+
def normalize(query: str) -> str:
    """Canonicalize an SQL string.

    Lower-cases words outside quotation marks, collapses runs of whitespace,
    removes spaces before commas, glues aggregate names to their opening
    parenthesis, tightens bracket spacing, and joins split two-character
    operators (">=", "<=", "!=").
    """

    def _lowercase(sql: str) -> str:
        # Lower-case only words not adjacent to a quote, preserving quoted literals.
        return re.sub(
            r"\b(?<!['\"])(\w+)(?!['\"])\b", lambda m: m.group(1).lower(), sql
        )

    def _collapse_spaces(sql: str) -> str:
        return " ".join(sql.split())

    def _fix_commas(sql: str) -> str:
        return sql.replace(" , ", ", ")

    def _fix_spacing(sql: str) -> str:
        # "count (" -> "count(" for the five aggregates.
        sql = re.sub(
            r"(count|max|min|sum|avg)\s\(",
            lambda m: m.group(0).replace(" ", ""),
            sql,
        )
        # "( x" -> "(x" and "x )" -> "x)".
        sql = re.sub(r"\(\s", lambda m: m.group(0)[:-1], sql)
        sql = re.sub(r"\s\)", lambda m: m.group(0)[1:], sql)
        # "> =" -> ">=", "< =" -> "<=", "! =" -> "!=".
        return re.sub(
            r"((>|<|!)\s=)",
            lambda m: m.group(0).replace(" ", ""),
            sql,
        )

    return _fix_spacing(_fix_commas(_collapse_spaces(_lowercase(query))))
|
245 |
+
|
246 |
+
|
247 |
+
def extract_sql(turn, split):
    """Return the turn's SQL query (normalized on the train split); None if absent."""
    if SQL not in turn:
        return None
    if split == "train":
        return normalize(turn[SQL])
    return turn[SQL]
|
252 |
+
|
253 |
+
|
254 |
+
def extract_slots_without_intents(turn, value_sep, slot_sep):
    """Linearize a turn's slot/relation/values triples, ignoring intents.

    Returns "None" when the turn has no slot-value table. Otherwise joins
    "slot relation v1{value_sep}v2" items with *slot_sep*; a trailing
    separator is appended (eases T5 end-point prediction) and the whole
    string is stripped.
    """
    if SLOTS_TO_FILL not in turn or not turn[SLOTS_TO_FILL][SLOT_VALUE_TABLE]:
        return "None"
    slots = []
    for svp in turn[SLOTS_TO_FILL][SLOT_VALUE_TABLE]:
        slots.append(
            svp[SLOT]
            + " "
            + svp[RELATION]
            + " "
            + value_sep.join(map(lambda x: x[VALUE], svp[VALUES]))
        )

    return (slot_sep.join(slots) + slot_sep).strip()
|
268 |
+
|
269 |
+
|
270 |
+
def extract_slots_without_intents_wrapper(value_sep, slot_sep):
    """Pre-bind the separators for slot linearization."""
    return lambda turn, **kwargs: extract_slots_without_intents(
        turn, value_sep, slot_sep
    )
|
275 |
+
|
276 |
+
|
277 |
+
def extract_role_relation_without_turn(dialog, relation_sep):
    """Join every dialog-level role relation with *relation_sep*."""
    relations = (rel[RELATION] for rel in dialog[ROLE_RELATIONS])
    return relation_sep.join(relations)
|
279 |
+
|
280 |
+
|
281 |
+
def extract_role_relation_without_turn_wrapper(relation_sep):
    """Pre-bind the relation separator."""
    return lambda dialog, **kwargs: extract_role_relation_without_turn(
        dialog, relation_sep
    )
|
286 |
+
|
287 |
+
|
288 |
+
def extrac_rewritten(turn, **kwargs):
    """Return the turn's rewritten utterance, or None when the turn has none."""
    # NOTE(review): the function name is missing a "t" ("extract") — kept
    # as-is because renaming would break external callers.
    if REWRITTEN not in turn:
        return None
    return turn[REWRITTEN]
|
292 |
+
|
293 |
+
|
294 |
+
def extract_options(turn, knowledge, split=None):
    """Resolve the turn's gold option.

    Training split: return the gold candidate's text looked up in
    *knowledge*. Other splits: return a JSON blob carrying the gold label
    and all candidates so the evaluator can score the ranking. Returns None
    when the turn carries no `roles_to_select`.
    """
    if ROLES_TO_SELECT not in turn:
        return None
    if split == TRAIN_SPLIT:
        return knowledge[turn[ROLES_TO_SELECT][0]]
    else:
        return json.dumps(
            {OPTION_LABEL: turn[ROLES_TO_SELECT][0], CANDIDATES: knowledge}
        )
|
303 |
+
|
304 |
+
|
305 |
+
# def extract_roles_wrapper(role_sep):
|
306 |
+
# def extract_roles_func(turn, knowledge, split=None):
|
307 |
+
# return extract_options(turn, know)
|
308 |
+
|
309 |
+
# return extract_roles_func
|
310 |
+
|
311 |
+
|
312 |
+
def extract_intents(turn, intent_sep):
    """Join the turn's active intents (underscores become spaces); "None" if empty."""
    active = turn[ACTIVE_INTENTS]
    if not active:
        return "None"
    return intent_sep.join(intent.replace("_", " ") for intent in active)
|
318 |
+
|
319 |
+
|
320 |
+
def extract_intents_wrapper(intent_sep):
    """Pre-bind the intent separator."""
    return lambda turn, **kwargs: extract_intents(turn, intent_sep)
|
src/modules/preprocess/preprocessor/process_turn_funcs.py
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from const import UTTERANCE, CHARACTERS, START, END, MENTION
|
2 |
+
|
3 |
+
|
4 |
+
def introduce_mention_to_utterance(turn, insert_index, left_bracket, right_bracket):
    """Splice a bracketed MENTION marker into the turn's utterance at *insert_index*.

    Mutates *turn* in place and also returns it.
    """
    utterance = turn[UTTERANCE]
    marker = left_bracket + MENTION + right_bracket
    turn[UTTERANCE] = utterance[:insert_index] + marker + utterance[insert_index:]
    return turn
|
14 |
+
|
15 |
+
|
16 |
+
def introduce_mention_to_utterance_wrapper(left_bracket, right_bracket):
    """Pre-bind the bracket pair for mention insertion."""
    return lambda turn, insert_index, **kwargs: introduce_mention_to_utterance(
        turn, insert_index, left_bracket, right_bracket
    )
|
src/modules/preprocess/preprocessor/prompt_funcs.py
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
def const_prompt_func_wrapper(const_prompt):
    """Return a zero-argument callable that always yields *const_prompt*."""

    def _prompt():
        return const_prompt

    return _prompt
|
src/preprocess.sh
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
# Preprocess one dataset. Usage: preprocess.sh <raw-data-root> <output-root>
set -e  # abort on the first failing command (e.g. `cd` into a missing dir);
        # previously a failed cd still ran the python step in the wrong place

cd preprocess

INPUT=$1
OUTPUT=$2

DATA="MAMS-ACSA"
echo "--> ${DATA}"
# python ${DATA}.py --input_dir "${INPUT}/TaskMaster/TM-1-2019" --output_dir "${OUTPUT}/${DATA}"
python ${DATA}.py --input_dir "${INPUT}/MAMS/data/${DATA}/raw" --output_dir "${OUTPUT}/MAMS/${DATA}"
|
src/preprocess/ASTE.py
ADDED
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from utils import write_jsonl_file, parse, read_line_labels
|
2 |
+
import os
|
3 |
+
|
4 |
+
# Map the dataset's 3-letter sentiment tags to full-word labels.
sent_map = {
    "POS": "positive",
    "NEU": "neutral",
    "NEG": "negative",
}
|
9 |
+
|
10 |
+
|
11 |
+
def get_char_index(lengths, tok_ids):
    """Convert a contiguous token-index span into a character span.

    *lengths* is a prefix-sum of token lengths with a leading 0
    (lengths[i] = total characters of tokens 0..i-1, excluding spaces), so
    token i starts at character lengths[i] + i once the i single-space
    separators are counted. Returns (start, end) offsets into the
    space-joined utterance.
    """
    first, last = tok_ids[0], tok_ids[-1]
    start = lengths[first] + first
    span_chars = lengths[last + 1] - lengths[first]  # chars inside the tokens
    gaps = last - first  # one space between each adjacent token pair
    return start, start + span_chars + gaps
|
21 |
+
|
22 |
+
|
23 |
+
def parse_aspects(utterance, aspects):
    """Convert token-index ASTE triples into char-offset aspect dicts.

    *aspects* is a list of (target_tok_ids, opinion_tok_ids, sentiment_tag)
    triples over the whitespace-tokenized *utterance*. Character offsets are
    verified against the utterance before being emitted.
    """
    toks = utterance.split()
    # Prefix-sum of token lengths with a leading 0: lengths[i] = chars in tokens 0..i-1.
    lengths = list(map(lambda x: len(x), toks))
    lengths = [0] + lengths

    for i in range(1, len(lengths)):
        lengths[i] += lengths[i - 1]

    parsed_aspects = []
    for target, opinion, sentiment in aspects:
        target_start, target_end = get_char_index(lengths, target)
        opinion_start, opinion_end = get_char_index(lengths, opinion)
        target_value = " ".join(toks[target[0] : target[-1] + 1])
        opinion_value = " ".join(toks[opinion[0] : opinion[-1] + 1])

        # Sanity-check that the computed character spans reproduce the text.
        assert target_value == utterance[target_start:target_end]
        assert opinion_value == utterance[opinion_start:opinion_end]

        parsed_aspects.append(
            {
                "target": {
                    "value": target_value,
                    "start": target_start,
                    "end": target_end,
                },
                "opinion": {
                    "value": opinion_value,
                    "start": opinion_start,
                    "end": opinion_end,
                },
                "sentiment": sent_map[sentiment],
            }
        )

    return parsed_aspects
|
58 |
+
|
59 |
+
|
60 |
+
def reformat(args, file):
    """Convert one ASTE split ("<domain>/<file>.txt") into unified dialog jsonl.

    Each line is "utterance####aspect_triples"; the triples field is a Python
    literal list of (target_tok_ids, opinion_tok_ids, sentiment) tuples.
    """
    import ast  # local import: parsing literals is only needed here

    for domain in os.listdir(args.input_dir):
        path = os.path.join(args.input_dir, domain, f"{file}.txt")
        data = read_line_labels(path)

        dials = []
        for line in data:
            utterance, aspects = line.strip().split("####")
            # SECURITY: the annotation field comes from a data file; parse it
            # with ast.literal_eval instead of eval so a crafted file cannot
            # execute arbitrary code. Behavior is identical for literal data.
            aspects = ast.literal_eval(aspects)

            dial = {
                "turn": "single",
                "locale": "en",
                "dialog": [
                    {
                        "roles": ["USER"],
                        "utterance": utterance,
                        "aspects": parse_aspects(utterance, aspects),
                    }
                ],
            }

            dials.append(dial)

        write_jsonl_file(
            dials, os.path.join(args.output_dir, domain, f"{file}.jsonl")
        )
|
87 |
+
|
88 |
+
|
89 |
+
def preprocess(args):
    """Reformat all three dataset splits."""
    for split in ("train", "dev", "test"):
        reformat(args, split)
|
94 |
+
|
95 |
+
if __name__ == "__main__":
    # CLI entry point: parse --input_dir/--output_dir and run every split.
    args = parse()
    preprocess(args)
|
src/preprocess/AlphaNLI.py
ADDED
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from utils import read_jsonl_file, write_jsonl_file, parse, read_line_labels
|
2 |
+
import os
|
3 |
+
import copy
|
4 |
+
|
5 |
+
# Ordinal names for the raw "1"/"2" hypothesis labels.
label2nl = {"1": "First", "2": "Second"}
|
6 |
+
|
7 |
+
|
8 |
+
def preprocess_for_train_and_dev(args, file):
    """Convert one AlphaNLI split (jsonl + label list) into the unified dialog format.

    Each example becomes a two-turn "dialog" (the two observations). The two
    hypotheses are stored as knowledge candidates and the gold one is named
    via `roles_to_select` on the final turn.

    The several alternative encodings that used to live here as commented-out
    code (hypotheses as extra turns, positive/negative turn pairs) have been
    removed; recover them from version control if needed.
    """
    data_path = os.path.join(args.input_dir, f"{file}.jsonl")
    data = read_jsonl_file(data_path)

    label_path = os.path.join(args.input_dir, f"{file}-labels.lst")
    labels = read_line_labels(label_path)

    turns = []
    for idx, example in enumerate(data):
        turn = {
            "turn": "multi",
            "locale": "en",
            "dialog": [
                {"roles": ["First observation"], "utterance": example["obs1"]},
                {
                    "roles": ["Second observation"],
                    "utterance": example["obs2"],
                    # The gold hypothesis is referenced by its candidate name.
                    "roles_to_select": [f"hypothesis candidate {labels[idx]}"],
                },
            ],
        }

        turn["knowledge"] = {
            "type": "text",
            "value": {
                "hypothesis candidate 1": example["hyp1"],
                "hypothesis candidate 2": example["hyp2"],
            },
        }

        turns.append(turn)

    write_jsonl_file(turns, os.path.join(args.output_dir, f"{file}.jsonl"))
|
75 |
+
|
76 |
+
|
77 |
+
def preprocess(args):
    """Convert the train and dev splits."""
    for split in ("train", "dev"):
        preprocess_for_train_and_dev(args, split)
|
80 |
+
|
81 |
+
|
82 |
+
if __name__ == "__main__":
|
83 |
+
args = parse()
|
84 |
+
preprocess(args)
|
src/preprocess/Banking77.py
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from utils import write_jsonl_file, read_csv_file, parse
|
2 |
+
|
3 |
+
|
4 |
+
def reformat(args, file):
    """Convert one Banking77 CSV split into single-turn dialog jsonl.

    The CSV exposes "text" (the query) and "category" (the intent) columns.
    """
    source = args.input_dir + "/" + file + ".csv"
    data = read_csv_file(source)

    turns = []
    for idx in range(len(data)):
        turns.append(
            {
                "turn": "single",
                "locale": "en",
                "dialog": [
                    {
                        "roles": ["USER"],
                        "utterance": data["text"][idx],
                        "active_intents": [data["category"][idx]],
                    }
                ],
            }
        )

    write_jsonl_file(turns, args.output_dir + "/" + file + ".jsonl")
|
23 |
+
|
24 |
+
|
25 |
+
def preprocess(args):
    """Convert both available splits."""
    for split in ("train", "test"):
        reformat(args, split)
|
29 |
+
|
30 |
+
if __name__ == "__main__":
    # CLI entry point: parse --input_dir/--output_dir and run both splits.
    args = parse()
    preprocess(args)
|