LightChen2333 committed
Commit 8e002f1 · 1 Parent(s): fa9e36d
Upload 3 files

config.json CHANGED (+23 -14)
@@ -186,7 +186,7 @@
     "intent_classifier": {
       "_model_target_": "model.decoder.classifier.MLPClassifier",
       "dropout_rate": 0.4,
-      "ignore_index":
+      "ignore_index": -100,
       "input_dim": 384,
       "intent_label_num": 7,
       "loss_fn": {
@@ -195,7 +195,7 @@
       "mlp": [
         {
           "_model_target_": "torch.nn.Linear",
-          "in_features":
+          "in_features": 384,
           "out_features": 256
         },
         {
@@ -205,14 +205,14 @@
         {
           "_model_target_": "torch.nn.Linear",
           "in_features": 256,
-          "out_features":
+          "out_features": 7
         }
       ],
       "mode": "token-level-intent",
       "multi_threshold": 0.5,
       "return_sentence_level": true,
       "use_intent": true,
-      "use_multi":
+      "use_multi": true,
       "weight": 0.2
     },
     "interaction": {
@@ -220,25 +220,26 @@
       "alpha": 0.2,
       "dropout_rate": 0.4,
       "hidden_dim": 256,
-      "input_dim":
+      "input_dim": 384,
       "intent_embedding_dim": 256,
-      "intent_label_num":
+      "intent_label_num": 7,
       "num_heads": 4,
       "num_layers": 2,
-      "output_dim":
+      "output_dim": 256,
       "row_normalized": true,
-      "slot_graph_window": 1
+      "slot_graph_window": 1,
+      "slot_label_num": 72
     },
     "slot_classifier": {
       "_model_target_": "model.decoder.classifier.MLPClassifier",
       "dropout_rate": 0.4,
-      "ignore_index":
+      "ignore_index": -100,
       "input_dim": 384,
       "mlp": [
         {
           "_model_target_": "torch.nn.Linear",
-          "in_features":
-          "out_features":
+          "in_features": 256,
+          "out_features": 256
         },
         {
           "_model_target_": "torch.nn.LeakyReLU",
@@ -246,8 +247,8 @@
         },
         {
           "_model_target_": "torch.nn.Linear",
-          "in_features":
-          "out_features":
+          "in_features": 256,
+          "out_features": 72
         }
       ],
       "mode": "slot",
@@ -268,7 +269,8 @@
     },
     "embedding": {
       "dropout_rate": 0.4,
-      "embedding_dim": 128
+      "embedding_dim": 128,
+      "vocab_size": 11758
     },
     "encoder_name": "self-attention-lstm",
     "lstm": {
@@ -283,6 +285,13 @@
     }
   },
   "return_dict": false,
+  "tokenizer": {
+    "_align_mode_": "fast",
+    "_padding_side_": "right",
+    "_tokenizer_name_": "word_tokenizer",
+    "add_special_tokens": false,
+    "max_length": 512
+  },
   "tokenizer_class": "OpenSLUv1",
   "torch_dtype": "float32",
   "transformers_version": "4.25.1",
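The commit fills in the decoder dimensions that were previously left blank. For orientation, below is a minimal PyTorch sketch of the two classifier heads these values describe. It is not OpenSLU's actual `MLPClassifier`: the intent head's hidden activation falls outside the diff hunks and a `LeakyReLU` is assumed by analogy with the slot head, and the placement of `dropout_rate` is likewise an assumption.

```python
import torch
import torch.nn as nn

# Intent head per the new config: 384 -> 256 -> 7 intent labels.
intent_head = nn.Sequential(
    nn.Dropout(p=0.4),    # "dropout_rate": 0.4 (placement assumed)
    nn.Linear(384, 256),  # "in_features": 384, "out_features": 256
    nn.LeakyReLU(),       # assumed by analogy with the slot head
    nn.Linear(256, 7),    # "intent_label_num": 7
)

# Slot head per the new config: 256 -> 256 -> 72 slot labels.
slot_head = nn.Sequential(
    nn.Dropout(p=0.4),
    nn.Linear(256, 256),  # the slot MLP starts from 256 features in this diff
    nn.LeakyReLU(),       # the "torch.nn.LeakyReLU" entry in the slot "mlp" list
    nn.Linear(256, 72),   # "slot_label_num": 72
)

# Token-level shape check: batch of 2 utterances, 10 tokens each.
tokens = torch.randn(2, 10, 384)           # encoder output, "input_dim": 384
print(intent_head(tokens).shape)           # torch.Size([2, 10, 7])
interaction_out = torch.randn(2, 10, 256)  # interaction "output_dim": 256
print(slot_head(interaction_out).shape)    # torch.Size([2, 10, 72])
```

Note that the slot classifier's `input_dim` stays 384 while its first `Linear` now takes 256 features; how `MLPClassifier` bridges the two is internal to OpenSLU and not visible in this diff.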
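The `_model_target_` keys name fully qualified class paths, in the style of Hydra's `_target_` convention. The sketch below shows how such an entry could be resolved into a live object; `instantiate` is a hypothetical helper for illustration, not OpenSLU's actual loader.

```python
import importlib
from typing import Any

def instantiate(spec: dict) -> Any:
    """Resolve a "_model_target_" entry into an object (hypothetical helper).

    All other keys are forwarded as constructor keyword arguments.
    """
    module_name, _, class_name = spec["_model_target_"].rpartition(".")
    cls = getattr(importlib.import_module(module_name), class_name)
    kwargs = {k: v for k, v in spec.items() if k != "_model_target_"}
    return cls(**kwargs)

# One entry from this diff's intent "mlp" list:
layer = instantiate({
    "_model_target_": "torch.nn.Linear",
    "in_features": 384,
    "out_features": 256,
})
print(layer)  # Linear(in_features=384, out_features=256, bias=True)
```

Nested specs such as the `loss_fn` block would need a recursive variant, omitted here.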