Update config.json
config.json (+25 -27)
CHANGED
@@ -4,33 +4,31 @@
   ],
   "model_type": "openlm",
   "params": null,
-  "
-  ⋮                                   (removed lines 8-31 not captured here)
-  "weight_tying": false
-  },
+  "apply_qk_norm": true,
+  "attn_activation": null,
+  "attn_name": "auto",
+  "attn_seq_scalar": null,
+  "attn_seq_scalar_alpha": null,
+  "dim": 4096,
+  "ffn_type": "swiglu",
+  "model": "open_lm_7b",
+  "model_norm": "gain_only_lp_layer_norm",
+  "moe_capacity_factor": 1.25,
+  "moe_expert_model_parallelism": false,
+  "moe_freq": 0,
+  "moe_loss_weight": 0.1,
+  "moe_num_experts": null,
+  "moe_top_k": 2,
+  "moe_weight_parallelism": false,
+  "n_heads": 32,
+  "n_layers": 32,
+  "norm_eps": 1e-05,
+  "positional_embedding_type": "rotary",
+  "post_embed_norm": false,
+  "qk_norm": true,
+  "seq_len": 2048,
+  "vocab_size": 50432,
+  "weight_tying": false,
   "torch_dtype": "float32",
   "transformers_version": "4.41.2"
 }
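With this change the open_lm hyperparameters ("dim": 4096, "n_layers": 32, "n_heads": 32, SwiGLU FFN, rotary positions, untied embeddings, a 50432-token vocabulary) sit at the top level of config.json. Below is a minimal sketch that reads the updated file and sanity-checks the implied ~7B parameter count; the SwiGLU hidden width uses the LLaMA-style rounding rule (2/3 of 4*dim, rounded up to a multiple of 256) that open_lm models are commonly configured with, which is an assumption here rather than something stated in the config.

import json

# Read the updated config.json from this commit.
with open("config.json") as f:
    cfg = json.load(f)

dim        = cfg["dim"]          # 4096
n_layers   = cfg["n_layers"]     # 32
vocab_size = cfg["vocab_size"]   # 50432

# Assumption: SwiGLU hidden width follows the LLaMA-style rule,
# i.e. 2/3 of 4*dim rounded up to a multiple of 256 -> 11008 for dim = 4096.
hidden = 256 * ((int(2 * 4 * dim / 3) + 255) // 256)

attn  = 4 * dim * dim        # q/k/v/o projections per layer
ffn   = 3 * dim * hidden     # the three SwiGLU matrices per layer
trunk = n_layers * (attn + ffn)

# "weight_tying": false, so input embedding and output head are separate;
# layer norms and other small gain parameters are ignored here.
embeddings = 2 * vocab_size * dim

print(f"~{(trunk + embeddings) / 1e9:.2f}B parameters")   # ~6.89B

Since "openlm" is not a model type that ships with transformers 4.41.2, loading the checkpoint itself (as opposed to the raw JSON) generally goes through the repository's custom code, for example AutoModelForCausalLM.from_pretrained(..., trust_remote_code=True).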