Update config.json
config.json CHANGED (+1 -5)
@@ -2,10 +2,6 @@
   "architectures": [
     "HelpingAIForCausalLM"
   ],
-  "auto_map": {
-    "AutoConfig": "configuration_HelpingAI.HelpingAIConfig",
-    "AutoModelForCausalLM": "modeling_HelpingAI.HelpingAIForCausalLM"
-  },
   "attention_dropout": 0.0,
   "attention_softmax_in_fp32": true,
   "bos_token_id": 1,
@@ -15,7 +11,7 @@
   "initializer_range": 0.02,
   "layer_norm_epsilon": 1e-05,
   "masked_softmax_fusion": true,
-  "model_type": "
+  "model_type": "gemma",
   "n_head": 112,
   "n_layer": 70,
   "pad_token_id": 3,
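For context, this change affects how transformers resolves classes for the checkpoint: with the "auto_map" block removed, AutoConfig no longer pulls the custom configuration_HelpingAI/modeling_HelpingAI code from the repo (so trust_remote_code is not needed), and the "model_type" value alone determines which built-in config and model classes are selected. Below is a minimal sketch of that resolution mechanics; the repo id is hypothetical (the repository name is not shown in this diff), and whether the weights actually load correctly under the Gemma classes depends on the rest of the config and the checkpoint, which this sketch does not verify.

```python
from transformers import AutoConfig, AutoModelForCausalLM

# Hypothetical repo id, for illustration only.
repo_id = "HelpingAI/HelpingAI-180B"

# Without "auto_map", AutoConfig resolves the class from the registered
# "model_type" mapping instead of custom code in the repo.
config = AutoConfig.from_pretrained(repo_id)
print(type(config).__name__, config.model_type)  # with "model_type": "gemma" -> GemmaConfig gemma

# AutoModelForCausalLM then picks the built-in causal-LM class for that
# config type; no trust_remote_code argument is required anymore.
model = AutoModelForCausalLM.from_pretrained(repo_id)
```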