Minh2508 committed on
Commit 8a2f1d8 · verified · 1 Parent(s): 60bd814

Update config.json

Files changed (1)
config.json +38 -38
config.json CHANGED
@@ -1,39 +1,39 @@
-{
-  "model_type": "zdecode",
-  "architectures": [
-    "ZDECODE"
-  ],
-  "profile_name": "1b_3e_8l_t4x2",
-  "vocab_size": 200024,
-  "text_embed_dim": 1024,
-  "vision_embed_dim": 1024,
-  "hidden_dim": 1024,
-  "ffn_dim": 6144,
-  "num_layers": 8,
-  "num_heads": 16,
-  "num_kv_heads": 4,
-  "num_experts": 3,
-  "top_k": 2,
-  "max_position_embeddings": 16384,
-  "router_aux_loss_coef": 0.01,
-  "share_experts_across_layers": false,
-  "gradient_checkpointing": true,
-  "num_agents": 4,
-  "moe_capacity_factor": 1.0,
-  "moe_hierarchy_groups": 1,
-  "moe_hierarchy_top_k": 1,
-  "num_shared_experts": 0,
-  "load_balancing_mode": "aux_free",
-  "router_bias_update_rate": 0.01,
-  "kv_latent_dim": 128,
-  "kv_cache_dtype": "int4",
-  "rope_training_context": 16384,
-  "rope_ntk_alpha": 1.0,
-  "rope_yarn_scale": 1.0,
-  "ring_attention_chunk_size": 0,
-  "prefill_chunk_size": 256,
-  "use_q_former_projector": true,
-  "q_former_queries": 8,
-  "q_former_layers": 1,
-  "tokenizer_name": "ai-tokenizer:GPT-5"
+{
+  "model_type": "afmoe",
+  "architectures": [
+    "MOE"
+  ],
+  "profile_name": "1b_3e_8l_t4x2",
+  "vocab_size": 200024,
+  "text_embed_dim": 1024,
+  "vision_embed_dim": 1024,
+  "hidden_dim": 1024,
+  "ffn_dim": 6144,
+  "num_layers": 8,
+  "num_heads": 16,
+  "num_kv_heads": 4,
+  "num_experts": 3,
+  "top_k": 2,
+  "max_position_embeddings": 16384,
+  "router_aux_loss_coef": 0.01,
+  "share_experts_across_layers": false,
+  "gradient_checkpointing": true,
+  "num_agents": 4,
+  "moe_capacity_factor": 1.0,
+  "moe_hierarchy_groups": 1,
+  "moe_hierarchy_top_k": 1,
+  "num_shared_experts": 0,
+  "load_balancing_mode": "aux_free",
+  "router_bias_update_rate": 0.01,
+  "kv_latent_dim": 128,
+  "kv_cache_dtype": "int4",
+  "rope_training_context": 16384,
+  "rope_ntk_alpha": 1.0,
+  "rope_yarn_scale": 1.0,
+  "ring_attention_chunk_size": 0,
+  "prefill_chunk_size": 256,
+  "use_q_former_projector": true,
+  "q_former_queries": 8,
+  "q_former_layers": 1,
+  "tokenizer_name": "ai-tokenizer:GPT-5"
 }
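
For reference, a minimal Python sketch that loads the updated config and sanity-checks the two fields this commit renames (model_type and architectures). The load_config helper and its assertions are illustrative assumptions, not part of this repository.

import json

def load_config(path: str) -> dict:
    # Hypothetical helper: read the checkpoint's config.json and check the
    # values introduced by this commit.
    with open(path) as f:
        cfg = json.load(f)
    assert cfg["model_type"] == "afmoe", "expected the renamed model_type"
    assert "MOE" in cfg["architectures"], "expected the renamed architecture"
    # MoE routing sanity check: cannot route each token to more experts
    # than exist (here top_k=2 out of num_experts=3).
    assert cfg["top_k"] <= cfg["num_experts"]
    return cfg

cfg = load_config("config.json")
print(cfg["model_type"], cfg["num_experts"], cfg["top_k"])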