mlabonne committed
Commit e32b161
1 Parent(s): 6fcd04e

Upload folder using huggingface_hub
Files changed (3):
  1. config.json +24 -6
  2. mergekit_moe_config.yml +1 -1
  3. modeling_phi.py +1 -1
config.json CHANGED
@@ -1,32 +1,50 @@
 {
-  "_name_or_path": "microsoft/phi-2",
+  "_name_or_path": "cognitivecomputations/dolphin-2_6-phi-2",
   "activation_function": "gelu_new",
   "architectures": [
-    "PhiForCausalLM"
+    "MixtralForCausalLM"
   ],
+  "attention_dropout": 0.0,
   "attn_pdrop": 0.0,
   "auto_map": {
-    "AutoConfig": "configuration_phi.PhiConfig",
-    "AutoModelForCausalLM": "modeling_phi.PhiForCausalLM"
+    "AutoConfig": "cognitivecomputations/dolphin-2_6-phi-2--configuration_phi.PhiConfig",
+    "AutoModelForCausalLM": "cognitivecomputations/dolphin-2_6-phi-2--modeling_phi.PhiForCausalLM"
   },
+  "bos_token_id": null,
   "embd_pdrop": 0.0,
+  "eos_token_id": null,
   "flash_attn": false,
   "flash_rotary": false,
   "fused_dense": false,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
   "img_processor": null,
   "initializer_range": 0.02,
+  "intermediate_size": 14336,
   "layer_norm_epsilon": 1e-05,
-  "model_type": "phi-msft",
+  "max_position_embeddings": 2048,
+  "model_type": "mixtral",
   "n_embd": 2560,
   "n_head": 32,
   "n_head_kv": null,
   "n_inner": null,
   "n_layer": 32,
   "n_positions": 2048,
+  "num_attention_heads": 32,
+  "num_experts_per_tok": 2,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "num_local_experts": 2,
+  "output_router_logits": false,
   "resid_pdrop": 0.1,
+  "rms_norm_eps": 1e-06,
+  "rope_theta": 10000.0,
   "rotary_dim": 32,
+  "router_aux_loss_coef": 0.001,
+  "sliding_window": null,
   "tie_word_embeddings": false,
   "torch_dtype": "float16",
-  "transformers_version": "4.35.2",
+  "transformers_version": "4.36.2",
+  "use_cache": false,
   "vocab_size": 51200
 }
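config.json now describes the merged model as a two-expert, Mixtral-style MoE ("model_type": "mixtral", "num_local_experts": 2, "num_experts_per_tok": 2) served through the custom code referenced in auto_map, so loading it needs trust_remote_code=True. A minimal loading sketch using only standard transformers calls; the repo id below is a placeholder for wherever this folder is published:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "mlabonne/phixtral-2x2_8"  # placeholder: use the repo this folder was uploaded to

# trust_remote_code=True pulls the configuration_phi.py / modeling_phi.py files
# referenced by auto_map instead of the stock transformers Phi implementation.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # matches "torch_dtype": "float16" above
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))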
mergekit_moe_config.yml CHANGED
@@ -4,4 +4,4 @@ experts:
   - source_model: cognitivecomputations/dolphin-2_6-phi-2
     positive_prompts: [""]
   - source_model: lxuechen/phi-2-dpo
-    positive_prompts: [""]
+    positive_prompts: [""]
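For reference, the structure being diffed is the mergekit MoE config: a list of experts, each with a source_model and the positive_prompts that mergekit-moe uses to derive routing gates (left empty here). A small sketch, assuming the file is saved locally as mergekit_moe_config.yml and PyYAML is installed, just to show the parsed structure:

import yaml  # pip install pyyaml

with open("mergekit_moe_config.yml") as f:
    cfg = yaml.safe_load(f)

for i, expert in enumerate(cfg["experts"]):
    print(f"expert {i}: {expert['source_model']} positive_prompts={expert['positive_prompts']}")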
modeling_phi.py CHANGED
@@ -294,7 +294,7 @@ class MoE(nn.Module):
     def __init__(
         self,
         config: PretrainedConfig,
-        num_experts=4,
+        num_experts=2,
         num_experts_per_tok=2,
         num_shards=1,
         **kwargs,
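The only functional change in modeling_phi.py lowers the default expert count from four to two, so the MoE block's default matches "num_local_experts": 2 in config.json and the two source models in mergekit_moe_config.yml. Below is a minimal, self-contained sketch of the top-k routing such a block performs; it is a simplified stand-in, not the repo's actual implementation (the expert MLP and sizes are placeholders):

import torch
import torch.nn as nn
import torch.nn.functional as F


class TinyMoE(nn.Module):
    def __init__(self, hidden_size=2560, num_experts=2, num_experts_per_tok=2):
        super().__init__()
        self.num_experts_per_tok = num_experts_per_tok
        # One routing score per expert; the gate picks the top-k per token.
        self.gate = nn.Linear(hidden_size, num_experts, bias=False)
        self.experts = nn.ModuleList(
            nn.Sequential(
                nn.Linear(hidden_size, 4 * hidden_size),
                nn.GELU(),
                nn.Linear(4 * hidden_size, hidden_size),
            )
            for _ in range(num_experts)
        )

    def forward(self, x):
        # x: (num_tokens, hidden_size)
        scores = self.gate(x)  # (tokens, num_experts)
        weights, selected = torch.topk(scores, self.num_experts_per_tok, dim=-1)
        weights = F.softmax(weights, dim=-1)  # per-token mixing weights
        out = torch.zeros_like(x)
        # With num_experts == num_experts_per_tok == 2 (as in this repo), every token
        # visits both experts and the gate only decides how to blend their outputs.
        for k in range(self.num_experts_per_tok):
            for e, expert in enumerate(self.experts):
                mask = selected[:, k] == e
                if mask.any():
                    out[mask] += weights[mask, k].unsqueeze(-1) * expert(x[mask])
        return out


# Tiny smoke test with a small hidden size to keep it cheap.
x = torch.randn(4, 64)
print(TinyMoE(hidden_size=64)(x).shape)  # torch.Size([4, 64])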