Update the model type to make it compatible with mlx-lm's model mapping.

#14
Files changed (2)
  1. config.json +1 -1
  2. configuration_phi.py +1 -1
config.json CHANGED
@@ -16,7 +16,7 @@
16
  "img_processor": null,
17
  "initializer_range": 0.02,
18
  "layer_norm_epsilon": 1e-05,
19
- "model_type": "phi-msft",
20
  "n_embd": 2560,
21
  "n_head": 32,
22
  "n_head_kv": null,
 
16
  "img_processor": null,
17
  "initializer_range": 0.02,
18
  "layer_norm_epsilon": 1e-05,
19
+ "model_type": "phixtral",
20
  "n_embd": 2560,
21
  "n_head": 32,
22
  "n_head_kv": null,
configuration_phi.py CHANGED
@@ -10,7 +10,7 @@ from transformers import PretrainedConfig
10
  class PhiConfig(PretrainedConfig):
11
  """Phi configuration."""
12
 
13
- model_type = "phi-msft"
14
  attribute_map = {
15
  "max_position_embeddings": "n_positions",
16
  "hidden_size": "n_embd",
 
10
  class PhiConfig(PretrainedConfig):
11
  """Phi configuration."""
12
 
13
+ model_type = "phixtral"
14
  attribute_map = {
15
  "max_position_embeddings": "n_positions",
16
  "hidden_size": "n_embd",