gugarosa committed on
Commit
b3ebf08
1 Parent(s): 304b058

Upload 4 files

Files changed (3)
  1. README.md +1 -1
  2. config.json +1 -1
  3. configuration_phi.py +1 -1
README.md CHANGED
@@ -1,5 +1,4 @@
 ---
-inference: false
 license: other
 license_name: microsoft-research-license
 license_link: https://huggingface.co/microsoft/phi-1/resolve/main/Research%20License.docx
@@ -17,6 +16,7 @@ The language model Phi-1 is a Transformer with 1.3 billion parameters, specializ
 Given the nature of the training data, Phi-1 is best suited for prompts using the code format:
 
 ### Code Format:
+
 ```python
 def print_prime(n):
     """
config.json CHANGED
@@ -15,7 +15,7 @@
   "fused_dense": false,
   "initializer_range": 0.02,
   "layer_norm_epsilon": 1e-05,
-  "model_type": "phi",
+  "model_type": "phi-msft",
   "n_embd": 2048,
   "n_head": 32,
   "n_head_kv": null,
configuration_phi.py CHANGED
@@ -10,7 +10,7 @@ from transformers import PretrainedConfig
 class PhiConfig(PretrainedConfig):
     """Phi configuration."""
 
-    model_type = "phi"
+    model_type = "phi-msft"
     attribute_map = {
         "max_position_embeddings": "n_positions",
         "hidden_size": "n_embd",