dangvansam committed
Commit 67387c4
1 Parent(s): ec6d15d

Update configuration_stablelm_epoch.py

Files changed (1)
  1. configuration_stablelm_epoch.py +35 -105
configuration_stablelm_epoch.py CHANGED
@@ -1,5 +1,4 @@
-# coding=utf-8
-# Copyright 2024 Stability AI and The HuggingFace Inc. team. All rights reserved.
+# Copyright 2023 Stability and The HuggingFace Inc. team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,45 +11,32 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-""" StableLM model configuration """
-
-from transformers.configuration_utils import PretrainedConfig
+""" StableLM Epoch model configuration"""
+from transformers import PretrainedConfig
 from transformers.utils import logging
 
 
 logger = logging.get_logger(__name__)
 
-STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
-    "stabilityai/stablelm-3b-4e1t": "https://huggingface.co/stabilityai/stablelm-3b-4e1t/resolve/main/config.json",
-    # See all StableLM models at https://huggingface.co/models?filter=stablelm
-}
-
 
-class StableLmConfig(PretrainedConfig):
+class StableLMEpochConfig(PretrainedConfig):
     r"""
-    This is the configuration class to store the configuration of a [`~StableLmModel`].
-    It is used to instantiate an StableLM model according to the specified arguments, defining the model
-    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
-    the StableLM [stabilityai/stablelm-3b-4e1t](https://huggingface.co/stabilityai/stablelm-3b-4e1t) architecture.
-
-    Configuration objects inherit from [`PretrainedConfig`] and can be used
-    to control the model outputs. Read the documentation from [`PretrainedConfig`]
-    for more information.
-
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
 
     Args:
-        vocab_size (`int`, *optional*, defaults to 50304):
+        vocab_size (`int`, *optional*, defaults to 50_304):
             Vocabulary size of the StableLM model. Defines the number of different tokens that
-            can be represented by the `inputs_ids` passed when calling [`StableLmModel`].
+            can be represented by the `inputs_ids` passed when calling [`StableLMEpochModel`].
         intermediate_size (`int`, *optional*, defaults to 6912):
             Dimension of the MLP representations.
         hidden_size (`int`, *optional*, defaults to 2560):
-            Number of hidden layers in the Transformer decoder.
+            Dimension of the decoder layers and the pooler layer.
         num_hidden_layers (`int`, *optional*, defaults to 32):
             Number of hidden layers in the Transformer decoder.
         num_attention_heads (`int`, *optional*, defaults to 32):
             Number of attention heads for each attention layer in the Transformer encoder.
-        num_key_value_heads (`int`, *optional*, defaults to 32):
+        num_key_value_heads (`int`, *optional*):
             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
             `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
@@ -60,124 +46,68 @@ class StableLmConfig(PretrainedConfig):
             `num_attention_heads`.
         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
             The non-linear activation function (function or string).
-        max_position_embeddings (`int`, *optional*, defaults to 4096):
+        rope_pct (`float`, *optional*, defaults to 1.0):
+            Percentage of hidden dimensions to allocate to rotary embeddings.
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        max_position_embeddings (`int`, *optional*, defaults to 2048):
             The maximum sequence length that this model might ever be used with.
             Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
-        initializer_range (`float`, *optional*, defaults to 0.02):
+        initializer_range (`float`, *optional*, defaults to 1e-5):
             The standard deviation of the truncated_normal_initializer for initializing
             all weight matrices.
-        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+        norm_eps (`float`, *optional*, defaults to 1e-8):
             The epsilon used by the normalization layers.
         use_cache (`bool`, *optional*, defaults to `True`):
             Whether or not the model should return the last key/values attentions
             (not used by all models). Only relevant if `config.is_decoder=True`.
-        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
-            Whether the model's input and output word embeddings should be tied.
-        rope_theta (`float`, *optional*, defaults to `10000.0`):
-            The base period of the RoPE embeddings.
-        rope_scaling (`Dict`, *optional*):
-            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
-            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
-            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
-            `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
-            these scaling strategies behave:
-            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This
-            is an experimental feature, subject to breaking API changes in future versions.
-        use_qkv_bias (`bool`, *optional*, defaults to `False`):
+        use_qkv_bias (`bool`, *optional*, defaults to `True`):
             Whether or not the model should use bias for qkv layers.
-        hidden_dropout (`float`, *optional*, defaults to 0.0):
-            The dropout ratio after applying the MLP to the hidden states.
-        attention_dropout (`float`, *optional*, defaults to 0.0):
-            The dropout ratio for the attention probabilities.
-        partial_rotary_factor (`float`, *optional*, defaults to 0.25):
-            Percentage of the query and keys which will have rotary embedding.
-        bos_token_id (int, *optional*, defaults to 0):
-            The id of the `BOS` token in the vocabulary.
-        eos_token_id (int, *optional*, defaults to 0):
-            The id of the `EOS` token in the vocabulary.
-
-    Example:
-
-    ```python
-    >>> from transformers import StableLmModel, StableLmConfig
-
-    >>> # Initializing a StableLM stablelm-3b style configuration
-    >>> configuration = StableLmConfig()
-    ```"""
-
-    model_type = "stablelm"
+        tie_word_embeddings(`bool`, *optional*, defaults to `False`):
+            Whether to tie weight embeddings
+    """
+    model_type = "stablelm_epoch"
     keys_to_ignore_at_inference = ["past_key_values"]
 
     def __init__(
         self,
-        vocab_size=50304,
+        vocab_size=50_304,
         intermediate_size=6912,
         hidden_size=2560,
         num_hidden_layers=32,
         num_attention_heads=32,
         num_key_value_heads=32,
         hidden_act="silu",
+        rope_pct=0.25,
+        rope_theta=10_000,
         max_position_embeddings=4096,
         initializer_range=0.02,
-        layer_norm_eps=1.0e-5,
+        norm_eps=1.0e-5,
         use_cache=True,
-        tie_word_embeddings=False,
-        rope_theta=10_000,
-        rope_scaling=None,
-        use_qkv_bias=False,
-        hidden_dropout=0.0,
-        attention_dropout=0.0,
-        partial_rotary_factor=0.25,
+        use_qkv_bias=True,
         bos_token_id=0,
-        eos_token_id=0,
+        eos_token_id=2,
+        tie_word_embeddings=False,
         **kwargs,
     ):
         self.vocab_size = vocab_size
         self.max_position_embeddings = max_position_embeddings
-
-        self.hidden_size = hidden_size
         self.intermediate_size = intermediate_size
+        self.hidden_size = hidden_size
         self.num_hidden_layers = num_hidden_layers
         self.num_attention_heads = num_attention_heads
         self.num_key_value_heads = num_key_value_heads
         self.hidden_act = hidden_act
-
+        self.rope_pct = rope_pct
+        self.rope_theta = rope_theta
         self.initializer_range = initializer_range
-        self.layer_norm_eps = layer_norm_eps
+        self.norm_eps = norm_eps
         self.use_cache = use_cache
-        self.rope_theta = rope_theta
-        self.rope_scaling = rope_scaling
         self.use_qkv_bias = use_qkv_bias
-        self.hidden_dropout = hidden_dropout
-        self.attention_dropout = attention_dropout
-        self.partial_rotary_factor = partial_rotary_factor
-        self._rope_scaling_validation()
-
+        self.tie_word_embeddings = tie_word_embeddings
         super().__init__(
             bos_token_id=bos_token_id,
             eos_token_id=eos_token_id,
             tie_word_embeddings=tie_word_embeddings,
             **kwargs,
-        )
-
-    # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
-    def _rope_scaling_validation(self):
-        """
-        Validate the `rope_scaling` configuration.
-        """
-        if self.rope_scaling is None:
-            return
-
-        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
-            raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
-            )
-        rope_scaling_type = self.rope_scaling.get("type", None)
-        rope_scaling_factor = self.rope_scaling.get("factor", None)
-        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
-            raise ValueError(
-                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
-            )
-        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
-            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
+        )
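
For reference, a minimal sketch of how the reverted `StableLMEpochConfig` behaves once this file is in place. It assumes `configuration_stablelm_epoch.py` (the "+" side of the diff above) is importable from the working directory; nothing below is part of the commit itself, and the override values and save path are illustrative only.

```python
# Minimal sketch (assumption: configuration_stablelm_epoch.py from this commit
# is on the Python path). It only exercises the constructor defaults restored here.
from configuration_stablelm_epoch import StableLMEpochConfig

config = StableLMEpochConfig()  # all defaults from the new __init__

assert config.model_type == "stablelm_epoch"
assert config.vocab_size == 50_304
assert config.rope_pct == 0.25        # replaces partial_rotary_factor from the removed StableLmConfig
assert config.norm_eps == 1e-5        # replaces layer_norm_eps
assert config.use_qkv_bias is True    # default was False in the removed version
assert config.eos_token_id == 2       # was 0 in the removed version

# Overrides behave like any PretrainedConfig subclass; save_pretrained writes a
# config.json carrying model_type "stablelm_epoch".
small = StableLMEpochConfig(num_hidden_layers=16, max_position_embeddings=2048)
small.save_pretrained("./stablelm-epoch-config")
```

The asserted values mirror the "+" lines of the `__init__` defaults in the diff; field names not shown there are not touched by this commit.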