dangvansam committed
Commit 3457f13 (1 parent: f00c2bf)

Create configuration_stablelm.py

Files changed (1):
  1. configuration_stablelm.py (+183, -0)
configuration_stablelm.py ADDED
@@ -0,0 +1,183 @@
+ # coding=utf-8
+ # Copyright 2024 Stability AI and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ StableLM model configuration """
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+     "stabilityai/stablelm-3b-4e1t": "https://huggingface.co/stabilityai/stablelm-3b-4e1t/resolve/main/config.json",
+     # See all StableLM models at https://huggingface.co/models?filter=stablelm
+ }
+
+
+ class StableLmConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`~StableLmModel`].
+     It is used to instantiate a StableLM model according to the specified arguments, defining the model
+     architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
+     the StableLM [stabilityai/stablelm-3b-4e1t](https://huggingface.co/stabilityai/stablelm-3b-4e1t) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used
+     to control the model outputs. Read the documentation from [`PretrainedConfig`]
+     for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 50304):
+             Vocabulary size of the StableLM model. Defines the number of different tokens that
+             can be represented by the `input_ids` passed when calling [`StableLmModel`].
+         intermediate_size (`int`, *optional*, defaults to 6912):
+             Dimension of the MLP representations.
+         hidden_size (`int`, *optional*, defaults to 2560):
+             Dimension of the hidden representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*, defaults to 32):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
+             constructed by meanpooling all the original heads within that group. For more details check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string).
+         max_position_embeddings (`int`, *optional*, defaults to 4096):
+             The maximum sequence length that this model might ever be used with.
+             Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing
+             all weight matrices.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+             The epsilon used by the normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions
+             (not used by all models). Only relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether the model's input and output word embeddings should be tied.
+         rope_theta (`float`, *optional*, defaults to `10000.0`):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+             strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+             `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+             `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+             these scaling strategies behave:
+             https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This
+             is an experimental feature, subject to breaking API changes in future versions.
+         use_qkv_bias (`bool`, *optional*, defaults to `False`):
+             Whether or not the model should use bias for qkv layers.
+         hidden_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio after applying the MLP to the hidden states.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         partial_rotary_factor (`float`, *optional*, defaults to 0.25):
+             Percentage of the query and keys which will have rotary embedding.
+         bos_token_id (`int`, *optional*, defaults to 0):
+             The id of the `BOS` token in the vocabulary.
+         eos_token_id (`int`, *optional*, defaults to 0):
+             The id of the `EOS` token in the vocabulary.
+
+     Example:
+
+     ```python
+     >>> from transformers import StableLmModel, StableLmConfig
+
+     >>> # Initializing a StableLM stablelm-3b style configuration
+     >>> configuration = StableLmConfig()
+     ```"""
+
+     model_type = "stablelm"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=50304,
+         intermediate_size=6912,
+         hidden_size=2560,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=32,
+         hidden_act="silu",
+         max_position_embeddings=4096,
+         initializer_range=0.02,
+         layer_norm_eps=1.0e-5,
+         use_cache=True,
+         tie_word_embeddings=False,
+         rope_theta=10_000,
+         rope_scaling=None,
+         use_qkv_bias=False,
+         hidden_dropout=0.0,
+         attention_dropout=0.0,
+         partial_rotary_factor=0.25,
+         bos_token_id=0,
+         eos_token_id=0,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+
+         self.initializer_range = initializer_range
+         self.layer_norm_eps = layer_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self.use_qkv_bias = use_qkv_bias
+         self.hidden_dropout = hidden_dropout
+         self.attention_dropout = attention_dropout
+         self.partial_rotary_factor = partial_rotary_factor
+         self._rope_scaling_validation()
+
+         super().__init__(
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                 f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_factor = self.rope_scaling.get("factor", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
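
For reference, a minimal usage sketch of the class added in this commit. It is not part of the commit itself, and it assumes `configuration_stablelm.py` is importable from your working directory; the defaults and validation behavior shown below come directly from the file above.

```python
# Minimal usage sketch for StableLmConfig (assumes configuration_stablelm.py is on the Python path).
from configuration_stablelm import StableLmConfig

# Defaults mirror stabilityai/stablelm-3b-4e1t.
config = StableLmConfig()
print(config.hidden_size, config.num_hidden_layers, config.partial_rotary_factor)  # 2560 32 0.25

# Grouped Query Attention: fewer key/value heads than attention heads.
gqa_config = StableLmConfig(num_attention_heads=32, num_key_value_heads=8)

# RoPE scaling expects {"type": "linear" | "dynamic", "factor": float > 1.0}.
scaled_config = StableLmConfig(rope_scaling={"type": "dynamic", "factor": 2.0})

# Anything else is rejected by _rope_scaling_validation at construction time.
try:
    StableLmConfig(rope_scaling={"type": "linear", "factor": 0.5})
except ValueError as err:
    print(err)  # `rope_scaling`'s factor field must be a float > 1, got 0.5
```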