Crystalcareai committed
Commit d66d4ca
1 Parent(s): 9d9c0e7

Update configuration_gemmoe.py

Files changed (1)
  1. configuration_gemmoe.py +119 -110
configuration_gemmoe.py CHANGED
@@ -1,103 +1,132 @@
  from transformers.configuration_utils import PretrainedConfig
  from transformers.utils import logging

  logger = logging.get_logger(__name__)

- GEMMOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {}

  class GemmoeConfig(PretrainedConfig):
  r"""
- This is the configuration class to store the configuration of a [`DeepseekModel`]. It is used to instantiate an DeepSeek
  model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
- defaults will yield a similar configuration to that of the DeepSeek-7B.

  Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
  documentation from [`PretrainedConfig`] for more information.

-
  Args:
- vocab_size (`int`, *optional*, defaults to 102400):
- Vocabulary size of the Deep model. Defines the number of different tokens that can be represented by the
- `inputs_ids` passed when calling [`DeepseekModel`]
- hidden_size (`int`, *optional*, defaults to 4096):
  Dimension of the hidden representations.
- intermediate_size (`int`, *optional*, defaults to 11008):
  Dimension of the MLP representations.
- moe_intermediate_size (`int`, *optional*, defaults to 1407):
- Dimension of the MoE representations.
- num_hidden_layers (`int`, *optional*, defaults to 32):
  Number of hidden layers in the Transformer decoder.
- num_attention_heads (`int`, *optional*, defaults to 32):
  Number of attention heads for each attention layer in the Transformer decoder.
- n_shared_experts (`int`, *optional*, defaults to None):
- Number of shared experts, None means dense model.
- n_routed_experts (`int`, *optional*, defaults to None):
- Number of routed experts, None means dense model.
- num_experts_per_tok (`int`, *optional*, defaults to None):
- Number of selected experts, None means dense model.
- moe_layer_freq (`int`, *optional*, defaults to 1):
- The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
- first_k_dense_replace (`int`, *optional*, defaults to 0):
- Number of dense layers in shallow layers(embed->dense->dense->...->dense->moe->moe...->lm_head).
- \--k dense layers--/
- norm_topk_prob (`bool`, *optional*, defaults to False):
- Whether to normalize the weights of the routed experts.
- scoring_func (`str`, *optional*, defaults to 'softmax'):
- Method of computing expert weights.
- aux_loss_alpha (`float`, *optional*, defaults to 0.001):
- Auxiliary loss weight coefficient.
- seq_aux = (`bool`, *optional*, defaults to True):
- Whether to compute the auxiliary loss for each individual sample.
- num_key_value_heads (`int`, *optional*):
  This is the number of key_value heads that should be used to implement Grouped Query Attention. If
  `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
  `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
  converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
  by meanpooling all the original heads within that group. For more details checkout [this
- paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
  `num_attention_heads`.
- hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
- The non-linear activation function (function or string) in the decoder.
- max_position_embeddings (`int`, *optional*, defaults to 2048):
  The maximum sequence length that this model might ever be used with.
  initializer_range (`float`, *optional*, defaults to 0.02):
  The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
- rms_norm_eps (`float`, *optional*, defaults to 1e-06):
  The epsilon used by the rms normalization layers.
  use_cache (`bool`, *optional*, defaults to `True`):
  Whether or not the model should return the last key/values attentions (not used by all models). Only
  relevant if `config.is_decoder=True`.
- pad_token_id (`int`, *optional*):
  Padding token id.
- bos_token_id (`int`, *optional*, defaults to 1):
  Beginning of stream token id.
- eos_token_id (`int`, *optional*, defaults to 2):
- End of stream token id.
- pretraining_tp (`int`, *optional*, defaults to 1):
- Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
- document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
- necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
- issue](https://github.com/pytorch/pytorch/issues/76232).
- tie_word_embeddings (`bool`, *optional*, defaults to `False`):
  Whether to tie weight embeddings
  rope_theta (`float`, *optional*, defaults to 10000.0):
  The base period of the RoPE embeddings.
- rope_scaling (`Dict`, *optional*):
- Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
- strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
- `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
- `max_position_embeddings` to the expected new maximum.
  attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
  Whether to use a bias in the query, key, value and output projection layers during self-attention.
  attention_dropout (`float`, *optional*, defaults to 0.0):
  The dropout ratio for the attention probabilities.

  ```python
- >>> from transformers import DeepseekModel, DeepseekConfig

- >>> # Initializing a Deepseek deepseek-7b style configuration
- >>> configuration = DeepseekConfig()

- >>> # Accessing the model configuration
  >>> configuration = model.config
  ```"""
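The `num_key_value_heads` entry above (unchanged by this commit) describes converting a multi-head checkpoint to a GQA checkpoint by mean-pooling the key/value heads within each group. A minimal sketch of that pooling, not part of the diff; the tensor layout and helper name are illustrative assumptions:

```python
import torch

def meanpool_kv_heads(kv_weight: torch.Tensor, num_key_value_heads: int) -> torch.Tensor:
    """Collapse per-head K/V projection weights of shape
    (num_attention_heads, head_dim, hidden_size) into num_key_value_heads groups
    by mean-pooling the heads inside each group."""
    num_attention_heads, head_dim, hidden_size = kv_weight.shape
    group_size = num_attention_heads // num_key_value_heads
    grouped = kv_weight.reshape(num_key_value_heads, group_size, head_dim, hidden_size)
    return grouped.mean(dim=1)

# e.g. pool 16 attention heads down to 4 key/value heads
pooled = meanpool_kv_heads(torch.randn(16, 256, 3072), num_key_value_heads=4)
print(pooled.shape)  # torch.Size([4, 256, 3072])
```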
 
@@ -109,92 +138,72 @@ class GemmoeConfig(PretrainedConfig):
  vocab_size=256000,
  hidden_size=3072,
  intermediate_size=24576,
- moe_intermediate_size = 1407,
  num_hidden_layers=28,
  num_attention_heads=16,
  num_key_value_heads=16,
- n_shared_experts = None,
- n_routed_experts = None,
- num_experts_per_tok = None,
- moe_layer_freq = 1,
- first_k_dense_replace = 0,
- norm_topk_prob = False,
- scoring_func = 'softmax',
- aux_loss_alpha = 0.001,
- seq_aux = True,
  hidden_act="gelu",
- max_position_embeddings=2048,
  initializer_range=0.02,
  rms_norm_eps=1e-6,
  use_cache=True,
  pad_token_id=0,
- bos_token_id=2,
  eos_token_id=1,
- pretraining_tp=1,
  tie_word_embeddings=True,
  rope_theta=10000.0,
- rope_scaling=None,
  attention_bias=False,
  attention_dropout=0.0,
  **kwargs,
  ):
  self.vocab_size = vocab_size
- self.max_position_embeddings = max_position_embeddings
  self.hidden_size = hidden_size
  self.intermediate_size = intermediate_size
- self.moe_intermediate_size = moe_intermediate_size
  self.num_hidden_layers = num_hidden_layers
  self.num_attention_heads = num_attention_heads
  self.n_shared_experts = n_shared_experts
  self.n_routed_experts = n_routed_experts
- self.num_experts_per_tok = num_experts_per_tok
  self.moe_layer_freq = moe_layer_freq
  self.first_k_dense_replace = first_k_dense_replace
  self.norm_topk_prob = norm_topk_prob
  self.scoring_func = scoring_func
  self.aux_loss_alpha = aux_loss_alpha
  self.seq_aux = seq_aux
- # for backward compatibility
- if num_key_value_heads is None:
- num_key_value_heads = num_attention_heads
-
- self.num_key_value_heads = num_key_value_heads
- self.hidden_act = hidden_act
- self.initializer_range = initializer_range
- self.rms_norm_eps = rms_norm_eps
  self.pretraining_tp = pretraining_tp
- self.use_cache = use_cache
- self.rope_theta = rope_theta
  self.rope_scaling = rope_scaling
- self._rope_scaling_validation()
- self.attention_bias = attention_bias
- self.attention_dropout = attention_dropout
-
  super().__init__(
  pad_token_id=pad_token_id,
  bos_token_id=bos_token_id,
  eos_token_id=eos_token_id,
  tie_word_embeddings=tie_word_embeddings,
  **kwargs,
- )
-
- def _rope_scaling_validation(self):
- """
- Validate the `rope_scaling` configuration.
- """
- if self.rope_scaling is None:
- return
-
- if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
- raise ValueError(
- "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
- f"got {self.rope_scaling}"
- )
- rope_scaling_type = self.rope_scaling.get("type", None)
- rope_scaling_factor = self.rope_scaling.get("factor", None)
- if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
- raise ValueError(
- f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
- )
- if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
- raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
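For reference, the `_rope_scaling_validation` helper removed above accepted only a two-field dictionary; a value that would have passed the old check (illustrative, not part of the diff):

```python
# The removed validator required exactly two fields: a "type" of "linear" or
# "dynamic", and a float "factor" strictly greater than 1.0.
rope_scaling = {"type": "linear", "factor": 2.0}

# Anything else, e.g. {"type": "ntk", "factor": 2.0} or {"type": "linear", "factor": 1},
# raised a ValueError in the old __init__. The new version drops this check entirely.
```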
 
+ # coding=utf-8
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Gemmoe model configuration"""
+
  from transformers.configuration_utils import PretrainedConfig
  from transformers.utils import logging

+
  logger = logging.get_logger(__name__)

+ GEMMOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "Crystalcareai/GemMoE-Beta-1": "https://huggingface.co/Crystalcareai/GemMoE-Beta-1/resolve/main/config.json",
+ }
+
+
  class GemmoeConfig(PretrainedConfig):
  r"""
+ This is the configuration class to store the configuration of a [`GemmoeModel`]. It is used to instantiate a Gemmoe
  model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the Gemmoe-7B.
+
+ e.g. [mhenrichsen/gemmoe-7b](https://huggingface.co/mhenrichsen/gemmoe-7b)

  Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
  documentation from [`PretrainedConfig`] for more information.

  Args:
+ vocab_size (`int`, *optional*, defaults to 256000):
+ Vocabulary size of the Gemmoe model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`GemmoeModel`]
+ hidden_size (`int`, *optional*, defaults to 3072):
  Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 24576):
  Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 28):
  Number of hidden layers in the Transformer decoder.
+ num_attention_heads (`int`, *optional*, defaults to 16):
  Number of attention heads for each attention layer in the Transformer decoder.
+ num_key_value_heads (`int`, *optional*, defaults to 16):
  This is the number of key_value heads that should be used to implement Grouped Query Attention. If
  `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
  `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
  converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
  by meanpooling all the original heads within that group. For more details checkout [this
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
  `num_attention_heads`.
+ head_dim (`int`, *optional*, defaults to 256):
+ The attention head dimension.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 8192):
  The maximum sequence length that this model might ever be used with.
  initializer_range (`float`, *optional*, defaults to 0.02):
  The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ rms_norm_eps (`float`, *optional*, defaults to 1e-6):
  The epsilon used by the rms normalization layers.
  use_cache (`bool`, *optional*, defaults to `True`):
  Whether or not the model should return the last key/values attentions (not used by all models). Only
  relevant if `config.is_decoder=True`.
+ pad_token_id (`int`, *optional*, defaults to 0):
  Padding token id.
+ eos_token_id (`int`, *optional*, defaults to 1):
+ End of stream token id.
+ bos_token_id (`int`, *optional*, defaults to 2):
  Beginning of stream token id.
+ tie_word_embeddings (`bool`, *optional*, defaults to `True`):
  Whether to tie weight embeddings
  rope_theta (`float`, *optional*, defaults to 10000.0):
  The base period of the RoPE embeddings.
  attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
  Whether to use a bias in the query, key, value and output projection layers during self-attention.
  attention_dropout (`float`, *optional*, defaults to 0.0):
  The dropout ratio for the attention probabilities.
+ num_experts_per_tok (`int`, *optional*, defaults to 2):
+ The number of experts used in the sparse mixture of experts layer.
+ num_local_experts (`int`, *optional*, defaults to 8):
+ The number of local experts used in the sparse mixture of experts layer.
+ router_aux_loss_coef (`float`, *optional*, defaults to 0.01):
+ The coefficient for the auxiliary loss of the router.
+ output_router_logits (`bool`, *optional*, defaults to `False`):
+ Whether or not to output the logits of the routers. They are useful for computing the router loss, and
+ should not be returned during inference.
+ n_shared_experts (`int`, *optional*, defaults to `None`):
+ The number of shared experts used in the sparse mixture of experts layer. If set to `None`, no shared
+ experts are used.
+ n_routed_experts (`int`, *optional*, defaults to `None`):
+ The number of routed experts used in the sparse mixture of experts layer. If set to `None`, all experts are
+ routed experts.
+ moe_layer_freq (`int`, *optional*, defaults to 1):
+ The frequency of MoE layers in the model. A value of 1 means MoE layers are used in every layer, a value of
+ 2 means MoE layers are used in every other layer, and so on.
+ first_k_dense_replace (`int`, *optional*, defaults to 0):
+ The number of initial dense layers to replace with MoE layers. If set to 0 (default), no dense layers are
+ replaced.
+ norm_topk_prob (`bool`, *optional*, defaults to `False`):
+ Whether to normalize the top-k probabilities of the router during training.
+ scoring_func (`str`, *optional*, defaults to `'softmax'`):
+ The scoring function used by the router. Can be 'softmax' or 'remap'.
+ aux_loss_alpha (`float`, *optional*, defaults to 0.001):
+ The weight of the auxiliary loss used for training the router.
+ seq_aux (`bool`, *optional*, defaults to `True`):
+ Whether to use sequence-level auxiliary loss for training the router.
+ pretraining_tp (`int`, *optional*, defaults to 1):
+ The tensor parallelism used for pretraining.
+ rope_scaling (`float`, *optional*, defaults to `None`):
+ The scaling factor for the Rotary Position Embedding (RoPE). If set to `None`, no scaling is applied.

  ```python
+ >>> from transformers import GemmoeModel, GemmoeConfig

+ >>> # Initializing a Gemmoe gemmoe-7b style configuration
+ >>> configuration = GemmoeConfig()

+ >>> # Initializing a model from the gemmoe-7b style configuration
+ >>> model = GemmoeModel(configuration)
+
+ >>> # Accessing the model configuration
  >>> configuration = model.config
  ```"""
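The router-related arguments documented above (`num_local_experts`, `num_experts_per_tok`, `scoring_func='softmax'`, `norm_topk_prob`) describe a standard top-k softmax router. A minimal sketch of what those settings control, illustrative only and not taken from this repository's modeling code:

```python
import torch
import torch.nn.functional as F

def route_tokens(router_logits: torch.Tensor, num_experts_per_tok: int = 2,
                 norm_topk_prob: bool = False):
    """Toy top-k routing; router_logits has shape (num_tokens, num_local_experts)."""
    scores = F.softmax(router_logits, dim=-1)               # scoring_func='softmax'
    topk_weights, topk_experts = torch.topk(scores, k=num_experts_per_tok, dim=-1)
    if norm_topk_prob:                                       # renormalize the selected weights
        topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
    return topk_weights, topk_experts

# 5 tokens routed over num_local_experts=8, picking 2 experts per token
weights, experts = route_tokens(torch.randn(5, 8), num_experts_per_tok=2)
print(weights.shape, experts.shape)  # torch.Size([5, 2]) torch.Size([5, 2])
```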
 
 
  vocab_size=256000,
  hidden_size=3072,
  intermediate_size=24576,
  num_hidden_layers=28,
  num_attention_heads=16,
  num_key_value_heads=16,
+ head_dim=256,
  hidden_act="gelu",
+ max_position_embeddings=8192,
  initializer_range=0.02,
  rms_norm_eps=1e-6,
  use_cache=True,
  pad_token_id=0,
  eos_token_id=1,
+ bos_token_id=2,
  tie_word_embeddings=True,
  rope_theta=10000.0,
  attention_bias=False,
  attention_dropout=0.0,
+ num_experts_per_tok=2,
+ num_local_experts=8,
+ n_shared_experts=None,
+ n_routed_experts=None,
+ moe_layer_freq=1,
+ first_k_dense_replace=0,
+ norm_topk_prob=False,
+ scoring_func='softmax',
+ aux_loss_alpha=0.001,
+ seq_aux=True,
+ pretraining_tp=1,
+ rope_scaling=None,
+ router_aux_loss_coef=0.02,
+ output_router_logits=False,
  **kwargs,
  ):
  self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
  self.hidden_size = hidden_size
  self.intermediate_size = intermediate_size
  self.num_hidden_layers = num_hidden_layers
  self.num_attention_heads = num_attention_heads
+ self.head_dim = head_dim
+ self.num_key_value_heads = num_key_value_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.rms_norm_eps = rms_norm_eps
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.attention_bias = attention_bias
+ self.attention_dropout = attention_dropout
+ self.num_experts_per_tok = num_experts_per_tok
+ self.num_local_experts = num_local_experts
  self.n_shared_experts = n_shared_experts
  self.n_routed_experts = n_routed_experts
  self.moe_layer_freq = moe_layer_freq
  self.first_k_dense_replace = first_k_dense_replace
  self.norm_topk_prob = norm_topk_prob
  self.scoring_func = scoring_func
  self.aux_loss_alpha = aux_loss_alpha
  self.seq_aux = seq_aux
  self.pretraining_tp = pretraining_tp
  self.rope_scaling = rope_scaling
+ self.router_aux_loss_coef = router_aux_loss_coef
+ self.output_router_logits = output_router_logits
+
  super().__init__(
  pad_token_id=pad_token_id,
  bos_token_id=bos_token_id,
  eos_token_id=eos_token_id,
  tie_word_embeddings=tie_word_embeddings,
  **kwargs,
+ )
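Putting the new signature to work; a minimal usage sketch assuming this file is importable as `configuration_gemmoe` (when the Hub repository ships it as custom code, `AutoConfig.from_pretrained("Crystalcareai/GemMoE-Beta-1", trust_remote_code=True)` would load the same class):

```python
from configuration_gemmoe import GemmoeConfig  # assumes the file is on the import path

# Defaults defined above: 28 decoder layers, 8 local experts, 2 experts per token.
config = GemmoeConfig()
print(config.num_local_experts, config.num_experts_per_tok)  # 8 2

# Override only what you need; every other argument keeps its default.
config = GemmoeConfig(num_experts_per_tok=2, output_router_logits=True)
print(config.output_router_logits)  # True
```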