Crystalcareai committed
Commit
7a9d077
1 Parent(s): ec7309a

Delete configuration_gemmoe.py

Files changed (1)
  1. configuration_gemmoe.py +0 -165
configuration_gemmoe.py DELETED
@@ -1,165 +0,0 @@
- # coding=utf-8
- # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ Gemmoe model configuration"""
-
- from transformers.configuration_utils import PretrainedConfig
- from transformers.utils import logging
-
-
- logger = logging.get_logger(__name__)
-
- GEMMOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
-     "Crystalcareai/GemMoE-Beta-1": "https://huggingface.co/Crystalcareai/GemMoE-Beta-1/resolve/main/config.json",
- }
-
-
- class GemmoeConfig(PretrainedConfig):
-     r"""
-     This is the configuration class to store the configuration of a [`GemmoeModel`]. It is used to instantiate a Gemmoe
-     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
-     defaults will yield a similar configuration to that of the Gemmoe-7B.
-
-     e.g. [mhenrichsen/gemmoe-7b](https://huggingface.co/mhenrichsen/gemmoe-7b)
-
-     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-     documentation from [`PretrainedConfig`] for more information.
-
-     Args:
-         vocab_size (`int`, *optional*, defaults to 256000):
-             Vocabulary size of the Gemmoe model. Defines the number of different tokens that can be represented by the
-             `input_ids` passed when calling [`GemmoeModel`].
-         hidden_size (`int`, *optional*, defaults to 3072):
-             Dimension of the hidden representations.
-         intermediate_size (`int`, *optional*, defaults to 24576):
-             Dimension of the MLP representations.
-         num_hidden_layers (`int`, *optional*, defaults to 28):
-             Number of hidden layers in the Transformer decoder.
-         num_attention_heads (`int`, *optional*, defaults to 16):
-             Number of attention heads for each attention layer in the Transformer decoder.
-         num_key_value_heads (`int`, *optional*, defaults to 16):
-             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
-             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
-             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
-             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
-             by mean-pooling all the original heads within that group. For more details check out [this
-             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
-             `num_attention_heads`.
-         head_dim (`int`, *optional*, defaults to 256):
-             The attention head dimension.
-         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
-             The non-linear activation function (function or string) in the decoder.
-         max_position_embeddings (`int`, *optional*, defaults to 8192):
-             The maximum sequence length that this model might ever be used with.
-         initializer_range (`float`, *optional*, defaults to 0.02):
-             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-         rms_norm_eps (`float`, *optional*, defaults to 1e-6):
-             The epsilon used by the rms normalization layers.
-         use_cache (`bool`, *optional*, defaults to `True`):
-             Whether or not the model should return the last key/values attentions (not used by all models). Only
-             relevant if `config.is_decoder=True`.
-         pad_token_id (`int`, *optional*, defaults to 0):
-             Padding token id.
-         eos_token_id (`int`, *optional*, defaults to 1):
-             End of stream token id.
-         bos_token_id (`int`, *optional*, defaults to 2):
-             Beginning of stream token id.
-         tie_word_embeddings (`bool`, *optional*, defaults to `True`):
-             Whether to tie weight embeddings
-         rope_theta (`float`, *optional*, defaults to 10000.0):
-             The base period of the RoPE embeddings.
-         attention_bias (`bool`, *optional*, defaults to `False`):
-             Whether to use a bias in the query, key, value and output projection layers during self-attention.
-         attention_dropout (`float`, *optional*, defaults to 0.0):
-             The dropout ratio for the attention probabilities.
-         num_experts_per_tok (`int`, *optional*, defaults to 2):
-             The number of experts used in the sparse mixture of experts layer.
-         num_local_experts (`int`, *optional*, defaults to 8):
-             The number of local experts used in the sparse mixture of experts layer.
-         router_aux_loss_coef (`float`, *optional*, defaults to 0.02):
-             The coefficient for the auxiliary loss of the router.
-         output_router_logits (`bool`, *optional*, defaults to `False`):
-             Whether or not to output the logits of the routers. They are useful for computing the router loss, and
-             should not be returned during inference.
-
-     ```python
-     >>> from transformers import GemmoeModel, GemmoeConfig
-
-     >>> # Initializing a Gemmoe gemmoe-7b style configuration
-     >>> configuration = GemmoeConfig()
-
-     >>> # Initializing a model from the gemmoe-7b style configuration
-     >>> model = GemmoeModel(configuration)
-
-     >>> # Accessing the model configuration
-     >>> configuration = model.config
-     ```"""
-
-     model_type = "gemmoe"
-     keys_to_ignore_at_inference = ["past_key_values"]
-
-     def __init__(
-         self,
-         vocab_size=256000,
-         hidden_size=3072,
-         intermediate_size=24576,
-         num_hidden_layers=28,
-         num_attention_heads=16,
-         num_key_value_heads=16,
-         head_dim=256,
-         hidden_act="gelu",
-         max_position_embeddings=8192,
-         initializer_range=0.02,
-         rms_norm_eps=1e-6,
-         use_cache=True,
-         pad_token_id=0,
-         eos_token_id=1,
-         bos_token_id=2,
-         tie_word_embeddings=True,
-         rope_theta=10000.0,
-         attention_bias=False,
-         attention_dropout=0.0,
-         num_experts_per_tok=2,
-         num_local_experts=8,
-         router_aux_loss_coef=0.02,
-         output_router_logits=False,
-         **kwargs,
-     ):
-         self.vocab_size = vocab_size
-         self.max_position_embeddings = max_position_embeddings
-         self.hidden_size = hidden_size
-         self.intermediate_size = intermediate_size
-         self.num_hidden_layers = num_hidden_layers
-         self.num_attention_heads = num_attention_heads
-         self.head_dim = head_dim
-         self.num_key_value_heads = num_key_value_heads
-         self.hidden_act = hidden_act
-         self.initializer_range = initializer_range
-         self.rms_norm_eps = rms_norm_eps
-         self.use_cache = use_cache
-         self.rope_theta = rope_theta
-         self.attention_bias = attention_bias
-         self.attention_dropout = attention_dropout
-         self.num_experts_per_tok = num_experts_per_tok
-         self.num_local_experts = num_local_experts
-         self.router_aux_loss_coef = router_aux_loss_coef
-         self.output_router_logits = output_router_logits
-
-         super().__init__(
-             pad_token_id=pad_token_id,
-             bos_token_id=bos_token_id,
-             eos_token_id=eos_token_id,
-             tie_word_embeddings=tie_word_embeddings,
-             **kwargs,
-         )
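The `num_key_value_heads` docstring in the deleted file describes converting a multi-head checkpoint to a GQA checkpoint by mean-pooling the original key/value heads within each group. The sketch below is not code from this repository: it is a minimal illustration of that idea in plain PyTorch, and the `pool_kv_heads` helper, the assumed `(num_heads * head_dim, hidden_size)` projection layout, and the choice of 4 KV heads are all assumptions made for the example.

```python
import torch


def pool_kv_heads(weight: torch.Tensor, num_heads: int, num_kv_heads: int, head_dim: int) -> torch.Tensor:
    """Mean-pool a key or value projection from num_heads down to num_kv_heads groups.

    Assumes `weight` has shape (num_heads * head_dim, hidden_size), as in a
    typical k_proj / v_proj linear layer.
    """
    assert num_heads % num_kv_heads == 0, "heads must divide evenly into KV groups"
    hidden_size = weight.shape[1]
    # Split the rows into one block per attention head.
    per_head = weight.view(num_heads, head_dim, hidden_size)
    # Group consecutive heads and average each group into a single KV head.
    group_size = num_heads // num_kv_heads
    grouped = per_head.view(num_kv_heads, group_size, head_dim, hidden_size)
    pooled = grouped.mean(dim=1)
    return pooled.reshape(num_kv_heads * head_dim, hidden_size)


# Example with the defaults above: 16 attention heads, head_dim 256, hidden_size 3072.
k_proj = torch.randn(16 * 256, 3072)
k_proj_gqa = pool_kv_heads(k_proj, num_heads=16, num_kv_heads=4, head_dim=256)
print(k_proj_gqa.shape)  # torch.Size([1024, 3072]) -> 4 KV heads of dim 256
```

Note that with the config defaults shown above (`num_key_value_heads=16` equal to `num_attention_heads=16`), the model uses plain multi-head attention; a conversion like this would only apply if `num_key_value_heads` were reduced below the number of attention heads.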