LeroyDyer committed on
Commit
441ef99
1 Parent(s): d8847af

Upload 2 files

Files changed (2)
  1. configuration_mistral.py +297 -0
  2. modeling_mistral.py +0 -0
configuration_mistral.py ADDED
@@ -0,0 +1,297 @@
+# coding=utf-8
+# Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Mistral model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+    "mistralai/Mistral-7B-v0.1": "https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/config.json",
+    "mistralai/Mistral-7B-Instruct-v0.1": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/config.json",
+}
+
+
+class MistralThoughtsConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`MistralModel`]. It is used to instantiate a
+    Mistral model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of the Mistral-7B-v0.1 or Mistral-7B-Instruct-v0.1.
+
+    [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)
+    [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 32000):
+            Vocabulary size of the Mistral model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`MistralModel`].
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 14336):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        num_key_value_heads (`int`, *optional*, defaults to 8):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details, check out [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
+            The maximum sequence length that this model might ever be used with. Mistral's sliding window attention
+            allows sequences of up to 4096*32 tokens.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        pad_token_id (`int`, *optional*):
+            The id of the padding token.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            The id of the "beginning-of-sequence" token.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            The id of the "end-of-sequence" token.
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether the model's input and output word embeddings should be tied.
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        sliding_window (`int`, *optional*, defaults to 4096):
+            Sliding window attention window size. If not specified, will default to `4096`.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+
+    ```python
+    >>> from transformers import MistralModel, MistralConfig
+
+    >>> # Initializing a Mistral 7B style configuration
+    >>> configuration = MistralConfig()
+
+    >>> # Initializing a model from the Mistral 7B style configuration
+    >>> model = MistralModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "mistral"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=32000,
+        hidden_size=4096,
+        intermediate_size=14336,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=8,
+        hidden_act="silu",
+        max_position_embeddings=4096 * 32,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        pad_token_id=None,
+        bos_token_id=1,
+        eos_token_id=2,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        sliding_window=4096,
+        attention_dropout=0.0,
+        max_thoughts=16,
+        merged_talk_heads=True,
+        merged_lm_and_talk_heads=False,
+        merged_lm_and_think_heads=True,
+        use_concat_talk_head=True,
+        use_shallow_think=True,
+        use_shallow_talk=False,
+        use_complex_think_head=False,
+        use_complex_talk_head=True,
+        use_weighted_talk_head=True,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.sliding_window = sliding_window
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.attention_dropout = attention_dropout
+        self.max_thoughts = max_thoughts
+        self.merged_talk_heads = merged_talk_heads
+        self.merged_lm_and_talk_heads = merged_lm_and_talk_heads
+        self.merged_lm_and_think_heads = merged_lm_and_think_heads
+        self.use_concat_talk_head = use_concat_talk_head
+        self.use_shallow_think = use_shallow_think
+        self.use_shallow_talk = use_shallow_talk
+        self.use_complex_think_head = use_complex_think_head
+        self.use_complex_talk_head = use_complex_talk_head
+        self.use_weighted_talk_head = use_weighted_talk_head
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+
+class MistralConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`MistralModel`]. It is used to instantiate a
+    Mistral model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of the Mistral-7B-v0.1 or Mistral-7B-Instruct-v0.1.
+
+    [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)
+    [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 32000):
+            Vocabulary size of the Mistral model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`MistralModel`].
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 14336):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        num_key_value_heads (`int`, *optional*, defaults to 8):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details, check out [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
+            The maximum sequence length that this model might ever be used with. Mistral's sliding window attention
+            allows sequences of up to 4096*32 tokens.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        pad_token_id (`int`, *optional*):
+            The id of the padding token.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            The id of the "beginning-of-sequence" token.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            The id of the "end-of-sequence" token.
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether the model's input and output word embeddings should be tied.
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        sliding_window (`int`, *optional*, defaults to 4096):
+            Sliding window attention window size. If not specified, will default to `4096`.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+
+    ```python
+    >>> from transformers import MistralModel, MistralConfig
+
+    >>> # Initializing a Mistral 7B style configuration
+    >>> configuration = MistralConfig()
+
+    >>> # Initializing a model from the Mistral 7B style configuration
+    >>> model = MistralModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "mistral"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=32000,
+        hidden_size=4096,
+        intermediate_size=14336,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=8,
+        hidden_act="silu",
+        max_position_embeddings=4096 * 32,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        pad_token_id=None,
+        bos_token_id=1,
+        eos_token_id=2,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        sliding_window=4096,
+        attention_dropout=0.0,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.sliding_window = sliding_window
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.attention_dropout = attention_dropout
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
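
The file above defines two configuration classes: the stock `MistralConfig`, and a `MistralThoughtsConfig` that adds thought/talk head options (`max_thoughts`, `merged_talk_heads`, `use_weighted_talk_head`, and related flags) which the accompanying `modeling_mistral.py` is expected to consume. Below is a minimal usage sketch, not part of the commit, assuming the file sits on the Python path and that its relative imports (`from ...configuration_utils import ...`) are switched to absolute `transformers` imports, as a standalone model repository would need.

```python
# Minimal sketch: instantiate both configs defined in configuration_mistral.py.
# Assumes the file's relative imports were changed to
# "from transformers.configuration_utils import PretrainedConfig" and
# "from transformers.utils import logging".
from configuration_mistral import MistralConfig, MistralThoughtsConfig

# Plain Mistral-7B-style configuration with the stock defaults.
base_config = MistralConfig()

# Thought-augmented configuration: the extra keyword arguments control the
# auxiliary think/talk heads used by the model in modeling_mistral.py.
thoughts_config = MistralThoughtsConfig(
    max_thoughts=16,
    merged_talk_heads=True,
    use_weighted_talk_head=True,
)

# With the defaults, 32 query heads share 8 key/value heads, i.e. grouped-query
# attention with groups of 4, and each head has dimension 4096 / 32 = 128.
group_size = base_config.num_attention_heads // base_config.num_key_value_heads
head_dim = base_config.hidden_size // base_config.num_attention_heads
print(group_size, head_dim)              # 4 128
print(thoughts_config.max_thoughts)      # 16
```

With the defaults, both classes describe the same 7B backbone; only the extra head flags differ.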
modeling_mistral.py ADDED
The diff for this file is too large to render. See raw diff
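
Because both files live in the model repository rather than inside the transformers package, the usual way to load them is through the Auto classes with remote code enabled. The sketch below describes the assumed wiring only: it presumes the repository's `config.json` declares an `auto_map` pointing at `configuration_mistral.py` and `modeling_mistral.py`, and `"username/model-repo"` is a placeholder for the actual repository id.

```python
from transformers import AutoConfig, AutoModelForCausalLM

# Placeholder repository id; substitute the repo this commit was pushed to.
repo_id = "username/model-repo"

# trust_remote_code=True lets transformers import the uploaded
# configuration_mistral.py / modeling_mistral.py instead of its built-in
# Mistral classes, provided config.json maps the Auto classes to them.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)
```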