jonathanjordan21 committed
Commit
a3b7cad
1 Parent(s): 917caa7

Rename configuration_qwen.py to configuration_qwen2.py

Files changed (2)
  1. configuration_qwen.py +0 -71
  2. configuration_qwen2.py +185 -0
configuration_qwen.py DELETED
@@ -1,71 +0,0 @@
- # Copyright (c) Alibaba Cloud.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- from transformers import PretrainedConfig
-
-
- class QWenConfig(PretrainedConfig):
-     model_type = "qwen"
-     keys_to_ignore_at_inference = ["past_key_values"]
-
-     def __init__(
-         self,
-         vocab_size=151936,
-         hidden_size=4096,
-         num_hidden_layers=32,
-         num_attention_heads=32,
-         emb_dropout_prob=0.0,
-         attn_dropout_prob=0.0,
-         layer_norm_epsilon=1e-6,
-         initializer_range=0.02,
-         max_position_embeddings=8192,
-         scale_attn_weights=True,
-         use_cache=True,
-         bf16=False,
-         fp16=False,
-         fp32=False,
-         kv_channels=128,
-         rotary_pct=1.0,
-         rotary_emb_base=10000,
-         use_dynamic_ntk=True,
-         use_logn_attn=True,
-         use_flash_attn="auto",
-         intermediate_size=22016,
-         no_bias=True,
-         tie_word_embeddings=False,
-         use_cache_quantization=False,
-         use_cache_kernel=False,
-         softmax_in_fp32=False,
-         **kwargs,
-     ):
-         self.vocab_size = vocab_size
-         self.hidden_size = hidden_size
-         self.intermediate_size = intermediate_size
-         self.num_hidden_layers = num_hidden_layers
-         self.num_attention_heads = num_attention_heads
-         self.emb_dropout_prob = emb_dropout_prob
-         self.attn_dropout_prob = attn_dropout_prob
-         self.layer_norm_epsilon = layer_norm_epsilon
-         self.initializer_range = initializer_range
-         self.scale_attn_weights = scale_attn_weights
-         self.use_cache = use_cache
-         self.max_position_embeddings = max_position_embeddings
-         self.bf16 = bf16
-         self.fp16 = fp16
-         self.fp32 = fp32
-         self.kv_channels = kv_channels
-         self.rotary_pct = rotary_pct
-         self.rotary_emb_base = rotary_emb_base
-         self.use_dynamic_ntk = use_dynamic_ntk
-         self.use_logn_attn = use_logn_attn
-         self.use_flash_attn = use_flash_attn
-         self.no_bias = no_bias
-         self.use_cache_quantization = use_cache_quantization
-         self.use_cache_kernel = use_cache_kernel
-         self.softmax_in_fp32 = softmax_in_fp32
-         super().__init__(
-             tie_word_embeddings=tie_word_embeddings,
-             **kwargs
-         )
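For anyone carrying settings over from the removed `QWenConfig` to the `Qwen2Config` added below, a minimal sketch of one possible field mapping. The correspondences (for example `layer_norm_epsilon` → `rms_norm_eps`, `rotary_emb_base` → `rope_theta`, `attn_dropout_prob` → `attention_dropout`) are assumptions read off the two files in this diff, and `legacy_to_qwen2_kwargs` is a hypothetical helper, not part of either file:

```python
# Hypothetical helper: translate legacy QWenConfig kwargs into Qwen2Config kwargs.
# Assumed correspondences: layer_norm_epsilon -> rms_norm_eps,
# rotary_emb_base -> rope_theta, attn_dropout_prob -> attention_dropout.
# Fields with no obvious Qwen2 counterpart (bf16/fp16/fp32, kv_channels,
# use_dynamic_ntk, use_logn_attn, use_flash_attn, no_bias, ...) are dropped.
def legacy_to_qwen2_kwargs(legacy: dict) -> dict:
    return {
        "vocab_size": legacy.get("vocab_size", 151936),
        "hidden_size": legacy.get("hidden_size", 4096),
        "intermediate_size": legacy.get("intermediate_size", 22016),
        "num_hidden_layers": legacy.get("num_hidden_layers", 32),
        "num_attention_heads": legacy.get("num_attention_heads", 32),
        "max_position_embeddings": legacy.get("max_position_embeddings", 8192),
        "initializer_range": legacy.get("initializer_range", 0.02),
        "rms_norm_eps": legacy.get("layer_norm_epsilon", 1e-6),
        "rope_theta": float(legacy.get("rotary_emb_base", 10000)),
        "attention_dropout": legacy.get("attn_dropout_prob", 0.0),
        "use_cache": legacy.get("use_cache", True),
        "tie_word_embeddings": legacy.get("tie_word_embeddings", False),
    }
```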
configuration_qwen2.py ADDED
@@ -0,0 +1,185 @@
+ # coding=utf-8
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Qwen2 model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.modeling_rope_utils import rope_config_validation
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class Qwen2Config(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`Qwen2Model`]. It is used to instantiate a
+     Qwen2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of
+     Qwen2-7B-beta [Qwen/Qwen2-7B-beta](https://huggingface.co/Qwen/Qwen2-7B-beta).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 151936):
+             Vocabulary size of the Qwen2 model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`Qwen2Model`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 22016):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*, defaults to 32):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+             `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details, check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 32768):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether the model's input and output word embeddings should be tied.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
+             and you expect the model to work on a longer `max_position_embeddings`, we recommend you update this value
+             accordingly.
+             Expected contents:
+                 `rope_type` (`str`):
+                     The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
+                     'llama3'], with 'default' being the original RoPE implementation.
+                 `factor` (`float`, *optional*):
+                     Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
+                     most scaling types, a `factor` of x will enable the model to handle sequences of length x *
+                     original maximum pre-trained length.
+                 `original_max_position_embeddings` (`int`, *optional*):
+                     Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
+                     pretraining.
+                 `attention_factor` (`float`, *optional*):
+                     Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
+                     computation. If unspecified, it defaults to the value recommended by the implementation, using the
+                     `factor` field to infer the suggested value.
+                 `beta_fast` (`float`, *optional*):
+                     Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
+                     ramp function. If unspecified, it defaults to 32.
+                 `beta_slow` (`float`, *optional*):
+                     Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
+                     ramp function. If unspecified, it defaults to 1.
+                 `short_factor` (`List[float]`, *optional*):
+                     Only used with 'longrope'. The scaling factor to be applied to short contexts (<
+                     `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
+                     size divided by the number of attention heads divided by 2.
+                 `long_factor` (`List[float]`, *optional*):
+                     Only used with 'longrope'. The scaling factor to be applied to long contexts (>
+                     `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
+                     size divided by the number of attention heads divided by 2.
+                 `low_freq_factor` (`float`, *optional*):
+                     Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE.
+                 `high_freq_factor` (`float`, *optional*):
+                     Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE.
+         use_sliding_window (`bool`, *optional*, defaults to `False`):
+             Whether to use sliding window attention.
+         sliding_window (`int`, *optional*, defaults to 4096):
+             Sliding window attention (SWA) window size. If not specified, will default to `4096`.
+         max_window_layers (`int`, *optional*, defaults to 28):
+             The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+
+     ```python
+     >>> from transformers import Qwen2Model, Qwen2Config
+
+     >>> # Initializing a Qwen2 style configuration
+     >>> configuration = Qwen2Config()
+
+     >>> # Initializing a model from the Qwen2-7B style configuration
+     >>> model = Qwen2Model(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "qwen2"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=151936,
+         hidden_size=4096,
+         intermediate_size=22016,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=32,
+         hidden_act="silu",
+         max_position_embeddings=32768,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         use_sliding_window=False,
+         sliding_window=4096,
+         max_window_layers=28,
+         attention_dropout=0.0,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.use_sliding_window = use_sliding_window
+         self.sliding_window = sliding_window if use_sliding_window else None
+         self.max_window_layers = max_window_layers
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self.attention_dropout = attention_dropout
+         # Validate the correctness of rotary position embeddings parameters
+         # BC: if there is a 'type' field, copy it to 'rope_type'.
+         if self.rope_scaling is not None and "type" in self.rope_scaling:
+             self.rope_scaling["rope_type"] = self.rope_scaling["type"]
+         rope_config_validation(self)
+
+         super().__init__(
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
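A minimal usage sketch of the new configuration, assuming `configuration_qwen2.py` sits on the import path (for example, in the working directory of a script); the values below are illustrative choices, not the defaults of any particular checkpoint:

```python
from configuration_qwen2 import Qwen2Config

# Grouped-query attention: 32 query heads sharing 8 key/value heads.
# Sliding-window attention enabled, so `sliding_window` is kept rather than reset to None.
config = Qwen2Config(
    num_attention_heads=32,
    num_key_value_heads=8,
    use_sliding_window=True,
    sliding_window=4096,
    max_window_layers=28,
    # Legacy "type" key: __init__ copies it into "rope_type" before validation runs.
    rope_scaling={"type": "linear", "factor": 2.0},
)

print(config.sliding_window)             # 4096 (would be None with use_sliding_window=False)
print(config.rope_scaling["rope_type"])  # "linear"
```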