huu-ontocord committed on
Commit
74d319b
1 Parent(s): e558c61

Create configuration_phi3_v.py

Browse files
Files changed (1) hide show
  1. configuration_phi3_v.py +210 -0
configuration_phi3_v.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """ Phi-3-V model configuration"""
17
+
18
+
19
+ from transformers.configuration_utils import PretrainedConfig
20
+ from transformers.utils import logging
21
+
22
+
23
# Module-level logger, namespaced to this module per the transformers convention.
logger = logging.get_logger(__name__)

# Maps the released checkpoint name to the URL of its hosted config.json.
# NOTE(review): presumably consumed by transformers' (legacy) pretrained
# archive-map machinery — confirm against the transformers version in use.
PHI3V_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/Phi-3-vision-128k-instruct": "https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/resolve/main/config.json",
}
28
+
29
+
30
class Phi3VConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Phi3VModel`]. It is used to instantiate a Phi-3
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the
    [microsoft/Phi-3-vision-128k-instruct](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32064):
            Vocabulary size of the Phi-3-V model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Phi3VModel`].
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        resid_pdrop (`float`, *optional*, defaults to 0.0):
            Dropout probability for mlp outputs.
        embd_pdrop (`int`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio after computing the attention scores.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
        original_max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model was trained with. This is used to determine the size of the
            original RoPE embeddings when using long scaling.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value used for the RMSNorm.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`dict`, *optional*):
            The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
            contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be either `su` or
            `yarn` and the `short_factor` and `long_factor` must be lists of numbers with the same length as the
            hidden size divided by the number of attention heads divided by 2.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 32000):
            The id of the "end-of-sequence" token.
        pad_token_id (`int`, *optional*, defaults to 32000):
            The id of the padding token.
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If `None`, no sliding window is applied.
        embd_layer (`str`, *optional*, defaults to `"default"`):
            The embedding layer to use. Can be either `"default"` or `"image"`. "default" uses the standard embedding
            for text.

    Example:

    ```python
    >>> from transformers import Phi3VModel, Phi3VConfig

    >>> # Initializing a Phi-3-V style configuration
    >>> configuration = Phi3VConfig.from_pretrained("microsoft/Phi-3-vision-128k-instruct")

    >>> # Initializing a model from the configuration
    >>> model = Phi3VModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "phi3_v"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32064,
        hidden_size=3072,
        intermediate_size=8192,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attention_dropout=0.0,
        hidden_act="silu",
        max_position_embeddings=4096,
        original_max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        bos_token_id=1,
        eos_token_id=32000,
        pad_token_id=32000,
        sliding_window=None,
        embd_layer: str = "default",
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # GQA convention: an unspecified key/value head count means full
        # multi-head attention (one KV head per attention head).
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.original_max_position_embeddings = original_max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        # Fail fast on a malformed `rope_scaling` dict rather than at model
        # build time; validation needs hidden_size/num_attention_heads set above.
        self._rope_scaling_validation()
        self.sliding_window = sliding_window
        self.embd_layer = embd_layer

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.

        Raises:
            ValueError: if `rope_scaling` is not a dict with exactly the keys
                `type`, `short_factor` and `long_factor`; if `type` is not
                `"su"` or `"yarn"`; or if either factor is not a list of
                numbers of length `hidden_size // num_attention_heads // 2`.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
            raise ValueError(
                "`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        if rope_scaling_type is None or rope_scaling_type not in ["su", "yarn"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['su', 'yarn'], got {rope_scaling_type}")
        # Each factor list carries one scale per rotary dimension pair,
        # i.e. head_dim / 2 entries.
        expected_length = self.hidden_size // self.num_attention_heads // 2
        # short_factor and long_factor share identical type/length constraints;
        # validate them with one loop instead of two copy-pasted branches.
        for field in ("short_factor", "long_factor"):
            factor = self.rope_scaling.get(field, None)
            if not (
                isinstance(factor, list)
                and all(isinstance(x, (int, float)) for x in factor)
            ):
                raise ValueError(
                    f"`rope_scaling`'s {field} field must be a list of numbers, got {factor}"
                )
            if not len(factor) == expected_length:
                raise ValueError(
                    f"`rope_scaling`'s {field} field must have length {expected_length}, got {len(factor)}"
                )