Update configuration_intern_vit.py

configuration_intern_vit.py  (CHANGED, +119 -117)
@@ -1,117 +1,119 @@
-# --------------------------------------------------------
-# InternVL
-# Copyright (c) 2023 OpenGVLab
-# Licensed under The MIT License [see LICENSE for details]
-# --------------------------------------------------------
-import os
-from typing import Union
-
-from transformers.configuration_utils import PretrainedConfig
-from transformers.utils import logging
-
-logger = logging.get_logger(__name__)
-
-
-class InternVisionConfig(PretrainedConfig):
-    r"""
-    This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
-    instantiate a vision encoder according to the specified arguments, defining the model architecture.
-
-    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-    documentation from [`PretrainedConfig`] for more information.
-
-    Args:
-        num_channels (`int`, *optional*, defaults to 3):
-            Number of color channels in the input images (e.g., 3 for RGB).
-        patch_size (`int`, *optional*, defaults to 14):
-            The size (resolution) of each patch.
-        image_size (`int`, *optional*, defaults to 224):
-            The size (resolution) of each image.
-        qkv_bias (`bool`, *optional*, defaults to `False`):
-            Whether to add a bias to the queries and values in the self-attention layers.
-        hidden_size (`int`, *optional*, defaults to 3200):
-            Dimensionality of the encoder layers and the pooler layer.
-        num_attention_heads (`int`, *optional*, defaults to 25):
-            Number of attention heads for each attention layer in the Transformer encoder.
-        intermediate_size (`int`, *optional*, defaults to 12800):
-            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
-        qk_normalization (`bool`, *optional*, defaults to `True`):
-            Whether to normalize the queries and keys in the self-attention layers.
-        num_hidden_layers (`int`, *optional*, defaults to 48):
-            Number of hidden layers in the Transformer encoder.
-        use_flash_attn (`bool`, *optional*, defaults to `True`):
-            Whether to use flash attention mechanism.
-        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
-            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
-            `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported.
-        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
-            The epsilon used by the layer normalization layers.
-        dropout (`float`, *optional*, defaults to 0.0):
-            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
-        drop_path_rate (`float`, *optional*, defaults to 0.0):
-            Dropout rate for stochastic depth.
-        attention_dropout (`float`, *optional*, defaults to 0.0):
-            The dropout ratio for the attention probabilities.
-        initializer_range (`float`, *optional*, defaults to 0.02):
-            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-        initializer_factor (`float`, *optional*, defaults to 0.1):
-            A factor for layer scale.
-    """
-
-    model_type = 'intern_vit_6b'
-
-    def __init__(
-            self,
-            num_channels=3,
-            patch_size=14,
-            image_size=224,
-            qkv_bias=False,
-            hidden_size=3200,
-            num_attention_heads=25,
-            intermediate_size=12800,
-            qk_normalization=True,
-            num_hidden_layers=48,
-            use_flash_attn=True,
-            hidden_act='gelu',
-
-
-
-
-
-
-
-
-
-
-
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+# --------------------------------------------------------
+# InternVL
+# Copyright (c) 2023 OpenGVLab
+# Licensed under The MIT License [see LICENSE for details]
+# --------------------------------------------------------
+import os
+from typing import Union
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+
+class InternVisionConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
+    instantiate a vision encoder according to the specified arguments, defining the model architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        num_channels (`int`, *optional*, defaults to 3):
+            Number of color channels in the input images (e.g., 3 for RGB).
+        patch_size (`int`, *optional*, defaults to 14):
+            The size (resolution) of each patch.
+        image_size (`int`, *optional*, defaults to 224):
+            The size (resolution) of each image.
+        qkv_bias (`bool`, *optional*, defaults to `False`):
+            Whether to add a bias to the queries and values in the self-attention layers.
+        hidden_size (`int`, *optional*, defaults to 3200):
+            Dimensionality of the encoder layers and the pooler layer.
+        num_attention_heads (`int`, *optional*, defaults to 25):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        intermediate_size (`int`, *optional*, defaults to 12800):
+            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+        qk_normalization (`bool`, *optional*, defaults to `True`):
+            Whether to normalize the queries and keys in the self-attention layers.
+        num_hidden_layers (`int`, *optional*, defaults to 48):
+            Number of hidden layers in the Transformer encoder.
+        use_flash_attn (`bool`, *optional*, defaults to `True`):
+            Whether to use flash attention mechanism.
+        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
+            The epsilon used by the layer normalization layers.
+        dropout (`float`, *optional*, defaults to 0.0):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        drop_path_rate (`float`, *optional*, defaults to 0.0):
+            Dropout rate for stochastic depth.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        initializer_factor (`float`, *optional*, defaults to 0.1):
+            A factor for layer scale.
+    """
+
+    model_type = 'intern_vit_6b'
+
+    def __init__(
+            self,
+            num_channels=3,
+            patch_size=14,
+            image_size=224,
+            qkv_bias=False,
+            hidden_size=3200,
+            num_attention_heads=25,
+            intermediate_size=12800,
+            qk_normalization=True,
+            num_hidden_layers=48,
+            use_flash_attn=True,
+            hidden_act='gelu',
+            norm_type='rms_norm',
+            layer_norm_eps=1e-6,
+            dropout=0.0,
+            drop_path_rate=0.0,
+            attention_dropout=0.0,
+            initializer_range=0.02,
+            initializer_factor=0.1,
+            **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.dropout = dropout
+        self.drop_path_rate = drop_path_rate
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.num_channels = num_channels
+        self.patch_size = patch_size
+        self.image_size = image_size
+        self.initializer_range = initializer_range
+        self.initializer_factor = initializer_factor
+        self.attention_dropout = attention_dropout
+        self.layer_norm_eps = layer_norm_eps
+        self.hidden_act = hidden_act
+        self.norm_type = norm_type
+        self.qkv_bias = qkv_bias
+        self.qk_normalization = qk_normalization
+        self.use_flash_attn = use_flash_attn
+
+    @classmethod
+    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
+        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+        if 'vision_config' in config_dict:
+            config_dict = config_dict['vision_config']
+
+        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
+            logger.warning(
+                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
+            )
+
+        return cls.from_dict(config_dict, **kwargs)