transformers/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/configuration_{{cookiecutter.lowercase_modelname}}.py
# coding=utf-8
# Copyright 2022 {{cookiecutter.authors}} and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" {{cookiecutter.modelname}} model configuration """

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)
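
# Maps the canonical checkpoint identifier to the URL of its hosted config.json.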
{{cookiecutter.uppercase_modelname}}_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "{{cookiecutter.checkpoint_identifier}}": "https://huggingface.co/{{cookiecutter.checkpoint_identifier}}/resolve/main/config.json",
    # See all {{cookiecutter.modelname}} models at https://huggingface.co/models?filter={{cookiecutter.lowercase_modelname}}
}


class {{cookiecutter.camelcase_modelname}}Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`~{{cookiecutter.camelcase_modelname}}Model`].
    It is used to instantiate an {{cookiecutter.modelname}} model according to the specified arguments, defining the
    model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
    the {{cookiecutter.modelname}} [{{cookiecutter.checkpoint_identifier}}](https://huggingface.co/{{cookiecutter.checkpoint_identifier}}) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
    {% if cookiecutter.is_encoder_decoder_model == "False" -%}
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the {{cookiecutter.modelname}} model. Defines the number of different tokens that can
            be represented by the `input_ids` passed when calling [`~{{cookiecutter.camelcase_modelname}}Model`] or
            [`~TF{{cookiecutter.camelcase_modelname}}Model`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling
            [`~{{cookiecutter.camelcase_modelname}}Model`] or [`~TF{{cookiecutter.camelcase_modelname}}Model`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
    {% else -%}
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the {{cookiecutter.modelname}} model. Defines the number of different tokens that can
            be represented by the `input_ids` passed when calling [`~{{cookiecutter.camelcase_modelname}}Model`] or
            [`~TF{{cookiecutter.camelcase_modelname}}Model`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimension of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimension of the "intermediate" (often named feed-forward) layer in the decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimension of the "intermediate" (often named feed-forward) layer in the encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the classifier.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
    {% endif -%}

    Example:

    ```python
    >>> from transformers import {{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}Config

    >>> # Initializing a {{cookiecutter.modelname}} {{cookiecutter.checkpoint_identifier}} style configuration
    >>> configuration = {{cookiecutter.camelcase_modelname}}Config()

    >>> # Initializing a model from the {{cookiecutter.checkpoint_identifier}} style configuration
    >>> model = {{cookiecutter.camelcase_modelname}}Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """
    model_type = "{{cookiecutter.lowercase_modelname}}"
    {% if cookiecutter.is_encoder_decoder_model == "False" -%}
    {% else -%}
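    # Output keys ignored by default when looking at the model's dictionary outputs at inference
    # (`past_key_values` is cache state, not a prediction).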
    keys_to_ignore_at_inference = ["past_key_values"]
    {% endif -%}
    {% if cookiecutter.is_encoder_decoder_model == "False" %}
    {%- else %}
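    # Let common attribute names resolve to this model's equivalents, e.g. `config.hidden_size`
    # returns `config.d_model`.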
    attribute_map = {
        "num_attention_heads": "encoder_attention_heads",
        "hidden_size": "d_model"
    }
    {%- endif %}

    def __init__(
        self,
        {% if cookiecutter.is_encoder_decoder_model == "False" -%}
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        {% else -%}
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        classifier_dropout=0.0,
        scale_embedding=False,
        {% endif -%}
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        {% if cookiecutter.is_encoder_decoder_model == "False" -%}
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        {% else -%}
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
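        # Expose `num_hidden_layers` for code that reads the generic attribute; the encoder depth is used.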
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        {% endif -%}
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            {% if cookiecutter.is_encoder_decoder_model == "False" -%}
            {% else -%}
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            {% endif -%}
            **kwargs
        )
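
# A minimal usage sketch (illustrative only, not required by the template): once rendered, the generated
# config behaves like any `PretrainedConfig` and round-trips through `save_pretrained`/`from_pretrained`.
# The "./my-model" directory below is a hypothetical example path.
#
#     config = {{cookiecutter.camelcase_modelname}}Config()
#     config.save_pretrained("./my-model")  # writes ./my-model/config.json
#     config = {{cookiecutter.camelcase_modelname}}Config.from_pretrained("./my-model")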