Source code for transformers.models.bart.configuration_bart

# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BART model configuration """
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

    "facebook/bart-large": "",
    # See all BART models at

class BartConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a :class:`~transformers.BartModel`. It is used to
    instantiate a BART model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the BART `facebook/bart-large`
    architecture.

    Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
    outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.

    Args:
        vocab_size (:obj:`int`, `optional`, defaults to 50265):
            Vocabulary size of the BART model. Defines the number of different tokens that can be represented by the
            :obj:`inputs_ids` passed when calling :class:`~transformers.BartModel` or
            :class:`~transformers.TFBartModel`.
        d_model (:obj:`int`, `optional`, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (:obj:`int`, `optional`, defaults to 12):
            Number of encoder layers.
        decoder_layers (:obj:`int`, `optional`, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (:obj:`int`, `optional`, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (:obj:`int`, `optional`, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
        encoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
        activation_function (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string,
            :obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
        dropout (:obj:`float`, `optional`, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (:obj:`float`, `optional`, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (:obj:`float`, `optional`, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        classifier_dropout (:obj:`float`, `optional`, defaults to 0.0):
            The dropout ratio for the classifier.
        max_position_embeddings (:obj:`int`, `optional`, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (:obj:`float`, `optional`, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (:obj:`float`, `optional`, defaults to 0.0):
            The LayerDrop probability for the encoder. See the LayerDrop paper for more details.
        decoder_layerdrop (:obj:`float`, `optional`, defaults to 0.0):
            The LayerDrop probability for the decoder. See the LayerDrop paper for more details.
        gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
            If :obj:`True`, use gradient checkpointing to save memory at the expense of a slower backward pass.
        scale_embedding (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Scale embeddings by dividing by sqrt(d_model).
        use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        num_labels (:obj:`int`, `optional`, defaults to 3):
            The number of labels to use in :class:`~transformers.BartForSequenceClassification`.
        forced_eos_token_id (:obj:`int`, `optional`, defaults to 2):
            The id of the token to force as the last generated token when :obj:`max_length` is reached. Usually set to
            :obj:`eos_token_id`.

    Example::

        >>> from transformers import BartModel, BartConfig

        >>> # Initializing a BART facebook/bart-large style configuration
        >>> configuration = BartConfig()

        >>> # Initializing a model from the facebook/bart-large style configuration
        >>> model = BartModel(configuration)

        >>> # Accessing the model configuration
        >>> configuration = model.config
    """
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        gradient_checkpointing=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs
    ):
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.gradient_checkpointing = gradient_checkpointing
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
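
# Illustrative usage sketch (not part of the original module): building a smaller
# BART configuration by overriding a few of the defaults documented above, and
# reading the derived `num_attention_heads` / `hidden_size` properties. All
# argument and attribute names appear in `BartConfig` above; the values are
# arbitrary examples.
#
# >>> from transformers import BartConfig
#
# >>> small_config = BartConfig(
# ...     d_model=512,
# ...     encoder_layers=6,
# ...     decoder_layers=6,
# ...     encoder_attention_heads=8,
# ...     decoder_attention_heads=8,
# ...     encoder_ffn_dim=2048,
# ...     decoder_ffn_dim=2048,
# ... )
# >>> small_config.hidden_size  # alias for d_model
# 512
# >>> small_config.num_attention_heads  # alias for encoder_attention_heads
# 8
#
# Passing the legacy `force_bos_token_to_be_generated=True` kwarg sets
# `forced_bos_token_id` to `bos_token_id` and emits the deprecation warning shown
# in `__init__` above.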