from transformers import PretrainedConfig


class MT5Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MT5Model`] or a [`TFMT5Model`]. It is used to
    instantiate a mT5 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a reduced-size variant of the mT5
    [google/mt5-small](https://huggingface.co/google/mt5-small) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Arguments:
        encoder_vocab_size (`int`, *optional*, defaults to 250112):
            Vocabulary size of the encoder. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`MT5Model`] or [`TFMT5Model`].
        decoder_vocab_size (`int`, *optional*, defaults to 250112):
            Vocabulary size of the decoder. Defines the number of different tokens that can be represented by the
            `decoder_input_ids`.
        shared_embedding (`bool`, *optional*, defaults to `False`):
            Whether the encoder and decoder share a single embedding matrix.
        d_model (`int`, *optional*, defaults to 256):
            Size of the encoder layers and the pooler layer.
        d_kv (`int`, *optional*, defaults to 64):
            Size of the key, query, value projections per attention head. It is typically expected that `d_kv` equals
            `d_model // num_heads`, but in the mt5-small architecture `d_kv` is not equal to `d_model // num_heads`.
            The `inner_dim` of the projection layer is defined as `num_heads * d_kv`.
        d_ff (`int`, *optional*, defaults to 512):
            Size of the intermediate feed forward layer in each `T5Block`.
        num_layers (`int`, *optional*, defaults to 4):
            Number of hidden layers in the Transformer encoder.
        num_decoder_layers (`int`, *optional*):
            Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
        num_heads (`int`, *optional*, defaults to 3):
            Number of attention heads for each attention layer in the Transformer encoder.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            The number of buckets to use for each attention layer.
        relative_attention_max_distance (`int`, *optional*, defaults to 128):
            The maximum distance of the longer sequences for the bucket separation.
        dropout_rate (`float`, *optional*, defaults to 0.1):
            The ratio for all dropout layers.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the classifier.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`):
            Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
""" model_type = "mt5" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, encoder_vocab_size=250112, decoder_vocab_size=250112, shared_embedding=False, d_model=256, d_kv=64, d_ff=512, num_layers=4, num_decoder_layers=None, num_heads=3, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="gated-gelu", is_encoder_decoder=True, use_cache=True, tokenizer_class="ChemTokenizers.SMILES_IUPAC_FAST.FastTokenizer", tie_word_embeddings=False, pad_token_id=0, eos_token_id=1, decoder_start_token_id=2, classifier_dropout=0.0, **kwargs, ): super().__init__( is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, ) self.encoder_vocab_size = encoder_vocab_size self.decoder_vocab_size = decoder_vocab_size self.shared_embedding = shared_embedding self.d_model = d_model self.d_kv = d_kv self.d_ff = d_ff self.num_layers = num_layers self.num_decoder_layers = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry self.num_heads = num_heads self.relative_attention_num_buckets = relative_attention_num_buckets self.relative_attention_max_distance = relative_attention_max_distance self.dropout_rate = dropout_rate self.classifier_dropout = classifier_dropout self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor self.feed_forward_proj = feed_forward_proj self.use_cache = use_cache act_info = self.feed_forward_proj.split("-") self.dense_act_fn = act_info[-1] self.is_gated_act = act_info[0] == "gated" if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2: raise ValueError( f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. " "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "'gated-gelu' or 'relu'" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": self.dense_act_fn = "gelu_new" @property def hidden_size(self): return self.d_model @property def num_attention_heads(self): return self.num_heads @property def num_hidden_layers(self): return self.num_layers